From 1973f3ba3a7c92b10ca3e65829a0dc1b18375409 Mon Sep 17 00:00:00 2001
From: Richard Zou
Date: Tue, 30 Jul 2019 17:08:56 -0400
Subject: [PATCH 01/12] Documentation for the v1.2.0 release

This commit:
- Archives v1.1.0 docs (moves stable -> 1.1.0)
- Sets up 1.2.0 redirects to stable.

---
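Note (git am ignores text between the --- marker and the diffstat): every
hunk in this patch is a 100%-similarity rename, which is why the summary
below reports 369 files changed with 0 insertions and 0 deletions. As a
minimal sketch, assuming the archive was produced with plain git mv (the
actual commands are not recorded in the patch):

    # Re-point the old Markdown redirect stubs at the new version number.
    mkdir -p docs/1.2.0
    git mv docs/1.1.0/*.md docs/1.2.0/
    # Archive the current stable HTML build under the outgoing version.
    rmdir docs/1.1.0 && git mv docs/stable docs/1.1.0
    # A fresh v1.2.0 build would then be committed to docs/stable separately.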
 docs/{stable => 1.1.0}/.buildinfo | 0
 docs/{stable => 1.1.0}/__config__.html | 0
 docs/{stable => 1.1.0}/_images/CELU.png | Bin
 docs/{stable => 1.1.0}/_images/ELU.png | Bin
 docs/{stable => 1.1.0}/_images/Hardshrink.png | Bin
 docs/{stable => 1.1.0}/_images/Hardtanh.png | Bin
 docs/{stable => 1.1.0}/_images/LeakyReLU.png | Bin
 docs/{stable => 1.1.0}/_images/LogSigmoid.png | Bin
 docs/{stable => 1.1.0}/_images/PReLU.png | Bin
 docs/{stable => 1.1.0}/_images/ReLU.png | Bin
 docs/{stable => 1.1.0}/_images/ReLU6.png | Bin
 docs/{stable => 1.1.0}/_images/SELU.png | Bin
 docs/{stable => 1.1.0}/_images/Sigmoid.png | Bin
 docs/{stable => 1.1.0}/_images/Softplus.png | Bin
 docs/{stable => 1.1.0}/_images/Softshrink.png | Bin
 docs/{stable => 1.1.0}/_images/Softsign.png | Bin
 docs/{stable => 1.1.0}/_images/Tanh.png | Bin
 docs/{stable => 1.1.0}/_images/Tanhshrink.png | Bin
 docs/{stable => 1.1.0}/_modules/index.html | 0
 docs/{stable => 1.1.0}/_modules/torch.html | 0
 .../_modules/torch/__config__.html | 0
 .../_modules/torch/_tensor_str.html | 0
 docs/{stable => 1.1.0}/_modules/torch/_utils.html | 0
 docs/{stable => 1.1.0}/_modules/torch/autograd.html | 0
 .../_modules/torch/autograd/anomaly_mode.html | 0
 .../_modules/torch/autograd/function.html | 0
 .../_modules/torch/autograd/grad_mode.html | 0
 .../_modules/torch/autograd/gradcheck.html | 0
 .../_modules/torch/autograd/profiler.html | 0
 docs/{stable => 1.1.0}/_modules/torch/cuda.html | 0
 .../{stable => 1.1.0}/_modules/torch/cuda/comm.html | 0
 .../{stable => 1.1.0}/_modules/torch/cuda/nvtx.html | 0
 .../_modules/torch/cuda/random.html | 0
 .../_modules/torch/cuda/streams.html | 0
 .../_modules/torch/distributed.html | 0
 .../_modules/torch/distributed/deprecated.html | 0
 .../torch/distributed/distributed_c10d.html | 0
 .../_modules/torch/distributions/bernoulli.html | 0
 .../_modules/torch/distributions/beta.html | 0
 .../_modules/torch/distributions/binomial.html | 0
 .../_modules/torch/distributions/categorical.html | 0
 .../_modules/torch/distributions/cauchy.html | 0
 .../_modules/torch/distributions/chi2.html | 0
 .../torch/distributions/constraint_registry.html | 0
 .../_modules/torch/distributions/constraints.html | 0
 .../_modules/torch/distributions/dirichlet.html | 0
 .../_modules/torch/distributions/distribution.html | 0
 .../_modules/torch/distributions/exp_family.html | 0
 .../_modules/torch/distributions/exponential.html | 0
 .../torch/distributions/fishersnedecor.html | 0
 .../_modules/torch/distributions/gamma.html | 0
 .../_modules/torch/distributions/geometric.html | 0
 .../_modules/torch/distributions/gumbel.html | 0
 .../_modules/torch/distributions/half_cauchy.html | 0
 .../_modules/torch/distributions/half_normal.html | 0
 .../_modules/torch/distributions/independent.html | 0
 .../_modules/torch/distributions/kl.html | 0
 .../_modules/torch/distributions/laplace.html | 0
 .../_modules/torch/distributions/log_normal.html | 0
 .../distributions/lowrank_multivariate_normal.html | 0
 .../_modules/torch/distributions/multinomial.html | 0
 .../torch/distributions/multivariate_normal.html | 0
 .../torch/distributions/negative_binomial.html | 0
 .../_modules/torch/distributions/normal.html | 0
 .../torch/distributions/one_hot_categorical.html | 0
 .../_modules/torch/distributions/pareto.html | 0
 .../_modules/torch/distributions/poisson.html | 0
 .../torch/distributions/relaxed_bernoulli.html | 0
 .../torch/distributions/relaxed_categorical.html | 0
 .../_modules/torch/distributions/studentT.html | 0
 .../distributions/transformed_distribution.html | 0
 .../_modules/torch/distributions/transforms.html | 0
 .../_modules/torch/distributions/uniform.html | 0
 .../_modules/torch/distributions/weibull.html | 0
 .../_modules/torch/functional.html | 0
 docs/{stable => 1.1.0}/_modules/torch/hub.html | 0
 docs/{stable => 1.1.0}/_modules/torch/jit.html | 0
 .../_modules/torch/multiprocessing.html | 0
 .../_modules/torch/multiprocessing/spawn.html | 0
 .../_modules/torch/nn/functional.html | 0
 docs/{stable => 1.1.0}/_modules/torch/nn/init.html | 0
 .../_modules/torch/nn/modules/activation.html | 0
 .../_modules/torch/nn/modules/adaptive.html | 0
 .../_modules/torch/nn/modules/batchnorm.html | 0
 .../_modules/torch/nn/modules/container.html | 0
 .../_modules/torch/nn/modules/conv.html | 0
 .../_modules/torch/nn/modules/distance.html | 0
 .../_modules/torch/nn/modules/dropout.html | 0
 .../_modules/torch/nn/modules/fold.html | 0
 .../_modules/torch/nn/modules/instancenorm.html | 0
 .../_modules/torch/nn/modules/linear.html | 0
 .../_modules/torch/nn/modules/loss.html | 0
 .../_modules/torch/nn/modules/module.html | 0
 .../_modules/torch/nn/modules/normalization.html | 0
 .../_modules/torch/nn/modules/padding.html | 0
 .../_modules/torch/nn/modules/pixelshuffle.html | 0
 .../_modules/torch/nn/modules/pooling.html | 0
 .../_modules/torch/nn/modules/rnn.html | 0
 .../_modules/torch/nn/modules/sparse.html | 0
 .../_modules/torch/nn/modules/upsampling.html | 0
 .../_modules/torch/nn/parallel/data_parallel.html | 0
 .../_modules/torch/nn/parallel/distributed.html | 0
 .../_modules/torch/nn/parallel/distributed_cpu.html | 0
 .../_modules/torch/nn/parameter.html | 0
 .../_modules/torch/nn/utils/clip_grad.html | 0
 .../_modules/torch/nn/utils/convert_parameters.html | 0
 .../_modules/torch/nn/utils/rnn.html | 0
 .../_modules/torch/nn/utils/spectral_norm.html | 0
 .../_modules/torch/nn/utils/weight_norm.html | 0
 docs/{stable => 1.1.0}/_modules/torch/onnx.html | 0
 .../_modules/torch/optim/adadelta.html | 0
 .../_modules/torch/optim/adagrad.html | 0
 .../_modules/torch/optim/adam.html | 0
 .../_modules/torch/optim/adamax.html | 0
 .../_modules/torch/optim/asgd.html | 0
 .../_modules/torch/optim/lbfgs.html | 0
 .../_modules/torch/optim/lr_scheduler.html | 0
 .../_modules/torch/optim/optimizer.html | 0
 .../_modules/torch/optim/rmsprop.html | 0
 .../_modules/torch/optim/rprop.html | 0
 .../{stable => 1.1.0}/_modules/torch/optim/sgd.html | 0
 .../_modules/torch/optim/sparse_adam.html | 0
 .../_modules/torch/quasirandom.html | 0
 docs/{stable => 1.1.0}/_modules/torch/random.html | 0
 .../_modules/torch/serialization.html | 0
 docs/{stable => 1.1.0}/_modules/torch/sparse.html | 0
 docs/{stable => 1.1.0}/_modules/torch/storage.html | 0
 docs/{stable => 1.1.0}/_modules/torch/tensor.html | 0
 .../_modules/torch/utils/checkpoint.html | 0
 .../_modules/torch/utils/cpp_extension.html | 0
 .../_modules/torch/utils/data/dataloader.html | 0
 .../_modules/torch/utils/data/dataset.html | 0
 .../_modules/torch/utils/data/distributed.html | 0
 .../_modules/torch/utils/data/sampler.html | 0
 .../_modules/torch/utils/tensorboard/writer.html | 0
 docs/{stable => 1.1.0}/_modules/torchvision.html | 0
 .../_modules/torchvision/datasets/cifar.html | 0
 .../_modules/torchvision/datasets/cityscapes.html | 0
 .../_modules/torchvision/datasets/coco.html | 0
 .../_modules/torchvision/datasets/fakedata.html | 0
 .../_modules/torchvision/datasets/flickr.html | 0
 .../_modules/torchvision/datasets/folder.html | 0
 .../_modules/torchvision/datasets/imagenet.html | 0
 .../_modules/torchvision/datasets/lsun.html | 0
 .../_modules/torchvision/datasets/mnist.html | 0
 .../_modules/torchvision/datasets/phototour.html | 0
 .../_modules/torchvision/datasets/sbd.html | 0
 .../_modules/torchvision/datasets/sbu.html | 0
 .../_modules/torchvision/datasets/stl10.html | 0
 .../_modules/torchvision/datasets/svhn.html | 0
 .../_modules/torchvision/datasets/usps.html | 0
 .../_modules/torchvision/datasets/voc.html | 0
 .../_modules/torchvision/models/alexnet.html | 0
 .../_modules/torchvision/models/densenet.html | 0
 .../torchvision/models/detection/faster_rcnn.html | 0
 .../torchvision/models/detection/keypoint_rcnn.html | 0
 .../torchvision/models/detection/mask_rcnn.html | 0
 .../_modules/torchvision/models/googlenet.html | 0
 .../_modules/torchvision/models/inception.html | 0
 .../_modules/torchvision/models/mnasnet.html | 0
 .../_modules/torchvision/models/mobilenet.html | 0
 .../_modules/torchvision/models/resnet.html | 0
 .../models/segmentation/segmentation.html | 0
 .../_modules/torchvision/models/shufflenetv2.html | 0
 .../_modules/torchvision/models/squeezenet.html | 0
 .../_modules/torchvision/models/vgg.html | 0
 .../_modules/torchvision/transforms/functional.html | 0
 .../_modules/torchvision/transforms/transforms.html | 0
 .../_modules/torchvision/utils.html | 0
 docs/{stable => 1.1.0}/_sources/__config__.rst.txt | 0
 docs/{stable => 1.1.0}/_sources/autograd.rst.txt | 0
 docs/{stable => 1.1.0}/_sources/bottleneck.rst.txt | 0
 docs/{stable => 1.1.0}/_sources/checkpoint.rst.txt | 0
 .../_sources/community/contribution_guide.rst.txt | 0
 .../_sources/community/governance.rst.txt | 0
 .../_sources/community/persons_of_interest.rst.txt | 0
 .../_sources/cpp_extension.rst.txt | 0
 docs/{stable => 1.1.0}/_sources/cuda.rst.txt | 0
 .../_sources/cuda_deterministic.rst.txt | 0
 .../_sources/cuda_deterministic_backward.rst.txt | 0
 .../_sources/cudnn_deterministic.rst.txt | 0
 .../_sources/cudnn_persistent_rnn.rst.txt | 0
 docs/{stable => 1.1.0}/_sources/data.rst.txt | 0
 docs/{stable => 1.1.0}/_sources/distributed.rst.txt | 0
 .../_sources/distributed_deprecated.rst.txt | 0
 .../_sources/distributions.rst.txt | 0
 docs/{stable => 1.1.0}/_sources/dlpack.rst.txt | 0
 docs/{stable => 1.1.0}/_sources/hub.rst.txt | 0
 docs/{stable => 1.1.0}/_sources/index.rst.txt | 0
 docs/{stable => 1.1.0}/_sources/jit.rst.txt | 0
 docs/{stable => 1.1.0}/_sources/model_zoo.rst.txt | 0
 .../_sources/multiprocessing.rst.txt | 0
 docs/{stable => 1.1.0}/_sources/nn.rst.txt | 0
 .../_sources/notes/autograd.rst.txt | 0
 .../_sources/notes/broadcasting.rst.txt | 0
 docs/{stable => 1.1.0}/_sources/notes/cuda.rst.txt | 0
 .../_sources/notes/extending.rst.txt | 0
 docs/{stable => 1.1.0}/_sources/notes/faq.rst.txt | 0
 .../_sources/notes/multiprocessing.rst.txt | 0
 .../_sources/notes/randomness.rst.txt | 0
 .../_sources/notes/serialization.rst.txt | 0
 .../_sources/notes/windows.rst.txt | 0
 docs/{stable => 1.1.0}/_sources/onnx.rst.txt | 0
 docs/{stable => 1.1.0}/_sources/optim.rst.txt | 0
 docs/{stable => 1.1.0}/_sources/sparse.rst.txt | 0
 docs/{stable => 1.1.0}/_sources/storage.rst.txt | 0
 .../_sources/tensor_attributes.rst.txt | 0
 docs/{stable => 1.1.0}/_sources/tensorboard.rst.txt | 0
 docs/{stable => 1.1.0}/_sources/tensors.rst.txt | 0
 docs/{stable => 1.1.0}/_sources/torch.rst.txt | 0
 .../_sources/torchvision/datasets.rst.txt | 0
 .../_sources/torchvision/index.rst.txt | 0
 .../_sources/torchvision/models.rst.txt | 0
 .../_sources/torchvision/transforms.rst.txt | 0
 .../_sources/torchvision/utils.rst.txt | 0
 docs/{stable => 1.1.0}/_sources/type_info.rst.txt | 0
 docs/{stable => 1.1.0}/_static/basic.css | 0
 docs/{stable => 1.1.0}/_static/css/theme.css | 0
 docs/{stable => 1.1.0}/_static/doctools.js | 0
 .../_static/documentation_options.js | 0
 docs/{stable => 1.1.0}/_static/file.png | Bin
 .../fonts/FreightSans/freight-sans-bold-italic.woff | Bin
 .../FreightSans/freight-sans-bold-italic.woff2 | Bin
 .../fonts/FreightSans/freight-sans-bold.woff | Bin
 .../fonts/FreightSans/freight-sans-bold.woff2 | Bin
 .../fonts/FreightSans/freight-sans-book-italic.woff | Bin
 .../FreightSans/freight-sans-book-italic.woff2 | Bin
 .../fonts/FreightSans/freight-sans-book.woff | Bin
 .../fonts/FreightSans/freight-sans-book.woff2 | Bin
 .../FreightSans/freight-sans-light-italic.woff | Bin
 .../FreightSans/freight-sans-light-italic.woff2 | Bin
 .../fonts/FreightSans/freight-sans-light.woff | Bin
 .../fonts/FreightSans/freight-sans-light.woff2 | Bin
 .../FreightSans/freight-sans-medium-italic.woff | Bin
 .../FreightSans/freight-sans-medium-italic.woff2 | Bin
 .../fonts/FreightSans/freight-sans-medium.woff | Bin
 .../fonts/FreightSans/freight-sans-medium.woff2 | Bin
 .../fonts/IBMPlexMono/IBMPlexMono-Light.woff | Bin
 .../fonts/IBMPlexMono/IBMPlexMono-Light.woff2 | Bin
 .../fonts/IBMPlexMono/IBMPlexMono-Medium.woff | Bin
 .../fonts/IBMPlexMono/IBMPlexMono-Medium.woff2 | Bin
 .../fonts/IBMPlexMono/IBMPlexMono-Regular.woff | Bin
 .../fonts/IBMPlexMono/IBMPlexMono-Regular.woff2 | Bin
 .../fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff | Bin
 .../fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2 | Bin
 .../_static/images/arrow-down-orange.svg | 0
 .../_static/images/arrow-right-with-tail.svg | 0
 .../_static/images/chevron-down-grey.svg | 0
 .../_static/images/chevron-right-orange.svg | 0
 .../_static/images/chevron-right-white.svg | 0
 .../_static/images/home-footer-background.jpg | Bin
 .../{stable => 1.1.0}/_static/images/icon-close.svg | 0
 .../_static/images/icon-menu-dots-dark.svg | 0
 docs/{stable => 1.1.0}/_static/images/logo-dark.svg | 0
 .../_static/images/logo-facebook-dark.svg | 0
 docs/{stable => 1.1.0}/_static/images/logo-icon.svg | 0
 .../_static/images/logo-twitter-dark.svg | 0
 docs/{stable => 1.1.0}/_static/images/logo.svg | 0
 .../_static/images/pytorch-colab.svg | 0
 .../_static/images/pytorch-download.svg | 0
 .../_static/images/pytorch-github.svg | 0
 docs/{stable => 1.1.0}/_static/images/pytorch-x.svg | 0
 .../_static/images/search-icon.svg | 0
 .../_static/images/view-page-source-icon.svg | 0
 .../_static/img/aliastracker_graph.png | Bin
 .../{stable => 1.1.0}/_static/img/dynamic_graph.gif | Bin
 .../_static/img/pytorch-logo-dark-unstable.png | Bin
 .../_static/img/pytorch-logo-dark.png | Bin
 .../_static/img/pytorch-logo-dark.svg | 0
 .../_static/img/pytorch-logo-flame.png | Bin
 .../_static/img/pytorch-logo-flame.svg | 0
 .../_static/img/tensor_illustration.png | Bin
 docs/{stable => 1.1.0}/_static/jquery-3.2.1.js | 0
 docs/{stable => 1.1.0}/_static/jquery.js | 0
 docs/{stable => 1.1.0}/_static/js/modernizr.min.js | 0
 docs/{stable => 1.1.0}/_static/js/theme.js | 0
 .../_static/js/vendor/anchor.min.js | 0
 .../_static/js/vendor/bootstrap.min.js | 0
 .../_static/js/vendor/popper.min.js | 0
 docs/{stable => 1.1.0}/_static/katex-math.css | 0
 .../{stable => 1.1.0}/_static/katex_autorenderer.js | 0
 docs/{stable => 1.1.0}/_static/language_data.js | 0
 docs/{stable => 1.1.0}/_static/minus.png | Bin
 docs/{stable => 1.1.0}/_static/plus.png | Bin
 docs/{stable => 1.1.0}/_static/pygments.css | 0
 .../_static/pytorch-logo-dark-unstable.png | Bin
 .../{stable => 1.1.0}/_static/pytorch-logo-dark.svg | 0
 docs/{stable => 1.1.0}/_static/searchtools.js | 0
 docs/{stable => 1.1.0}/_static/underscore-1.3.1.js | 0
 docs/{stable => 1.1.0}/_static/underscore.js | 0
 docs/{stable => 1.1.0}/autograd.html | 0
 docs/{stable => 1.1.0}/bottleneck.html | 0
 docs/{stable => 1.1.0}/checkpoint.html | 0
 .../community/contribution_guide.html | 0
 docs/{stable => 1.1.0}/community/governance.html | 0
 .../community/persons_of_interest.html | 0
 docs/{stable => 1.1.0}/cpp_extension.html | 0
 docs/{stable => 1.1.0}/cuda.html | 0
 docs/{stable => 1.1.0}/cuda_deterministic.html | 0
 .../cuda_deterministic_backward.html | 0
 docs/{stable => 1.1.0}/cudnn_deterministic.html | 0
 docs/{stable => 1.1.0}/cudnn_persistent_rnn.html | 0
 docs/{stable => 1.1.0}/data.html | 0
 docs/{stable => 1.1.0}/distributed.html | 0
 docs/{stable => 1.1.0}/distributed_deprecated.html | 0
 docs/{stable => 1.1.0}/distributions.html | 0
 docs/{stable => 1.1.0}/dlpack.html | 0
 docs/{stable => 1.1.0}/genindex.html | 0
 docs/{stable => 1.1.0}/hub.html | 0
 docs/{stable => 1.1.0}/index.html | 0
 docs/{stable => 1.1.0}/jit.html | 0
 docs/{stable => 1.1.0}/model_zoo.html | 0
 docs/{stable => 1.1.0}/multiprocessing.html | 0
 docs/{stable => 1.1.0}/nn.html | 0
 docs/{stable => 1.1.0}/notes/autograd.html | 0
 docs/{stable => 1.1.0}/notes/broadcasting.html | 0
 docs/{stable => 1.1.0}/notes/cuda.html | 0
 docs/{stable => 1.1.0}/notes/extending.html | 0
 docs/{stable => 1.1.0}/notes/faq.html | 0
 docs/{stable => 1.1.0}/notes/multiprocessing.html | 0
 docs/{stable => 1.1.0}/notes/randomness.html | 0
 docs/{stable => 1.1.0}/notes/serialization.html | 0
 docs/{stable => 1.1.0}/notes/windows.html | 0
 docs/{stable => 1.1.0}/objects.inv | Bin
 docs/{stable => 1.1.0}/onnx.html | 0
 docs/{stable => 1.1.0}/optim.html | 0
 docs/{stable => 1.1.0}/py-modindex.html | 0
 docs/{stable => 1.1.0}/search.html | 0
 docs/{stable => 1.1.0}/searchindex.js | 0
 docs/{stable => 1.1.0}/sparse.html | 0
 docs/{stable => 1.1.0}/storage.html | 0
 docs/{stable => 1.1.0}/tensor_attributes.html | 0
 docs/{stable => 1.1.0}/tensorboard.html | 0
 docs/{stable => 1.1.0}/tensors.html | 0
 docs/{stable => 1.1.0}/torch.html | 0
 docs/{stable => 1.1.0}/torchvision/datasets.html | 0
 docs/{stable => 1.1.0}/torchvision/index.html | 0
 docs/{stable => 1.1.0}/torchvision/models.html | 0
 docs/{stable => 1.1.0}/torchvision/transforms.html | 0
 docs/{stable => 1.1.0}/torchvision/utils.html | 0
 docs/{stable => 1.1.0}/type_info.html | 0
 docs/{1.1.0 => 1.2.0}/autograd.md | 0
 docs/{1.1.0 => 1.2.0}/bottleneck.md | 0
 docs/{1.1.0 => 1.2.0}/checkpoint.md | 0
 docs/{1.1.0 => 1.2.0}/cpp_extenstion.md | 0
 docs/{1.1.0 => 1.2.0}/cuda.md | 0
 docs/{1.1.0 => 1.2.0}/data.md | 0
 docs/{1.1.0 => 1.2.0}/distributed.md | 0
 docs/{1.1.0 => 1.2.0}/distributions.md | 0
 docs/{1.1.0 => 1.2.0}/dlpack.md | 0
 docs/{1.1.0 => 1.2.0}/ffi.md | 0
 docs/{1.1.0 => 1.2.0}/genindex.md | 0
 docs/{1.1.0 => 1.2.0}/hub.md | 0
 docs/{1.1.0 => 1.2.0}/index.md | 0
 docs/{1.1.0 => 1.2.0}/jit.md | 0
 docs/{1.1.0 => 1.2.0}/legacy.md | 0
 docs/{1.1.0 => 1.2.0}/model_zoo.md | 0
 docs/{1.1.0 => 1.2.0}/multiprocessing.md | 0
 docs/{1.1.0 => 1.2.0}/nn.md | 0
 docs/{1.1.0 => 1.2.0}/onnx.md | 0
 docs/{1.1.0 => 1.2.0}/optim.md | 0
 docs/{1.1.0 => 1.2.0}/py-modindex.md | 0
 docs/{1.1.0 => 1.2.0}/search.md | 0
 docs/{1.1.0 => 1.2.0}/sparse.md | 0
 docs/{1.1.0 => 1.2.0}/storage.md | 0
 docs/{1.1.0 => 1.2.0}/tensor_attributes.md | 0
 docs/{1.1.0 => 1.2.0}/tensors.md | 0
 docs/{1.1.0 => 1.2.0}/torch.md | 0
 docs/{1.1.0 => 1.2.0}/type_info.md | 0
 369 files changed, 0 insertions(+), 0 deletions(-)
 rename docs/{stable => 1.1.0}/.buildinfo (100%)
 rename docs/{stable => 1.1.0}/__config__.html (100%)
 rename docs/{stable => 1.1.0}/_images/CELU.png (100%)
 rename docs/{stable => 1.1.0}/_images/ELU.png (100%)
 rename docs/{stable => 1.1.0}/_images/Hardshrink.png (100%)
 rename docs/{stable => 1.1.0}/_images/Hardtanh.png (100%)
 rename docs/{stable => 1.1.0}/_images/LeakyReLU.png (100%)
 rename docs/{stable => 1.1.0}/_images/LogSigmoid.png (100%)
 rename docs/{stable => 1.1.0}/_images/PReLU.png (100%)
 rename docs/{stable => 1.1.0}/_images/ReLU.png (100%)
 rename docs/{stable => 1.1.0}/_images/ReLU6.png (100%)
 rename docs/{stable => 1.1.0}/_images/SELU.png (100%)
 rename docs/{stable => 1.1.0}/_images/Sigmoid.png (100%)
 rename docs/{stable => 1.1.0}/_images/Softplus.png (100%)
 rename docs/{stable => 1.1.0}/_images/Softshrink.png (100%)
 rename docs/{stable => 1.1.0}/_images/Softsign.png (100%)
 rename docs/{stable => 1.1.0}/_images/Tanh.png (100%)
 rename docs/{stable => 1.1.0}/_images/Tanhshrink.png (100%)
 rename docs/{stable => 1.1.0}/_modules/index.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/__config__.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/_tensor_str.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/_utils.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/autograd.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/autograd/anomaly_mode.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/autograd/function.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/autograd/grad_mode.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/autograd/gradcheck.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/autograd/profiler.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/cuda.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/cuda/comm.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/cuda/nvtx.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/cuda/random.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/cuda/streams.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributed.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributed/deprecated.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributed/distributed_c10d.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/bernoulli.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/beta.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/binomial.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/categorical.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/cauchy.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/chi2.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/constraint_registry.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/constraints.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/dirichlet.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/distribution.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/exp_family.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/exponential.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/fishersnedecor.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/gamma.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/geometric.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/gumbel.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/half_cauchy.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/half_normal.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/independent.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/kl.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/laplace.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/log_normal.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/lowrank_multivariate_normal.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/multinomial.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/multivariate_normal.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/negative_binomial.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/normal.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/one_hot_categorical.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/pareto.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/poisson.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/relaxed_bernoulli.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/relaxed_categorical.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/studentT.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/transformed_distribution.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/transforms.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/uniform.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/distributions/weibull.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/functional.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/hub.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/jit.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/multiprocessing.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/multiprocessing/spawn.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/functional.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/init.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/modules/activation.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/modules/adaptive.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/modules/batchnorm.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/modules/container.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/modules/conv.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/modules/distance.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/modules/dropout.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/modules/fold.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/modules/instancenorm.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/modules/linear.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/modules/loss.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/modules/module.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/modules/normalization.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/modules/padding.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/modules/pixelshuffle.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/modules/pooling.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/modules/rnn.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/modules/sparse.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/modules/upsampling.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/parallel/data_parallel.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/parallel/distributed.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/parallel/distributed_cpu.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/parameter.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/utils/clip_grad.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/utils/convert_parameters.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/utils/rnn.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/utils/spectral_norm.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/nn/utils/weight_norm.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/onnx.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/optim/adadelta.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/optim/adagrad.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/optim/adam.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/optim/adamax.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/optim/asgd.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/optim/lbfgs.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/optim/lr_scheduler.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/optim/optimizer.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/optim/rmsprop.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/optim/rprop.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/optim/sgd.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/optim/sparse_adam.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/quasirandom.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/random.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/serialization.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/sparse.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/storage.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/tensor.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/utils/checkpoint.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/utils/cpp_extension.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/utils/data/dataloader.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/utils/data/dataset.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/utils/data/distributed.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/utils/data/sampler.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torch/utils/tensorboard/writer.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/datasets/cifar.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/datasets/cityscapes.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/datasets/coco.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/datasets/fakedata.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/datasets/flickr.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/datasets/folder.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/datasets/imagenet.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/datasets/lsun.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/datasets/mnist.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/datasets/phototour.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/datasets/sbd.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/datasets/sbu.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/datasets/stl10.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/datasets/svhn.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/datasets/usps.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/datasets/voc.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/models/alexnet.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/models/densenet.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/models/detection/faster_rcnn.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/models/detection/keypoint_rcnn.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/models/detection/mask_rcnn.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/models/googlenet.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/models/inception.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/models/mnasnet.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/models/mobilenet.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/models/resnet.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/models/segmentation/segmentation.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/models/shufflenetv2.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/models/squeezenet.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/models/vgg.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/transforms/functional.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/transforms/transforms.html (100%)
 rename docs/{stable => 1.1.0}/_modules/torchvision/utils.html (100%)
 rename docs/{stable => 1.1.0}/_sources/__config__.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/autograd.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/bottleneck.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/checkpoint.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/community/contribution_guide.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/community/governance.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/community/persons_of_interest.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/cpp_extension.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/cuda.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/cuda_deterministic.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/cuda_deterministic_backward.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/cudnn_deterministic.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/cudnn_persistent_rnn.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/data.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/distributed.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/distributed_deprecated.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/distributions.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/dlpack.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/hub.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/index.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/jit.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/model_zoo.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/multiprocessing.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/nn.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/notes/autograd.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/notes/broadcasting.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/notes/cuda.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/notes/extending.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/notes/faq.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/notes/multiprocessing.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/notes/randomness.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/notes/serialization.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/notes/windows.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/onnx.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/optim.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/sparse.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/storage.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/tensor_attributes.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/tensorboard.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/tensors.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/torch.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/torchvision/datasets.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/torchvision/index.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/torchvision/models.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/torchvision/transforms.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/torchvision/utils.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_sources/type_info.rst.txt (100%)
 rename docs/{stable => 1.1.0}/_static/basic.css (100%)
 rename docs/{stable => 1.1.0}/_static/css/theme.css (100%)
 rename docs/{stable => 1.1.0}/_static/doctools.js (100%)
 rename docs/{stable => 1.1.0}/_static/documentation_options.js (100%)
 rename docs/{stable => 1.1.0}/_static/file.png (100%)
 rename docs/{stable => 1.1.0}/_static/fonts/FreightSans/freight-sans-bold-italic.woff (100%)
 rename docs/{stable => 1.1.0}/_static/fonts/FreightSans/freight-sans-bold-italic.woff2 (100%)
 rename docs/{stable => 1.1.0}/_static/fonts/FreightSans/freight-sans-bold.woff (100%)
 rename docs/{stable => 1.1.0}/_static/fonts/FreightSans/freight-sans-bold.woff2 (100%)
 rename docs/{stable => 1.1.0}/_static/fonts/FreightSans/freight-sans-book-italic.woff (100%)
 rename docs/{stable => 1.1.0}/_static/fonts/FreightSans/freight-sans-book-italic.woff2 (100%)
 rename docs/{stable => 1.1.0}/_static/fonts/FreightSans/freight-sans-book.woff (100%)
 rename docs/{stable => 1.1.0}/_static/fonts/FreightSans/freight-sans-book.woff2 (100%)
 rename docs/{stable => 1.1.0}/_static/fonts/FreightSans/freight-sans-light-italic.woff (100%)
 rename docs/{stable => 1.1.0}/_static/fonts/FreightSans/freight-sans-light-italic.woff2 (100%)
 rename docs/{stable => 1.1.0}/_static/fonts/FreightSans/freight-sans-light.woff (100%)
 rename docs/{stable => 1.1.0}/_static/fonts/FreightSans/freight-sans-light.woff2 (100%)
 rename docs/{stable => 1.1.0}/_static/fonts/FreightSans/freight-sans-medium-italic.woff (100%)
 rename docs/{stable => 1.1.0}/_static/fonts/FreightSans/freight-sans-medium-italic.woff2 (100%)
 rename docs/{stable => 1.1.0}/_static/fonts/FreightSans/freight-sans-medium.woff (100%)
 rename docs/{stable => 1.1.0}/_static/fonts/FreightSans/freight-sans-medium.woff2 (100%)
 rename docs/{stable => 1.1.0}/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff (100%)
 rename docs/{stable => 1.1.0}/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff2 (100%)
 rename docs/{stable => 1.1.0}/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff (100%)
 rename docs/{stable => 1.1.0}/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2 (100%)
 rename docs/{stable => 1.1.0}/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff (100%)
 rename docs/{stable => 1.1.0}/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff2 (100%)
 rename docs/{stable => 1.1.0}/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff (100%)
 rename docs/{stable => 1.1.0}/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2 (100%)
 rename docs/{stable => 1.1.0}/_static/images/arrow-down-orange.svg (100%)
 rename docs/{stable => 1.1.0}/_static/images/arrow-right-with-tail.svg (100%)
 rename docs/{stable => 1.1.0}/_static/images/chevron-down-grey.svg (100%)
 rename docs/{stable => 1.1.0}/_static/images/chevron-right-orange.svg (100%)
 rename docs/{stable => 1.1.0}/_static/images/chevron-right-white.svg (100%)
 rename docs/{stable => 1.1.0}/_static/images/home-footer-background.jpg (100%)
 rename docs/{stable => 1.1.0}/_static/images/icon-close.svg (100%)
 rename docs/{stable => 1.1.0}/_static/images/icon-menu-dots-dark.svg (100%)
 rename docs/{stable => 1.1.0}/_static/images/logo-dark.svg (100%)
 rename docs/{stable => 1.1.0}/_static/images/logo-facebook-dark.svg (100%)
 rename docs/{stable => 1.1.0}/_static/images/logo-icon.svg (100%)
 rename docs/{stable => 1.1.0}/_static/images/logo-twitter-dark.svg (100%)
 rename docs/{stable => 1.1.0}/_static/images/logo.svg (100%)
 rename docs/{stable => 1.1.0}/_static/images/pytorch-colab.svg (100%)
 rename docs/{stable => 1.1.0}/_static/images/pytorch-download.svg (100%)
 rename docs/{stable => 1.1.0}/_static/images/pytorch-github.svg (100%)
 rename docs/{stable => 1.1.0}/_static/images/pytorch-x.svg (100%)
 rename docs/{stable => 1.1.0}/_static/images/search-icon.svg (100%)
 rename docs/{stable => 1.1.0}/_static/images/view-page-source-icon.svg (100%)
 rename docs/{stable => 1.1.0}/_static/img/aliastracker_graph.png (100%)
 rename docs/{stable => 1.1.0}/_static/img/dynamic_graph.gif (100%)
 rename docs/{stable => 1.1.0}/_static/img/pytorch-logo-dark-unstable.png (100%)
 rename docs/{stable => 1.1.0}/_static/img/pytorch-logo-dark.png (100%)
 rename docs/{stable => 1.1.0}/_static/img/pytorch-logo-dark.svg (100%)
 rename docs/{stable => 1.1.0}/_static/img/pytorch-logo-flame.png (100%)
 rename docs/{stable => 1.1.0}/_static/img/pytorch-logo-flame.svg (100%)
 rename docs/{stable => 1.1.0}/_static/img/tensor_illustration.png (100%)
 rename docs/{stable => 1.1.0}/_static/jquery-3.2.1.js (100%)
 rename docs/{stable => 1.1.0}/_static/jquery.js (100%)
 rename docs/{stable => 1.1.0}/_static/js/modernizr.min.js (100%)
 rename docs/{stable => 1.1.0}/_static/js/theme.js (100%)
 rename docs/{stable => 1.1.0}/_static/js/vendor/anchor.min.js (100%)
 rename docs/{stable => 1.1.0}/_static/js/vendor/bootstrap.min.js (100%)
 rename docs/{stable => 1.1.0}/_static/js/vendor/popper.min.js (100%)
 rename docs/{stable => 1.1.0}/_static/katex-math.css (100%)
 rename docs/{stable => 1.1.0}/_static/katex_autorenderer.js (100%)
 rename docs/{stable => 1.1.0}/_static/language_data.js (100%)
 rename docs/{stable => 1.1.0}/_static/minus.png (100%)
 rename docs/{stable => 1.1.0}/_static/plus.png (100%)
 rename docs/{stable => 1.1.0}/_static/pygments.css (100%)
 rename docs/{stable => 1.1.0}/_static/pytorch-logo-dark-unstable.png (100%)
 rename docs/{stable => 1.1.0}/_static/pytorch-logo-dark.svg (100%)
 rename docs/{stable => 1.1.0}/_static/searchtools.js (100%)
 rename docs/{stable => 1.1.0}/_static/underscore-1.3.1.js (100%)
 rename docs/{stable => 1.1.0}/_static/underscore.js (100%)
 rename docs/{stable => 1.1.0}/autograd.html (100%)
 rename docs/{stable => 1.1.0}/bottleneck.html (100%)
 rename docs/{stable => 1.1.0}/checkpoint.html (100%)
 rename docs/{stable => 1.1.0}/community/contribution_guide.html (100%)
 rename docs/{stable => 1.1.0}/community/governance.html (100%)
 rename docs/{stable => 1.1.0}/community/persons_of_interest.html (100%)
 rename docs/{stable => 1.1.0}/cpp_extension.html (100%)
 rename docs/{stable => 1.1.0}/cuda.html (100%)
 rename docs/{stable => 1.1.0}/cuda_deterministic.html (100%)
 rename docs/{stable => 1.1.0}/cuda_deterministic_backward.html (100%)
 rename docs/{stable => 1.1.0}/cudnn_deterministic.html (100%)
 rename docs/{stable => 1.1.0}/cudnn_persistent_rnn.html (100%)
 rename docs/{stable => 1.1.0}/data.html (100%)
 rename docs/{stable => 1.1.0}/distributed.html (100%)
 rename docs/{stable => 1.1.0}/distributed_deprecated.html (100%)
 rename docs/{stable => 1.1.0}/distributions.html (100%)
 rename docs/{stable => 1.1.0}/dlpack.html (100%)
 rename docs/{stable => 1.1.0}/genindex.html (100%)
 rename docs/{stable => 1.1.0}/hub.html (100%)
 rename docs/{stable => 1.1.0}/index.html (100%)
 rename docs/{stable => 1.1.0}/jit.html (100%)
 rename docs/{stable => 1.1.0}/model_zoo.html (100%)
 rename docs/{stable => 1.1.0}/multiprocessing.html (100%)
 rename docs/{stable => 1.1.0}/nn.html (100%)
 rename docs/{stable => 1.1.0}/notes/autograd.html (100%)
 rename docs/{stable => 1.1.0}/notes/broadcasting.html (100%)
 rename docs/{stable => 1.1.0}/notes/cuda.html (100%)
 rename docs/{stable => 1.1.0}/notes/extending.html (100%)
 rename docs/{stable => 1.1.0}/notes/faq.html (100%)
 rename docs/{stable => 1.1.0}/notes/multiprocessing.html (100%)
 rename docs/{stable => 1.1.0}/notes/randomness.html (100%)
 rename docs/{stable => 1.1.0}/notes/serialization.html (100%)
 rename docs/{stable => 1.1.0}/notes/windows.html (100%)
 rename docs/{stable => 1.1.0}/objects.inv (100%)
 rename docs/{stable => 1.1.0}/onnx.html (100%)
 rename docs/{stable => 1.1.0}/optim.html (100%)
 rename docs/{stable => 1.1.0}/py-modindex.html (100%)
 rename docs/{stable => 1.1.0}/search.html (100%)
 rename docs/{stable => 1.1.0}/searchindex.js (100%)
 rename docs/{stable => 1.1.0}/sparse.html (100%)
 rename docs/{stable => 1.1.0}/storage.html (100%)
 rename docs/{stable => 1.1.0}/tensor_attributes.html (100%)
 rename docs/{stable => 1.1.0}/tensorboard.html (100%)
 rename docs/{stable => 1.1.0}/tensors.html (100%)
 rename docs/{stable => 1.1.0}/torch.html (100%)
 rename docs/{stable => 1.1.0}/torchvision/datasets.html (100%)
 rename docs/{stable => 1.1.0}/torchvision/index.html (100%)
 rename docs/{stable => 1.1.0}/torchvision/models.html (100%)
 rename docs/{stable => 1.1.0}/torchvision/transforms.html (100%)
 rename docs/{stable => 1.1.0}/torchvision/utils.html (100%)
 rename docs/{stable => 1.1.0}/type_info.html (100%)
 rename docs/{1.1.0 => 1.2.0}/autograd.md (100%)
 rename docs/{1.1.0 => 1.2.0}/bottleneck.md (100%)
 rename docs/{1.1.0 => 1.2.0}/checkpoint.md (100%)
 rename docs/{1.1.0 => 1.2.0}/cpp_extenstion.md (100%)
 rename docs/{1.1.0 => 1.2.0}/cuda.md (100%)
 rename docs/{1.1.0 => 1.2.0}/data.md (100%)
 rename docs/{1.1.0 => 1.2.0}/distributed.md (100%)
 rename docs/{1.1.0 => 1.2.0}/distributions.md (100%)
 rename docs/{1.1.0 => 1.2.0}/dlpack.md (100%)
 rename docs/{1.1.0 => 1.2.0}/ffi.md (100%)
 rename docs/{1.1.0 => 1.2.0}/genindex.md (100%)
 rename docs/{1.1.0 => 1.2.0}/hub.md (100%)
 rename docs/{1.1.0 => 1.2.0}/index.md (100%)
 rename docs/{1.1.0 => 1.2.0}/jit.md (100%)
 rename docs/{1.1.0 => 1.2.0}/legacy.md (100%)
 rename docs/{1.1.0 => 1.2.0}/model_zoo.md (100%)
 rename docs/{1.1.0 => 1.2.0}/multiprocessing.md (100%)
 rename docs/{1.1.0 => 1.2.0}/nn.md (100%)
 rename docs/{1.1.0 => 1.2.0}/onnx.md (100%)
 rename docs/{1.1.0 => 1.2.0}/optim.md (100%)
 rename docs/{1.1.0 => 1.2.0}/py-modindex.md (100%)
 rename docs/{1.1.0 => 1.2.0}/search.md (100%)
 rename docs/{1.1.0 => 1.2.0}/sparse.md (100%)
 rename docs/{1.1.0 => 1.2.0}/storage.md (100%)
 rename docs/{1.1.0 => 1.2.0}/tensor_attributes.md (100%)
 rename docs/{1.1.0 => 1.2.0}/tensors.md (100%)
 rename docs/{1.1.0 => 1.2.0}/torch.md (100%)
 rename docs/{1.1.0 => 1.2.0}/type_info.md (100%)
diff --git a/docs/stable/.buildinfo b/docs/1.1.0/.buildinfo
similarity index 100%
rename from docs/stable/.buildinfo
rename to docs/1.1.0/.buildinfo
diff --git a/docs/stable/__config__.html b/docs/1.1.0/__config__.html
similarity index 100%
rename from docs/stable/__config__.html
rename to docs/1.1.0/__config__.html
diff --git a/docs/stable/_images/CELU.png b/docs/1.1.0/_images/CELU.png
similarity index 100%
rename from docs/stable/_images/CELU.png
rename to docs/1.1.0/_images/CELU.png
diff --git a/docs/stable/_images/ELU.png b/docs/1.1.0/_images/ELU.png
similarity index 100%
rename from docs/stable/_images/ELU.png
rename to docs/1.1.0/_images/ELU.png
diff --git a/docs/stable/_images/Hardshrink.png b/docs/1.1.0/_images/Hardshrink.png
similarity index 100%
rename from docs/stable/_images/Hardshrink.png
rename to docs/1.1.0/_images/Hardshrink.png
diff --git a/docs/stable/_images/Hardtanh.png b/docs/1.1.0/_images/Hardtanh.png
similarity index 100%
rename from docs/stable/_images/Hardtanh.png
rename to docs/1.1.0/_images/Hardtanh.png
diff --git a/docs/stable/_images/LeakyReLU.png b/docs/1.1.0/_images/LeakyReLU.png
similarity index 100%
rename from docs/stable/_images/LeakyReLU.png
rename to docs/1.1.0/_images/LeakyReLU.png
diff --git a/docs/stable/_images/LogSigmoid.png b/docs/1.1.0/_images/LogSigmoid.png
similarity index 100%
rename from docs/stable/_images/LogSigmoid.png
rename to docs/1.1.0/_images/LogSigmoid.png
diff --git a/docs/stable/_images/PReLU.png b/docs/1.1.0/_images/PReLU.png
similarity index 100%
rename from docs/stable/_images/PReLU.png
rename to docs/1.1.0/_images/PReLU.png
diff --git a/docs/stable/_images/ReLU.png b/docs/1.1.0/_images/ReLU.png
similarity index 100%
rename from docs/stable/_images/ReLU.png
rename to docs/1.1.0/_images/ReLU.png
diff --git a/docs/stable/_images/ReLU6.png b/docs/1.1.0/_images/ReLU6.png
similarity index 100%
rename from docs/stable/_images/ReLU6.png
rename to docs/1.1.0/_images/ReLU6.png
diff --git a/docs/stable/_images/SELU.png b/docs/1.1.0/_images/SELU.png
similarity index 100%
rename from docs/stable/_images/SELU.png
rename to docs/1.1.0/_images/SELU.png
diff --git a/docs/stable/_images/Sigmoid.png b/docs/1.1.0/_images/Sigmoid.png
similarity index 100%
rename from docs/stable/_images/Sigmoid.png
rename to docs/1.1.0/_images/Sigmoid.png
diff --git a/docs/stable/_images/Softplus.png b/docs/1.1.0/_images/Softplus.png
similarity index 100%
rename from docs/stable/_images/Softplus.png
rename to docs/1.1.0/_images/Softplus.png
diff --git a/docs/stable/_images/Softshrink.png b/docs/1.1.0/_images/Softshrink.png
similarity index 100%
rename from docs/stable/_images/Softshrink.png
rename to docs/1.1.0/_images/Softshrink.png
diff --git a/docs/stable/_images/Softsign.png b/docs/1.1.0/_images/Softsign.png
similarity index 100%
rename from docs/stable/_images/Softsign.png
rename to docs/1.1.0/_images/Softsign.png
diff --git a/docs/stable/_images/Tanh.png b/docs/1.1.0/_images/Tanh.png
similarity index 100%
rename from docs/stable/_images/Tanh.png
rename to docs/1.1.0/_images/Tanh.png
diff --git a/docs/stable/_images/Tanhshrink.png b/docs/1.1.0/_images/Tanhshrink.png
similarity index 100%
rename from docs/stable/_images/Tanhshrink.png
rename to docs/1.1.0/_images/Tanhshrink.png
diff --git a/docs/stable/_modules/index.html b/docs/1.1.0/_modules/index.html
similarity index 100%
rename from docs/stable/_modules/index.html
rename to docs/1.1.0/_modules/index.html
diff --git a/docs/stable/_modules/torch.html b/docs/1.1.0/_modules/torch.html
similarity index 100%
rename from docs/stable/_modules/torch.html
rename to docs/1.1.0/_modules/torch.html
diff --git a/docs/stable/_modules/torch/__config__.html b/docs/1.1.0/_modules/torch/__config__.html
similarity index 100%
rename from docs/stable/_modules/torch/__config__.html
rename to docs/1.1.0/_modules/torch/__config__.html
diff --git a/docs/stable/_modules/torch/_tensor_str.html b/docs/1.1.0/_modules/torch/_tensor_str.html
similarity index 100%
rename from docs/stable/_modules/torch/_tensor_str.html
rename to docs/1.1.0/_modules/torch/_tensor_str.html
diff --git a/docs/stable/_modules/torch/_utils.html b/docs/1.1.0/_modules/torch/_utils.html
similarity index 100%
rename from docs/stable/_modules/torch/_utils.html
rename to docs/1.1.0/_modules/torch/_utils.html
diff --git a/docs/stable/_modules/torch/autograd.html b/docs/1.1.0/_modules/torch/autograd.html
similarity index 100%
rename from docs/stable/_modules/torch/autograd.html
rename to docs/1.1.0/_modules/torch/autograd.html
diff --git a/docs/stable/_modules/torch/autograd/anomaly_mode.html b/docs/1.1.0/_modules/torch/autograd/anomaly_mode.html
similarity index 100%
rename from docs/stable/_modules/torch/autograd/anomaly_mode.html
rename to docs/1.1.0/_modules/torch/autograd/anomaly_mode.html
diff --git a/docs/stable/_modules/torch/autograd/function.html b/docs/1.1.0/_modules/torch/autograd/function.html
similarity index 100%
rename from docs/stable/_modules/torch/autograd/function.html
rename to docs/1.1.0/_modules/torch/autograd/function.html
diff --git a/docs/stable/_modules/torch/autograd/grad_mode.html b/docs/1.1.0/_modules/torch/autograd/grad_mode.html
similarity index 100%
rename from docs/stable/_modules/torch/autograd/grad_mode.html
rename to docs/1.1.0/_modules/torch/autograd/grad_mode.html
diff --git a/docs/stable/_modules/torch/autograd/gradcheck.html b/docs/1.1.0/_modules/torch/autograd/gradcheck.html
similarity index 100%
rename from docs/stable/_modules/torch/autograd/gradcheck.html
rename to docs/1.1.0/_modules/torch/autograd/gradcheck.html
diff --git a/docs/stable/_modules/torch/autograd/profiler.html b/docs/1.1.0/_modules/torch/autograd/profiler.html
similarity index 100%
rename from docs/stable/_modules/torch/autograd/profiler.html
rename to docs/1.1.0/_modules/torch/autograd/profiler.html
diff --git a/docs/stable/_modules/torch/cuda.html b/docs/1.1.0/_modules/torch/cuda.html
similarity index 100%
rename from docs/stable/_modules/torch/cuda.html
rename to docs/1.1.0/_modules/torch/cuda.html
diff --git a/docs/stable/_modules/torch/cuda/comm.html b/docs/1.1.0/_modules/torch/cuda/comm.html
similarity index 100%
rename from docs/stable/_modules/torch/cuda/comm.html
rename to docs/1.1.0/_modules/torch/cuda/comm.html
diff --git a/docs/stable/_modules/torch/cuda/nvtx.html b/docs/1.1.0/_modules/torch/cuda/nvtx.html
similarity index 100%
rename from docs/stable/_modules/torch/cuda/nvtx.html
rename to docs/1.1.0/_modules/torch/cuda/nvtx.html
diff --git a/docs/stable/_modules/torch/cuda/random.html b/docs/1.1.0/_modules/torch/cuda/random.html
similarity index 100%
rename from docs/stable/_modules/torch/cuda/random.html
rename to docs/1.1.0/_modules/torch/cuda/random.html
diff --git a/docs/stable/_modules/torch/cuda/streams.html b/docs/1.1.0/_modules/torch/cuda/streams.html
similarity index 100%
rename from docs/stable/_modules/torch/cuda/streams.html
rename to docs/1.1.0/_modules/torch/cuda/streams.html
diff --git a/docs/stable/_modules/torch/distributed.html b/docs/1.1.0/_modules/torch/distributed.html
similarity index 100%
rename from docs/stable/_modules/torch/distributed.html
rename to docs/1.1.0/_modules/torch/distributed.html
diff --git a/docs/stable/_modules/torch/distributed/deprecated.html b/docs/1.1.0/_modules/torch/distributed/deprecated.html
similarity index 100%
rename from docs/stable/_modules/torch/distributed/deprecated.html
rename to docs/1.1.0/_modules/torch/distributed/deprecated.html
diff --git a/docs/stable/_modules/torch/distributed/distributed_c10d.html b/docs/1.1.0/_modules/torch/distributed/distributed_c10d.html
similarity index 100%
rename from docs/stable/_modules/torch/distributed/distributed_c10d.html
rename to docs/1.1.0/_modules/torch/distributed/distributed_c10d.html
diff --git a/docs/stable/_modules/torch/distributions/bernoulli.html b/docs/1.1.0/_modules/torch/distributions/bernoulli.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/bernoulli.html
rename to docs/1.1.0/_modules/torch/distributions/bernoulli.html
diff --git a/docs/stable/_modules/torch/distributions/beta.html b/docs/1.1.0/_modules/torch/distributions/beta.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/beta.html
rename to docs/1.1.0/_modules/torch/distributions/beta.html
diff --git a/docs/stable/_modules/torch/distributions/binomial.html b/docs/1.1.0/_modules/torch/distributions/binomial.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/binomial.html
rename to docs/1.1.0/_modules/torch/distributions/binomial.html
diff --git a/docs/stable/_modules/torch/distributions/categorical.html b/docs/1.1.0/_modules/torch/distributions/categorical.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/categorical.html
rename to docs/1.1.0/_modules/torch/distributions/categorical.html
diff --git a/docs/stable/_modules/torch/distributions/cauchy.html b/docs/1.1.0/_modules/torch/distributions/cauchy.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/cauchy.html
rename to docs/1.1.0/_modules/torch/distributions/cauchy.html
diff --git a/docs/stable/_modules/torch/distributions/chi2.html b/docs/1.1.0/_modules/torch/distributions/chi2.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/chi2.html
rename to docs/1.1.0/_modules/torch/distributions/chi2.html
diff --git a/docs/stable/_modules/torch/distributions/constraint_registry.html b/docs/1.1.0/_modules/torch/distributions/constraint_registry.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/constraint_registry.html
rename to docs/1.1.0/_modules/torch/distributions/constraint_registry.html
diff --git a/docs/stable/_modules/torch/distributions/constraints.html b/docs/1.1.0/_modules/torch/distributions/constraints.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/constraints.html
rename to docs/1.1.0/_modules/torch/distributions/constraints.html
diff --git a/docs/stable/_modules/torch/distributions/dirichlet.html b/docs/1.1.0/_modules/torch/distributions/dirichlet.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/dirichlet.html
rename to docs/1.1.0/_modules/torch/distributions/dirichlet.html
diff --git a/docs/stable/_modules/torch/distributions/distribution.html b/docs/1.1.0/_modules/torch/distributions/distribution.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/distribution.html
rename to docs/1.1.0/_modules/torch/distributions/distribution.html
diff --git a/docs/stable/_modules/torch/distributions/exp_family.html b/docs/1.1.0/_modules/torch/distributions/exp_family.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/exp_family.html
rename to docs/1.1.0/_modules/torch/distributions/exp_family.html
diff --git a/docs/stable/_modules/torch/distributions/exponential.html b/docs/1.1.0/_modules/torch/distributions/exponential.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/exponential.html
rename to docs/1.1.0/_modules/torch/distributions/exponential.html
diff --git a/docs/stable/_modules/torch/distributions/fishersnedecor.html b/docs/1.1.0/_modules/torch/distributions/fishersnedecor.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/fishersnedecor.html
rename to docs/1.1.0/_modules/torch/distributions/fishersnedecor.html
diff --git a/docs/stable/_modules/torch/distributions/gamma.html b/docs/1.1.0/_modules/torch/distributions/gamma.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/gamma.html
rename to docs/1.1.0/_modules/torch/distributions/gamma.html
diff --git a/docs/stable/_modules/torch/distributions/geometric.html b/docs/1.1.0/_modules/torch/distributions/geometric.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/geometric.html
rename to docs/1.1.0/_modules/torch/distributions/geometric.html
diff --git a/docs/stable/_modules/torch/distributions/gumbel.html b/docs/1.1.0/_modules/torch/distributions/gumbel.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/gumbel.html
rename to docs/1.1.0/_modules/torch/distributions/gumbel.html
diff --git a/docs/stable/_modules/torch/distributions/half_cauchy.html b/docs/1.1.0/_modules/torch/distributions/half_cauchy.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/half_cauchy.html
rename to docs/1.1.0/_modules/torch/distributions/half_cauchy.html
diff --git a/docs/stable/_modules/torch/distributions/half_normal.html b/docs/1.1.0/_modules/torch/distributions/half_normal.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/half_normal.html
rename to docs/1.1.0/_modules/torch/distributions/half_normal.html
diff --git a/docs/stable/_modules/torch/distributions/independent.html b/docs/1.1.0/_modules/torch/distributions/independent.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/independent.html
rename to docs/1.1.0/_modules/torch/distributions/independent.html
diff --git a/docs/stable/_modules/torch/distributions/kl.html b/docs/1.1.0/_modules/torch/distributions/kl.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/kl.html
rename to docs/1.1.0/_modules/torch/distributions/kl.html
diff --git a/docs/stable/_modules/torch/distributions/laplace.html b/docs/1.1.0/_modules/torch/distributions/laplace.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/laplace.html
rename to docs/1.1.0/_modules/torch/distributions/laplace.html
diff --git a/docs/stable/_modules/torch/distributions/log_normal.html b/docs/1.1.0/_modules/torch/distributions/log_normal.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/log_normal.html
rename to docs/1.1.0/_modules/torch/distributions/log_normal.html
diff --git a/docs/stable/_modules/torch/distributions/lowrank_multivariate_normal.html b/docs/1.1.0/_modules/torch/distributions/lowrank_multivariate_normal.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/lowrank_multivariate_normal.html
rename to docs/1.1.0/_modules/torch/distributions/lowrank_multivariate_normal.html
diff --git a/docs/stable/_modules/torch/distributions/multinomial.html b/docs/1.1.0/_modules/torch/distributions/multinomial.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/multinomial.html
rename to docs/1.1.0/_modules/torch/distributions/multinomial.html
diff --git a/docs/stable/_modules/torch/distributions/multivariate_normal.html b/docs/1.1.0/_modules/torch/distributions/multivariate_normal.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/multivariate_normal.html
rename to docs/1.1.0/_modules/torch/distributions/multivariate_normal.html
diff --git a/docs/stable/_modules/torch/distributions/negative_binomial.html b/docs/1.1.0/_modules/torch/distributions/negative_binomial.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/negative_binomial.html
rename to docs/1.1.0/_modules/torch/distributions/negative_binomial.html
diff --git a/docs/stable/_modules/torch/distributions/normal.html b/docs/1.1.0/_modules/torch/distributions/normal.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/normal.html
rename to docs/1.1.0/_modules/torch/distributions/normal.html
diff --git a/docs/stable/_modules/torch/distributions/one_hot_categorical.html b/docs/1.1.0/_modules/torch/distributions/one_hot_categorical.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/one_hot_categorical.html
rename to docs/1.1.0/_modules/torch/distributions/one_hot_categorical.html
diff --git a/docs/stable/_modules/torch/distributions/pareto.html b/docs/1.1.0/_modules/torch/distributions/pareto.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/pareto.html
rename to docs/1.1.0/_modules/torch/distributions/pareto.html
diff --git a/docs/stable/_modules/torch/distributions/poisson.html b/docs/1.1.0/_modules/torch/distributions/poisson.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/poisson.html
rename to docs/1.1.0/_modules/torch/distributions/poisson.html
diff --git a/docs/stable/_modules/torch/distributions/relaxed_bernoulli.html b/docs/1.1.0/_modules/torch/distributions/relaxed_bernoulli.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/relaxed_bernoulli.html
rename to docs/1.1.0/_modules/torch/distributions/relaxed_bernoulli.html
diff --git a/docs/stable/_modules/torch/distributions/relaxed_categorical.html b/docs/1.1.0/_modules/torch/distributions/relaxed_categorical.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/relaxed_categorical.html
rename to docs/1.1.0/_modules/torch/distributions/relaxed_categorical.html
diff --git a/docs/stable/_modules/torch/distributions/studentT.html b/docs/1.1.0/_modules/torch/distributions/studentT.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/studentT.html
rename to docs/1.1.0/_modules/torch/distributions/studentT.html
diff --git a/docs/stable/_modules/torch/distributions/transformed_distribution.html b/docs/1.1.0/_modules/torch/distributions/transformed_distribution.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/transformed_distribution.html
rename to docs/1.1.0/_modules/torch/distributions/transformed_distribution.html
diff --git a/docs/stable/_modules/torch/distributions/transforms.html b/docs/1.1.0/_modules/torch/distributions/transforms.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/transforms.html
rename to docs/1.1.0/_modules/torch/distributions/transforms.html
diff --git a/docs/stable/_modules/torch/distributions/uniform.html b/docs/1.1.0/_modules/torch/distributions/uniform.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/uniform.html
rename to docs/1.1.0/_modules/torch/distributions/uniform.html
diff --git a/docs/stable/_modules/torch/distributions/weibull.html b/docs/1.1.0/_modules/torch/distributions/weibull.html
similarity index 100%
rename from docs/stable/_modules/torch/distributions/weibull.html
rename to docs/1.1.0/_modules/torch/distributions/weibull.html
diff --git a/docs/stable/_modules/torch/functional.html b/docs/1.1.0/_modules/torch/functional.html
similarity index 100%
rename from docs/stable/_modules/torch/functional.html
rename to docs/1.1.0/_modules/torch/functional.html
diff --git a/docs/stable/_modules/torch/hub.html b/docs/1.1.0/_modules/torch/hub.html
similarity index 100%
rename from docs/stable/_modules/torch/hub.html
rename to docs/1.1.0/_modules/torch/hub.html
diff --git a/docs/stable/_modules/torch/jit.html b/docs/1.1.0/_modules/torch/jit.html
similarity index 100%
rename from docs/stable/_modules/torch/jit.html
rename to docs/1.1.0/_modules/torch/jit.html
diff --git a/docs/stable/_modules/torch/multiprocessing.html b/docs/1.1.0/_modules/torch/multiprocessing.html
similarity index 100%
rename from docs/stable/_modules/torch/multiprocessing.html
rename to docs/1.1.0/_modules/torch/multiprocessing.html
diff --git a/docs/stable/_modules/torch/multiprocessing/spawn.html b/docs/1.1.0/_modules/torch/multiprocessing/spawn.html
similarity index 100%
rename from docs/stable/_modules/torch/multiprocessing/spawn.html
rename to docs/1.1.0/_modules/torch/multiprocessing/spawn.html
diff --git a/docs/stable/_modules/torch/nn/functional.html b/docs/1.1.0/_modules/torch/nn/functional.html
similarity index 100%
rename from docs/stable/_modules/torch/nn/functional.html
rename to docs/1.1.0/_modules/torch/nn/functional.html
diff --git a/docs/stable/_modules/torch/nn/init.html b/docs/1.1.0/_modules/torch/nn/init.html
similarity index 100%
rename from docs/stable/_modules/torch/nn/init.html
rename to docs/1.1.0/_modules/torch/nn/init.html
diff --git a/docs/stable/_modules/torch/nn/modules/activation.html b/docs/1.1.0/_modules/torch/nn/modules/activation.html
similarity index 100%
rename from docs/stable/_modules/torch/nn/modules/activation.html
rename to docs/1.1.0/_modules/torch/nn/modules/activation.html
diff --git a/docs/stable/_modules/torch/nn/modules/adaptive.html b/docs/1.1.0/_modules/torch/nn/modules/adaptive.html
similarity index 100%
rename from docs/stable/_modules/torch/nn/modules/adaptive.html
rename to docs/1.1.0/_modules/torch/nn/modules/adaptive.html
diff --git a/docs/stable/_modules/torch/nn/modules/batchnorm.html b/docs/1.1.0/_modules/torch/nn/modules/batchnorm.html
similarity index 100%
rename from docs/stable/_modules/torch/nn/modules/batchnorm.html
rename to docs/1.1.0/_modules/torch/nn/modules/batchnorm.html
diff --git a/docs/stable/_modules/torch/nn/modules/container.html b/docs/1.1.0/_modules/torch/nn/modules/container.html
similarity index 100%
rename from docs/stable/_modules/torch/nn/modules/container.html
rename to docs/1.1.0/_modules/torch/nn/modules/container.html
diff --git a/docs/stable/_modules/torch/nn/modules/conv.html b/docs/1.1.0/_modules/torch/nn/modules/conv.html
similarity index 100%
rename from docs/stable/_modules/torch/nn/modules/conv.html
rename to docs/1.1.0/_modules/torch/nn/modules/conv.html
diff --git a/docs/stable/_modules/torch/nn/modules/distance.html b/docs/1.1.0/_modules/torch/nn/modules/distance.html
similarity index 100%
rename from docs/stable/_modules/torch/nn/modules/distance.html
rename to docs/1.1.0/_modules/torch/nn/modules/distance.html
diff --git a/docs/stable/_modules/torch/nn/modules/dropout.html b/docs/1.1.0/_modules/torch/nn/modules/dropout.html
similarity index 100%
rename from docs/stable/_modules/torch/nn/modules/dropout.html
rename to docs/1.1.0/_modules/torch/nn/modules/dropout.html
diff --git a/docs/stable/_modules/torch/nn/modules/fold.html b/docs/1.1.0/_modules/torch/nn/modules/fold.html
similarity index 100%
rename from docs/stable/_modules/torch/nn/modules/fold.html
rename to docs/1.1.0/_modules/torch/nn/modules/fold.html
diff --git a/docs/stable/_modules/torch/nn/modules/instancenorm.html b/docs/1.1.0/_modules/torch/nn/modules/instancenorm.html
similarity index 100%
rename from docs/stable/_modules/torch/nn/modules/instancenorm.html
rename to docs/1.1.0/_modules/torch/nn/modules/instancenorm.html
diff --git a/docs/stable/_modules/torch/nn/modules/linear.html b/docs/1.1.0/_modules/torch/nn/modules/linear.html
similarity index 100%
rename from docs/stable/_modules/torch/nn/modules/linear.html
rename to docs/1.1.0/_modules/torch/nn/modules/linear.html
diff --git a/docs/stable/_modules/torch/nn/modules/loss.html b/docs/1.1.0/_modules/torch/nn/modules/loss.html
similarity index 100%
rename from docs/stable/_modules/torch/nn/modules/loss.html
rename to docs/1.1.0/_modules/torch/nn/modules/loss.html
diff --git a/docs/stable/_modules/torch/nn/modules/module.html b/docs/1.1.0/_modules/torch/nn/modules/module.html
similarity index 100%
rename from docs/stable/_modules/torch/nn/modules/module.html
rename to docs/1.1.0/_modules/torch/nn/modules/module.html
diff --git a/docs/stable/_modules/torch/nn/modules/normalization.html b/docs/1.1.0/_modules/torch/nn/modules/normalization.html
similarity index 100%
rename from docs/stable/_modules/torch/nn/modules/normalization.html
rename to docs/1.1.0/_modules/torch/nn/modules/normalization.html
diff --git a/docs/stable/_modules/torch/nn/modules/padding.html b/docs/1.1.0/_modules/torch/nn/modules/padding.html
similarity index 100%
rename from docs/stable/_modules/torch/nn/modules/padding.html
rename to docs/1.1.0/_modules/torch/nn/modules/padding.html
diff --git a/docs/stable/_modules/torch/nn/modules/pixelshuffle.html
b/docs/1.1.0/_modules/torch/nn/modules/pixelshuffle.html similarity index 100% rename from docs/stable/_modules/torch/nn/modules/pixelshuffle.html rename to docs/1.1.0/_modules/torch/nn/modules/pixelshuffle.html diff --git a/docs/stable/_modules/torch/nn/modules/pooling.html b/docs/1.1.0/_modules/torch/nn/modules/pooling.html similarity index 100% rename from docs/stable/_modules/torch/nn/modules/pooling.html rename to docs/1.1.0/_modules/torch/nn/modules/pooling.html diff --git a/docs/stable/_modules/torch/nn/modules/rnn.html b/docs/1.1.0/_modules/torch/nn/modules/rnn.html similarity index 100% rename from docs/stable/_modules/torch/nn/modules/rnn.html rename to docs/1.1.0/_modules/torch/nn/modules/rnn.html diff --git a/docs/stable/_modules/torch/nn/modules/sparse.html b/docs/1.1.0/_modules/torch/nn/modules/sparse.html similarity index 100% rename from docs/stable/_modules/torch/nn/modules/sparse.html rename to docs/1.1.0/_modules/torch/nn/modules/sparse.html diff --git a/docs/stable/_modules/torch/nn/modules/upsampling.html b/docs/1.1.0/_modules/torch/nn/modules/upsampling.html similarity index 100% rename from docs/stable/_modules/torch/nn/modules/upsampling.html rename to docs/1.1.0/_modules/torch/nn/modules/upsampling.html diff --git a/docs/stable/_modules/torch/nn/parallel/data_parallel.html b/docs/1.1.0/_modules/torch/nn/parallel/data_parallel.html similarity index 100% rename from docs/stable/_modules/torch/nn/parallel/data_parallel.html rename to docs/1.1.0/_modules/torch/nn/parallel/data_parallel.html diff --git a/docs/stable/_modules/torch/nn/parallel/distributed.html b/docs/1.1.0/_modules/torch/nn/parallel/distributed.html similarity index 100% rename from docs/stable/_modules/torch/nn/parallel/distributed.html rename to docs/1.1.0/_modules/torch/nn/parallel/distributed.html diff --git a/docs/stable/_modules/torch/nn/parallel/distributed_cpu.html b/docs/1.1.0/_modules/torch/nn/parallel/distributed_cpu.html similarity index 100% rename from docs/stable/_modules/torch/nn/parallel/distributed_cpu.html rename to docs/1.1.0/_modules/torch/nn/parallel/distributed_cpu.html diff --git a/docs/stable/_modules/torch/nn/parameter.html b/docs/1.1.0/_modules/torch/nn/parameter.html similarity index 100% rename from docs/stable/_modules/torch/nn/parameter.html rename to docs/1.1.0/_modules/torch/nn/parameter.html diff --git a/docs/stable/_modules/torch/nn/utils/clip_grad.html b/docs/1.1.0/_modules/torch/nn/utils/clip_grad.html similarity index 100% rename from docs/stable/_modules/torch/nn/utils/clip_grad.html rename to docs/1.1.0/_modules/torch/nn/utils/clip_grad.html diff --git a/docs/stable/_modules/torch/nn/utils/convert_parameters.html b/docs/1.1.0/_modules/torch/nn/utils/convert_parameters.html similarity index 100% rename from docs/stable/_modules/torch/nn/utils/convert_parameters.html rename to docs/1.1.0/_modules/torch/nn/utils/convert_parameters.html diff --git a/docs/stable/_modules/torch/nn/utils/rnn.html b/docs/1.1.0/_modules/torch/nn/utils/rnn.html similarity index 100% rename from docs/stable/_modules/torch/nn/utils/rnn.html rename to docs/1.1.0/_modules/torch/nn/utils/rnn.html diff --git a/docs/stable/_modules/torch/nn/utils/spectral_norm.html b/docs/1.1.0/_modules/torch/nn/utils/spectral_norm.html similarity index 100% rename from docs/stable/_modules/torch/nn/utils/spectral_norm.html rename to docs/1.1.0/_modules/torch/nn/utils/spectral_norm.html diff --git a/docs/stable/_modules/torch/nn/utils/weight_norm.html b/docs/1.1.0/_modules/torch/nn/utils/weight_norm.html similarity 
diff --git a/docs/stable/_modules/torch/onnx.html b/docs/1.1.0/_modules/torch/onnx.html
similarity index 100%
rename from docs/stable/_modules/torch/onnx.html
rename to docs/1.1.0/_modules/torch/onnx.html
diff --git a/docs/stable/_modules/torch/optim/adadelta.html b/docs/1.1.0/_modules/torch/optim/adadelta.html
similarity index 100%
rename from docs/stable/_modules/torch/optim/adadelta.html
rename to docs/1.1.0/_modules/torch/optim/adadelta.html
diff --git a/docs/stable/_modules/torch/optim/adagrad.html b/docs/1.1.0/_modules/torch/optim/adagrad.html
similarity index 100%
rename from docs/stable/_modules/torch/optim/adagrad.html
rename to docs/1.1.0/_modules/torch/optim/adagrad.html
diff --git a/docs/stable/_modules/torch/optim/adam.html b/docs/1.1.0/_modules/torch/optim/adam.html
similarity index 100%
rename from docs/stable/_modules/torch/optim/adam.html
rename to docs/1.1.0/_modules/torch/optim/adam.html
diff --git a/docs/stable/_modules/torch/optim/adamax.html b/docs/1.1.0/_modules/torch/optim/adamax.html
similarity index 100%
rename from docs/stable/_modules/torch/optim/adamax.html
rename to docs/1.1.0/_modules/torch/optim/adamax.html
diff --git a/docs/stable/_modules/torch/optim/asgd.html b/docs/1.1.0/_modules/torch/optim/asgd.html
similarity index 100%
rename from docs/stable/_modules/torch/optim/asgd.html
rename to docs/1.1.0/_modules/torch/optim/asgd.html
diff --git a/docs/stable/_modules/torch/optim/lbfgs.html b/docs/1.1.0/_modules/torch/optim/lbfgs.html
similarity index 100%
rename from docs/stable/_modules/torch/optim/lbfgs.html
rename to docs/1.1.0/_modules/torch/optim/lbfgs.html
diff --git a/docs/stable/_modules/torch/optim/lr_scheduler.html b/docs/1.1.0/_modules/torch/optim/lr_scheduler.html
similarity index 100%
rename from docs/stable/_modules/torch/optim/lr_scheduler.html
rename to docs/1.1.0/_modules/torch/optim/lr_scheduler.html
diff --git a/docs/stable/_modules/torch/optim/optimizer.html b/docs/1.1.0/_modules/torch/optim/optimizer.html
similarity index 100%
rename from docs/stable/_modules/torch/optim/optimizer.html
rename to docs/1.1.0/_modules/torch/optim/optimizer.html
diff --git a/docs/stable/_modules/torch/optim/rmsprop.html b/docs/1.1.0/_modules/torch/optim/rmsprop.html
similarity index 100%
rename from docs/stable/_modules/torch/optim/rmsprop.html
rename to docs/1.1.0/_modules/torch/optim/rmsprop.html
diff --git a/docs/stable/_modules/torch/optim/rprop.html b/docs/1.1.0/_modules/torch/optim/rprop.html
similarity index 100%
rename from docs/stable/_modules/torch/optim/rprop.html
rename to docs/1.1.0/_modules/torch/optim/rprop.html
diff --git a/docs/stable/_modules/torch/optim/sgd.html b/docs/1.1.0/_modules/torch/optim/sgd.html
similarity index 100%
rename from docs/stable/_modules/torch/optim/sgd.html
rename to docs/1.1.0/_modules/torch/optim/sgd.html
diff --git a/docs/stable/_modules/torch/optim/sparse_adam.html b/docs/1.1.0/_modules/torch/optim/sparse_adam.html
similarity index 100%
rename from docs/stable/_modules/torch/optim/sparse_adam.html
rename to docs/1.1.0/_modules/torch/optim/sparse_adam.html
diff --git a/docs/stable/_modules/torch/quasirandom.html b/docs/1.1.0/_modules/torch/quasirandom.html
similarity index 100%
rename from docs/stable/_modules/torch/quasirandom.html
rename to docs/1.1.0/_modules/torch/quasirandom.html
diff --git a/docs/stable/_modules/torch/random.html b/docs/1.1.0/_modules/torch/random.html
similarity index 100%
rename from docs/stable/_modules/torch/random.html
rename to docs/1.1.0/_modules/torch/random.html
diff --git a/docs/stable/_modules/torch/serialization.html b/docs/1.1.0/_modules/torch/serialization.html
similarity index 100%
rename from docs/stable/_modules/torch/serialization.html
rename to docs/1.1.0/_modules/torch/serialization.html
diff --git a/docs/stable/_modules/torch/sparse.html b/docs/1.1.0/_modules/torch/sparse.html
similarity index 100%
rename from docs/stable/_modules/torch/sparse.html
rename to docs/1.1.0/_modules/torch/sparse.html
diff --git a/docs/stable/_modules/torch/storage.html b/docs/1.1.0/_modules/torch/storage.html
similarity index 100%
rename from docs/stable/_modules/torch/storage.html
rename to docs/1.1.0/_modules/torch/storage.html
diff --git a/docs/stable/_modules/torch/tensor.html b/docs/1.1.0/_modules/torch/tensor.html
similarity index 100%
rename from docs/stable/_modules/torch/tensor.html
rename to docs/1.1.0/_modules/torch/tensor.html
diff --git a/docs/stable/_modules/torch/utils/checkpoint.html b/docs/1.1.0/_modules/torch/utils/checkpoint.html
similarity index 100%
rename from docs/stable/_modules/torch/utils/checkpoint.html
rename to docs/1.1.0/_modules/torch/utils/checkpoint.html
diff --git a/docs/stable/_modules/torch/utils/cpp_extension.html b/docs/1.1.0/_modules/torch/utils/cpp_extension.html
similarity index 100%
rename from docs/stable/_modules/torch/utils/cpp_extension.html
rename to docs/1.1.0/_modules/torch/utils/cpp_extension.html
diff --git a/docs/stable/_modules/torch/utils/data/dataloader.html b/docs/1.1.0/_modules/torch/utils/data/dataloader.html
similarity index 100%
rename from docs/stable/_modules/torch/utils/data/dataloader.html
rename to docs/1.1.0/_modules/torch/utils/data/dataloader.html
diff --git a/docs/stable/_modules/torch/utils/data/dataset.html b/docs/1.1.0/_modules/torch/utils/data/dataset.html
similarity index 100%
rename from docs/stable/_modules/torch/utils/data/dataset.html
rename to docs/1.1.0/_modules/torch/utils/data/dataset.html
diff --git a/docs/stable/_modules/torch/utils/data/distributed.html b/docs/1.1.0/_modules/torch/utils/data/distributed.html
similarity index 100%
rename from docs/stable/_modules/torch/utils/data/distributed.html
rename to docs/1.1.0/_modules/torch/utils/data/distributed.html
diff --git a/docs/stable/_modules/torch/utils/data/sampler.html b/docs/1.1.0/_modules/torch/utils/data/sampler.html
similarity index 100%
rename from docs/stable/_modules/torch/utils/data/sampler.html
rename to docs/1.1.0/_modules/torch/utils/data/sampler.html
diff --git a/docs/stable/_modules/torch/utils/tensorboard/writer.html b/docs/1.1.0/_modules/torch/utils/tensorboard/writer.html
similarity index 100%
rename from docs/stable/_modules/torch/utils/tensorboard/writer.html
rename to docs/1.1.0/_modules/torch/utils/tensorboard/writer.html
diff --git a/docs/stable/_modules/torchvision.html b/docs/1.1.0/_modules/torchvision.html
similarity index 100%
rename from docs/stable/_modules/torchvision.html
rename to docs/1.1.0/_modules/torchvision.html
diff --git a/docs/stable/_modules/torchvision/datasets/cifar.html b/docs/1.1.0/_modules/torchvision/datasets/cifar.html
similarity index 100%
rename from docs/stable/_modules/torchvision/datasets/cifar.html
rename to docs/1.1.0/_modules/torchvision/datasets/cifar.html
diff --git a/docs/stable/_modules/torchvision/datasets/cityscapes.html b/docs/1.1.0/_modules/torchvision/datasets/cityscapes.html
similarity index 100%
rename from docs/stable/_modules/torchvision/datasets/cityscapes.html
rename to docs/1.1.0/_modules/torchvision/datasets/cityscapes.html
diff --git a/docs/stable/_modules/torchvision/datasets/coco.html b/docs/1.1.0/_modules/torchvision/datasets/coco.html
similarity index 100%
rename from docs/stable/_modules/torchvision/datasets/coco.html
rename to docs/1.1.0/_modules/torchvision/datasets/coco.html
diff --git a/docs/stable/_modules/torchvision/datasets/fakedata.html b/docs/1.1.0/_modules/torchvision/datasets/fakedata.html
similarity index 100%
rename from docs/stable/_modules/torchvision/datasets/fakedata.html
rename to docs/1.1.0/_modules/torchvision/datasets/fakedata.html
diff --git a/docs/stable/_modules/torchvision/datasets/flickr.html b/docs/1.1.0/_modules/torchvision/datasets/flickr.html
similarity index 100%
rename from docs/stable/_modules/torchvision/datasets/flickr.html
rename to docs/1.1.0/_modules/torchvision/datasets/flickr.html
diff --git a/docs/stable/_modules/torchvision/datasets/folder.html b/docs/1.1.0/_modules/torchvision/datasets/folder.html
similarity index 100%
rename from docs/stable/_modules/torchvision/datasets/folder.html
rename to docs/1.1.0/_modules/torchvision/datasets/folder.html
diff --git a/docs/stable/_modules/torchvision/datasets/imagenet.html b/docs/1.1.0/_modules/torchvision/datasets/imagenet.html
similarity index 100%
rename from docs/stable/_modules/torchvision/datasets/imagenet.html
rename to docs/1.1.0/_modules/torchvision/datasets/imagenet.html
diff --git a/docs/stable/_modules/torchvision/datasets/lsun.html b/docs/1.1.0/_modules/torchvision/datasets/lsun.html
similarity index 100%
rename from docs/stable/_modules/torchvision/datasets/lsun.html
rename to docs/1.1.0/_modules/torchvision/datasets/lsun.html
diff --git a/docs/stable/_modules/torchvision/datasets/mnist.html b/docs/1.1.0/_modules/torchvision/datasets/mnist.html
similarity index 100%
rename from docs/stable/_modules/torchvision/datasets/mnist.html
rename to docs/1.1.0/_modules/torchvision/datasets/mnist.html
diff --git a/docs/stable/_modules/torchvision/datasets/phototour.html b/docs/1.1.0/_modules/torchvision/datasets/phototour.html
similarity index 100%
rename from docs/stable/_modules/torchvision/datasets/phototour.html
rename to docs/1.1.0/_modules/torchvision/datasets/phototour.html
diff --git a/docs/stable/_modules/torchvision/datasets/sbd.html b/docs/1.1.0/_modules/torchvision/datasets/sbd.html
similarity index 100%
rename from docs/stable/_modules/torchvision/datasets/sbd.html
rename to docs/1.1.0/_modules/torchvision/datasets/sbd.html
diff --git a/docs/stable/_modules/torchvision/datasets/sbu.html b/docs/1.1.0/_modules/torchvision/datasets/sbu.html
similarity index 100%
rename from docs/stable/_modules/torchvision/datasets/sbu.html
rename to docs/1.1.0/_modules/torchvision/datasets/sbu.html
diff --git a/docs/stable/_modules/torchvision/datasets/stl10.html b/docs/1.1.0/_modules/torchvision/datasets/stl10.html
similarity index 100%
rename from docs/stable/_modules/torchvision/datasets/stl10.html
rename to docs/1.1.0/_modules/torchvision/datasets/stl10.html
diff --git a/docs/stable/_modules/torchvision/datasets/svhn.html b/docs/1.1.0/_modules/torchvision/datasets/svhn.html
similarity index 100%
rename from docs/stable/_modules/torchvision/datasets/svhn.html
rename to docs/1.1.0/_modules/torchvision/datasets/svhn.html
diff --git a/docs/stable/_modules/torchvision/datasets/usps.html b/docs/1.1.0/_modules/torchvision/datasets/usps.html
similarity index 100%
rename from docs/stable/_modules/torchvision/datasets/usps.html
rename to docs/1.1.0/_modules/torchvision/datasets/usps.html
diff --git a/docs/stable/_modules/torchvision/datasets/voc.html b/docs/1.1.0/_modules/torchvision/datasets/voc.html
similarity index 100%
rename from docs/stable/_modules/torchvision/datasets/voc.html
rename to docs/1.1.0/_modules/torchvision/datasets/voc.html
diff --git a/docs/stable/_modules/torchvision/models/alexnet.html b/docs/1.1.0/_modules/torchvision/models/alexnet.html
similarity index 100%
rename from docs/stable/_modules/torchvision/models/alexnet.html
rename to docs/1.1.0/_modules/torchvision/models/alexnet.html
diff --git a/docs/stable/_modules/torchvision/models/densenet.html b/docs/1.1.0/_modules/torchvision/models/densenet.html
similarity index 100%
rename from docs/stable/_modules/torchvision/models/densenet.html
rename to docs/1.1.0/_modules/torchvision/models/densenet.html
diff --git a/docs/stable/_modules/torchvision/models/detection/faster_rcnn.html b/docs/1.1.0/_modules/torchvision/models/detection/faster_rcnn.html
similarity index 100%
rename from docs/stable/_modules/torchvision/models/detection/faster_rcnn.html
rename to docs/1.1.0/_modules/torchvision/models/detection/faster_rcnn.html
diff --git a/docs/stable/_modules/torchvision/models/detection/keypoint_rcnn.html b/docs/1.1.0/_modules/torchvision/models/detection/keypoint_rcnn.html
similarity index 100%
rename from docs/stable/_modules/torchvision/models/detection/keypoint_rcnn.html
rename to docs/1.1.0/_modules/torchvision/models/detection/keypoint_rcnn.html
diff --git a/docs/stable/_modules/torchvision/models/detection/mask_rcnn.html b/docs/1.1.0/_modules/torchvision/models/detection/mask_rcnn.html
similarity index 100%
rename from docs/stable/_modules/torchvision/models/detection/mask_rcnn.html
rename to docs/1.1.0/_modules/torchvision/models/detection/mask_rcnn.html
diff --git a/docs/stable/_modules/torchvision/models/googlenet.html b/docs/1.1.0/_modules/torchvision/models/googlenet.html
similarity index 100%
rename from docs/stable/_modules/torchvision/models/googlenet.html
rename to docs/1.1.0/_modules/torchvision/models/googlenet.html
diff --git a/docs/stable/_modules/torchvision/models/inception.html b/docs/1.1.0/_modules/torchvision/models/inception.html
similarity index 100%
rename from docs/stable/_modules/torchvision/models/inception.html
rename to docs/1.1.0/_modules/torchvision/models/inception.html
diff --git a/docs/stable/_modules/torchvision/models/mnasnet.html b/docs/1.1.0/_modules/torchvision/models/mnasnet.html
similarity index 100%
rename from docs/stable/_modules/torchvision/models/mnasnet.html
rename to docs/1.1.0/_modules/torchvision/models/mnasnet.html
diff --git a/docs/stable/_modules/torchvision/models/mobilenet.html b/docs/1.1.0/_modules/torchvision/models/mobilenet.html
similarity index 100%
rename from docs/stable/_modules/torchvision/models/mobilenet.html
rename to docs/1.1.0/_modules/torchvision/models/mobilenet.html
diff --git a/docs/stable/_modules/torchvision/models/resnet.html b/docs/1.1.0/_modules/torchvision/models/resnet.html
similarity index 100%
rename from docs/stable/_modules/torchvision/models/resnet.html
rename to docs/1.1.0/_modules/torchvision/models/resnet.html
diff --git a/docs/stable/_modules/torchvision/models/segmentation/segmentation.html b/docs/1.1.0/_modules/torchvision/models/segmentation/segmentation.html
similarity index 100%
rename from docs/stable/_modules/torchvision/models/segmentation/segmentation.html
rename to docs/1.1.0/_modules/torchvision/models/segmentation/segmentation.html
diff --git a/docs/stable/_modules/torchvision/models/shufflenetv2.html b/docs/1.1.0/_modules/torchvision/models/shufflenetv2.html
similarity index 100%
rename from docs/stable/_modules/torchvision/models/shufflenetv2.html
rename to docs/1.1.0/_modules/torchvision/models/shufflenetv2.html
diff --git a/docs/stable/_modules/torchvision/models/squeezenet.html b/docs/1.1.0/_modules/torchvision/models/squeezenet.html
similarity index 100%
rename from docs/stable/_modules/torchvision/models/squeezenet.html
rename to docs/1.1.0/_modules/torchvision/models/squeezenet.html
diff --git a/docs/stable/_modules/torchvision/models/vgg.html b/docs/1.1.0/_modules/torchvision/models/vgg.html
similarity index 100%
rename from docs/stable/_modules/torchvision/models/vgg.html
rename to docs/1.1.0/_modules/torchvision/models/vgg.html
diff --git a/docs/stable/_modules/torchvision/transforms/functional.html b/docs/1.1.0/_modules/torchvision/transforms/functional.html
similarity index 100%
rename from docs/stable/_modules/torchvision/transforms/functional.html
rename to docs/1.1.0/_modules/torchvision/transforms/functional.html
diff --git a/docs/stable/_modules/torchvision/transforms/transforms.html b/docs/1.1.0/_modules/torchvision/transforms/transforms.html
similarity index 100%
rename from docs/stable/_modules/torchvision/transforms/transforms.html
rename to docs/1.1.0/_modules/torchvision/transforms/transforms.html
diff --git a/docs/stable/_modules/torchvision/utils.html b/docs/1.1.0/_modules/torchvision/utils.html
similarity index 100%
rename from docs/stable/_modules/torchvision/utils.html
rename to docs/1.1.0/_modules/torchvision/utils.html
diff --git a/docs/stable/_sources/__config__.rst.txt b/docs/1.1.0/_sources/__config__.rst.txt
similarity index 100%
rename from docs/stable/_sources/__config__.rst.txt
rename to docs/1.1.0/_sources/__config__.rst.txt
diff --git a/docs/stable/_sources/autograd.rst.txt b/docs/1.1.0/_sources/autograd.rst.txt
similarity index 100%
rename from docs/stable/_sources/autograd.rst.txt
rename to docs/1.1.0/_sources/autograd.rst.txt
diff --git a/docs/stable/_sources/bottleneck.rst.txt b/docs/1.1.0/_sources/bottleneck.rst.txt
similarity index 100%
rename from docs/stable/_sources/bottleneck.rst.txt
rename to docs/1.1.0/_sources/bottleneck.rst.txt
diff --git a/docs/stable/_sources/checkpoint.rst.txt b/docs/1.1.0/_sources/checkpoint.rst.txt
similarity index 100%
rename from docs/stable/_sources/checkpoint.rst.txt
rename to docs/1.1.0/_sources/checkpoint.rst.txt
diff --git a/docs/stable/_sources/community/contribution_guide.rst.txt b/docs/1.1.0/_sources/community/contribution_guide.rst.txt
similarity index 100%
rename from docs/stable/_sources/community/contribution_guide.rst.txt
rename to docs/1.1.0/_sources/community/contribution_guide.rst.txt
diff --git a/docs/stable/_sources/community/governance.rst.txt b/docs/1.1.0/_sources/community/governance.rst.txt
similarity index 100%
rename from docs/stable/_sources/community/governance.rst.txt
rename to docs/1.1.0/_sources/community/governance.rst.txt
diff --git a/docs/stable/_sources/community/persons_of_interest.rst.txt b/docs/1.1.0/_sources/community/persons_of_interest.rst.txt
similarity index 100%
rename from docs/stable/_sources/community/persons_of_interest.rst.txt
rename to docs/1.1.0/_sources/community/persons_of_interest.rst.txt
diff --git a/docs/stable/_sources/cpp_extension.rst.txt b/docs/1.1.0/_sources/cpp_extension.rst.txt
similarity index 100%
rename from docs/stable/_sources/cpp_extension.rst.txt
rename to docs/1.1.0/_sources/cpp_extension.rst.txt
diff --git a/docs/stable/_sources/cuda.rst.txt b/docs/1.1.0/_sources/cuda.rst.txt
similarity index 100%
rename from docs/stable/_sources/cuda.rst.txt
rename to docs/1.1.0/_sources/cuda.rst.txt
diff --git a/docs/stable/_sources/cuda_deterministic.rst.txt b/docs/1.1.0/_sources/cuda_deterministic.rst.txt
similarity index 100%
rename from docs/stable/_sources/cuda_deterministic.rst.txt
rename to docs/1.1.0/_sources/cuda_deterministic.rst.txt
diff --git a/docs/stable/_sources/cuda_deterministic_backward.rst.txt b/docs/1.1.0/_sources/cuda_deterministic_backward.rst.txt
similarity index 100%
rename from docs/stable/_sources/cuda_deterministic_backward.rst.txt
rename to docs/1.1.0/_sources/cuda_deterministic_backward.rst.txt
diff --git a/docs/stable/_sources/cudnn_deterministic.rst.txt b/docs/1.1.0/_sources/cudnn_deterministic.rst.txt
similarity index 100%
rename from docs/stable/_sources/cudnn_deterministic.rst.txt
rename to docs/1.1.0/_sources/cudnn_deterministic.rst.txt
diff --git a/docs/stable/_sources/cudnn_persistent_rnn.rst.txt b/docs/1.1.0/_sources/cudnn_persistent_rnn.rst.txt
similarity index 100%
rename from docs/stable/_sources/cudnn_persistent_rnn.rst.txt
rename to docs/1.1.0/_sources/cudnn_persistent_rnn.rst.txt
diff --git a/docs/stable/_sources/data.rst.txt b/docs/1.1.0/_sources/data.rst.txt
similarity index 100%
rename from docs/stable/_sources/data.rst.txt
rename to docs/1.1.0/_sources/data.rst.txt
diff --git a/docs/stable/_sources/distributed.rst.txt b/docs/1.1.0/_sources/distributed.rst.txt
similarity index 100%
rename from docs/stable/_sources/distributed.rst.txt
rename to docs/1.1.0/_sources/distributed.rst.txt
diff --git a/docs/stable/_sources/distributed_deprecated.rst.txt b/docs/1.1.0/_sources/distributed_deprecated.rst.txt
similarity index 100%
rename from docs/stable/_sources/distributed_deprecated.rst.txt
rename to docs/1.1.0/_sources/distributed_deprecated.rst.txt
diff --git a/docs/stable/_sources/distributions.rst.txt b/docs/1.1.0/_sources/distributions.rst.txt
similarity index 100%
rename from docs/stable/_sources/distributions.rst.txt
rename to docs/1.1.0/_sources/distributions.rst.txt
diff --git a/docs/stable/_sources/dlpack.rst.txt b/docs/1.1.0/_sources/dlpack.rst.txt
similarity index 100%
rename from docs/stable/_sources/dlpack.rst.txt
rename to docs/1.1.0/_sources/dlpack.rst.txt
diff --git a/docs/stable/_sources/hub.rst.txt b/docs/1.1.0/_sources/hub.rst.txt
similarity index 100%
rename from docs/stable/_sources/hub.rst.txt
rename to docs/1.1.0/_sources/hub.rst.txt
diff --git a/docs/stable/_sources/index.rst.txt b/docs/1.1.0/_sources/index.rst.txt
similarity index 100%
rename from docs/stable/_sources/index.rst.txt
rename to docs/1.1.0/_sources/index.rst.txt
diff --git a/docs/stable/_sources/jit.rst.txt b/docs/1.1.0/_sources/jit.rst.txt
similarity index 100%
rename from docs/stable/_sources/jit.rst.txt
rename to docs/1.1.0/_sources/jit.rst.txt
diff --git a/docs/stable/_sources/model_zoo.rst.txt b/docs/1.1.0/_sources/model_zoo.rst.txt
similarity index 100%
rename from docs/stable/_sources/model_zoo.rst.txt
rename to docs/1.1.0/_sources/model_zoo.rst.txt
diff --git a/docs/stable/_sources/multiprocessing.rst.txt b/docs/1.1.0/_sources/multiprocessing.rst.txt
similarity index 100%
rename from docs/stable/_sources/multiprocessing.rst.txt
rename to docs/1.1.0/_sources/multiprocessing.rst.txt
diff --git a/docs/stable/_sources/nn.rst.txt b/docs/1.1.0/_sources/nn.rst.txt
similarity index 100%
rename from docs/stable/_sources/nn.rst.txt
rename to docs/1.1.0/_sources/nn.rst.txt
diff --git a/docs/stable/_sources/notes/autograd.rst.txt b/docs/1.1.0/_sources/notes/autograd.rst.txt
similarity index 100%
rename from docs/stable/_sources/notes/autograd.rst.txt
rename to docs/1.1.0/_sources/notes/autograd.rst.txt
diff --git a/docs/stable/_sources/notes/broadcasting.rst.txt b/docs/1.1.0/_sources/notes/broadcasting.rst.txt
similarity index 100%
rename from docs/stable/_sources/notes/broadcasting.rst.txt
rename to docs/1.1.0/_sources/notes/broadcasting.rst.txt
diff --git a/docs/stable/_sources/notes/cuda.rst.txt b/docs/1.1.0/_sources/notes/cuda.rst.txt
similarity index 100%
rename from docs/stable/_sources/notes/cuda.rst.txt
rename to docs/1.1.0/_sources/notes/cuda.rst.txt
diff --git a/docs/stable/_sources/notes/extending.rst.txt b/docs/1.1.0/_sources/notes/extending.rst.txt
similarity index 100%
rename from docs/stable/_sources/notes/extending.rst.txt
rename to docs/1.1.0/_sources/notes/extending.rst.txt
diff --git a/docs/stable/_sources/notes/faq.rst.txt b/docs/1.1.0/_sources/notes/faq.rst.txt
similarity index 100%
rename from docs/stable/_sources/notes/faq.rst.txt
rename to docs/1.1.0/_sources/notes/faq.rst.txt
diff --git a/docs/stable/_sources/notes/multiprocessing.rst.txt b/docs/1.1.0/_sources/notes/multiprocessing.rst.txt
similarity index 100%
rename from docs/stable/_sources/notes/multiprocessing.rst.txt
rename to docs/1.1.0/_sources/notes/multiprocessing.rst.txt
diff --git a/docs/stable/_sources/notes/randomness.rst.txt b/docs/1.1.0/_sources/notes/randomness.rst.txt
similarity index 100%
rename from docs/stable/_sources/notes/randomness.rst.txt
rename to docs/1.1.0/_sources/notes/randomness.rst.txt
diff --git a/docs/stable/_sources/notes/serialization.rst.txt b/docs/1.1.0/_sources/notes/serialization.rst.txt
similarity index 100%
rename from docs/stable/_sources/notes/serialization.rst.txt
rename to docs/1.1.0/_sources/notes/serialization.rst.txt
diff --git a/docs/stable/_sources/notes/windows.rst.txt b/docs/1.1.0/_sources/notes/windows.rst.txt
similarity index 100%
rename from docs/stable/_sources/notes/windows.rst.txt
rename to docs/1.1.0/_sources/notes/windows.rst.txt
diff --git a/docs/stable/_sources/onnx.rst.txt b/docs/1.1.0/_sources/onnx.rst.txt
similarity index 100%
rename from docs/stable/_sources/onnx.rst.txt
rename to docs/1.1.0/_sources/onnx.rst.txt
diff --git a/docs/stable/_sources/optim.rst.txt b/docs/1.1.0/_sources/optim.rst.txt
similarity index 100%
rename from docs/stable/_sources/optim.rst.txt
rename to docs/1.1.0/_sources/optim.rst.txt
diff --git a/docs/stable/_sources/sparse.rst.txt b/docs/1.1.0/_sources/sparse.rst.txt
similarity index 100%
rename from docs/stable/_sources/sparse.rst.txt
rename to docs/1.1.0/_sources/sparse.rst.txt
diff --git a/docs/stable/_sources/storage.rst.txt b/docs/1.1.0/_sources/storage.rst.txt
similarity index 100%
rename from docs/stable/_sources/storage.rst.txt
rename to docs/1.1.0/_sources/storage.rst.txt
diff --git a/docs/stable/_sources/tensor_attributes.rst.txt b/docs/1.1.0/_sources/tensor_attributes.rst.txt
similarity index 100%
rename from docs/stable/_sources/tensor_attributes.rst.txt
rename to docs/1.1.0/_sources/tensor_attributes.rst.txt
diff --git a/docs/stable/_sources/tensorboard.rst.txt b/docs/1.1.0/_sources/tensorboard.rst.txt
similarity index 100%
rename from docs/stable/_sources/tensorboard.rst.txt
rename to docs/1.1.0/_sources/tensorboard.rst.txt
diff --git a/docs/stable/_sources/tensors.rst.txt b/docs/1.1.0/_sources/tensors.rst.txt
similarity index 100%
rename from docs/stable/_sources/tensors.rst.txt
rename to docs/1.1.0/_sources/tensors.rst.txt
diff --git a/docs/stable/_sources/torch.rst.txt b/docs/1.1.0/_sources/torch.rst.txt
similarity index 100%
rename from docs/stable/_sources/torch.rst.txt
rename to docs/1.1.0/_sources/torch.rst.txt
diff --git a/docs/stable/_sources/torchvision/datasets.rst.txt b/docs/1.1.0/_sources/torchvision/datasets.rst.txt
similarity index 100%
rename from docs/stable/_sources/torchvision/datasets.rst.txt
rename to docs/1.1.0/_sources/torchvision/datasets.rst.txt
diff --git a/docs/stable/_sources/torchvision/index.rst.txt b/docs/1.1.0/_sources/torchvision/index.rst.txt
similarity index 100%
rename from docs/stable/_sources/torchvision/index.rst.txt
rename to docs/1.1.0/_sources/torchvision/index.rst.txt
diff --git a/docs/stable/_sources/torchvision/models.rst.txt b/docs/1.1.0/_sources/torchvision/models.rst.txt
similarity index 100%
rename from docs/stable/_sources/torchvision/models.rst.txt
rename to docs/1.1.0/_sources/torchvision/models.rst.txt
diff --git a/docs/stable/_sources/torchvision/transforms.rst.txt b/docs/1.1.0/_sources/torchvision/transforms.rst.txt
similarity index 100%
rename from docs/stable/_sources/torchvision/transforms.rst.txt
rename to docs/1.1.0/_sources/torchvision/transforms.rst.txt
diff --git a/docs/stable/_sources/torchvision/utils.rst.txt b/docs/1.1.0/_sources/torchvision/utils.rst.txt
similarity index 100%
rename from docs/stable/_sources/torchvision/utils.rst.txt
rename to docs/1.1.0/_sources/torchvision/utils.rst.txt
diff --git a/docs/stable/_sources/type_info.rst.txt b/docs/1.1.0/_sources/type_info.rst.txt
similarity index 100%
rename from docs/stable/_sources/type_info.rst.txt
rename to docs/1.1.0/_sources/type_info.rst.txt
diff --git a/docs/stable/_static/basic.css b/docs/1.1.0/_static/basic.css
similarity index 100%
rename from docs/stable/_static/basic.css
rename to docs/1.1.0/_static/basic.css
diff --git a/docs/stable/_static/css/theme.css b/docs/1.1.0/_static/css/theme.css
similarity index 100%
rename from docs/stable/_static/css/theme.css
rename to docs/1.1.0/_static/css/theme.css
diff --git a/docs/stable/_static/doctools.js b/docs/1.1.0/_static/doctools.js
similarity index 100%
rename from docs/stable/_static/doctools.js
rename to docs/1.1.0/_static/doctools.js
diff --git a/docs/stable/_static/documentation_options.js b/docs/1.1.0/_static/documentation_options.js
similarity index 100%
rename from docs/stable/_static/documentation_options.js
rename to docs/1.1.0/_static/documentation_options.js
diff --git a/docs/stable/_static/file.png b/docs/1.1.0/_static/file.png
similarity index 100%
rename from docs/stable/_static/file.png
rename to docs/1.1.0/_static/file.png
diff --git a/docs/stable/_static/fonts/FreightSans/freight-sans-bold-italic.woff b/docs/1.1.0/_static/fonts/FreightSans/freight-sans-bold-italic.woff
similarity index 100%
rename from docs/stable/_static/fonts/FreightSans/freight-sans-bold-italic.woff
rename to docs/1.1.0/_static/fonts/FreightSans/freight-sans-bold-italic.woff
diff --git a/docs/stable/_static/fonts/FreightSans/freight-sans-bold-italic.woff2 b/docs/1.1.0/_static/fonts/FreightSans/freight-sans-bold-italic.woff2
similarity index 100%
rename from docs/stable/_static/fonts/FreightSans/freight-sans-bold-italic.woff2
rename to docs/1.1.0/_static/fonts/FreightSans/freight-sans-bold-italic.woff2
diff --git a/docs/stable/_static/fonts/FreightSans/freight-sans-bold.woff b/docs/1.1.0/_static/fonts/FreightSans/freight-sans-bold.woff
similarity index 100%
rename from docs/stable/_static/fonts/FreightSans/freight-sans-bold.woff
rename to docs/1.1.0/_static/fonts/FreightSans/freight-sans-bold.woff
diff --git a/docs/stable/_static/fonts/FreightSans/freight-sans-bold.woff2 b/docs/1.1.0/_static/fonts/FreightSans/freight-sans-bold.woff2
similarity index 100%
rename from docs/stable/_static/fonts/FreightSans/freight-sans-bold.woff2
rename to docs/1.1.0/_static/fonts/FreightSans/freight-sans-bold.woff2
diff --git a/docs/stable/_static/fonts/FreightSans/freight-sans-book-italic.woff b/docs/1.1.0/_static/fonts/FreightSans/freight-sans-book-italic.woff
similarity index 100%
rename from docs/stable/_static/fonts/FreightSans/freight-sans-book-italic.woff
rename to docs/1.1.0/_static/fonts/FreightSans/freight-sans-book-italic.woff
diff --git a/docs/stable/_static/fonts/FreightSans/freight-sans-book-italic.woff2 b/docs/1.1.0/_static/fonts/FreightSans/freight-sans-book-italic.woff2
similarity index 100%
rename from docs/stable/_static/fonts/FreightSans/freight-sans-book-italic.woff2
rename to docs/1.1.0/_static/fonts/FreightSans/freight-sans-book-italic.woff2
diff --git a/docs/stable/_static/fonts/FreightSans/freight-sans-book.woff b/docs/1.1.0/_static/fonts/FreightSans/freight-sans-book.woff
similarity index 100%
rename from docs/stable/_static/fonts/FreightSans/freight-sans-book.woff
rename to docs/1.1.0/_static/fonts/FreightSans/freight-sans-book.woff
diff --git a/docs/stable/_static/fonts/FreightSans/freight-sans-book.woff2 b/docs/1.1.0/_static/fonts/FreightSans/freight-sans-book.woff2
similarity index 100%
rename from docs/stable/_static/fonts/FreightSans/freight-sans-book.woff2
rename to docs/1.1.0/_static/fonts/FreightSans/freight-sans-book.woff2
diff --git a/docs/stable/_static/fonts/FreightSans/freight-sans-light-italic.woff b/docs/1.1.0/_static/fonts/FreightSans/freight-sans-light-italic.woff
similarity index 100%
rename from docs/stable/_static/fonts/FreightSans/freight-sans-light-italic.woff
rename to docs/1.1.0/_static/fonts/FreightSans/freight-sans-light-italic.woff
diff --git a/docs/stable/_static/fonts/FreightSans/freight-sans-light-italic.woff2 b/docs/1.1.0/_static/fonts/FreightSans/freight-sans-light-italic.woff2
similarity index 100%
rename from docs/stable/_static/fonts/FreightSans/freight-sans-light-italic.woff2
rename to docs/1.1.0/_static/fonts/FreightSans/freight-sans-light-italic.woff2
diff --git a/docs/stable/_static/fonts/FreightSans/freight-sans-light.woff b/docs/1.1.0/_static/fonts/FreightSans/freight-sans-light.woff
similarity index 100%
rename from docs/stable/_static/fonts/FreightSans/freight-sans-light.woff
rename to docs/1.1.0/_static/fonts/FreightSans/freight-sans-light.woff
diff --git a/docs/stable/_static/fonts/FreightSans/freight-sans-light.woff2 b/docs/1.1.0/_static/fonts/FreightSans/freight-sans-light.woff2
similarity index 100%
rename from docs/stable/_static/fonts/FreightSans/freight-sans-light.woff2
rename to docs/1.1.0/_static/fonts/FreightSans/freight-sans-light.woff2
diff --git a/docs/stable/_static/fonts/FreightSans/freight-sans-medium-italic.woff b/docs/1.1.0/_static/fonts/FreightSans/freight-sans-medium-italic.woff
similarity index 100%
rename from docs/stable/_static/fonts/FreightSans/freight-sans-medium-italic.woff
rename to docs/1.1.0/_static/fonts/FreightSans/freight-sans-medium-italic.woff
diff --git a/docs/stable/_static/fonts/FreightSans/freight-sans-medium-italic.woff2 b/docs/1.1.0/_static/fonts/FreightSans/freight-sans-medium-italic.woff2
similarity index 100%
rename from docs/stable/_static/fonts/FreightSans/freight-sans-medium-italic.woff2
rename to docs/1.1.0/_static/fonts/FreightSans/freight-sans-medium-italic.woff2
diff --git a/docs/stable/_static/fonts/FreightSans/freight-sans-medium.woff b/docs/1.1.0/_static/fonts/FreightSans/freight-sans-medium.woff
similarity index 100%
rename from docs/stable/_static/fonts/FreightSans/freight-sans-medium.woff
rename to docs/1.1.0/_static/fonts/FreightSans/freight-sans-medium.woff
diff --git a/docs/stable/_static/fonts/FreightSans/freight-sans-medium.woff2 b/docs/1.1.0/_static/fonts/FreightSans/freight-sans-medium.woff2
similarity index 100%
rename from docs/stable/_static/fonts/FreightSans/freight-sans-medium.woff2
rename to docs/1.1.0/_static/fonts/FreightSans/freight-sans-medium.woff2
diff --git a/docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff b/docs/1.1.0/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff
similarity index 100%
rename from docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff
rename to docs/1.1.0/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff
diff --git a/docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff2 b/docs/1.1.0/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff2
similarity index 100%
rename from docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff2
rename to docs/1.1.0/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff2
diff --git a/docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff b/docs/1.1.0/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff
similarity index 100%
rename from docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff
rename to docs/1.1.0/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff
diff --git a/docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2 b/docs/1.1.0/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2
similarity index 100%
rename from docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2
rename to docs/1.1.0/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2
diff --git a/docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff b/docs/1.1.0/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff
similarity index 100%
rename from docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff
rename to docs/1.1.0/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff
diff --git a/docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff2 b/docs/1.1.0/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff2
similarity index 100%
rename from docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff2
rename to docs/1.1.0/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff2
diff --git a/docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff b/docs/1.1.0/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff
similarity index 100%
rename from docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff
rename to docs/1.1.0/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff
diff --git a/docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2 b/docs/1.1.0/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2
similarity index 100%
rename from docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2
rename to docs/1.1.0/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2
diff --git a/docs/stable/_static/images/arrow-down-orange.svg b/docs/1.1.0/_static/images/arrow-down-orange.svg
similarity index 100%
rename from docs/stable/_static/images/arrow-down-orange.svg
rename to docs/1.1.0/_static/images/arrow-down-orange.svg
diff --git a/docs/stable/_static/images/arrow-right-with-tail.svg b/docs/1.1.0/_static/images/arrow-right-with-tail.svg
similarity index 100%
rename from docs/stable/_static/images/arrow-right-with-tail.svg
rename to docs/1.1.0/_static/images/arrow-right-with-tail.svg
diff --git a/docs/stable/_static/images/chevron-down-grey.svg b/docs/1.1.0/_static/images/chevron-down-grey.svg
similarity index 100%
rename from docs/stable/_static/images/chevron-down-grey.svg
rename to docs/1.1.0/_static/images/chevron-down-grey.svg
diff --git a/docs/stable/_static/images/chevron-right-orange.svg b/docs/1.1.0/_static/images/chevron-right-orange.svg
similarity index 100%
rename from docs/stable/_static/images/chevron-right-orange.svg
rename to docs/1.1.0/_static/images/chevron-right-orange.svg
diff --git a/docs/stable/_static/images/chevron-right-white.svg b/docs/1.1.0/_static/images/chevron-right-white.svg
similarity index 100%
rename from docs/stable/_static/images/chevron-right-white.svg
rename to docs/1.1.0/_static/images/chevron-right-white.svg
diff --git a/docs/stable/_static/images/home-footer-background.jpg b/docs/1.1.0/_static/images/home-footer-background.jpg
similarity index 100%
rename from docs/stable/_static/images/home-footer-background.jpg
rename to docs/1.1.0/_static/images/home-footer-background.jpg
diff --git a/docs/stable/_static/images/icon-close.svg b/docs/1.1.0/_static/images/icon-close.svg
similarity index 100%
rename from docs/stable/_static/images/icon-close.svg
rename to docs/1.1.0/_static/images/icon-close.svg
diff --git a/docs/stable/_static/images/icon-menu-dots-dark.svg b/docs/1.1.0/_static/images/icon-menu-dots-dark.svg
similarity index 100%
rename from docs/stable/_static/images/icon-menu-dots-dark.svg
rename to docs/1.1.0/_static/images/icon-menu-dots-dark.svg
diff --git a/docs/stable/_static/images/logo-dark.svg b/docs/1.1.0/_static/images/logo-dark.svg
similarity index 100%
rename from docs/stable/_static/images/logo-dark.svg
rename to docs/1.1.0/_static/images/logo-dark.svg
diff --git a/docs/stable/_static/images/logo-facebook-dark.svg b/docs/1.1.0/_static/images/logo-facebook-dark.svg
similarity index 100%
rename from docs/stable/_static/images/logo-facebook-dark.svg
rename to docs/1.1.0/_static/images/logo-facebook-dark.svg
diff --git a/docs/stable/_static/images/logo-icon.svg b/docs/1.1.0/_static/images/logo-icon.svg
similarity index 100%
rename from docs/stable/_static/images/logo-icon.svg
rename to docs/1.1.0/_static/images/logo-icon.svg
diff --git a/docs/stable/_static/images/logo-twitter-dark.svg b/docs/1.1.0/_static/images/logo-twitter-dark.svg
similarity index 100%
rename from docs/stable/_static/images/logo-twitter-dark.svg
rename to docs/1.1.0/_static/images/logo-twitter-dark.svg
diff --git a/docs/stable/_static/images/logo.svg b/docs/1.1.0/_static/images/logo.svg
similarity index 100%
rename from docs/stable/_static/images/logo.svg
rename to docs/1.1.0/_static/images/logo.svg
diff --git a/docs/stable/_static/images/pytorch-colab.svg b/docs/1.1.0/_static/images/pytorch-colab.svg
similarity index 100%
rename from docs/stable/_static/images/pytorch-colab.svg
rename to docs/1.1.0/_static/images/pytorch-colab.svg
diff --git a/docs/stable/_static/images/pytorch-download.svg b/docs/1.1.0/_static/images/pytorch-download.svg
similarity index 100%
rename from docs/stable/_static/images/pytorch-download.svg
rename to docs/1.1.0/_static/images/pytorch-download.svg
diff --git a/docs/stable/_static/images/pytorch-github.svg b/docs/1.1.0/_static/images/pytorch-github.svg
similarity index 100%
rename from docs/stable/_static/images/pytorch-github.svg
rename to docs/1.1.0/_static/images/pytorch-github.svg
diff --git a/docs/stable/_static/images/pytorch-x.svg b/docs/1.1.0/_static/images/pytorch-x.svg
similarity index 100%
rename from docs/stable/_static/images/pytorch-x.svg
rename to docs/1.1.0/_static/images/pytorch-x.svg
diff --git a/docs/stable/_static/images/search-icon.svg b/docs/1.1.0/_static/images/search-icon.svg
similarity index 100%
rename from docs/stable/_static/images/search-icon.svg
rename to docs/1.1.0/_static/images/search-icon.svg
diff --git a/docs/stable/_static/images/view-page-source-icon.svg b/docs/1.1.0/_static/images/view-page-source-icon.svg
similarity index 100%
rename from docs/stable/_static/images/view-page-source-icon.svg
rename to docs/1.1.0/_static/images/view-page-source-icon.svg
diff --git a/docs/stable/_static/img/aliastracker_graph.png b/docs/1.1.0/_static/img/aliastracker_graph.png
similarity index 100%
rename from docs/stable/_static/img/aliastracker_graph.png
rename to docs/1.1.0/_static/img/aliastracker_graph.png
diff --git a/docs/stable/_static/img/dynamic_graph.gif b/docs/1.1.0/_static/img/dynamic_graph.gif
similarity index 100%
rename from docs/stable/_static/img/dynamic_graph.gif
rename to docs/1.1.0/_static/img/dynamic_graph.gif
diff --git a/docs/stable/_static/img/pytorch-logo-dark-unstable.png b/docs/1.1.0/_static/img/pytorch-logo-dark-unstable.png
similarity index 100%
rename from docs/stable/_static/img/pytorch-logo-dark-unstable.png
rename to docs/1.1.0/_static/img/pytorch-logo-dark-unstable.png
diff --git a/docs/stable/_static/img/pytorch-logo-dark.png b/docs/1.1.0/_static/img/pytorch-logo-dark.png
similarity index 100%
rename from docs/stable/_static/img/pytorch-logo-dark.png
rename to docs/1.1.0/_static/img/pytorch-logo-dark.png
diff --git a/docs/stable/_static/img/pytorch-logo-dark.svg b/docs/1.1.0/_static/img/pytorch-logo-dark.svg
similarity index 100%
rename from docs/stable/_static/img/pytorch-logo-dark.svg
rename to docs/1.1.0/_static/img/pytorch-logo-dark.svg
diff --git a/docs/stable/_static/img/pytorch-logo-flame.png b/docs/1.1.0/_static/img/pytorch-logo-flame.png
similarity index 100%
rename from docs/stable/_static/img/pytorch-logo-flame.png
rename to docs/1.1.0/_static/img/pytorch-logo-flame.png
diff --git a/docs/stable/_static/img/pytorch-logo-flame.svg b/docs/1.1.0/_static/img/pytorch-logo-flame.svg
similarity index 100%
rename from docs/stable/_static/img/pytorch-logo-flame.svg
rename to docs/1.1.0/_static/img/pytorch-logo-flame.svg
diff --git a/docs/stable/_static/img/tensor_illustration.png b/docs/1.1.0/_static/img/tensor_illustration.png
similarity index 100%
rename from docs/stable/_static/img/tensor_illustration.png
rename to docs/1.1.0/_static/img/tensor_illustration.png
diff --git a/docs/stable/_static/jquery-3.2.1.js b/docs/1.1.0/_static/jquery-3.2.1.js
similarity index 100%
rename from docs/stable/_static/jquery-3.2.1.js
rename to docs/1.1.0/_static/jquery-3.2.1.js
diff --git a/docs/stable/_static/jquery.js b/docs/1.1.0/_static/jquery.js
similarity index 100%
rename from docs/stable/_static/jquery.js
rename to docs/1.1.0/_static/jquery.js
diff --git a/docs/stable/_static/js/modernizr.min.js b/docs/1.1.0/_static/js/modernizr.min.js
similarity index 100%
rename from docs/stable/_static/js/modernizr.min.js
rename to docs/1.1.0/_static/js/modernizr.min.js
diff --git a/docs/stable/_static/js/theme.js b/docs/1.1.0/_static/js/theme.js
similarity index 100%
rename from docs/stable/_static/js/theme.js
rename to docs/1.1.0/_static/js/theme.js
diff --git a/docs/stable/_static/js/vendor/anchor.min.js b/docs/1.1.0/_static/js/vendor/anchor.min.js
similarity index 100%
rename from docs/stable/_static/js/vendor/anchor.min.js
rename to docs/1.1.0/_static/js/vendor/anchor.min.js
diff --git a/docs/stable/_static/js/vendor/bootstrap.min.js b/docs/1.1.0/_static/js/vendor/bootstrap.min.js
similarity index 100%
rename from docs/stable/_static/js/vendor/bootstrap.min.js
rename to docs/1.1.0/_static/js/vendor/bootstrap.min.js
diff --git a/docs/stable/_static/js/vendor/popper.min.js b/docs/1.1.0/_static/js/vendor/popper.min.js
similarity index 100%
rename from docs/stable/_static/js/vendor/popper.min.js
rename to docs/1.1.0/_static/js/vendor/popper.min.js
diff --git a/docs/stable/_static/katex-math.css b/docs/1.1.0/_static/katex-math.css
similarity index 100%
rename from docs/stable/_static/katex-math.css
rename to docs/1.1.0/_static/katex-math.css
diff --git a/docs/stable/_static/katex_autorenderer.js b/docs/1.1.0/_static/katex_autorenderer.js
similarity index 100%
rename from docs/stable/_static/katex_autorenderer.js
rename to docs/1.1.0/_static/katex_autorenderer.js
diff --git a/docs/stable/_static/language_data.js b/docs/1.1.0/_static/language_data.js
similarity index 100%
rename from docs/stable/_static/language_data.js
rename to docs/1.1.0/_static/language_data.js
diff --git a/docs/stable/_static/minus.png b/docs/1.1.0/_static/minus.png
similarity index 100%
rename from docs/stable/_static/minus.png
rename to docs/1.1.0/_static/minus.png
diff --git a/docs/stable/_static/plus.png b/docs/1.1.0/_static/plus.png
similarity index 100%
rename from docs/stable/_static/plus.png
rename to docs/1.1.0/_static/plus.png
diff --git a/docs/stable/_static/pygments.css b/docs/1.1.0/_static/pygments.css
similarity index 100%
rename from docs/stable/_static/pygments.css
rename to docs/1.1.0/_static/pygments.css
diff --git a/docs/stable/_static/pytorch-logo-dark-unstable.png b/docs/1.1.0/_static/pytorch-logo-dark-unstable.png
similarity index 100%
rename from docs/stable/_static/pytorch-logo-dark-unstable.png
rename to docs/1.1.0/_static/pytorch-logo-dark-unstable.png
diff --git a/docs/stable/_static/pytorch-logo-dark.svg b/docs/1.1.0/_static/pytorch-logo-dark.svg
similarity index 100%
rename from docs/stable/_static/pytorch-logo-dark.svg
rename to docs/1.1.0/_static/pytorch-logo-dark.svg
diff --git a/docs/stable/_static/searchtools.js b/docs/1.1.0/_static/searchtools.js
similarity index 100%
rename from docs/stable/_static/searchtools.js
rename to docs/1.1.0/_static/searchtools.js
diff --git a/docs/stable/_static/underscore-1.3.1.js b/docs/1.1.0/_static/underscore-1.3.1.js
similarity index 100%
rename from docs/stable/_static/underscore-1.3.1.js
rename to docs/1.1.0/_static/underscore-1.3.1.js
diff --git a/docs/stable/_static/underscore.js b/docs/1.1.0/_static/underscore.js
similarity index 100%
rename from docs/stable/_static/underscore.js
rename to docs/1.1.0/_static/underscore.js
diff --git a/docs/stable/autograd.html b/docs/1.1.0/autograd.html
similarity index 100%
rename from docs/stable/autograd.html
rename to docs/1.1.0/autograd.html
diff --git a/docs/stable/bottleneck.html b/docs/1.1.0/bottleneck.html
similarity index 100%
rename from docs/stable/bottleneck.html
rename to docs/1.1.0/bottleneck.html
diff --git a/docs/stable/checkpoint.html b/docs/1.1.0/checkpoint.html
similarity index 100%
rename from docs/stable/checkpoint.html
rename to docs/1.1.0/checkpoint.html
diff --git a/docs/stable/community/contribution_guide.html b/docs/1.1.0/community/contribution_guide.html
similarity index 100%
rename from docs/stable/community/contribution_guide.html
rename to docs/1.1.0/community/contribution_guide.html
diff --git a/docs/stable/community/governance.html b/docs/1.1.0/community/governance.html
similarity index 100%
rename from docs/stable/community/governance.html
rename to docs/1.1.0/community/governance.html
diff --git a/docs/stable/community/persons_of_interest.html b/docs/1.1.0/community/persons_of_interest.html
similarity index 100%
rename from docs/stable/community/persons_of_interest.html
rename to docs/1.1.0/community/persons_of_interest.html
diff --git a/docs/stable/cpp_extension.html b/docs/1.1.0/cpp_extension.html
similarity index 100%
rename from docs/stable/cpp_extension.html
rename to docs/1.1.0/cpp_extension.html
diff --git a/docs/stable/cuda.html b/docs/1.1.0/cuda.html
similarity index 100%
rename from docs/stable/cuda.html
rename to docs/1.1.0/cuda.html
diff --git a/docs/stable/cuda_deterministic.html b/docs/1.1.0/cuda_deterministic.html
similarity index 100%
rename from docs/stable/cuda_deterministic.html
rename to docs/1.1.0/cuda_deterministic.html
diff --git a/docs/stable/cuda_deterministic_backward.html b/docs/1.1.0/cuda_deterministic_backward.html
similarity index 100%
rename from docs/stable/cuda_deterministic_backward.html
rename to docs/1.1.0/cuda_deterministic_backward.html
diff --git a/docs/stable/cudnn_deterministic.html b/docs/1.1.0/cudnn_deterministic.html
similarity index 100%
rename from docs/stable/cudnn_deterministic.html
rename to docs/1.1.0/cudnn_deterministic.html
diff --git a/docs/stable/cudnn_persistent_rnn.html b/docs/1.1.0/cudnn_persistent_rnn.html
similarity index 100%
rename from docs/stable/cudnn_persistent_rnn.html
rename to docs/1.1.0/cudnn_persistent_rnn.html
diff --git a/docs/stable/data.html b/docs/1.1.0/data.html
similarity index 100%
rename from docs/stable/data.html
rename to docs/1.1.0/data.html
diff --git a/docs/stable/distributed.html b/docs/1.1.0/distributed.html
similarity index 100%
rename from docs/stable/distributed.html
rename to docs/1.1.0/distributed.html
diff --git a/docs/stable/distributed_deprecated.html b/docs/1.1.0/distributed_deprecated.html
similarity index 100%
rename from docs/stable/distributed_deprecated.html
rename to docs/1.1.0/distributed_deprecated.html
diff --git a/docs/stable/distributions.html b/docs/1.1.0/distributions.html
similarity index 100%
rename from docs/stable/distributions.html
rename to docs/1.1.0/distributions.html
diff --git a/docs/stable/dlpack.html b/docs/1.1.0/dlpack.html
similarity index 100%
rename from docs/stable/dlpack.html
rename to docs/1.1.0/dlpack.html
diff --git a/docs/stable/genindex.html b/docs/1.1.0/genindex.html
similarity index 100%
rename from docs/stable/genindex.html
rename to docs/1.1.0/genindex.html
diff --git a/docs/stable/hub.html b/docs/1.1.0/hub.html
similarity index 100%
rename from docs/stable/hub.html
rename to docs/1.1.0/hub.html
diff --git a/docs/stable/index.html b/docs/1.1.0/index.html
similarity index 100%
rename from docs/stable/index.html
rename to docs/1.1.0/index.html
diff --git a/docs/stable/jit.html b/docs/1.1.0/jit.html
similarity index 100%
rename from docs/stable/jit.html
rename to docs/1.1.0/jit.html
diff --git a/docs/stable/model_zoo.html b/docs/1.1.0/model_zoo.html
similarity index 100%
rename from docs/stable/model_zoo.html
rename to docs/1.1.0/model_zoo.html
diff --git a/docs/stable/multiprocessing.html b/docs/1.1.0/multiprocessing.html
similarity index 100%
rename from docs/stable/multiprocessing.html
rename to docs/1.1.0/multiprocessing.html
diff --git a/docs/stable/nn.html b/docs/1.1.0/nn.html
similarity index 100%
rename from docs/stable/nn.html
rename to docs/1.1.0/nn.html
diff --git a/docs/stable/notes/autograd.html b/docs/1.1.0/notes/autograd.html
similarity index 100%
rename from docs/stable/notes/autograd.html
rename to docs/1.1.0/notes/autograd.html
diff --git a/docs/stable/notes/broadcasting.html b/docs/1.1.0/notes/broadcasting.html
similarity index 100%
rename from docs/stable/notes/broadcasting.html
rename to docs/1.1.0/notes/broadcasting.html
diff --git a/docs/stable/notes/cuda.html b/docs/1.1.0/notes/cuda.html
similarity index 100%
rename from docs/stable/notes/cuda.html
rename to docs/1.1.0/notes/cuda.html
diff --git a/docs/stable/notes/extending.html b/docs/1.1.0/notes/extending.html
similarity index 100%
rename from docs/stable/notes/extending.html
rename to docs/1.1.0/notes/extending.html
diff --git a/docs/stable/notes/faq.html b/docs/1.1.0/notes/faq.html
similarity index 100%
rename from docs/stable/notes/faq.html
rename to docs/1.1.0/notes/faq.html
diff --git a/docs/stable/notes/multiprocessing.html b/docs/1.1.0/notes/multiprocessing.html
similarity index 100%
rename from docs/stable/notes/multiprocessing.html
rename to docs/1.1.0/notes/multiprocessing.html
diff --git a/docs/stable/notes/randomness.html b/docs/1.1.0/notes/randomness.html
similarity index 100%
rename from docs/stable/notes/randomness.html
rename to docs/1.1.0/notes/randomness.html
diff --git a/docs/stable/notes/serialization.html b/docs/1.1.0/notes/serialization.html
similarity index 100%
rename from docs/stable/notes/serialization.html
rename to docs/1.1.0/notes/serialization.html
diff --git a/docs/stable/notes/windows.html b/docs/1.1.0/notes/windows.html
similarity index 100%
rename from docs/stable/notes/windows.html
rename to docs/1.1.0/notes/windows.html
diff --git a/docs/stable/objects.inv b/docs/1.1.0/objects.inv
similarity index 100%
rename from docs/stable/objects.inv
rename to docs/1.1.0/objects.inv
diff --git a/docs/stable/onnx.html b/docs/1.1.0/onnx.html
similarity index 100%
rename from docs/stable/onnx.html
rename to docs/1.1.0/onnx.html
diff --git a/docs/stable/optim.html b/docs/1.1.0/optim.html
similarity index 100%
rename from docs/stable/optim.html
rename to docs/1.1.0/optim.html
diff --git a/docs/stable/py-modindex.html b/docs/1.1.0/py-modindex.html
similarity index 100%
rename from docs/stable/py-modindex.html
rename to docs/1.1.0/py-modindex.html
diff --git a/docs/stable/search.html b/docs/1.1.0/search.html
similarity index 100%
rename from docs/stable/search.html
rename to docs/1.1.0/search.html
diff --git a/docs/stable/searchindex.js b/docs/1.1.0/searchindex.js
similarity index 100%
rename from docs/stable/searchindex.js
rename to docs/1.1.0/searchindex.js
diff --git a/docs/stable/sparse.html b/docs/1.1.0/sparse.html
similarity index 100%
rename from docs/stable/sparse.html
rename to docs/1.1.0/sparse.html
diff --git a/docs/stable/storage.html b/docs/1.1.0/storage.html
similarity index 100%
rename from docs/stable/storage.html
rename to docs/1.1.0/storage.html
diff --git a/docs/stable/tensor_attributes.html b/docs/1.1.0/tensor_attributes.html
similarity index 100%
rename from docs/stable/tensor_attributes.html
rename to docs/1.1.0/tensor_attributes.html
diff --git a/docs/stable/tensorboard.html b/docs/1.1.0/tensorboard.html
similarity index 100%
rename from docs/stable/tensorboard.html
rename to docs/1.1.0/tensorboard.html
diff --git a/docs/stable/tensors.html b/docs/1.1.0/tensors.html
similarity index 100%
rename from docs/stable/tensors.html
rename to docs/1.1.0/tensors.html
diff --git a/docs/stable/torch.html b/docs/1.1.0/torch.html
similarity index 100%
rename from docs/stable/torch.html
rename to docs/1.1.0/torch.html
diff --git a/docs/stable/torchvision/datasets.html b/docs/1.1.0/torchvision/datasets.html
similarity index 100%
rename from docs/stable/torchvision/datasets.html
rename to docs/1.1.0/torchvision/datasets.html
diff --git a/docs/stable/torchvision/index.html b/docs/1.1.0/torchvision/index.html
similarity index 100%
rename from docs/stable/torchvision/index.html
rename to docs/1.1.0/torchvision/index.html
diff --git a/docs/stable/torchvision/models.html b/docs/1.1.0/torchvision/models.html
similarity index 100%
rename from docs/stable/torchvision/models.html
rename to docs/1.1.0/torchvision/models.html
diff --git a/docs/stable/torchvision/transforms.html b/docs/1.1.0/torchvision/transforms.html
similarity index 100%
rename from docs/stable/torchvision/transforms.html
rename to docs/1.1.0/torchvision/transforms.html
diff --git a/docs/stable/torchvision/utils.html b/docs/1.1.0/torchvision/utils.html
similarity index 100%
rename from docs/stable/torchvision/utils.html
rename to docs/1.1.0/torchvision/utils.html
diff --git a/docs/stable/type_info.html b/docs/1.1.0/type_info.html
similarity index 100%
rename from docs/stable/type_info.html
rename to docs/1.1.0/type_info.html
diff --git a/docs/1.1.0/autograd.md b/docs/1.2.0/autograd.md
similarity index 100%
rename from docs/1.1.0/autograd.md
rename to docs/1.2.0/autograd.md
diff --git a/docs/1.1.0/bottleneck.md b/docs/1.2.0/bottleneck.md
similarity index 100%
rename from docs/1.1.0/bottleneck.md
rename to docs/1.2.0/bottleneck.md
diff --git a/docs/1.1.0/checkpoint.md b/docs/1.2.0/checkpoint.md
similarity index 100%
rename from docs/1.1.0/checkpoint.md
rename to docs/1.2.0/checkpoint.md
diff --git a/docs/1.1.0/cpp_extenstion.md b/docs/1.2.0/cpp_extenstion.md
similarity index 100%
rename from docs/1.1.0/cpp_extenstion.md
rename to docs/1.2.0/cpp_extenstion.md
diff --git a/docs/1.1.0/cuda.md b/docs/1.2.0/cuda.md
similarity index 100%
rename from docs/1.1.0/cuda.md
rename to docs/1.2.0/cuda.md
diff --git a/docs/1.1.0/data.md b/docs/1.2.0/data.md
similarity index 100%
rename from docs/1.1.0/data.md
rename to docs/1.2.0/data.md
diff --git a/docs/1.1.0/distributed.md b/docs/1.2.0/distributed.md
similarity index 100%
rename from docs/1.1.0/distributed.md
rename to docs/1.2.0/distributed.md
diff --git a/docs/1.1.0/distributions.md b/docs/1.2.0/distributions.md
similarity index 100%
rename from docs/1.1.0/distributions.md
rename to docs/1.2.0/distributions.md
diff --git a/docs/1.1.0/dlpack.md b/docs/1.2.0/dlpack.md
similarity index 100%
rename from docs/1.1.0/dlpack.md
rename to docs/1.2.0/dlpack.md
diff --git a/docs/1.1.0/ffi.md b/docs/1.2.0/ffi.md
similarity index 100%
rename from docs/1.1.0/ffi.md
rename to docs/1.2.0/ffi.md
diff --git a/docs/1.1.0/genindex.md b/docs/1.2.0/genindex.md
similarity index 100%
rename from docs/1.1.0/genindex.md
rename to docs/1.2.0/genindex.md
diff --git a/docs/1.1.0/hub.md b/docs/1.2.0/hub.md
similarity index 100%
rename from docs/1.1.0/hub.md
rename to docs/1.2.0/hub.md
diff --git a/docs/1.1.0/index.md b/docs/1.2.0/index.md
similarity index 100%
rename from docs/1.1.0/index.md
rename to
docs/1.2.0/index.md diff --git a/docs/1.1.0/jit.md b/docs/1.2.0/jit.md similarity index 100% rename from docs/1.1.0/jit.md rename to docs/1.2.0/jit.md diff --git a/docs/1.1.0/legacy.md b/docs/1.2.0/legacy.md similarity index 100% rename from docs/1.1.0/legacy.md rename to docs/1.2.0/legacy.md diff --git a/docs/1.1.0/model_zoo.md b/docs/1.2.0/model_zoo.md similarity index 100% rename from docs/1.1.0/model_zoo.md rename to docs/1.2.0/model_zoo.md diff --git a/docs/1.1.0/multiprocessing.md b/docs/1.2.0/multiprocessing.md similarity index 100% rename from docs/1.1.0/multiprocessing.md rename to docs/1.2.0/multiprocessing.md diff --git a/docs/1.1.0/nn.md b/docs/1.2.0/nn.md similarity index 100% rename from docs/1.1.0/nn.md rename to docs/1.2.0/nn.md diff --git a/docs/1.1.0/onnx.md b/docs/1.2.0/onnx.md similarity index 100% rename from docs/1.1.0/onnx.md rename to docs/1.2.0/onnx.md diff --git a/docs/1.1.0/optim.md b/docs/1.2.0/optim.md similarity index 100% rename from docs/1.1.0/optim.md rename to docs/1.2.0/optim.md diff --git a/docs/1.1.0/py-modindex.md b/docs/1.2.0/py-modindex.md similarity index 100% rename from docs/1.1.0/py-modindex.md rename to docs/1.2.0/py-modindex.md diff --git a/docs/1.1.0/search.md b/docs/1.2.0/search.md similarity index 100% rename from docs/1.1.0/search.md rename to docs/1.2.0/search.md diff --git a/docs/1.1.0/sparse.md b/docs/1.2.0/sparse.md similarity index 100% rename from docs/1.1.0/sparse.md rename to docs/1.2.0/sparse.md diff --git a/docs/1.1.0/storage.md b/docs/1.2.0/storage.md similarity index 100% rename from docs/1.1.0/storage.md rename to docs/1.2.0/storage.md diff --git a/docs/1.1.0/tensor_attributes.md b/docs/1.2.0/tensor_attributes.md similarity index 100% rename from docs/1.1.0/tensor_attributes.md rename to docs/1.2.0/tensor_attributes.md diff --git a/docs/1.1.0/tensors.md b/docs/1.2.0/tensors.md similarity index 100% rename from docs/1.1.0/tensors.md rename to docs/1.2.0/tensors.md diff --git a/docs/1.1.0/torch.md b/docs/1.2.0/torch.md similarity index 100% rename from docs/1.1.0/torch.md rename to docs/1.2.0/torch.md diff --git a/docs/1.1.0/type_info.md b/docs/1.2.0/type_info.md similarity index 100% rename from docs/1.1.0/type_info.md rename to docs/1.2.0/type_info.md From 95f3b68fa9a3ac27bb8e205f2ff56daffc79b4d5 Mon Sep 17 00:00:00 2001 From: Richard Zou Date: Tue, 30 Jul 2019 17:11:33 -0400 Subject: [PATCH 02/12] Update the version selector with v1.2.0 --- docs/versions.html | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/versions.html b/docs/versions.html index f3034d96e67a..e54cc351917b 100644 --- a/docs/versions.html +++ b/docs/versions.html @@ -21,7 +21,10 @@
 PyTorch Documentation

   master (unstable)
-  v1.1.0 (latest stable release)
+  v1.2.0 (latest stable release)
+
+  v1.1.0
   v1.0.1

From 2c2b09bf5df3e42b1146b9d20f512ac5e9c94751 Mon Sep 17 00:00:00 2001
From: pytorchbot
Date: Thu, 1 Aug 2019 16:49:01 +0000
Subject: [PATCH 03/12] auto-generating sphinx docs

---
 docs/stable/.buildinfo      |     4 +
 docs/stable/__config__.html |   543 +
 [diffstat continues for the regenerated docs/stable tree: _images/ activation plots and TensorBoard screenshots (binary), _modules/ source-view pages, _sources/ .rst.txt files, _static/ theme assets (CSS, JS, fonts, logos, images), and the top-level documentation pages]
 366 files changed, 227217 insertions(+)
 [matching `create mode 100644` entries for the same 366 new files]

diff --git a/docs/stable/.buildinfo b/docs/stable/.buildinfo
new file mode 100644
index 000000000000..2259a36b125f
--- /dev/null
+++ b/docs/stable/.buildinfo
@@ -0,0 +1,4 @@
+# Sphinx build info version 1
+# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
+config: 151805b264b841595033b97d1a80e49c
+tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/docs/stable/__config__.html b/docs/stable/__config__.html
new file mode 100644
index 000000000000..132b82bdc08b
--- /dev/null
+++ b/docs/stable/__config__.html
@@ -0,0 +1,543 @@
+[543 added lines of generated HTML; head, navigation sidebar, and theme markup stripped in extraction. Page title: "torch.__config__ — PyTorch master documentation". The recoverable page body follows.]
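The .buildinfo hunk above spells out its own contract: the file stores a hash of the build configuration, and Sphinx does a full rebuild when the file is absent (or, in practice, when the stored hash no longer matches). A minimal sketch of that check in Python, assuming a hypothetical helper name and the `key: value` layout created above; the real logic lives inside Sphinx itself:

    import hashlib

    def needs_full_rebuild(buildinfo_path, current_config):
        # Hypothetical illustration of the .buildinfo contract. Hash the
        # current configuration the way the stored `config:` value would
        # have been produced (md5 of a stable string form -- an assumption).
        config_hash = hashlib.md5(
            repr(sorted(current_config.items())).encode()
        ).hexdigest()
        try:
            with open(buildinfo_path) as f:
                # Parse the non-comment `key: value` lines written above.
                stored = dict(line.strip().split(': ', 1)
                              for line in f
                              if ': ' in line and not line.startswith('#'))
        except FileNotFoundError:
            # "When it is not found, a full rebuild will be done."
            return True
        return stored.get('config') != config_hash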
+torch.__config__
+
+torch.__config__.show() [source]
+    Return a human-readable string with descriptions of the
+    configuration of PyTorch.
+
+torch.__config__.parallel_info() [source]
+    Returns a detailed string with parallelization settings.
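Both entries above are ordinary module-level functions, so the page can be exercised directly from a Python session once PyTorch is installed; a minimal usage sketch (the exact text returned varies with how the wheel was built):

    import torch

    # Build configuration: compiler, CUDA/cuDNN versions, BLAS backend, flags.
    print(torch.__config__.show())

    # Threading and parallel-backend settings in effect for this process.
    print(torch.__config__.parallel_info())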
\ No newline at end of file
diff --git a/docs/stable/_images/CELU.png b/docs/stable/_images/CELU.png
new file mode 100644
index 0000000000000000000000000000000000000000..a9cd9d58074fbd83736d31e2ace7895a869a6d1a
GIT binary patch
literal 26184
[base85-encoded payload for the CELU activation-function plot omitted]
zp9k=u-V+j{1oBo#T1<2_jgXL5AYmDmq9PbmsQ*p;2jzwpe)j9G$KkhYCtr)#tZ(!v zpW1ODA}v3hg@a!J-U^8IaC9s!S;?`b3YWOzu>!FvdOu36r%%(5_FqZc6p;C4<@V57qONE@1(mb*C)QcXx5 zHzvYBhve4iz~f6l%&ZIE1OrPK5IA45?C0YY_gBexZ+f0UcJpUin+e+`(1dSkP0U&V z@DMXDlvUaJ- z1qTaP6sg^!JYm0K4de~@#_TDA%u5MDo+K}}n(Wl97oY2KT!_)_TsD}CQK}#OG$dK2BYmX{~!@#=iyTTs5~-; zLmZ*hdb&v5k>0(oU- zW0bI?AEo5VP!4UxJwT-7FnAO-xw5t%Hwj57y~2s7&r{99{DsgO7NOWJ}Lz>ZQM`Xi(W$ONpl{JE8?(&>R@>u=8gj{eZxoEh{ltxg&yF zmR=ZQou7_$scbtCaw13zm7og6-6w_FKUm0Q5*N=Up26K*E13-6TN#{PN>6SAE#o*4 zXj-`oAGdk?_U#mC59u%S7Zm&qY8TY|92YBs^ zYGGPy&?M-Pr2(iEK4mb7Xg&WU+8InoSRZccUZ)fwZi``S4#J}wv2YQe^8}dUX?17a zk1)`3)Lo)}5A>M@PvAXt&bZ;$H9$W-y>wyOLKkVcBNR1~SmkpLNh)<@BZKz~EOqxa3b`lHPm3(j~7*SF2qX}ql)KFAp z47P3Rlci+29ghux22;>-`#j1@nLY|#6zxUaLc-5Kou9{OJKaOydl^}t`41H6mZVyN`LWJ!aDKT3obBgXdNXZl zi~*}x$=7*UxCDBH(J-=vi5UO?>=7M3wB&gI`t4gkU_AhlkQY{@!VG%R#UDRL2eTjv zeK|^ZZ``=?Tty`a5I>lG)iF2@ejxs!*N-Q?C{1>2YYWWISibpX1F5JG+uGVv`(3te z@r~{&JU3}JTXJ~RnpcB0Kje&QM1Pd}RYw?iI?2N{V(%N6y@HuJKY@4WJ?LzbKkg^I zw1nP=0r{m!_Wk?!n~tEUKgR*u_9B?s1M?s=onl;7IvA5@Z z^7LsU40o{%)w(&HVho9K7|Gf8(*FUJFd?B`&>I^SZbYz9g;ckQJ0v0j)p3lE2l65l z{tE9XTkc!>DP?v7`phCPf15RJa9}|9@~swA)<7Zw?izaoWi6P61XicZ9f=26gbs4ryU9*#gFy7Pfi8$jj!=ju}^# zu9&5?qGqy|OIn{HLtB!-__TFxb91wyI=+O>5uY(%RVv`3T;*||S>|I!vgvW2X%+{2 zdk*iz_z#w(2m2y_s0nO7B;_*tiNixSUU5IE+!A`Seej^IN%OdWVx`rbi~CnA-KB@g zpsdyh+U3~Qk}F@)>_b^u!S#VS++;3pVEVuwj5Ln3UK6yPdjy*85kYQ3g@8N)_Mcv7 z0&nTX`HAd_JKz+yRzQ+2DPEW!zA#M5`iW2&5q#3T-zbx+_c1HUOA3e9sFo zOXKtEY-yVBJ}_diR_*p0MVy?R@E>#*1;U3%z(`euz&08-l~5>9EZ|quaV?+}>9R9s zUZBL+6M()-_INsi2P&tE^b=2o&}g9SMpWP;1>UJoiC^8@X);#}z7jw$VI5W~&g|@L zfyF47{Zen^QS7j}B*?cFR<{5cGZ{3)x)I5)Ffg*UL1nwydoY}ln=Smwqc$=A~ zZ+!CTQ}MZ0dlpDvPY?1QoTW)|ln2dbz(3xcX)-6z`9jqw{LPq=GOn8n-t#lYM@#EL ztcSnp1sVnAg*?Gfts@*mr7-5JFbm|>0f8LWI$X+h>gsni3ox4AX>Cj2{%}+wulg@} zO`pAiaVkGQZL$skr{{`_D22nJU`q7uyLZd|U*%^(zuD{^LAj8DQS7M>mNLfUoXr)= zWw=b)v_bltOavm58}aOmp^d)H%ehiZuT!CPw+%PfH^9VsuxPK zqd>lw{b2+~3Mz|viB_U;>WuP)bLPx3gk^uEb#`XM_oDyiD;sJdh1IvP*>q#1HUe z(Wc1X6Gwm#G;tqkqWOUA_C=)t5eR}mGZn=RZk07MY?_!N2+6QCB-KZEE&;9qZ^)4* zAq-1OM6x%h!HYZrujjFuX`rvU8|=%fSw7YKl&!I_ODJ1Ddn^Pjk6O;y$~4ea?%M(( z*nlXp78r3;ef@|q3Vx_oN)ZmkWBCL~^*fgk<@v{oJ6eVp!+!6w0AiFeFm-h!Hb$ z*M^W(l#~L1{DceUzZ}A^VPj&}e>3UZ%=0?K{P^)BUrDzhCplL{sKD7?M^&vdO~ozo-zL zI)cLw=!Ga~A}}yEP6apHL0Pf=<8A@?9(+ulujqA1DElhQN8PE57&qv}hI{e2y9~TMrOjazvT>dJ z=H%(^EielYW&kr^s?R3@pHW&mbfV+lEhDr^Aee_)XV3a0ZjUJk6&9A}n>W9kpIP5+1mop-!Jg0z5R(gTPa=R-$c$qH-Hr2|A(fwe^ zx8*rzis&gGf$2koAmJ3zGk8b}rhU+QO~i94#~doI6t7E0y_e?#hQ9IvGCiH8x~M%p z+Uc>DF$I(MjReX%jll$9nBr&QtGRhU+-~IOuh`&UM^;v%S6qVKdTY^5aQHDMfD)VJhS} z58NqZ(fni_QN(t%tpkamnl7u01Br|plAn{F*x(zFyxI6rVyCU%QN@9#p+^N&k_Y#^ zVpUE-69U#aX6-f5uLS0?{U$nIM5|g|U<`CobAP3j{;*Zn(i)CPa)HausC@|0^qv{ zP&NPdso^K09^ZTYi)@L`p&BAWf3noT+{JU!vru{*CLs2l!4@JORAN7KO{52ZhliMi z-3gwVIqHZxJK4c+t?Y=g+5Y+w%1RMATf6vSb-{fHL!rpiCCiR-{=j7*YC(~7;q1&N zCNH}@_Ml`*h#o~K3xg^OQ9BATDyvReaZ3L$FSjq}TgT1#hv!`5pO`ik2k5&F>0w95c;oVb5ha10a_g|fyz^A+>;D?I{ z25~xfju)MNA&cK*c0daK&xf|)^jL>bBq{g&dV?#G2`-ofPkldB%Ff6TdGrVkxDgIi z$f5u|vD9~2tt0GS+42-|Ke?PDHgEm+r|jbXTig0oc;K4d|F{G&wg|Y(-zxVnv zobU4Bd}ItB3^(AT_|H3432;LTtoM2Q7N3V4`@e>nEq3@R?aQ{4^ zrjlSx^uQM>RvUto!sG{S78_(38a%M19rs)G`_}5h$uN_uWKUP&{=R}kEDQ5*1wBoYWMmfR zo~E(^slR^;|3X^@x`v#d{m)B&eG?Ia{J;}=AjtbQ`aEKO zYO2_vmRk-O^v+62p_Kt+)POwG&I;XE zEi?-InWvq%Hd+#K>~Xq_K9@eb=P3rio2v_tPaZj3IvTxuq=i$kc#CnICU3ygO~(7{ zXiEdlNtu&W5j2^S)dRBi;NHCtD$MWp z4HV_&Q>&|mib_h-Db-a}GDz+ly_l@_7%9+~T6P5ur@-d!A0N+IUtjNc$XieVzy1CD 
zLyL(@UWYa31qD^r%%r5GZ(LE`3D14PN&llc?o%D~5C79j}lN3zs1J44$cl0^N+x%+S_6-aV(g~&I?ze*&KMD&ATZN2Dg)0A!^+11r1GY{;LG!FlqaM5VDn!X+EduO;Q#2k%_!BlB zaB`Awu>;PT>9w)E+*|3g!|AlHA$U0TQtIpwXbwz_jKnr)-3;5)w2|$oUn2I(_wapP;)3rRDldDd>h;$4GVv9 zxkdJ*Eo?oW4VTNlADKH&;NVT-#j=_$;q`y~!4Zm(Fg8vHPf1D#0^6cf%}L_XEB9NK zLHLmOSpAOEqYKJf5SxL)!IcEN&K@8V6#?l4__t5N5!!Zr(cIynxwpU1^l9aY?vE1` z#&v$<2g(${MBe~ewHPavi(-%kk6}?=PS%K)rXQG?=xj~94nJ#uAG#VpTZEzRZt6jY z`{&>cChR~orGcwXphcc#pz@bMt1ED&bp2qD#jYuVl1 zQ&5Fki20EHTYL|VWvqBYwn0LykM>p$MZ z8siYzIvDLTD%||uKGWs7<>lo(n$ZZq69GIUY!MN9BSeB593SuaueZpDkRb?-1PJaB zL{pD=kD9z$&cx(SdN#+0f5HGFS9fRXX6+qX2(rcLnp}mhU8~V+zD%E2_f2MT>W`u$ zviY>w;`cW$QxQm=e`HBr1Y%r}ZNTXb7BBr=|L@36<8Pw>892K4(pgniiG7MB-B*eF zSA^kjlDm>sQWEn&euM@V2|O(ADttO3;b=galedi27jjv;$j5=T_9? zqT*uP|FIBAbzuDt4($HNkN&+zL62^wfam2rQ#KU&Cl7-#PtqDRF);6fR(*28W8)47IW~PZqkh^Ho(; z7@KVbC_*Ox2-iY7m#6FbJ_f%x!Zc5KN>2t%TftNOU)Kj;HtX^q|GAdGwjO+Qyzgd% zP_JDBDUhz&20x^+k`fz&Y9A9F%hz&GIH!(`j_v{FF*`H!@q-5sQa!b65@|v92WV!; zmALwk4~y3xIb1s$#r!yd0Dq#|9+<>71&3A(XWp+YHydPEe|N!TynpT7823EXsc0|; zZ3Z5Ja@?1E{;BU7zlFlaeUQa{e0=&xtE#Hr2%U#7bZ63oYkMXBR+_#~sP+G~LWVhy z*zKSR66uT9H@CBLaulVcnlmP>2C@{@%53L@R)9NI+!Ri6_c0R-3wRWSzvKECypHti zgJE1wlipiZU<5wXKlK*av>D&NDOgxoEN^VcYUX8S{RK)=Tzq_Ab!?h!a_$RGzL~vX zFlF6eTkEY?pfVDrC?gYgd<@jii6mEZUEPmH?Xe#LzO>2p@Vz|Ox;%LBT9yna>7M;F@QggUA(Z6@-fiz#`j!W>BMxIh zRjvY4_s4+rWdH?16-=QnFD`cXXUaz$lh#V}djohe0$h{j2gq9M5ZC|H+Lgyco%iuy zx6)>-r&`z8N1{vGqR7=MVariOi=ks%M@feXgJG*pmMAJIktkP2L^5XRSgdWAtYb=t zBxKxW#JHaKS9;db^F04N|9LSpe#iIsc^{w8_dRNAX_FkAsadWBB76E|2bt8S|h+EaJF zkokg{*4*9{uu-q4r$@5zwYo_`$dQ|TQ(+-S%TVKnb42r7D_7*0U{ zAE!4NWE2TC_BJtZ3=@MPh>MUZi9DIr@htLzf|;gYtZ<;WE#76iJA={q%QE(3q^4cR z)Rb|QmZycBzxhUrdP9rKn{;QAx^+Y35tFKC{&sn5U8*v+stoiMpcV;*PKeSwEW;{! zT{9Z1ZcUT=5=5@0r6qFz?|z?nBf*sJ&5PYBe`HAgqSqaE5G8V3^Ke-vFlK<6cP?9g z3vOm>9$e7J8-Ib#?ga2_*iR1x8>R<&Gg>EiH(C|Rh{V^x7_3tdj^EQEJ>s(Vl^2+nRNL*EHbhWR zYWf0y4;i!kaobF+o{s(H5Q3ShP^3uU*2ftg3qQyUsFG0x=xa zU&hnD;e) zrapWP%WI%ZZ?W&>=TDv_fT-%r&;Bz?+Xl3U11t6ja~d$>i&?Xlo@nZc9m`MUCb(AT z(nB+L=g;UzCTGRDCFX8zybnpaxps(etLW~iU7cHvqCa)&)NQUGy1Kp-b!%SQ5Pu1@ zO<~T~RS{cy6+$-3AC_sjm@1*Gyq6vxhxLGmDc;BZe0aDLp8=+8&YyLAVBT+(V>j;$n`2d;U>nr|->@ux*I-B&A& ze6Yey{NCP27o3EG3Ip#gHa0ebg6czsvK)~hd65yCPN$G-auc}@+xT>Mdcg;Jrt^7A zwynl%5t7PMkkRs8D7u#0w{DF=_arGkUP*Wn?M35Dszzm_KEPk2P>80}nub(CvXwbP>kDI;HlM)lR+S^BirI?2(FJE4c z1hWj+YMquRX$u)Vs6-?{3R+t&-QC><9IGREbtx448rPSRoTFMtl5XB4g422!291mY zfQ#DM?90O{7wF8dOtCS4Km9>ux@Yt(*SY6V{)hEmfe#e0NoK&N*r;MB;Ngs%^}MME z(O;}W;^=HSY@ka-+{Sja^CJd2LVVwbZeC&3|2>_4g+$f$cNgjwcjrbNr5-(cl!+aU z<@?0A|1ZTC!}?+;R2Cv`uOiPvMtBIVx~=FHp`#pXO%>E0Vz{{_T)XzbJMY-BiAbgk zy32Yyx98wl!}@S5S`(uwKmB2<5=@GQ1_znIT6Ia?;xs$miX-cyzWI^Eid&PtE`A|h zw=yk6a<$@~%eGNuji^e=mJVs-rXjfpma!_6VC7-0BT_F5T#w8MTi-&_0xuun7@2Ad z&(T=jyFh{*|GcCoYO)_pLWZjW9@|*aSDzqB_Ll6uJ!#nFilZR*i3s7B7Fg%-T~0GQ z8rDK0q^6qI7Mu>NZTFflVK9pHF3Rw_3gjM=j@Uv9vFMC^)l1*(Imv&x?!bQnQO5;- z6h-aI|5Un$Fa`sljCo@u%6|}nr-XfmV+toVb>pgt@?crb&xtntGf1-GIyv;uR;;N1 zpMZ9dZbhflBiUmpfH)+pp<)=;`uL9+C(?pzK|jxp(^q39+Hw1|k!qR9L?CqcqG!XN z)kg^Zq-PbOgx;iYsLt=~>SAimL1}q=Ttdgp)irgfhz6&Q7B2o#Sr56=MHAa6cM63@ zq0kfBwCV8CEPZ@@*3|~BM%h&&U`_}DhogGnxq3-RCMseQmum~7Xl6@*qWB0g0h^D* z216z^TVKA$TTMpOPYLqN@!gvuJBfwH;MWcIJ6QUEuM_B%6O4woGY40A5i}0^NM_!o z>`2@FGynMdqlg$vL|N|X>HEQVGzE~8nTU6YNwOx-9H~dg04+&+#1M=LHiRr)vZQpo z6}?V)#(VG1o$FC<;mf7%_@k-DBk8}`itm4gkGPrO5-1$z;ma`oR3kj2l?4Zl?osoz z)Y=1)#p{}u(-{@ptJ3!d@*lpt9^Lmw2_KFW=`cvnp8-!k!esl32#2UdxR!rLYG6ae zhddt5@@*P5GXZ(L)(FcfzosA9iR{M43M=~xtK4_Ks zX|9sjmwsUdJqbyNbm=ww&9rFz2AvNYKHW9`__NgjLO~mUnnp82d^b7f!M+Av4kU#d zM!&St)U4uq-frc6umkh=&{n-G7U`kY%6b3`_EMW*EYVr&#Ipz-LV9Mim@mg 
zrlg%jk$Y=)1I-=XKs$-rb2&)XeU4D-8!t^1=CmRR`T6!_OHcKsgV>KBvaHAlV-h^t zFRoaL*#L}Icu3DTtMJNsh$!RHLK_a`w5<7n`rX0&>Nh{xdlb&m0uGk^*;ClHl0pK* zJ?qXpJ2`bQ9dmD?;3(U1caZKWqsl4XQ(LBbok#msglD%2SDGY32=g#V#=#huIUk=u z(13@eWm@nbzOZr^8^!d>XoTWe1>*MIdQ}akPx|Xre7Cx|q|}#0r`|4JxB0e1No6Wa zxGdFwO2@)i>#K5oaul4II@tgVUj7;zt#p; zY#9c0++HyFp4#Wk0mTMTLU`lRDj_=hHpFFfv=|86qR1#SUfT#AUx~2=C%jc019Eqz zq@^>l0CC}$A*P*=Nj+S$>tsekRGF1~odhn0=!g>d8^pEe`)T}u;Z4w$Q-d;0<#2@; z!+b%_p;$&6`PW#fraC=wg}2o;Ha+_hjb}kqQyGR6TZLy#14@r-*y1Yd4EB`l8|dq? z=6a+VEcB07rgJR7Vwi{@$30WQjuvy`Q?LemtCMftdfZgg{T-I-lm)g|bJ}uk$i-Tl z*JDg>3HD^=k;7t1+Rw@!DISw@a%@6XeMFt*LLlOHOG~E#K|4iFIYz!CK!BC58?mAD z565Twt1^s`jeZH+3H}00#~^G@ zKd(DqMHN;Em#ZaV@b*f2dU{5KywN&55<(DKFI9ON)MNiG7gv5eS8v+p$AhtPlp(V(HUio1D2ez^ZvwC|M|>d z={titFsx75cH6xn#`WP(p6F-{8pSY18MR_xpOKQ1+Ll-H!J-G{H^tQZ_1yT*yy;Zo z@qdf@FFoK!aB5CUyEUa`IU}{?lKP=*mN$2t?fRwZ9#ToIUsj)h-S}uUCTr_X^A)v&py@ zhfG+JlLb|C3hB8o!MciQVQU0ZU%(e5TvhRD+>HAxT~QdV>CSG=Y16^#NydZw;>8P5 zt9zc;sv8q2C7`&USpu6~`L6B}DNCt7E&E{9RTP|-E~BmX9q{(4m9BmquOnR2|JFcv z8>%Y!{o14uxzn2%1(FWG9g;DQRNsoh#&S#wm=;)2uTG>i~ zk0=T9@XFBiqt0vwyAt~uTRr1=E$YFcMQ;+Sb?5YS`-ZcdZm{!fJDO+CqD;QQ*|zJZ z<_(BWWg&at+vu+o)8{G};m{LxL-Kb!g@kO1g{i9Kd5${x8XtIXkmQ0h0g#y(s8F`g zwIxZw?7RV{(O=mOQOTH`2T#$17sLe25e0dJHwZ1+iPGbU6)tFOEcG4iwO6?@fifV3 zcEABs*`#D{r_)oa`|8&+o6`zW@!X8$2E}^P3Tb7UZQNLaz&XW^XN^1(tE%3TE1FiA zSu9oK+s2YuyeO~Mj@?q!L5pCoZgFes>o}RhQhay8k>7p2#3+xSWY>={P|%p9s6Icd shm_ddqocbW!(P#US(<-ys`^37xqfM>>Ac`Ics*sLZ?ciCxAWM)0OQZO$^ZZW literal 0 HcmV?d00001 diff --git a/docs/stable/_images/ELU.png b/docs/stable/_images/ELU.png new file mode 100644 index 0000000000000000000000000000000000000000..ddb68ff4bbe243ad33cd87ee3f198dacd93c3858 GIT binary patch literal 25942 zcmdS>1yq$=*fk8_G=h{M-6#S|sdTqN3JB6AAuZi09U_WIhfpyVs33>q(f=@{H2wVx!FA?RwmXvl!n&U7TnCte}2zo{?dRMTc3akf+(Sf zqCyJxG0S5P&bZG{QPw+dm`07`sU*BoQK3^(H7rpR6I~Y;BtZIMQDHk^bG=-K7f#(m zNocnB#Gx$7eko&N+9Sz5>C;zPx`MCDhUk0?aGxHAJGi>_iVJUP8TGW^*m9`SFy#55 z7dL+|yq)9TmL49JH-rQJ5cxb@LdFI!apB`{Qdm=uKFNRf**&~?6(#wuh)7mIftTCLa{$5cN}5({K89sY_hF_c}t`RIEL7p{Uj6JEhxaj!J(pd&UL&UGVvKQ z>DL?b#UvAc&!i6KCB5@54fP9(IC$D5`KOACuXxtVagB_OP|?vvL`4yqN*Ab2CMqi$ z$}uo7A_d&|5BJuvKKBI-4l1kJ4n3!%qZ5>q!_m>vfgp7JI~KA1ao|1f5d%<4N{Y^C zu_a?fVg`fb&d*myJt-wAd>i$+oLk@Tb!Dk?Aflk??ypbTAFULP#ii1rg^=?Fyw`Lc zDp)qoEgp@(9bEGlz>lxs(oQx}xb;347yUs7o*!pXDJcvF z28Pj7#ivhSbVpP;?MznDu;nqh2;k{SP$-XlapQUx|!K>Nj5DR5pCyu#FH;^(mqK9IT6zI(vX&=6ZX zwZ~LNTRRaI4ec5_0h6_q)6^>hLay^*Y8mhy5wvh7Grr z&CN~l9_NFXMF(c(8{#O zZSx}Vd8{G$4fV~N4W`3Nnl3jXKQL`ArjFX$+JQE%Zex(8>M6K7f|%U;dL34gy_sr%D6_t(o&qVN z@DCdCVB$U_FF{xH`3dK-!3@REvdYQ}w(8G{%*NCN2=MTr<$i9JLTphKWaN)F= zgkYfogM@vuGf^axRo7?4nZjiohumdbPVf2i8vXo|l%gFcq`j396o`;T%YDQ;u)*`= z#~TAPZH{}ZV>-{|Sx-rMY`@Rw?(FPjJ4owyC5aWE?2cIO4jIe{oE>2CA1&X@)T|<>r>B=y>{~MB z9}gnBN9kRyYF>c^*>AOR;1Ch{gQ!qEH&kp%!&LI)8u-Rcl>)SQ=glkqIhvtE`354W zU5P8kv?OFB{j;rT1Gp$Q$K#tj^&=qYFP#tK#*G`492^5Px(*HwU=VXFMH7w1mNVB!(AA}ZM?m1K>`x~Tdn_qgpC*@_YrCTIJ(h=Ht@4N| zMe@V+uDbPF`4(p8)7!A|7cDY_b&q~O5%W+(gZ#~#H)=gAwmfL)=$7LS^RZ6^q!kqu z@Sv}RQclxy1@c`*=9n|Ev1b_5OokQw8$y44Sa>?u}Y4 zj92i0NNU?Xz!KfY7}8x1b`MxU_mQvZ@~KHlN$|IyW*Tk~w{>7$xdPYuC70_1(LH@F zkVEak6Dunx*EPMJ1Cmb?otZ>aKtKTRcpLTo`(G7X`N80|@|1H;hVB+s3Y;BprKP9W ze0&(LGIm<{;K2j;A(KXq3FjGhm%a=|Q`a_dF=5oHL+aHTvI&Xb<8`htwOPsqn-#yO zgLg}^D49{s!nV{ z#K9uGr+*cew>kvQNIAysX-@Z>1gJSV3B6FT8ixyvyTA-uuMA&LPfrIS=N;48W-D`3 ztlhXjEaRw`*_vAF8Fhbrs9b7IPb(Se4z|w>cn*|HUfw+1SsIu=`~sfanXL|irtVLV zalnSIuNRGRJzQjrAOh*hVQWsi#uF)$MH?Qr6@SdmYJ({vbj~ z%~@GIB$uPz1_=rmZxLV2f&;duU=rWkIpx>D;)uAq@`Fo5ii&xbGvYxc9PKu$La=F- 
z_IO$s>YrvKz01bJIkhAp%|1aB$~zsk1|G2K!VxoEe}8{!78Y569{nt58vQ}iD9Dfk z7-2euXB9gyFAq$Eau7M6iphBSvmi2_r{HhD>GF|9yS=q@b zgPs&A9e@RDb@GZ`ZhrUIM6fFFN|r!}ra@N!+|(5BcIpro6LSaTw|bDe8$fKI+N8N zU}%0IXIplO5%|Zu9yYUulHa3F+LiO3>??#pXz^O(|6dUHRIK2zkfI{U@RYo~$8XmR z9)a%~#uiS<&c?K~v}E}ZT zf<4H1I5>$$svui@$--{)qQuie>SlkOc+ zw8pqnk3c}Hb2w-{;5zE33>AuO8BO^4p{nD#gX{$e!6Zd2F3YcvMh27ShxJfL>)PJJ zKeoFyKB#h)?r98b?(K-S7D1L}NNb_B=$t-;O&lD^lFgMDVmP2ICD2t%&4gQS?Yp9~ zM9Zog50pAY>B24ClBC7GcJ10#C)TCre_UYcyUF;ekpQ`~8%hWpHx3!Tcd9{(u65M{ zre-#?oHw@clK6hc@UV)gxOh@Ziujg%HEiyc%ee%|wd}d(N!*4GbP)n5pDrINc7Z zHF;t&kAz@xGV~Cv`cWTKswYxHJj(~k2h!4W-|k`kW7rP2NvIG{w@#SteGe zWGVCCMxZ&g-g)vk*aLh}7Ox?f=7TF%e%R%$f`Oas$&YmUIjsGdoQZh?RhJ-?&G;{h z;9P&PA&!gjibC$#yJu1K=~Mg|>C}FyF0%72b9vpBHd-e)Hd^K*e2V$E)YTs+Jsx_k zkM_|h9~a<+LWek~HGf}8pJpJ!#2ee*NFE2@N}>@@@T_f(bi>sjrmk=a!ikZCUSBSMjH@dF#= zd$6@<={3z~=(GfC!VE5U(B-4$${bA*n89K+B<`Gdgnb8&mY4hH_1pRex%*3$yznp* z9wn#fVNgk5{pM`NL;m`MAk2nmCRw79D(h zT4SX)X*oF(Rjw!N(}B!7^?o>6j+Nu$I9##PASz@N zg61AeW(?m|1V7u0nJ`%*6oyrD&BnOv>+2~61SsU>Di7MGg;57r zY+ZNUQU39@-q@V7ioOnOYmIhD?;!W|I>(tXdV176!a~xnAZ-b_v#t0uQDT+o9e6lmJ(D#`DMgi39#%#Lh0h9s%4XhOu6hKj!MjTki zN_a@L;a4&;ruXxgz>9#Sn9Xfm-`3U?1gZFJ=@>KiXew`uBi-K7M)}zBA(zeG-NNl= zdG^ZjTjp7}5{!IpoOxU-WX9EGnyQLUKFN%$$T;qor4E>`?VB8bl35uRpr_?MIUv7t z_R|0rZhMW~XEwi^m6H8Q4?j_?HhMLnkUBd*`_575gv*}xQhyeV7-;7yTL9+Z3N! zkcqPwo0~x<_Y;(s4jG9yTA!=|8~s~L%S&Ss6%}F=c9Jh&zNihvfgJ&;Lo!G>2L}h) z4p~ocKY8**_SrLk02)eCBt%6ixwwcvefmVl$Y>)qkRh|UxCqF@x3)GLDdWfS0AT`f zHx&__v)8P;M9;v0_UhFuY~kWB8$0|C#(Zq|?`w6$@nxUxi^P|g#D#P;Y99W`+Oc+K zKkZH}%udbFmF6w#@mNF0LV$cEhAfsSk}Q|CsIWh)#{&|lW$iC-M+a#K6;YI!7byL;5DopzUngoQoD8DCIPA-*s+*6wsX{rD%WwJ!|4RiLQ*?q+Uz-OcT= z&d7eP@m?4nuKvEsc4IEmq-oKZ52I?K7T`Af2lMfH`fbF=M-yj_Kt1_trV^c&5paC$ ztTFiWuV_ca70ql%QD6U@WMihX4iyWn8pI`g>qo?y9#ZdR3Asr4?k-cl4x^En22xaA zxx*Grf4D(QJDr{yS9@JaEzzb2+@G^}vV6l#{Or`k#4-}}aVMZ27Tp%`Gb~#d^scHx z@YO&kVRhKl%Ze5f75xz&@cE}@&(g?iMKuhlcSj>bGMNJ#y1kta76MJ%-+$t)g=w=@zE+T!JMSO5rwLloRLRj<2b zJJYZh06~iz>bcHOk*CXi3`cV1t51qNP(1cQRsb>%XK4$XK5jD7H9XAE>f0U%<5-O|*hNdDK51PrzwO4HLaj`+nOuq|A><~N^1?s0Sr z!{=s#%_naGgPA~k44B;dknKMAMrf$rR3D>Mkv>Tn_N(Xlcd-X-)>l$-;+rAa0}ao40d@sG@!@+Ys9H0{NK3UkH*_-MHrPG z4`gHRB{)TEcEo?#Kp{mM#UvF2dFM1GvCALnY|MYajSd+KdbAwy{NBCJd);7V%6*T_ zG2u5zgi!&&w6K2nR{)=18aj4P!*o;IAk#QYBn*Hb<*lUXKi;vvO3$D@zC{969 z%*cpdLdl`p6IKH?fk-LR zw5c^^=2_RjS1z=TTi#pb#%-xF#Lr^rI$>;1bk*SIuM6}IQTX+@=AM_ zxQds>OaTH}`BZkiNXxgpvD31$X8-xI&oFp+IwH(lXvRUBQEj9yk_+IW&-Q9}`U)qJ z8OLGm`Ca=e4vbjNtB!0OWQ#!V{ziNogS}LKxi8#$L$?EApeZc$&`9$-3`E~#!^lO9 zT%Y_=v){9rx?yX)UG{5UyuA@avNzrT@F4EnB#PJVccUxpr`;SZGT{BY5|vqb5a=z# z$Vx|dCvB6Ne?L93SpbvSl^2kaY_`Bb@2>{eCAD^A>!A=4qb=RX}f5?s097MS=HM>rud(IfdYuhwsLQxekB zn2wft_|t&*;D9_0et`rx;GNRdy6U(}tL*^r!SiU*owvzNVx}7X%G@m*g4EbE3CIt0 z8H8OX(YxE(J&jKm$_s*}t*? 
zpcJVL>)Qke%BRoyy`jTB)Vh|Ty4RyLqmLO^oJY;JU;U4)!ht5*s{o^THrRCVOA#8x zVx`ARUdf46gP+e4m3F?2cSmJ7W2Iqo-lFwCv0TZn{#IMuOG#%(n!@EQ;k1SzK=x+x z(nXvr_YsQ67BFDEqB8w=X!$Ol%{asv!w``@;iX6dihN!=G`TNrk>BXlmXGmDS(bHS zo!7I78M3&1qBSRcvc!DhAf*^rV)7VX!hXRCfu9nG4X|zXfJSqL-+N@@d+pSrxHl5g zWL~;aB<~IBCH*kC^`{mq38|{h_uXnz`d-XRRm=Pp-Ll$Jy3(ipJ2uysIh^32^a|?S zm%A)CtkG98Q*62UOga8ZE265>xBdQXJC*RZ)4HAF#U0bZ-N$UuWYoPQt?_yfRJ8Rq z`AXOnC%c~@N+>m7a*D{{(Y5DWRk6<_zb>nq9DO3j>4$i?Ql}!_9tKPhxIslE6;n8% zRF{OQZx+^vp#MBL~6hSLyEo;(XJ$uXdIa>IZoOIiJk zmufw75!Y{!8u_YDNo#(&K&yRB(vw9*l3P~zfstUCBLn$RfNNWI|@HjiFIulV)c-!A*bdL&g z*_KDSqp1E*h7m?xrq62Z?Tc1xn3!nVZ6I+w@|p@103kzXAKs!phAZ;E(Y1D-HQub~ zZl?d4MU-HD!JONQ&5HS=KZI0gR-u;k3*FrBNU|C#l!ri&O__Kad zKc7e#wTMH_ibTf7(O|Dxeq^Nkmdwz;V|;l*oO_BuOH)zLz|oJk9qVJKk#kJ^(YMOU zad>xG+T#_)O6`)sF+4>0p%Y1_?SX`vLJys$=CZs*MSK*+K&gzl!61BYaWP@h=u32< zJA-=MaK`){jdDUzf-_|0G6TcUo#7z*hd7|?*aP1cx-TWxn;suPuHpoueJb1E_!89GP1 zD?qcgd49nkUUHHSR=V(j;@~L1!of?}B1(U6q&NiQ+a51OuwSTC>m7;p!$hye?RkRZ zy6p~V#$!UZD)MDx2Tj%5f~Zn?PTCtoQb3;hW*`xp8EDiy5;T{8*<z1^)1VGN~XQ!<;e#5^IAZgi|{`dkc^WT&8kwz_9M-)|P|z=ig{BSswb3%QiRs^Y!u=XL~X z>Y|VMRs(uE<_&U@QfOuZRRU&&BLuX#GP-ZpbO64@6Y~HO*yj3g&>i$97#rT$bsq`+ zG#b53M63h#c0@;e^*4U?PaUj}*iFv|k?daKkqj8gvAntn&0IyaSLm1MN-HyfsqA$Q zWx?ZaIC^&zd9{iQwcZ?+3b0qW{w>DKkbBG*O@{J<#o?f6pRNoKhO>a+G@nh}l`-8h zk#B^hpUiv*h(;M1gGJ^Pe<9zk0T`ZfkjG(CEx2;Tu+;VWG0XVz5z+q0P*&A=hi3i; zGWh5@*1MWBr%bczz(W$hdItfD5hz9>k7q|z7q)MCe~b&$#qNI1oPP0LJ>37qHu}kiv$DUv#yseK>*SXJR z>MG^doI(RzS-MHvNjk8bCKS}on_|tM&upoqz(Ke@j?*CHVrydn&8?TH_-u|6yNb$N z@ZrNNf8X7XiR-Y^D}NdD`uQ@ zCec#a`WbPpXEDhQ-RTG!QBl_w-&%&zZAaQ)+Sl>pCor^uLi0K?4`7;rBuZ}XD7Vhv zXzJh*dj>=9i@FZwQdcYmxEl@LI$*$rcq5hsCr6LiRbUssu*erJC7`4QgLaXc9+QIiV5|4o;G^B6jnqApufL{ZQmSzF zk`g(#0Z_+V;)<1Rf9mvhV5qZ7f{;IkhqyRTO$PMH)Om%Y)DeG^uiQSm*c=z(f$I-@j_DtAE;z!b<*&dI zz*D5ULkG1QJfEa^pR$;=-@{G9rwb-pzS>n7)dL9kf5GHbMfPAYF6RhegQ_8P=)uZ$ zkL&q9i89uPsf9Co#bcmq;d1UkDz!^EBs5bA_y4Y$0S67l454h%Vv5ST?@FBbIpe^d znC^m|5)>Fskh99fnF1h*-xQVhW8;glJJj)w@SQS|lC+@i`WL}1X>Vy5i#A^f;JGux$;LT8A@%JZTgW^tP87=O}-T(xhIzFXdjbp)% zfbg+;T}R!l@N$0@7=HZ4aL8iYQh6S+S`LYuOlW$2n-t`=>qF&bambaya~m}SkhXFi zFE<_(dj3fPY!uirf~D`A>Jey+mt#RJ%U_w4`JD(IZLaM8e93*S2oz&r0m$F)ftA>G z304CsTHA?sPo(%n_A-#f$?8-kKM@`zp_y#l1@rYVd8MQpIXf0_*UEhF#TG*}b@A3qz!75WEaLdWhISyJ5{``M0UizXz53knc(6)Xce+%}D~{BKY^{HBey3>V8K7vUluia+7t z<-12VM&rEqHs{v)1zlk@DG9Ua|B&mxnh}Rl<(^4<7dp zZF@?MM12}nYYM#L%x?A^BSk;`d_AC6rnl*m+83}#C$po#>Qv)nwm{IM#Cs|j}MoX|kWgButirQUSY2oL@Sa5l zT=pK#=5rr%Bwp=WM3_O1Mk2%;sH`q~W4@ri4onXOpsreV`aGQ+CNEs`bMw{TE9NiTwoB!wpvRU`jw>x#HE^U#9nSr(EAf%`26W@ZUs{@Bm?;6vWkEKEnr@VIE{dR#7z_v&3&&4 zA>&yGo|8xO&&ust@$V>i>NwikvI8d^1a)$V>%p4kdQ{K(QWv%j#P>aEK7hOGDwj4p(_F zM*>vsNAp{M$_mD)U;mU9&`kQMHD});KuqMszGClFs7Hy@EL;7CfxPR51^?lm0p=+vwrRCRv7usNNj!T-)Y8&o zu{_9{6>ZycF3!F3SaP60r|dy-ZCI}qBlow(o!^}+SNQ5tKF{y<;Fw|DcV@p;2jk(D zgr$&Lg(KUG7cVT@x#lU^+530Qf%i;>W94o7KPwF4ZQ_bZPhEHJ>7PvCzwKiOB@eu{ z4x-?p)c84Kh3CB{Dqb*s#04;JY*#cY>O-jpoO5j}_&|!FM>C=eJyi0cnBnV0yX&H^ z{%bF&scTa_(MC7L@mrQma*?BQ*F)1I`OAxz(kO5a2mk^C)?9VxvQqMMKyUEYBqpPJ zlX)v|InX%`{=q7_kD}65k(Z$si&jBzq*P%6Bbgb*tWgb^R9?9(Yl>i|9{oxRRr^94l+6EpE;nuoqbevUte1*kXrQ1#z&`h07lsBEWc{%NiDcRd1KgxY zhA;U7uw7hYV?ZfqF4HaG3ouXyz|UumeYooq%Y=jXNGaO+(BJECo9!6y8?%p}%TAT+ z!~oxT(S0_mZp$&R!xB@3+`-)W_|jteu)R)o_78$ty2Id{-`RnHS3;&Jv4V<@Rt+>t zi{?(wtY|S&Vwh5oCkBcxKw7#`>SL<(dKMHm>d%0TH2`ERdP^KwVf;|kxA1`NBx|pi#Yt86luMDrJg-NKiGZ~0L6U#=zS?|cnxfu>YulLv}-8jE1n!5II@%ZpRf0S%Lg$z`nq;$FhhgdUQ1{iSHE9k1IeSQ8IIpwTu2HDIAS+XTA%qB8`KIa!|y+f_Zx*} zxJ`ukRk{XsbA#mU8V?x$Vs)Wwrx+Dk(#Hvt{zDjkk6P(V9#@R=_6AdVxdR2HHE5)s zYw{G?QR0>L;9ZxSB#akZav(=?MWMZY2j|5f+>xXuzru-g 
zp7$J^=OV}aeFa86INV0DAWoWelDN1JA*(n~+lD{BH@T2Be>2LT4-Qb4SO65=CFO+| zgEQmeAmRv?@foVm>7smrb-}YQ>W;s`e^$Q3Qj?Ba`LyaXmpvlul#u;OPU=km*5T46 zWq@H#d$d6rK+siHTQ6%ofHFv!V)E|SV3tQaT%fMU%rX*^_Oibz!zDnr27rg{pgkKD*6M!*lRdOlBgaWh4{LszC7AiX)rkT$Kn;CaRa zOjn8T`vK#^`SYtL0h4Xq3YY6>C{{ej(y5uc17`Dmwe5x9D!uQKJ11DbzFIy-60UPQ zee&n$99yS5EG*ctZihEFAAdEB>iB1wB0Kp@x~XA(#hTtsc+6d>+QG|l5_pI@Z`M!u z&060$CkY`#3xO#}T3dE-H)&~gfTw}?zNDzDsgot!TxA@1Zgo-b-!9%u658Kd;Fra7 ziQl|QeU=<(456c=7ns50$oT>_15|Hw?Z=qzb0axQAnC%0JMn}VY+-~JlOVNXi>Zeo z?I-$jr{An>1)P_Q>%)Me{rPRj6n#n0E}F?M%FpB9qlC1CAIThB4P%QUqc|pih?@Mf zDjL;9L_}s)9PI2#09ODr{lZ8Q^WA&**j@}ZZ)kXmTHpOpMcOC@o%S#+ocN9T`1c8O%cf0EQD({w+sLi;Tv2;{nSFd-ra3W63(`+oK1uh@`Ov+;3LTWeamFzLB;HPCCx0LGcCRRgc)ywOm+ z*>bzUEJS89`q9Pr6za*_^oH+MyV2uo- z5Fp>0YZI}zFBWIy;o?&l9X{tx?<7cm?MLJMbWWW*Q{((x)+%_&!bqGM=8;MiD&2P; zqq^9Re9x0pPl}#eZx94bAPl+&BvtF=8J?ze)0|@yP(PXTd^Eb*$WEG(UqftITl@G4 ztBa_4RYy1siG#Z9VN#%6b<=GoCUHy4y#BUzE>iWI=TUgH`HN)+yo^gFiV|-TlJ8Fw z7#6^qk9*ZBK|JFKiU;Rzu7yA*G(q1w4-^V#Z19jR^rT`PZZCR$`}Tl$y&A#L#H6>> zK~$8<_b0K%?AF9gddF5Gn%3d^_`cSv;r3c$$74l5KVGnilhJ0SS6~M1)14f*si1r9 z5CnX%L|gM662Ni%G)E)L65Zd=-F)k|#nP~88mEqgUqg^Y>c{CX+7$(6_&8nkJz{Xh zz`Iet(MV!O4%s99bbR^(G%? zYCtM~M^g)ifC#FVR)Boa=TNJF?rfbl6zO5H;o)>r$zym(t9vdh@?tsW1-Nx3Y zpFc%$wvRH`9y_AlstzpB*=4KTYECN!guI}D>@Y{>&1`_BM2nKx}@KB-o1ZLS- zh*bw4!FzSv560u53t`}P>(`N${l(OH7V~l5f%{-LZ?*GaMRS<%?vqBh%{~3t-`@{! zcqrP#oAFQ%4& zYCr!dE;NOZ5pr9noqLfu`oqzXLfuNy&&HJzp$GoK;ATX+ik!SVcAKM@nis99Z{0!x zrYk_0UAthmGv7L3x4I+?U5q4IM|#$!bWoY~q9<_2mr4bOhOTu!f2yERy=a{z5lU{> zZ_pX;~{kUCR z?>fhr&r9Jq*OgLHlC$-gJI6SVdU2MFU{pm zq`i9c<{GSLE}Gr+WiT(Yd)ZngiKg2LTjkm55&qw{vUC1dY&ybD4)>N6j+YhY9peCn zBYpi%)P$ZZREYh@vOx_?n--Z}{uahiHVLt3l??aTipV?WoyEO=3DqM&2w#8r&`je_ zo&pk$&PbstFZ@E{N^o#6aN>10;8g$o`9f4ojOdV9P@NQxRKS&Y++|N4v^O=FI+~g? zf(8#y=BnfFrPiR35YV~TSmT8s}3(19X5a2=;MzyIZQ9)V`1skWv?7fl~75a?zW7M!!4o;(oVpkoAH!YqCl?njy7?o{0TF4g!T zQ344_iWV0TN|m5;KI;W+(9zSUt%>Y%xR?qHOUjkbT#%rmVvOyuQ6!HY6Re+c!i8S~ z2j_s@SH7V&dQZFgY*W_UuJ)Q%Z3Qm!G$@K!>usL#IARlB-H>=^A=?W)Si~abbHp3aa5D8}%ufdBQAx>necgf% z%}x!b-HEr;8n|K1RlE--Ly5}9A_iV8SpGa1;Bh46ik>)Uw=n5f3cVT#7jyyeVsb35 z$NTu9D4>%nq}|3FG_@p@my=@gZDWI5ejOJV0-Ex^HdJG4CY+be)CZn+ngh^}lh36* z;(m+2wQl6Y5fvh+uT;C>qD%p^u9?J1P^6UcuY%LISPy)z!?rfiI$ZasP=Kwrm^@$k z4(PUV@-@FdOP|6c;KO4Vt^G;PGf>oYjf-rSXL_xDQQ17oSMa-a7FL_^1y>EDGnzg zR^oUiYyt{+C`#tfeH!9djPQ(=zmq&`Q zfKQfG-3DHt1~}RVNu@J8`1}e~1M^tVibe!K)r(Cs8{E{{d|k{=v9Yyd!hM0r0r{{7 zmPJ89LFmz=M_^@20;I{e^zx&wz?RI)%1Q{LLBySBNphc^1C!`LMRQUP3#bk;arpp~Cmhr9PuO5n zJbz!jItT}WO65GpPY)uR8B5zU!blzfO�Cjeiw>0kEW-)SsP|;?NN|MNixb0&7(M z8Q22X&_A#Dt%~fw+@m^vqPXVx9dsdp`f{=T15&03iyll=_!z2>L@NQ@B_oB)%+78E z4X>dEpp8QuK|)MS8+4`GZ+A&_eR@m~thzuFB@^A$czU`QO@sk@MYu`8^BwX=#bij; zpV8^|#>qItfI2r+4XTIRAG3?;*zj?mI3*by&R=&?F-SJ}J=CvIW=2LWkwFZwvtv#M z+|I5BSOtQBDrgUC`s<*9R15SAF;?DstpaSl0C_KTC8J0v#R7kqjV(KH6~5dZ&{DNz z1D>M#P%&T2`Zx)<6o0g<%(h0#3B=V3ci^@iRN=Lu@h#$5Z_c%II1xuE%Va{ zcns@^9*zNTZ;S+rpC5N*3eqI#;R^)-t59qM1>#f9?!9{H1#sG}uR7gfK0aAC=9H#!dB$GN^?uxtBV{T=6TlFXlsioq!0%v%BQBxBj zXpG864@s%S3j{~z#2C|{Va|5 z#rw*Tp^ucT+<2g~KvyetJ>_gKNN_DpE7Ey;fgD!a`bC%xRUpti|Ed^d<2=_BN98-K z$w$DhPVT%eBpJyfl-z%Suc#;o5DS3Bk&@*@CmTUgz!xB4f(eSE$-u66UBZM;Ku1DM zy{7}bOiRm8%q%6J-N76rW0V<)qod_Tbt!$7AX6l}?T;y@V@(ID(qa>?Jqew#SGIN`m-mv;0PJO#+zm+2DobgrBiZKrwb`)Y_Rn78PN#Mv^Sf>OYUMlYI zo@Uxo0!`k}I@xYu`?k8)4Au2ulU(-{yeIGu7l+7Wj$$4c7dH=dhLD0r5zt8hxq~we z4tfjRCQj5mI$21fs>YH^USH2XgQf>4qSnf&5qUe-qUOC1ERVNT_){_%0$jO#G~Zr} zluVf+KQbMU$mt8PEj?LIlp-G)-yUx3P<&-^GBI?VKdhjfE=;<&R_xF>GskyoSuk7P zu9@0AXTzzeFwkF9y5q#D0GG{lYDoG#uU=8Huv@ zn)_?Yd$nHh*hD%)dKQ$_UI?xyZj_~ns@NM#9bq*dpbJTK02|$taZ^4YV2)=Yceex3 
zd_FW#;Dl)#YC!CP0Rgq38~3Z3c(mHtPZ<`Z#UHfW69&d(@MPgwI-H97;hwj@hBxq@@6d4A$|ALF<(s5k`G)W%wwx@`SngHmQcVhM>*_2)vWqq&Mb5j>~D< z_+wbz7UMulU?%CjN9xfp3rP*gm7GwtSPImw2+bC2x*e6w>bB*0;c7IkG58?{K8;|PDMcpVgwV9JVsIxIr}f;~VNg~6b8aY zr~S`yeii4```=@L)eKNQKr!jJI{7OO!3OxB^HI)y)&ObzPj4PaYnP!5&>0Mqtn&T$ zRU)QbYXOzJeH&dmRsK(b@v=Gd+*O1+p>WP!|C*%pO7`Cewfw6jon$zSU-6IbsMJ)G z?XPG3XuvxQ4Dz6}`o5d|;)Sex=>j{q5d6z%dZC`2d#?YS0~17rUY(RgeLTzTCZwZ7 z?djzOYZ!-}wE#MRR5*eGBuT3ydWp9V)Pewz6m0qu9I3DABA^kSj>=OU;yF{RGExodme$%6{W+po`C@w1-g&%G8nRnOCT6~U~@|MM`~yfPiM zYd?Gg3abk0>RBrzMW2g`IF2G)S{_M7-G2xUjoURjQnK)*j~Yz zCn<%6g)!zS;3OCN50VV;9?b@6HwF}Q%|;_DZ0}oTq^HZ7o105V3&+b> zuF8YnY#0xAh4VnV{sxwh#1E&tk>O^0|)6Wn*ONg*4gWw2^#asiD}t%VP1 z!#&!;=%fP59-N@sH@~^5Qu11)=pXbO@N$EZ8KpQqTw#6D03f_hJ!xade zd1D0j+YsXqN;^nZRdopNzAYqiKIv8{xt~70A`^_~!ob4%eDQLLW<*q5`k+*1(OM-d zD{C1#4C0HGzWwgeD;*SziAopXPMDXdmb2`U;z%zkIYKy zKq2S+F~CeuFZb|abK2688w~|r*t&q0%g2E%g4U4oxMn>CHMM0P{3c81s`fAqfi5fV zDAq%D0p)1n*q)kfip9aEAflO(l-ICb@K%ymqTqaolxki~UO4RE@c85pJ>S^1qvc~n zU!bf%u(q)=S{*Hk;&Uo=-HM)wtO~>OAqazqWbN%wLl%M7+Sm6fC51NDVeW%Ug<}Ee ztk=O+6#ud7jm$y0;iA*h}Rz1q)$ z^cJ35nIqw!uRs653jL(>Bpwa~z4Dzmw}oixws?1TcQ?7kDgHSUWk}q6sdTu|lqB`@ zzb>i`hYGHB?+ye9K{`H2TB4jG$v{hxRys%s4rXT`9yHWV1}~;+=_hro>8T+I4Xa~f z84~xIclf!wI%oY3>+&tI_u?|$foN)N)OWVGElOKZ&L`s6JzfF%0vsO3S_h8}aK1il zNHd(^Ihy>>SH!^cBgas~Lka8a$8DHedh7T+zsK?WBAy&Wh5ww5|J=!qYgfR7$q+Uf z5hD6)QO?Iw`%1pL$z5I)g7C=Ws)pc#t`6Ar4GlFfGW-wI25xg{Y34s;yqqse2on6B z3kCU!QYQkn>VKcMrP3GX=JWr<_tDgD@nT%P`oB+`D0P@DP{01OlJuC4&^N3mOC6^- zi?SE1b}?;I#kp5Q{Um9V6&OANTmH7Juo(0egUZ}$u-8j#ADq_;ZryfOY-UhNmSDf4Ryxd?i=-*f6Aac>+kVcID6c`SPHF zJ4QuSRZ&yZZ7`msP+s(dq&Q$-LRd6!|GH!J$I-cbZYWg!Jnpa$#DNnsvNS5V7ZOSD zb!>qn3qD5X%)9&<1?*9RO`gyYI2H@A+BAXVDJ<^AkKve0R>&wQY@D2tC&f8AGTm-x zqWNbE;WM0%Zd`k*&N0v7KppmahEu}X<%bddQt6F?qN2~)*-Y~?N?=TzyfWEK`Z+5U zGv`riljNxDd-GPgoGF?MEJ9{4y{PlSh~l!+FT6-<^x@5*5ljl){1E#q{#56&)G*T| z7TCX2Q&TbQa*?pm&mG%tgu5Lx%@lzy*s;|+I+`gKh@YOAC1p{}*yuh-1Yx zIO^v3T)17%_7v_jD39H1uoNe*qPf1P|0cXvWyPh3Q zECC@zK~65L&}=LN=>FuJZ0@c+?w}cZxXjLcmkm*96$1JC3xeS?6k-BPqVt$qj~}D+ zxbs^PlaoKacsU?Z2ur9o@!gBoyCvRa)uVZ-Jw~6Nq{}Wtt0P4+v$L~#t{7S$Z6R~u zK4RnGpx9;w*;A#;wL-&T_WHZGZ+{%Ch=_QBQ}%K|Wv716>hYsT;w~ZEXJrT|k8T`m zYB>B~%xV@uOLpQ?tsNU1D+R~#?feLr$;iu-3%@19pP5s48Od=*;CR(QO-sMM=}5E6 zuK$?<+3qy632%oyKr4-*6wFOiF8Emg3ZHzPeirIVv9Yl+CYwV4h^#jwc-(2EKHL>! zJ$}du0tdbA-+86G`-z6ql-ECk&*M8XaP#P?;IK(LUS79+1VW;?`ShZq_XBqi|CNB? zS*X^S}t zNwwh!ak7^(Qd50<6#4&(E6E^|Lg(@DGL^AVlQzkTi6Q;xE2i+)S^v(=pj-B@xCe_k zu#13>*uQe@c_=;)gn#aYI_yE(moHEJ5sR#E{4>%1J4+U4e{OG={%-;T(*|x+Q@s$CRii%kk! 
zR3lx;Xd0WQsjZkO$|cuwzl0$(8Rz>(XVc#2oPW-L=MN8$-^0xB{jGPc^<8Vdp9x(b zxN?YT`R{XGy?4>%c#FCFpj`sTFZ(u-tc64znkP_)L}{E|6sWIvnt8WR`^^GP%}dp^ z6G1Z%4tl)u8zXfPHvH8{zGeJu@1i!RB%NzRvmmS{&3Yfni2n6bm0A56&2BD*;KE+N zd2>Bwk09yc!yON5hk&FaM;2Ra$0#TqIDA+~ z;%sAcC9Y3;+O{uomcaRy@{T+5FeVa;@9xhtnu_XoEX;5lu{Sgyc#T=|+Rvp{KwJ_oT5nq* zqqBDhPc1VkXGIhZ4 zQxcoKl#v`4w|eHxnX&BFzH^>2tF!vvUov(tb5Ch{+KBXy&*wkdS zC&!kHPTrsUN?)x>d?Co|dHBLdhveKyPW5c>5z8+Y#g_D|KEodJy+=Pn|J*^vRDC`Z zu{!wi7`@UM(zrmM@_f>b8}|>qp4Z!x=TnFSzVQI7WQ$e4st>gkwaghQxUQtE+?aHO zoexf290(}4Y z5aoZ8T4;Xs-vrRqFR|>w?ys`!ODqcuYv;&)BTjaE$kC?K){LO6sI2UXRLgwYw%m-% z<&Ex z)`d#9Xii4PpS(AO1W63&bowvsZxRFlA##GeC0Y>SjC>ma(ac40 zNYJ%9)c=n-kg9Y4<8rg!-rk5M_95d>yFOA0Qp?H(9 zFwnbCP5zpX3`pz#rmGLG7JT|x?SgTJMjUxB zLQ?Ka-8`qa&X^HWP2*GsO!1|DHwWdF`WzeS9aDqmCFejpTx|PXi2h!3OrVkn$^{KT z<>X_uDqdE8V%<5tcfuTP-c4<<`gKNQ=UX~H>aj)bZEoEzzbx#}nME!xIKO;@%e7|o zJTp56E>yiid5@`Zl|d!d=d*JeIzQyCTUl8lzO1))ua5_6a*J@4{3~z~uk(LWZUj&G zHGX%=G(q$>&wDclM|a`!B_G@+2Te63K*~_al?2+W^`K(dxyg&y5)p}7+;xn?Y)8^> zGh1s-9 zf6I5D00ykJ`mLt@PD^cYep3A%CU2+{Y$l zceh8)4->U_-ps?w=<|H4TFiXJp(-jWOE3r)OCkQ)P_vk=4v# zTBRPm{76#P+5k4FIqH-V5fS$QV`LdCFBmLWr`1*Z6~fBzVT#ZC=nGw=vG4sFnwvMx z3Vn&z_JyOpmdqC_*?$@i{JDS}t|`PX2ABj^{u2fOw7 zG0>EgQd2kkNJ`2(&pCOnDzZKvMRlvW5ED2NMkfChGO>o1Yt;9)&q|BQ8AurHx|-g) zgNpW^7DP(%to_cOs*aJm4u#iwJe#z4?s$ZT(-hKja9o_znV#omiW$y=EPwgoD*9Om z@g$3Jo*Q(C6g)OLAhe2b>MFTh+LeZ_G6vqJio?#Ockd{}8zigs2oJ-dGRvQX;YVz& zrjNFsvNgX(I$9h(+R-1Wqob+Jd<4ilMNo=FD?`Gyx+*Nx2`sVg6^q;fShlpV2M?7r zk`{)4Zjt#xE+k>Za$?Z*GSGYS0g9hmhJ96g2WTKPW5rKkc&mMt{p1|+<| zXAztypAn!@)jiyI@1jKTUmbw%d&^sj>OUNoDH&tRjFk_obrXNk<0YcYVg-P$@2{p& zDxK$7RxiHNbGEp+_y}8b>+Ka}%D^%Geb^fK3v}a!E?D|DUF%GZGV(fW7%0$d7$(YIUFm{%nC6! zdY3mbEh>46(?fHc-i9r&g~Ax1X>Bm|9BlZx4fv_s>UCFT55Q!^Wv$)`4C%*U5>C2w zFC_eO+L(0X4X-n`}i3y3blpn?5t~Y>uzhZ$; z);NFyW*h+4cAl12A@QHVhGG+}C+ZJ$pYHLVx2!WZZ%;k=68mherXM{|-p=y3FEi5~ z-Hu+UkrZKrk@5`j5wQvZk6)l*FrSHIOTVW}f$yE#zHj8IWK4USks&Teu4r?ax1>1& zP}r9ATI!1{3`X5mesndzxvz!9E@hr_4~a;s-)QlEZK>X0CLN-kzs9OlaUVW>Xp;0q z&*7n&CQ1qRYVwon$yC_=p|z(cCnhqA_$)a)!3lREI>J#BC{t@Nb z{!A=jNUy?euS+ZU&H(>7vLn3JtJQ;3b!L9&lPOMi_&ft~l``)-lt;E$3=#7ZTzFD4 zN6q_sLV_{b|G@%FN=0}IWyCmn7Z(>|RuH?QOUa&VuyL)@9IbQrz_t>Sy_y}wHM1Fs zQ$JVy4K7&-FQL%bI6IUXb>#QCsDUcZHhuN@w4$zK>4WNpN)(zc1nuLML}v+LvLv$Q z-iKQ*h&41URa-%sqd~4`ffatl@JCf+b|bZ`cJ}zb-tzcbx3>=U$Cfcp4reKzMJo0= z4@muG4b{Lu8;2}Giq5nkt*4@miI%z5q0izLU4HINok>wOKw{CxAZm*0UN?;X-ydnRa_#skS5gXc5k^`NFGbgDO vil0n!fXukrO@qtL-_$?;la0c#pZCe^cdgHF<*;VqpHrA?Ojom4{^0vh0FJvj literal 0 HcmV?d00001 diff --git a/docs/stable/_images/Hardshrink.png b/docs/stable/_images/Hardshrink.png new file mode 100644 index 0000000000000000000000000000000000000000..8d11945ae9facf2f0933ac777de5bdd8fc7d1c2f GIT binary patch literal 29659 zcmd?RgOt&wZc&;N72l?m2AOYp*reoMV1#tXGdD#qe>+aZo4}zW6=i$0!u08w!Onbn*ne z!`|FC2LC#4eMem8B)ptX>UhD=r!4L%TBA?|n#eyGUj^d~;Y}_Z5e1tk=6W{vnpV0f zQ%xHS6LT9AL#<18x>nYP=4LmUSeaNEE*aR^Snx12|NDC;b1Qx3GkPRsDAXmCxbSTm zbogBVQx%!msSNgvaw3F;r?5zbh)GU_TD!QS-$&7`lGc@QTTiUI6gm}-S?D#cIyGue z@HTNbT@aTh!V-qp7#p4ur<)hNJ`m#yQNxR*9P4>$+Uy~vy4B!3pWx*o^I zE-x=X#>&d7kY%FiyABhE#c9jJ>0od3-o1N5>3R)I`#X!rQ1yB@BOPY0AIHKf?d&A6 z@3X}AqY_jb<*u!*T^b9Lf{$TRQc|8geY(;#yWjK2k00c-v2k&u!OCXOLPAJ-G7Q~0 z2c23cHrqQDoj1Dm(bHc-e#S~rpr1bdl#$W4z^>39S9~z@be=V=fmFbWYjSVyxK7?xF}%}k#5sge9k_%7cY*_Hf44X5)u+}Sq!0E z_SWTRv&@Tr>7hnudpi=9UsCcrd{j)=JBf$)V{dcN{~VoUSHkMgy9A|GRoHZr;U!*# zG()4K?jJv1;@g=^Pf=hfQ;Aj1n|AZG}l;IoHKHdoa&Gv10s4{ptAmdx0-# z#QcWfrruedXc%5x^nXd^LQ2A<@^yJjO5oS=LWlL|FjAP9m=>)PoWt1JYMHk#p;Fk>)I-1fl@@-GWDCAmQ^~584UPbAMCo3!4m1Rm{w>aPn 
zH_T~wxeB?CpYig{yYuF*$50u@osA22Rj}<@Z8>&l6o<{yYd%&SCFM(sn zvGh4sD4Q;hlatfZhz~O+N-5Lm>f9#WpXH@;f*Ad#$k3bmrzInJJnRRZNqFt%gdRUW z`+?KkOtn)hRpaN!+c^t)vr8Mj=E!A~6+B)2nWW0QJ4$tM#%vU>p|bd3&wQ>wpM=V3 z2?O=fesQ2|Jy~yUs@d!H>mT9V)^9>Xy^u>TEiFZl`f?`B)`xSSODtUecGz-TRde5ruY5TsoxR}kb<#YVTC-q_%9Mm|RWU}r1 zZWB2rsDQ7@*=zIWLeyQPQP<#j?Awp_^3`2_eHm!VH|yv5l92Flx-&WIEGg;ND_5_o zX7+nWUxWML-EESkXtyz^_(e8eu9n81PfK-UwMh`Z{o9kc4)kfo;KYN?jSaaNyv+U;DcDl2cm%!^N5c-Ji_ zBXbP~>HcJ6#5*4nW~I)Zi7;~_RPEYyYhoQ7G#T|sui=C4YIuL1?Gq>l(%Q=Mh>p4}wiE?awTs3n~KtLepV1Fl3Fzg}i+DtpH zm_KD=>cZCYsOk35*Gb0f*Go&D9kZPJdD)~tFH=sPTk95iBF&pLhM2EkPr^V$(M{oQ z*nz(TU#!0!Ac?nEA`5Z0> zd-mZ1&g4+3p!CPt{z}m-|Cy>S=Cm`P%}%+xS$xnxFB<&?IT+JMoz_k4D>;{4VmcXjkIR^<>6Z z+CI8^_3F~*X7>ICGBU+cZfh$m4HieO#cVJ-P843#JSyQLAX0cFCbIQ{f zd33C6_g3umbagM^x<$mOl<{?ab^jf!cBRH>rEgcJF$uDY5)v4oFi$Qvllb@*@b?~FT?!^W88v$RG0g^>%OU<|?Qs&ePX~Ax3(k2*Yb^BT zvRj?`1#@Nzc9Ku3`F09LPTU;k1FxxEcM11)Hj~$_+dAj72TwCIGbgC#6Qfe>=9OU9 z_E|Uaci7J46;rqyP_=~^2rn3&qn)w3@U^s=jF6k2*iG1XY&N;rv)iOnUftQ_)Hzy> zsh^)nSu}(qyuU9kSyZRJhEWO|CnF=X>m)dJs`SUJbE%f2qB{EeF}iiZ=)Lt$iZ=gB z7OfvAowkNwDr6eT`qr>e&vqt@0U+Wq>2kxSM5B;8?S^sek(!!Wx>~;7{F{)Fm(kJD z@dE0qlAI2Xj@nyG!wT7E7hyVmU^o7mZLa>p(^IMQTit%rd=_8~jIO~!ID?Ks&fuBm z{E-o@u<`a7v-M^usBnU7YisdIlMH{Z=d;7-;gtaYz`(P}vhHY-=6AFkF7x21OXgcQ zNL4UW(I&od;q}j-k3t(Jcrx?9@of#cE96*Q9&lXb=R4S4b3wLaSo(TMfdoC_62qrKu-qTBWnr`{B?$QVdu0(@Hqa#6)T~uT?-L2mc-7ZG z#G~XRO=Hl8b(u3BtjzLsMF%D1PszX3DkZ47zxxZOwTO<+m8ARyp5cv`r!V5DtE(e$ zhlSjil-=lU!{TgOu_G*Tm_x&vooaVs2PTBOvcI>}*%h0g&pUt~#pl~yDxd01rmFP2 zXuRhg`KA?WdcB{ z!vSKH-_}==`Fs)wrwn#)Z(j<|XxD@NE!fQoiHY@8tLo}-pu)?J>+-ujbrxHJleo{ujzjj74lJC{#pexm&>j6Iir9(6Yn-O~v5xTFS!AQrIS60XABj?QAl$e9~_Y)u29tmjr=b1%-vIR%7=HmMbYZ z2F*JaVx^+|Zt6E`z&5IoZ^wuLCy6kQ_9<7@`VZ{50RaID83u&N3_)-O~ zLKPJip_ncT!D{%P1=}+03+GkyY*Gi_Mlxw~Xw{3p?)GA(VAZ3E)#Dh9ozR@<#J2c! 
zM--RYzTnoivzu!^*+u^O)WmN1kDPlto0_D~VSV)XC)nNH-HNux(xa&~`4Q?%nog+A zp=G3Fom%$%y*gEi{*Dz!%{fjRlV={<`Jc#cph~_l4{9A=c%Jo`4H>QThZnD(=)Sq4 zasR;J;rt+_ndq_m59r&>B4-p(GcQUtgwW`mI<?lko%7dFmkL|o+HY9IXv0?wwj3(Ks7_0;dvfdL@4+~{ECp@-Aj>qoY~~)nrgQ$~ zmoS?8>=^fVs%_c51@i#a*xr#Q0$%8TERX$g1pkbMae@hvNa+XX+|cEAjL>+UjFr`r z{7t*^b!og5ufZ+fX=k!6#e75uj5RtYhI_UC^nY%IU#+M#kyF~FRKd0c z&yDXoFSZ=Dn!C*Ic^M3U49+`2?8b9hJg}}&XPf>%!+Rk>!tm+USm#Jj^VSs%RJI*n zW$&hqh+B3Ip@hx#U9aeK#6DOxH8sG*j5tNC|J?S&W3b-}Ya6W~EwbP(lBjzbcDw9B zInS{B@+pS>nO8J!JY^&oV21q}ADAoaHE@aveNXYyi+R27`#k4p2-957F`KfJ5TeQW zX?vuqNZiPM^lDN-s&o@u5(9OjGNVk{-rl1)B)^u?+T~Dixn~`?&*xJUA7~N1Mg36p zm>Ovh;i#jSP#{-bCn1_X=fuyJM4j)&8m+CZL9Q3%HU6IOa=pl1!tPC5uAI30HSoMj zYZ>l?N?S1%W!IOPA`cb^jfPKtlmvPkRP%%60`Gq=x1BaDeUg4@E|18B+lCy)!oYOg zTC}3HDJahAqHnsJ?em*W|168c@!6B*D5jz>OY!JU8UJY*+J}cW71n26?0QrcvGV1^VX+9ffz$MUd98;6+fPp5zq3&Bp3s#Sq=<;s<|nPsIg5uH>8ja%gHW}dYFnat@53~o9lf$Dx5rBW|1 z(+lNaGG{1E#^%Y`Fio>FnBP;zXX3>~18V_%i=EwPh06h3IdFCzJw576CEUe|D$+zU zGBQi?*mM6G{@80cl0@$`dn)gC1<0O6k+v5ys$C?+We%zfrulB!typ4SuzaGdtSkl2 z5yALSXi2#&F7;#@$Jw>chy?3?j*A0DCNnIW`#-ZdOE)vq{hw{9Fh{gdrv4yz0gRJ!yI99{Qbog70K9c-b|cz zaB!gJ;2`$!@PJJ;m1%Qz^{$Q0t(Dc)fT$>Q)g-wnkoAD@K3iL}nN8bXyME&a4&Zx^ zZZ5~D3z<=%y8v3k7m-f*{YGtFo#ob&rm^eV&%;qlZ+?LM9aEDiv-9^wmm7HorP$ct zQ|NXWP#?wI$fVS$ynN!)o}F3Wej5;gAwQWGB=nH>+}X2dQ$WX>O#_;&HCW^ftR*kdWxFeyV+s+)f)^4u>h^SGu}aA3u4L*gf@0YQ0~o7-rke z*_KB(MVJG$Gkx@~3?dFCGi99}%hNXcYc3#bURyE_ErEd$|$eue`-s|#9fQ6jj-&=ImZuu;3#F?n3qbRDUN6T!I>HhCFsKOl7|pv}@FlyJF(%%!8}kgxlYq((t}ebG4|LAB zx5_pcPvqz>b7zyHmL4shd`QtlNNENXuOrt6<11a@mF2KWtba>#{kybZ`z7ML)~Y!7 z>*9wdr#(3r7raX&lyN)lznFwSt82uxJKhKfrk2-EO4C?~y{nKqN{aeutYFPR*^95q zSkPvwaB=Byq^EqNufc0*?#5p=|KVcyt|A)4p1D_-ai4OL;+5?=v#+wtahoRL__#t5 zZ*b3^S`aid zdl_e*PrM)+DP&{EfU*URee2gROS^X$|E+|vaVA9%1)h;`d>(f4z+x`+F5y6t2@b&e(+bFrk1}xN?H$b*cySv7&Qty-}xSS3quAJ$AM1=M9 z>C^vS9JeEfm}JOsV+C)DgE`ZcQ=vV@{Brzu5qG@>^_xV^5fsNzSKZV!`JYj&f1mzq z*MBs}x%sqC_>H?C{jmjH#(4PnQ;zjruqlkXsUIU&Z~}alY7f<~U(42*2{iVosxVN6 zy_r9rJB||0#Ya6Uk~wz>j!51CG7FNsI#NAt&2q0K=vY$4XXT!9gCSx3LT!w%=K`-> zW?^}g9{=~IqkjlG_V`I|7V)^P+$Sq7LJ*``&L`D6(cai}ozWXyxH%Yy3o^Wc+xooP zHZ?v4g3dTqDPH`F$*2B_pfmQ1MXuIoz08(~dH+Vw>n1I$_bscU=et6~v14wRS-bOU zjNj!C`@Gfaz>(yM^-*$(w)gK3v?K6?F5j9(Yx?vBMlSy|u2N1L*|V(66B{hIn%7;F z8Y*Rh;=-K0NKc>qqZsaFZ<`ZJ=)kWu)Lu&dI66$mjO^Phgf|38e!0k8I@bQG`g#G1 z)BVLHw*8M7PEn5TuT+L4Fg;mF@lKvP^{liceOvyoVv(p5a}7tJ=MaRw!0~yFfUsr& zVJ;GGMy4(IIc!epV{oGM5!%x6R^{st>!`S)onvt*)G=+P;l%v>vcWo4K&#k;#oVWa zIgg5mzI=D-Uz9*UC|voqrcc%|d~V3>Ez9TzX~BUIfdrAOk^bE*G>!rtk;rXe*#Gk} zT{y8|^QOu^iI7!8gHux2D z`6EN4rK}M>BRib>7_*)Xb%^)z-!JfuXo!sQAkDL&$kGdc_A(5^deOT)f3uQTWVs5C zp5glA5$L+lS5~YVlhXdG#N_l8J_#Dr%wkK8{~)p4WL};uyU1pGO3t1##FD&M-uU+Y za@z+T6G1R*bK)H-3LNzH(Y&R|RPd*S+2q^v^fO8-LA>%k+f6`7th0>@eL-hp?b7dI z{~i+9(*JwLezld<^l1ABw8VSirvRy#j=NtS&7;YCm+Y?8vxe?7z(s1{l^R`rw!QQJ z;(Tjij=1}E%RltRyPPvlqF87z<0bbI`smTd(0uqxj+`-@3LOJ6ZeUQ5W~xG)nfC+> zLPU?a*WL7A^GjrS&h`{`RE*C9=deW7M%pl4V8O`WUE9ReO9jO3GtW z_LEfWH1mfP^>d*5>4?f!;~>k8J6E0f`dI8`E_m9XlFe5f!~7pg!HH3f`ES8*=LkbK zx#Fgho#KmFC>yiUm&hm;e6fGRo>^l!ozvX1ldZ&si4-R=CBE&8eG&P$V9SM>_(c`E zU5^iXv!>6A8u~Oq7|M}B!`HXwTpI#2-hwzq(!%cD5!F8Y7;p-~3tX=>akFWro3yqZ z@`ej{@<-qCYT2sqV(^(|c!Gl|EbN;QeZMkX0y|Z!iWMc)nKGfkmqk&gg#OmcVx@RU zqR5-0JV;oc+EPFE4?&?Q^I6X};V+yFqL3;Ja6qr^HTC6MUlu9iuA8Foilm}gm4 zNx+k_&$Dgx>XP|{9|7gVk_YA53cCR_T)3o%AQen3*pP<7m;`GG*Jzz9v4J{Gfq{j5 z-Swil^S?Jav_k9l7rdj*j~5&DlZFq4TV;1QY@$KNEFv$%s_%$j91xDdIpi!B?JPwd zEZXz!{K*3BVq;)n7;(l8KoJoU!BS}I{PAz?@s1v{aA6f?>@{TE-ycZ6D6rGJ&ZNG( zoYPDk!i=Jm`0zrr+*7l}4G!?-HWV)h2Ob?ox%}%N%KtrvI&D-*_2$NqklJ-6bXdyD 
zaO76!8$S!qyxm?#e+pmAY4J-@&KqQB&z(yJ%Ty`RoFhz=BP>xcBsdr|=hkOc{zTQl zE1)h{rfOyb*;`etl9Q2d1-Mr2KlMl+gXHEvfRF(k!!K?h1vQQYfLOr3nQV zTFOrtA$q@fw4=yp7r$+98g16J=bB)I0vVu-L}Lo>Z_)7a^Bb>+Le@b8r~`X&Tx@Jv z)np5P{;^}n09E?+^{EgM5h-+nGJD&=;2P-m2_7`47cXDZu&|uLCZGfhA=maPR0mLh z-UJ8htyU48J9jLo=Ec~UP8bK`5u0$(aC3J%ID0f32s=6PfpPuj{d+^zPVkOXQ&X=o zFbGRZ`q}0J?Eu{ZID`N0uG6el<~9)(mA|()4p2JfkoGt!pb&t-QfiKv6w#S8ZgX=c z*SxhFj^2<_Q}Zi-NuazF^HPA@N>DY@S8ZEnaL=Y71IK5d%bx~|AfG*91(>ehPr#=;*~UKLd~x*Y&3B__@}r9G@jRPW|1x^V1GFpJ0%!XRPEAoMdZVwMzt6Aa zZG1~hJ8NiY*kRH(_r%hY708l`!&xr??I2TgL@}X;z^@jq^1HaayV?|EJ=y5`>q58) zD2X4KWMnieL$Fg8{6|r41M^B+rJ?BKJM9av&wle+9At9B>FVsv77du3pZ5)BRPOk` z)k=P-AD#fIdDhn4=G5;JR^Oh^?erZlNI*h(;t*7w%U1KL+3PJgc%vc^7O%szbJO$DWsJv?Ontwody=)o;cH%eoU%gk4i;2E3YAh(l zK;DVZPos2_YV<~T8+JOZ{RphNlgPk*meL)67n*e$fAF)~c4MwG&vDIxDRjmX$G1K< ztK&x)54K?-(kYS2ul|UnnvW7a)J$eY6$+FCBQ8 z+zp5Fx1%gqePB-=w}jbq%iJ^5wHU)S|Jz3A&NMrPm<9~5F@%V!$KRwir zGM_J-w)XdyC+E6v(dxx8A~YpfcWiRf`Y)QI-*6~;854^Z7gIdFI__mK?SX&Jar7C* z1^dFNbiG!hP~WU21OvAemRalZr88kE-)uyo%ofSLyJ>jKJ3()&VLwEAs=Z;VFRsU* zv;5wMN5FYZEtiIPa|3Tw=eU*O5kms$V+E_>k5C$#u0vCY6Kux+{Ffl5X97z4_J9Ig z5-*R(tpSmP#z0?k_+krUi(?kV0Dd_luHPJ;&aCMQRzx6`GDQJT^vwc(u7%r92Pe1y z`l5X<{=qGw9qHYGukMnVm1ossqVp(_yT{`C&?$(>RjEl-0>&Cp)^YPR#y@Iks}TpW zVkxg6Wu16@fc>F{*F;4_ScH50ea!bTYnahtEpR_Mfv!S z{^v`p%JF?F?aJo@EI(Y^Z)+5-LvArqiX^O$mX^nVA>LRyj(5al=B`oCHWAHWs;`p3 zr^-%D_@jML)~FG>4_ph=Ln-}(J&3=t3HOCZqAE~sFCD}*dVcyXSv+xV^RFg5oE#E6 ztb0U}sv*wr_%N%aGys&tqdzI(`E(5j6tcgDQbAky ziJD<)_~Pl27*%eR+d}89s%Xpp=GdnDNnQTJf zDCDFpb&2#di~N2#Y33nr2(Wlz=^NX_?OTNJVbQ z3~VNF6u)Wp1+k}JUSCotC~!3AbI8V($u|^c^38s3jAHD+AG?jq!OzcsS$tR6nfniU zF-_-&?G217?Yk|fP(p)KLIpQzDZ#OlXm6thlK1dIx54zDGII8(KxAfnFLMynLjBLD z)3`&`y~X3tTCUYUCA~8A{@KMPf0ucUpTDlF9}RT4YwdZAp}Fs{DIYN)RYnRC=@g@; zDAH6NY`;iu{|=YPtvPc}jTXlFBRY`e3441~_Z4itc%Z2Rw+vjD$Rw!^Yex z01#lTYX9&etgfzpln;86By0}5_MNPKnKpEO=hcJ&%BR?s;fD@kcJ2^z-wcVw;zKWK z^nvL=1&SenC9r^*H4gUo?nz0#%+B6a7f>@!_X1A%S~RAo;pdeDtn!KxgLxa-}iJHnb(bBv7 z_wS=nkcxSfd)1b%j;*D&^>STWk3{o@-zVk_nUh~xF{IhM_G6KSUMa*4Df^3s%TvAq z-;2K)W5Aig91TCdBACW7iiB{Y1e|~7TI=V}g2E2}n{5J6iekbyl$SCNxpL}+I|lsS zN~x@SDM~zWreL>rdL??iuRR`qTRdyyLU4osC}a+5EDM2@&94`NI`bQM*y% zLA081GrEeL^DQF&%M+@wQD;a=y`WBxO-w|8{8*AFdGFrIJ9qB}ynCk$6ZsM!--Tz- zo^hdstc!_>8J?VcnUQfTOfWVyloVVGuoQYTE;BKm)=MV?AF*SW{wN464PG`R zC1&#?BO|?heMK}iFI!nzCDiTh?w+}L(HA%Z;0EQ8`1ZK1Ei`yZm-zYn^AZG{y}iBf z+`j!xf&(#G=HozahqMh$K;z$UfP!*Cz^SW;ihu3f<(=y>DVVq$Z%5XC-N>Q1f@fq) zNnrEF^46$Z>&Ca#jn?rhU=d?pdmyUoHBgs#L z4jZDV^0Kl=GBP@ao|qMoAIlPd4rOQm`7@{-L}=qYiu|OMrdC z<$FScu#wSsb_)JAAcg-2P1q(E3SmN>7b$&v{mD`ViMj@4vAz8Kl3GWf5?&{qR&4%- z!7<$ z(9-ju&ni)Ze1P4duVo|o|5dPh;}|4vwu7tsxl}z z85Lh}OTg*)^!f8~2&>(^bLURN0mOG=z`J`GtgEA^w^Al|q_11Io9HN(A_n7}tKF!M zA$Roig5~kg0;Kl+>>c8=NgpIkkNzZUi<2$pt>mqy*7Ox!W2u@orteS z(M&c#$+RbT`}2d{c(6j}>Ti^si@G`rXex5yM15ZR&4uaa4xSk?;q5Z;&98{J{K$?s zCOoVMNrAp1aw$`j4;lIbPbp$VyE~_h6)ogsy-QwVO$wAalq--g$IbbKNW6BP5Bn&Cf+d8;vp>=FLK7VPtRZhd~LtA|8gqKMGm z)?9gZ@~1I}UkRdAWLbnqc~y&qGhDA~YZvcwOaP^AA>zU$sH_Dd)3QGso@uKQo*zpO!y*Q#%kbwMW$1l`*Vi>TzK|qE+=!`+Rbv+UU zyZR!Cq3bS^g*vUIi22e?+=yw0LVOiQUTG)7C-QW|{^98;H*E8hRk(L~m)|ny^8G`k zAj0y`!GSPTtvw;#Uf|dbuqG~wG*hrsw};!5e&`>>vLf60|HT=&jXBQCvZ?LSUi3fM zw12CTAl#X?zh!q{9q$#cWfw;{dsytzm>iOZh-$_>w;B-C0+sPl=+9DD_SrwvJT%)) zkP^;9cKGR%iM?N2liM z-?${u^X?c5vI^D=y(%HwYf!H_*{ zsGI_}JjHKvZxd#rel0(>1V<@`M#`H~312r{W#Zk}%RMZ5e6q$ta%s(nSmqzv@Re0J@<#b* zpsY?a7Z9^Zko<|cuV-^fa`PpllSeZILQb#;{4+x`(-k)znatD+F20nCxceplLjCCYt$0^V3C zYz*!%i0RI`=<)btEb=u5 zQcvVjZtVlj53-3F2S57Sqs%|A{KrD|7cK**P{;&tDa-QSS#b+>`fcdQ}Ert0n;K!%P0Iyq*WAg}|S 
z47Y`a;c(UhfGv=ogSKzy1XWd^A&N$7W&L4Ph!;EA#rn*7VzN*BkqWnxC>&m7TTI!J z<0AM4spcZ>m==KYP)yD@AYWI|u-Mn%54O;=r>hg+ns3E{Tk|(jA*T;(S-HPEyDH-p zVs{qB^6?1^va~)pzQ2$cQTgM~MN4O*684!s!j?MMo9%zMT==u`_1uck0v^I#kwa9r zJLHF6tGstFFiYy!*?@h65%Lny0W-sdgxd^2((t}m#iHMO8zhYke2_FiY8Obz?UA6N zCi(IT(*iad`Mo``&BMN4ZLEV^!o%~SQ?~2SoC=LeyI%C@nQM|%h5t18S?=1(qrUn1 zCB^UaX+C_zImFn=fYL5t^(88EhDAY7NB|@kI5|0?AL8_@SFdhl&ds2iZ&Pp6#|-v* z6jpg_u#qo~q)4KdJS`$uu0AFkEM{NfcM}&E=Q!9{97J@^K8M*vB;5=tw|8O4Q zNZ)m^+9wqtkYg+4li^-1b9+Hby^=2lO8sJ(Gs{vl!cN0dUA_ z+H-4b+i11zRWK8E;{J095aCo+w8!op76xwWu+}f*Q|ykoS59&9IDR!_n>bu^8E<`q z2a4OHUl9tq5NMQ(BeAN_+f?s}51HG9mr4Br1;#Bh{Jh%GKUWZ)Ykf91H@CW>0iukO zFVo|XiY2&8?VYHZ`E9{}?~~d)gi5T`F_#YxQo+OorNXVaSWwMj`l^ha-1mMy>S%{= zTk057pVHx~!lP&4}uwBr5y*6-Ll`d zQ*&{k&^M2t+6pYmkGw#W)9C`|KLC!4B}P9UTc3Np%QXn`9VANPPjZc1}jx5ANga%(A3;K=uY!>2-3?LK+w zpS6Fk6GeOVswTu%khFB3-8>Eo5xv21F)Lh`>$B}t?XcxE=^A=t3LPdx&<|AtxvGRZ z2q&JvBXvW<;m(k)%DVpe!Gi}FDj&LK_~XU@xFXhXDDXmxPDCfwC5JO!RXUBrK79ch zT@wLSGKBD8D()XJe@^_{9eMmWx=wXHaDnW0`v-7Lt}i!fCG-Tjf3I68!w`Rdyv=Ep zkO5d#TJ%Tg`(VVxOi)(_BZ~-?T5CP0{+y!2M^NyCLq}3 z=g=Sj{d-!CC;<`=0{op(BRVgT%OgSL))2;7^;J5?Bp5PK>@ys=I5<$We}0#x4;xQZ z$}EFUj5z^Vr`+JO==qL+`}S>Vc^Lx*;v+TiBbd#IuaN|irRNad)zQ^e1WJCAfItj_ zTc;`cynwOHu7K_W?%8EV#)b+1>$J2dXoF4xTjDVF^6_!+HW9V5N?Z3WaL6f4NWgpl z{(Xlns1J~A@+m93bJ+E>1E1yLA^-L37vwLpndIZ8mY0{me*G%s;E-pl=)4WKh6jk+ zVJ6|=EA>c$gI3+x=m{M!$Qz5hp&s*M`>y>b>iD~d;w;kNDqFYKrP@PEYu0)S{3nP0 z!|+-S;m&n9!4JMQI*&7@pyj~A_CwA&FLH<>BQ1w>eYveXtE8w%#|7@)?b+n6bbTq` zzav>;KiV%~JiLE@?lCa`)t+YAA)spdRJC_uVZn@(LVx~=$9Rj+-V)QNoavrR`Tp^~58}9+fC_(? zW>T`>&~DOUYv(dUZiqvF`l0(52?e0mB%^~)KKj-1EVF*#y58h<@JfYfw`g~4jm{nD zB1qA#Bc5eb3{HiAhY{Y^FYT5EJ?No7sF$kb&IVIIaQXE{uN@X<$}cRt9|A^m?03Mr_`rl_iN0>M6mUK0lI=#Mbt zAWh!UDvfwXt=b&tPv`A@piH+ULO=pSMZPMbDyyoXs|q?Xkrst5X9$C(7&J$V_U?zW zYFkXylRb%p2r@Tzf2#hbFwdfdXcfSR7e7kM82RLYv ztMQnzMq?zu@#-Sj*PxJ<_;L<-wa3fH_w&VlyoLiWqk$Rk&<5El#xU=;-oRq(9yl@e9{9*W-VA^eccr`)cn7nL-zi@ zzCLIlii5qsT3kl`0Q;8HaX_?9w^&b&#U~e~{Za%QXL3?|d8A^hHO~KjA~m&s`qP6i z%t7`yRr0Wz2?@L707e1o;}Cohh$MQ~76+-I4e!Y}*+!AS5@!#YyTdu>N3T zD)BPIFLr-(_3_!mT z0km10QqcH<4M!KF7bHk<*mZcA%ieRj0wQLuUH^>k%=Mw@XzD(Ta(n>X$D!p=z-FfH z6zWZ6q#tyyYr%%s1zMf&;;GLcARpl3A^?r86^;yQ0G)w|PS(q!HSdxa-Os%xo-mjR zDrz3R63>$3!9Oe%VJNb=B}YA|m+9$oVTbs>od4_?wicjM^kjs+jL~I6!-i)Ch}imK z?)fU{&j?GT?IXfE2AG$y{loV-H%_xYuk1mWe9>iW6bwcsf>@w^=Z;4(RKH7~KNu(0jtnUflD-q6YHQ4EvtCN$V|zhJad`#T-i82F#q z|1ln{QOI^tm-Mz7^x6cQv>g)a-AeAU^&+B(`tH+?;B|lXII>b>7{5niQrw&j7tS|V#B=7e zdQ;HLS}#02zI6*z8)=EVMplf;igSS)0yZc!-n`dxAq~3!3}L{rZ!YvXz;%%LJ+wrN zK+qQT(RpX1V8g$dd12cjS^BCP{GUX7&f?)I3MsMo>xhOQ21 zH@LuM;dVsqecyv@$a3%!udmpN0H^VN^QU*rMUgF4hW{OogJ#ce-Cvd*ru1fh;%B?8Y7`-;@OB`T8{;bqw<$LPWBQ zST=0~$&OGJ=@{b{{yR7R(2EL*Nq)UV1t|()VRhm52&R;Z9c`;R%r3uby}HsTXgyUNZ)TaY&T@QfG27A< zrpu>|J~O?8!y?eM$Vgh{)5TcgtjfRG^gsko{qR1*(#rJrnF;VCO^DFz@UqLtA zE`0rmn`$Z5ivFiV=r@&3?a^5^H4@lge|n>~Ue;h?-0$h8O+{ja61ygSJ48o+_u9oU zZtra^XmRD=hc{Fi52d`GP)~6Dr1g^S+(Dy?!g@NddJDQ$HMIXWQZqbYbCfA>1|iQ- z_U!425KPpej@NI;*yp=je^2-`Hl${%D?@7NmdZSDg(s(c{E~fYwdzLh?)Vd{6Zok9w3}NM!Zwx+>waEBt1h8QyNy0V>Gi5tiFp@ zHHsx+oaRX1G4kjOq>XcDbMcT`BAv@%j@TteiTyF}oSg}iW8l<%DxS}k;lfUTmQC^< z_^j9WVpua>9dmNGe(P3q2R-?lIbUaXU5Aoy9y@mJY%T`upPYCDsXVd9shewWOS*omw3=E^R%nOW^B9@0WYM7NI@3}KzoF`L}o zw}?0&;UA3`esi)Q(qGJ9Y*q8bKvXAL_@-0iFBT~L^xX#Y>&p0^CaA*q{Ei$*uP*di ziwcT9kh;WZn%oH{1~e!IoAG+dcbp$eX!ZU2^HRZ%+xcTjK9BQnwJ$$Cn!1Nx;cT`N z8h^-D1_%fn+e+GIYVUkX?|fu$d;28Q8~gnJNL7i$z~}8?U6n3ncx1u7K%4ealEvV& zzQ=^r)F>hTN>Px{7S;ek`!oi}%EnG~<|rdSoq48uT{w|Q?iC8xs32y)Cxs6xAE-}` z#LrowgdDQ%(E7q`JQ=4jET+XcewBwiJq_b-=em(jLC8Vg 
z#)n7sJnJ`m*`Q~m)xJMh#dXLon^X4uZcxhyf^IR}x;U@O9-Fg`^`W5UB| zA}uo~bTXBOEx1zPkq=E>nf2E}8F}~a9e&#KCO7*!*TwHvBZU_;xzK5pXNg`CJWFPL|*@AmwBiBCG5mFlb|v=PIJ9qsuDfgJD6vsYC+yrtH za1|95RWR1rTrB^&8NzmU9P+5bwgnp=&GhbDshH^PHA9H)LL^4*AxKm}j6w>fw6vlp z4}ko!f_V-h$x+~B1tXgTAjmy>^z-WIWK=Z?+xr4_KG)3AXdhi>N|<%qrG0HB$(WOBOFOfyQ2lq2MM zG5N8o;ssuBcSY2mo~LC1D6Y#`PkZW#Suv&dK^(%-}DpNHScn`A*vpbPvq$fHVb`ZAbLd}?7m~D17UBhk(T-O2MOE4 zbE6?TqhFG(hb?My5*-~A!njU;J9W@MZ`Ni){%_zcw0LrloNo7Ab z_g`h^)1T1@TpCGs92ymSOxWBLZ3nmPGe%y|@jE&;kO*il_GY#2SL5nqY zZ~E+~!@s#~a@ejTv1@C~$abGY)>e`)-HN2B4gyX&h5Fd_MbK=VfkPxvd97Oo_pi$; zU%=eJTA-#*tQN4rITx%my7Vpm9ro-%KmPE-4d%5E z4d0ZoDU~yhB_}q#d?2?pE5NF7)ACXvZ?@^$CU}oUziGi?Rta*8yFVKPgF;idD8^;i zQ;vr&wfu}kUrWSANQ`t$3dWTcnzV!=cPej|$sK2lI-ESw%t{!Ssq?*}^DWTuQA^)O46>h<1pH z+UU99recrWP9^vC<8FSbT&8+V9c^6FX>Nwu43rT;Vg0gdTh<(Ailqg< z9_XbQQ{)5LQyp7&yFz`gG3M}igh`VR6u*-qU;^M$9JY5sS?W_A_bAr0E~xYybX~bQ zFz^Vah~c?8`?~$yi4jjfO@SS<{SOik{ysN$c!bzm>;Cza z(|dH0t{oVcmNVH2L;b!Uq1oTgcw_ecyo|Rh7JKQN`=Z<)=)}l9xgrj}(nu_ddG~5q zO#6Ch#x06oZ%IrPWe)4qUE;=q()Ad?LcRZtH9aiAi z)bL?<{fl2A$A#(poGOhmw%z2$_r{S@|Eyu=Fal^A$jil*+he`>s`(S6+&}K)p-5#D?CE!;V-&>s!25pfgJ9M(i3mKVsZDg>A7YTQ0+fD4o&*s z@F)Bq-ge6GxFHWfRBAv8?uLIUK&qX$AMI=^ATTJ0iJGFKdiInc_EMIqSM576IN-5W z(AM?-A?(AGuw0dHgt@=VB;6Dt-jI(~B9C_^Qg%M_{1VLKsI zA{<1auniFz63LKxj0_DrBJ+@p8A`}D%T%U9B(uyjnWx`+=$z`j@9%eA-{1TG)pd2* zeD?Eso@d?bUiZ3JZAl@shw&scwdAes_W%bKo$`G0{5|;rehq6?8ux}v-k*6{5mfV; zw$MgTFW7j29ha##(zEVLR>vWM>#f9iFLeZg#}CSK4qh4Rf_QOhw#=zk<#QJSwF z{^9+Aw%!eL6@qc^_+!bLndq6qxg$2J8}!%vc_vP)5kc9RqIfU$Eyc6$_f%GN1OLl< zwCN8vR5;9z>qOcXP|+}Yzg^N66gaMcp2&~Uhm!T9da_Oa%$(@mE7|h`7}g?f@j91M z|3!nc?Jd&c06VkX=p-BPzWB7l8FehNoVmbM0M9{YaiS-?Bu;&E3}kXXTz9lqE+kjt z_a)gmWcRvi?{F$1DLcmyCRF@KsOs`u(AE`^5U}@DR0o z?eML9=WW7+>lyImDlVoKueoh5PX8r%>^Rx@QOZx}=SgSo+WhQNPvF%-qD8g)I7O_} z`!qpUYg!0v23Yy9+iMH-Yf@eMa|4%(yJvo2%=0UJ7te)dJMM$+DR|R! 
zw%hA?z95?#x$K)sBerW z7&UBp)c5x4UkEN%R>i6tzOLm>E6=aIdhC>{FXoB2$@IhnIa0gUuH!xAe(poUVWb7SYhLz6tX`-eZAQ-EFCRyeSzM`D zS(o~?&<`)h(y3L}dC8kKr#&io2Nt6bXIb{Mo)dP%#E6FeY{2O#r;$F%qdjUGs2Ax| zk<`H6#M8H;w;*loNZ1nvQuwKC0*(rR2V2uIq_b74MOL+BD8`|PJJqSNy)0VZeJTWB zC+)7h)PRP8i24_Hr&{Wi+fE4#y*})4^QNTED^Io~3J;C$OZw!O2+WF&V$UI(bsKcG9lGE}x8=)+C`YmH8=#T9>hddS9ZgAXMXj z*;`t*vgVmRW&+>IdRG_5XTjvq5Gq4vp`VQW<4gOjN_#g7H7)Z3OV$(iFS%qw&2AzfMQqBZyyFYKAgO5 zWAhO_ryA!G5=cU}>#mwLAZD=Wks5VbJVR5c3fFnLA=kMhT0Nfenu+W8;t2 z)YZLf&o+D<9_|nJhO(zEfe3}dKo#cz-hNsZnvO86;28h~D^Q%BzkWS&SQ6kHXaXyN zG?fTCf_Yb6eFT00id%5YECq6La*(X?e| z`F$}_LZ#BJMaoKkd~JJ7%y^#7+=8{gpI<3Zm2t=Xhwlzd+sw6Er)Fl#fgaIB5;86R zhs{1;My{jei_SP9^wdiKAA{e%IlRig!jsxy3N8f#VmKVpA7JND2p*~c@qwl--r95m z=Sd2XYnmjwyPHsj95(gMx0f`R*1Sg1Ux=A+p%`(!tw?eNWlzx~}ZL zN>FeJ@>#Sq_e!R0j{X z^qNCW6P&3I((-D3TgV1S&`=4I<>|3fpFfXIhid9h=8s(a5>VY`DGH2b;b*9fwN2Zg zbT-L6C3`NA=t=088N?M#8tV zsjP2(SzberTzDO=@{raL>@uy0Cyz(Wh_kZ6z^+AKKEJT=5VA|vNWbC%f>S{KZ&iDI z@qRgjdTZQ*ql!!q+S;iO+^FKVUn=)&`2)p#_MpD;yao*nQyL^7l$J3iu9@In8)Uhe z0}-I5rG=G;ha%W0J?bSOMLZL^;H^t+2Z{5G&mE%;obY|V*8me3RUh($oB4rrGCWYz zJ7gKcoeeJlsnIXYVhi(Tb>{8e%nlVZh&DB^6I9i|5d4X`k;a5g6T|=X(IX5|^qwUr zse^~AFX?qy0R+>a6*BH_Caq8tOTnQglp(n4N&u!U&u`K60_c)fbw?;o;3P4CF0)X8 zN#E#pA{ls}ecrI8p;0#xw3f$i;+z4CoOnr&?x-@4{)e=XceHu_1CS8+N`IR!E7OWj z^}}j)w_#Xe-Mve;%HAZzy+M600>Y;>woyUj%tA!OcJmJ?D#qrtWRtY3(rs7hH`?3= zE=N%piTfDVdwakFeveeT^b;cOpdB&j3IQ$3ojm;5Oo~|NQkjPH%sDCytI%lz?wAX5 zXD^S-y6r169w7e*!UDgNgJFlm$sp?8iweSLkdCHS$X`(lB5!9HESX&B5y z$_amysfoRcrEm0J#)evi6a9nv5s&>NujtW=x#VG@F(Dh&<%4DOaBz~*qZs{i2Cc{i zljYTVCDwwSE{%=b_Uc8=L7^qJmDGAhaaB`ix0ZLOM+n!LfI*o_bLVd_{bx?$3M-<2 zo%bI9I-TTKEkX>vlcFfkMnye@(RU_N6?(5t_xzV7F)Qv;e*oC64A92om%I*Z#l`f_ z=bW~8^(R=8mj1CQ%epH?q>8yLgqY61_TOJ({_{*Vb_XdLQS79kVAN~sYTvqPXd(ar zWRW?=gM(mzRL@sFkCUym(^>P#vLe>(XEO6MskmcX{9Y`OuB737R7FtoMe?EU7*7v* z)g3Gkxjjm?c-`J9M^*R$+%Eri9e;7y>ImR9NV1bF!=<30gZ(G7-A*3?-RG>beFem3 zwX`0S~DUz`&hw4+q|(Y5uFc8buFaEg7EAD+Qs zjSVd~Y{UnBWnrQxiA9gfq~aKv!zaU{P!L514?IPSk88-zrC&w$qsoX*BIg*>Y_w@vpw#oM$Z+6X?^D=a0E=!1!n3dQDFtQML!QS6JXX~ zA?87p;imT0%Nc&b9GPTS1IA|kGMvldicyXIbPS#=`xejjUM!Wu25vSfe-1%wta_P?mYHo#>4AoWj{QehyNIrpeobOT>S5!7A@8|hT zv$f(Hwl6X6^h{IzY`Em0Q})n%*?qsSD3kP@!c!XOWkV{IXj)!9a~7`=CiRNN(cUZp zaWgV4G8UN816<~Z(G6I?2E>DFvnZ|5^k`64uk<#6X4iuqLk&9ybbU(QU_{$1|0mF7 zM_LzllL;8C-g4^mU>aB-`)j(=IV6br_vgkU*EO{#b|-6Jd6rmoKeWdll-6*6H}d(4 z06fFH<*ljY7sd%Dzn}J-`O5iSmhBeU{I01q*NIt1apsjf;zQ1E87pfz1+H{vsf%>= zEKNBU-*;q$f9IuE#24i<21Cd6NKc@gt=fEI_g>+?R=ks;jG2zmLEJ#R6_3Wyr9{N@E# zW@dk(-_hT1e`C|*OuR*|X+GR&l@9RG2F!%MAABmfv_+P!Kks^stf|P5o;GmI*AfT@ z9n%kZHLUF4{gXfP+T1%=&tovuG-yTu`4uj{JbY^W7>}&$ccG!J0iAi#lP}4OnMF(Y z*RzyHnf#JsY$wHp$u?W-NV|66vA6q!i1# z!~VY=p#U}&0y-KR0t~XmwC}!43Pu4A;C&3%Y#yK}laZBmbcqDEzsqu|Gb1B|#A@nr z{mMVZk3kZZ+fQo2EIz|=y}wJXyGl{zvqAqSTsuyK7_;0%Fzc|o$fUpLkG39eko`S9 zy(FbK0L=BbNK7f1o~s4iSik*-+usv72`Dl;b>CgxMul*NSX%O}E>4dB$U*+2q(oVh zvbQDW(kvKZ>n=({RuN6Kg&NYpW*6hGBDHZaO5DC`Zt%k_I1@X$O#G`zV&kU9`9I87 z2qWrAFzt5v%;a{L;~G+8ZDYNyH!2>{npa16b_qn(XKB4mPDwdqU=TO#1d68w%}iaS zRRJs|F#w?E0@PhVhk4?}2?+$T=hHQ~;ms{hd>LTCKRv~H^k_Vo3ScAZ=g&@UXDB~& z7OtHN(Ipp`)1X6=m`7>B-AKMVS$>qs6bLXC)z#8fALrt#eiBr9P1RNz>;k<$j*c;w zg75SIm>X|bCD>h8N4b!&8CiQHclE|L@RLKX^@W?O8zZYDP7fr(8Og!d@<|)oxX6rdj0w> z;YgxgQu7{2>hIH(HYce-{$Ab$+F&zc0Z>3f-CMIz55#_uw|Iln;x40fQuBQ&jaCSiU27L<0=ZA!Ga*v&Av_QK0y3%AEpkl3J=G+A5L1>#EM+j;=*uc!x?I*~ zFdHVUGEa&DkM*pqN8%BMzamG3}YJV1B#n1mfk@UA}# zc=u3SDtUh4C^m-rJsgnv0y6$z?<@r61t<9Q^=20M*nfkI@fDskQ$!li?E~h{yn(;J z%Zg4wzgLM>>yo*>0Wj4L#+8;W;@1&oT$T4?>{>tW?+Jy_teBF6za|Phq__xcNjzgo z)Ru~K2#y5kMwe>JE5fTsWqfp3<&w_B9VIWn$6oX9FV0vf>fIpEB)I%I{@9gQMF)Xw 
[base85 binary patch data omitted]

literal 0
HcmV?d00001

diff --git a/docs/stable/_images/LogSigmoid.png b/docs/stable/_images/LogSigmoid.png
new file mode 100644
index 0000000000000000000000000000000000000000..3a7317a158fdb662fb109de444b5e077c7749979
GIT binary patch
literal 26980
[base85 binary patch data omitted]

literal 0
HcmV?d00001

diff --git a/docs/stable/_images/PReLU.png b/docs/stable/_images/PReLU.png
new file mode 100644
index 0000000000000000000000000000000000000000..fce7ed911f3663469f39a2d4b89729911ef386fb
GIT binary patch
literal 26863
[base85 binary patch data omitted]
z_bH~ZMcaY3#%YT4r+3D&qcB%UByd~X+wbGpQ2${>1E!7}UAPQsmiD4i-4O?!ANa{- zBOk&RlatERhq`aw5 zZ>m0W?l`IEKEWM!?GSBYKHp5S@ImIMmL=VzE@{XT3G-9D^z`%y$L|OKnnw9MOkSl0 zC0m~HVpnA}ANctapSf928SqA=+c_b=suZ(HhSg86^QEj|#ZSBLjlSV%mr`51epwu; zOVllFBsWH0hAsRo?i){Xd7I|zYb2F2{4|6oPM$0qvbftv*7w)Ucl4S(A-btE{N?zX za2~E3^8xqhCW_QXsxQN-j~uX@eCCLPVETR=DKG9e3{gXfNT-J4Sb{ySjcLch_lAU3 zgEMc~nOGdghGjmODXf5l zRS1-2D2Gjrjn%u>oP3hZzIyq<_@Pfcg-LI31)HY~=X6gSlmYnrh}&hJe)vLL#>` z9F0}lgNQ68vFAHJLXY|{ip5b?YNew0c^dau?5|gjWDg%8e5POiWx6wem5a@dc}xF> z`-dojuu5%wcn-|=*MOu$g&X>WT3gKcgv#`SY{X!Yi?=8mBNyIP?QtO?%wK}KEq6Da z=Ol09A%pVsnsapoSj42I3vb_X>%v#uvn`q8tSBUS#IJNm#v>Zff0xuJQIg4V`}P^` zCAPu)_c4VRJ~OQWEKyKYe4th2G*r(w@Co8xV@N|EGg+U!a`^i9nqe`dI($*AjU+)j%&p5S6bq0Yr8#$}BHF@IR&#eZKsfWzQ{FL+S5Ex=(Z2)Efr_M%}Yrp4~gwrYVL`%KBM zoA64ferJ(0-}m}(9RJGuXrkoW@b%r*^t;V57pK9Y;~qMMiFiGH$Z-1fAxI=y+1YJH znE{bNbiDu(F4F?r1K1t(0wyLb$O;=)Iy+SXJiUGUHn;F4q3E>rc< zR3gilRdJhH_b2LUQc`E8A<@R_?Cj5hwKg?*0NBwf^}yhuODc3kCobM_1Rot;QfjJ< zl~o20wKgS;u_d_cq{H9?(r0w>R4jKX^Aj!LDk>Fr?mAu`o76VxdOw+BeEAz!8 z`kHIFTd|$v$>Iq>Hl~?j5X>bdEp5~q`<+Ar0@k5}$zzSazr9*O)HU5oOIi16L&N#i zG5>;*SZ5AtS%=3%na$J{BCc{`6??~*kCY@X^ zOU*K>X*w9>oLT`u0kCNo(*V7Z*{;dN$hbhYOi0G*68WgZt(OR)Y&wsP`ZgsW@j&N` zhI*Zt#0axogzuxHEcf~d(7pbLI=`U@)15_Ib#B@X9ipUr)H<0PLk~|q;NnQhT$;c5rHY!;jy#RUf*j0zEU>7| z)3Bu1pU+(oL41Qve3W?(GG(}GmPI%TMm_lL(Xj;g1-b3WH- zn!|A3e(dwzLig2^V@g}=PQ3lz2VMtXS0;meI6`yuk1)J$#jE|6R2LlnrBew~1=%}m zs~_GtxJZbpZZ#x7n;SJLBuT|UqkLkw6He5+FxKpr%W%7iWUNHGRh5p7QOCuRC|g^tol#p+W41I#z9ns^!v!gw8(&csaOD#8 zQ9VENa?E14{xZF&B(5i5&xRDM2fr6up-8x^Pw=q(d-w7U?L~lbh3Q3UGeQ2sH?|y| zd5eDsmosfWE;3GS+PAGI58et%OF*W zA1LI1jrt@t19tx|JDt3#oTO=yUr-r_s(?NhN6+6Y}DlyEaI zmsNBuRcU1UWL=q$Uo{CpEi!m5dvp)R?tuciQ~-%{GmVa6OJC$WwVe)=Fx)iBoz&~) z$)}J_wke9%%H%$+g#|^AZxu<#lk?+0aT#S#Jd;+$9xj93qrL~Kzs24lys0;{f&J}y zTWRCl!4Tt(LC#R=oNfiNubku_qUE>GqS-0)(Z(Oq`$&H;QZ79SB*rwK74|r(JtZ9} zDcxabns)EI&fy8#B(>2wf6v#W4hlfLE*JB zT9xiy}tE}A@|psI%NAB`t3JLMy$-5Qyr5u2&^ zPolwJ@Z}F^Vw`NEq&yfcHdCEpes<{kftU)+bf@n+pKS4rnn!tYa;RO0*ai&-*(cg~Ka-=cVStCZ zCF@|f00{!^ELkNTe0%dQx#XA>u4i|*`rpe}{WP`D>A5efn{x{}YS#}m@dy(eo6+7E zjVu})CxaXJDr9fup<>E(Y*9Ti3DlTAKTo;Z=aOiV&D(c0kpF06zb z53c(-&o?r0dE2t(A*71r>&VZg!pr;O@A;6wGjjdFO_Tbnq}dDxY>Nx=^dDXdV5djk z7in#2uil%E4P0r3!$MCdOn$F$NGNEMk}9_d9<4dL3m`fhX_tg;HEFt4>!QYE408L% z@oO#UfF^$Y>>0LC#T#1{8=v1E=2duou-Zw=5j%PbzJJb#Ax7{!t%)?|g)moJ-0(@v9m3rBK%4h@9v}K7NgZR` zthNcGg65VTjWfcS<4mN|o|SsdTi4#n{EoEt99=ZRHy${5urSUCA3{o}t~%?Kt%>5! zElw*I_nIv|%=B=@HdOiJjeEN?)|rzjkKUXSAEqNqQ`?^V5zYCjd~(F4cnu58OAw4! zP_xqV50YIisVfZ^JB}cS{Y)T&|BQJTy(`fyD&fGk!p#SHlSd!Bq#Z#lAifEoOCe%& z6ULM29%^58k8)j6gGv)h^G~J_lY|7XoqdOU^ro52LfMs%#*@Jv=Ewc2S(fzYWnPW2 z=^zF6vsFO5>SlIv8Prh%4%mu_iqBD#0^-3Sb5>jCE9vi-9<$)To{D+3_;abd-6#ga z5pbyT%drQ|S(?;=&hhL;CVWmk?#5+vbQgkXMf@NUIJUddQ9?{bm1JxGN*32MKVL8{ z$~@rrs_`QJd^)K#(AZwkzoaDS$0GuJO_R0Z;GV!@%3>sf$B>X&Hq7HpYT=E^lym+` zNtfj1eUg&Y?WOI1uiBZXZ!u-vmMPwn-F)Nv`ha;K@8Mvr9-JM6idnO~zVQG+bS9l? 
z7Gzkt2dtrw=`9@OINN~O;mVb zVCm%aggkrpt+2SRR zpDG*qwd!XtU*g5`Or3ZPH5N*~UW*5{l?jAC73)UjW;uNM*W3C?NCyy$DCE2};f(d94_){wRBQmv$|9sKMW zsUfd=P-09B=9@QfyrZxI8mHywcaOG!C%5Y{9dC&zLaQE76W#rgT80MZDka4Zk9g6` z6*why*vPH!Osp63!7|l@)qqH}@NQd{7cjI^2u)tyq^(QFO?ds|(Ol$iUFC!E(AyjYo zt>?Ne6W*p1MKP7bY>+?q?ibatYb=MTVhyZ0;AYAd+}A=-+HT3m2Q(pBJw>p#S)I7X znvNh`XL%2E%KOWxUiYXHVY)}P2k#6YD=0k_m_4bp&wZhIcdJA}MEC6cIc7%`(nu(f zswUsmQW^S%xf@tf(ruumVw;H#L5!*ygB!1v*>!Wn|f5sibD@Y!=`wPFdofXFR@Lkmaq5DC-kl;Q zibmHgie*C2&nu>BPF2JZ2s*xXEIl_-$INV|_>~SU$dR45b(ni_8xR%lABakL)EK+h z-TH`cBfzz^w8wwI1Hk(wW5>(Z$a>~Ys~A-4n1#3F`2ik$VycYJF>HAPuZB{UNlCEd zegC}Am^bP4+z%pwu#HAd)(;<~g9%O3C!(%e)}D+i=x@}>G$N?GsO>-iue@Hkpl_SI zyM4l>mJI{BC6<8yLYCI!H8OLsCM zjrUGwvZ-CK>IwBhov(u_>h}JZI#iJjTo5_<`r?Mk3cq(3{K>>cXrZ7EFLIj31+Yi)H|Uv@qm<(5@+|GHvzN^5wN)cM1Q!i>t-cthrHA3I|zx?#d(nSoOIBYVzk z{}PvSq0(A=y0XOn8zyql7QgJrOG)CcVpD&P9D&?EsFeI;l$aMG+FF2}j--w0oJ0nL zYGdZ-&b#W&>(7hVS8wwfVr(8(RtCvRRhRNVJ5pURJoSb@m)oe_lz{4^$GkfZ+wE`G zHb1#(DQsC508746u2?E6@$U=(stT>`LTO9>hYO3&5FuP|orONU62R8%$(kRxQa_Rd zoNX*8ig7Pl<6Hs0ARikb3I%#E}VguWu9OS4s7N>4z-$HGmvW zx#M2kMMC$$CVNfJLUi{tn6$3^#sFp9vL1kP7FI)hJLC`1IqE*}eNcP@HL| z*!Rj64YviUe=RSTjG9vdeyATew>(dJAWSHZ;=48pz8I(2Xzer&lnWTA*#kwSzXx}CK|M}W=1y#T3Wbv z@}Kh16}0~3aL@X5|L!zBGWaBnP?cMR_PA-k^Oug}W_++u#n8l2!M|2iDbV>J^5A!< zQ@0QN{CNKo2x|`ph=yzAZzyP_G79q}XX1&fPKpTK?K{sXIr3K?K;~C2-x{y?laGQl z0K%KY@Q9Mh6*dG32q37aZ5al#qe)%U)E3t6(zC$7$_xJA%}%!11DuBaF;KMMOM`9OX$9xXQQp85&nW75R~_eDu!P!;(FRbz2~yAn}l2qw@tQM z`cTSME<;()XwDtPjL)##Wa%N4&wW2Lp;SVrOyZi2JOiR5J8MzMf1VcTA*$5?U%9=S z$}9G)*GH^7P7aEz5p2mGqNVDfwhs#6{-hwrXnuQKu@p*y%IP*eA;hUO$ZYRxI{9l2RScCj3zFgUo*eOiriYY1cNNm@ zdgghzqrEfUC9m)E#7XL+>5?p+am+cJlP6D(ccwF;)c!5E^-)9D!YE#Pk!z_P|13yi z%mQn|5zpN?hTYGWYvE*Ux!Ig%)Xf!hYOwQK6nUMx-Hj`o;yvV5)YR~hy932VRU!iJ zKy8SK98XHR_q*j9k~3Rf=f3N`#Z*nwRZJkdHTf=gJL7J>{ST+oz15A+;<-hq ztZX^px~luhmt=gPq@<_1bJ`C68*5ZnRz~D#zKRMYYbub!jsxeZNR;wQmYLPhs((6O zJ~T87i)Du^y5Ed9oWaY&u_AJ6WIa1)C9HS3=#;)yXkboojp8I8fBJNEw>*>_SUOq~ zXH9`Rw{BM)op%z>I7O!Ml?Vf=TAuVZWFZp&yc@Z&T*aOB$w|<*VE#F`tpg##D}#7|-}vWHt+E&$C1=|I!x$)TA~&%#oz#*Znkq(llA zJPrv#ISWH)C<&PGQs207<5s`y_QvvqN5M%}E|=xX zRa;y&C)?y{DYTPi&x(@kGlN=8l&UjjL2NXV_$-dVd zs#cQj*c#jCXO1Z>TI`z6U8V=_zYH{t{7O zlJZ&{038bYb&7vy4*{f?}e~;uSt#9yHKxv(Sz7UJ@vGd{9dlY;z zrPC2yv4_MY+wv0%LMXoJ$&|k(E9PjSA}o?9hKY5!=so?yWfv7#m3E`gba%X^Xi~B( zewddJi!E0_!!HnkxEErMLUU5-=E|J+lO%f~=3-6Nn?oNqIH=F$7)X+dn^e;mS}hWP zvODxbw3@Z++yV5W4L$H$^_7rmeXA!_=@>31xZL?t@gNc=_>r1ibAZm2t$X18^MsR@ zd$ueF)(%o=t&Z{iqy)+aK~pyZw9}6ZS${zhS#8U~q4n^Ck&5$UlU?U<_MT7<7zDOH z)YJaKP1^NwWpDJlI$q|E25Mh-`bg+1h@-#QTWe7Y4pLW`AQTnRNp#aaYT@MBy5LY%tWQj5Asm} z^c8m>9>aG11->FuZ!6ATpC9H^b6EDFLBwxSP@~wGR@d{I+coW)xRd)&{soVnZeF)U zk@xUwA<$`Rp;EeKYsJE&SG@uJuz0Q=X23p#k1z0xUxl)Pc?%I*6j>9Q#l4^Grs}*% z%(kKE9!%=qA9d~OUJwLscLP^hSwTtZ(=X68mfQsy-CDKbMDB=K$(xus+6Va$2mKwH zET=tigV`(glLc6O_-IXKfnB{K-%;qEiERZ?op8X_a z;(R#Ni)eRQ4?u_>PAI~taPk`VUuWWuldrxx`9LeP84Hb@OAk8$=Quu!coG}7sD-&g zt|F=6ArR|4RD4E%s9LdD$|ed$IgXyeNAc%6Wlkj|TGO+Az8@N2sejuf7nn{)&dQ(I zSW2q&_Hc+t%O2AjA8HcRA&_QaO~t$Of(Xey5r=hCHdsa`Tscz|0`WeI(dNg`J;v|v z>#Kfa<;}f1wNQ@_k}^;u+B&0WIok=ez!nuj%=e#3c2W{>MBjgzx3FfTnC{Z>y=0t3;xZ9=K_ ziYj<+(BtNG^-BvBRD=EYss-G9 z$7I&~J?A;7?_lGcTyRlJ^a@1>QU&qn!-o$F&eGBc^z`)3jfbpqzmx^4Z1~rUKoKFR zo0`jA@(8)f70;pzmE-+GYkE7EG%4KovKJIDYtmo{TMQyJgfg*q5Ent7LE!YwRj&Vb zIMv>8Q_g3gd_%7-9{=^5SZGOU#A&1>-LUID)p^@fY z=6|EK;pWkgC-$&68XV#d4A#tI`hlQyrmw*9ZkVr&`a;>EI?YgDu`kAh<~QfYvs^7V zeQB?gx<)UYWE_a~H%6?jt+Qq@0ks1pua73wnF!sJ4B?wT0D4{L`fVX}UJkzV$FxB8 z>Ra0&pzXRMGOx9-jw(zk-_QGnpI^IkO@wyCf~fei80edrQ+T2ZjOZo0*PLU*Uk%#r 
ztIUeWhqX!{;1JoZ=M@*z=H=zhEiOs|rx)`0@oR;~hJe*0Ct`RmuZ`f&z?kp*jiiNL zgVe1!cXXH)cKBDy)d9hE+EUPthq^7NCEL^H>h_Ii_7zll+k(eq_orB( zDN4Ua_K(>(JcD4avm5AWOL0Vt+w4oDcsR3mnT1?N*pFl+ko8R8Vyk_QCPtMO**N2`Hah=@dDpzArjj{o4CB z-1uuspo{@&WJf-=mQUvUeT9~gA}^t`PGwG4*m`O-+#b{!bfHst^{njidjXT%d)Yo3 zYV%nUYAUL^Pe2@`%Bj=E(*a~Mbm=r#zbd2{R5j`Lg4GcqxO>MDSo z9qIU>_JBgm0RuoB<+;1WHA9b~v3An4EkXn6bfAjrgIKm_E^WM+_k0b20+ zG8{U39-gCcm+<7`GOz#VA&XKIUgJIj&AlMrFl6z1NmXPwLM#aacLkTr}S_XCEJ?Dh28P@I-vHcb}PkcGt9>@M0YgU~e;j_SJWxTII0v9`%?N z5I`zX@jkWK4+Qq8M*D91oorGDY!Irye&D3FU%)vkp=0bs0_{4-OTNEgYSOC$jw0P= z0jc=Y>qcP8$;y5L?MrjA2H#<-bGQhK%OSfM@;j4K3Qatb!F?Z%p31yXQ&I65aNj+S zPtInM4SMfej-$^zvy4M~#vp$SWy}Ko4Kg=}a~b#UrhEK-(gTMO3?$y{+ci_M?LbCj zoa=7ia3y-OtY1D%VD%f@`1(cPs%Mos08ipvLsc#4pX_iw7Z$9%yuEoFs;;7XD4W)P z4|5*5-UfD#a&NL0oHr^g)Lz8bmD@(snV(J+_9!dcs-@K2z8}Ln*5|ena7C@4%fzge zLT$5aV+N|IPoCU|EGewsAes{D4vLbJ53ceSbD_ekjSZ(zlaI7dn)x58;)DGb*M#Qt3d0 z`Q*uyTU8`~l#2GGY2mzasD#XT?F*V?$<RDFB2{0wfatI|uqD;NjO8I+M6DHyF1 z&}^1(H|-ZGWG`q)i&YApRsnQwYZxV43s+;ua)mG{p>>E%`$WvO!QEm^C@+Z9?M0`3aseJb?w^{MM+{ITX zlY>>qNAIdhN@5*3as*haYtF8&B1euMb%ehTOhQ)o?Acp?CJ!<)GH5wGzjWRspC1z% z0#s4Y08YT#C>6f$?d7L)ef9&6E-!U;b-BXp`EAA@U)_8o>C=>FZJ5)Dhl?u?Kd~!a zOnT=AJmeytBvyG~aOdZi-jv_>iXrt1V?jSoNGtRj*cMO6yHOg+6_^w`#snMQ%TZ+u z%-Anl<<+9)EIGw08z){T>Zc*&!?>?&Zc9Kq9{V!KhlmdCf76(%ea)Fa&Ulzkl;-`- zWK0Ay76{<_Grf5Zp!;gvaM=?REr|BI8@ph02?%tp?=CIbkW%c<3ZC$(@JubfkO9q2<1bQ_4fMm^S?_ks(0YMwoNHV8eQQkIt7AX%JmQ_(*EigK6$$}uRz(0yow z?kA)4ONp#~jvd7oKt4m;rrY+i3N%Fuxf`adHl^!VlMh__-VoUnGjJYlJATUtk5*7n zKuTvvKuN=RPdY#Bz!o%sjs=vIfNRu9Wa+>9?~2q!cen^bMl^<>Ix%EEb20B6UL+V?&jRGR~>RipJp|k=qEo~53W*B-^ zTN70Jr4>j)Kl%>>|HrrhZ0j&Z6W_jtdSo%NdLx<%KmfAXO@3gEpv_!{4K$!beFOcV zQ3io)Ch;wRmtE8}7%9bz-ndkl@)D9b|N1hj_Kw*vSk ze?LEB3JR8OS73S+Zr$qj6Isymi=k&@3;3hd^>fNlNX^B@b{L0{=JAdwA0Ndu^h_2J zI&wo{20UY!?5oDfe>EUL&T>u@AL4HXv|k40=jUH6$?TbSfaVgkXYXzk&uoRp6xzA* zkM&WU7qfoiF}MjvpwNKcxezVtO7A6p@!~-($5Ao}U4@GcLh5$S#u0@zc6&`C6l>Pp zzq9~;GrtOlKTWm~V}#KOk#ed#3m?@=le4J~Zkf?bD_FECtLQHOXakLQYlc4kA4f(KAmeTCDN?sV1j=}i;T44k%R_H(y|uX^b^8iqh^cVb+_3t7 zp1n6cdz;3!xvj{fh+y_4s(y3L-EA)`@~Gk2+sVBDV{PCAi?LrqGS8U)RS*7W_wC7k z{yZ>XiKMXd?{)+f;QeAW_8$11tc|1DD4w-QiVl525Ke%oz7 zbK5Hu|8cHD|Dlv)OEza-KGjY$%2J1}sW{i>UxfJn4s-f)QGU}rc@7t}tfU9X{iPTA zo9S_swn>^B}HyB z1(iEz@k=&f?f{%jgXTeKuf!^HT9$x5dQ^wsYgNmx3SHp>B|-fLBmy*qfM}8n-dz35 zI#8PIhYk&{+Zf{w_?(3vKsf5b4jkK}5iu2adZm@h_xI)d6_@#zd@LtOejcq%_e9Tg zgUB87?saP@J$f9CzAM*o?*{1sNE(xphFiP4LBXT&Y|%{2HT&6z;|MeeaajzChsvt} zaV>$Hko3bu=ug%EKR96WGBhT{#pyM@zVJ)20&@u5GPIxC|Kz6yML<3Nf5K?#2sU*3 zi$b^Cj}P(gfI9Y(n7o|aMd)5dz_lEPK!i4UfwX>+8?1?*XkcY!74eV>fl4L=TyX5z zv4rGg+wAJ9s&b&xLHjaV-R$-1TK)eH!KJi? 
zp}hcriXpTcYA9Bb9zFU1_|DLu${Z-^e6Fv5NL>DMzci5uO{i*WY7O%sfDz973C;lK zqLLkE`491Xau--^E!=ku49dq{evr<^28;TQMLEv|1lN52EDnwtF3fh++5!KF)=21&&ErvH@h z>*+BDBl#03Bm+=sV~L%%FSrptFm;C3VGyG(N@O!2_|c)YCm85hLZOlb`YM{mN6VUW$`W`oWQG|f3 z&yjAXgkVF&nH<4()`?Ag=qWn!=#wUt)}3{^Td^8xk5?{fkukRo6u;;{Q1m z25rsygSYQ{jVSch zai|V&;p(HiopHxvnz=m(gBb)dJzqY2lwxycROQ$irjJ;MBnt9gtn@E#>syuF$~9-T z7>aOiSPWT{A<9`^bfnGSoQPko7$u#v9V8_|kO`kLb2|i!iQGRiJw2pF`Epa$MQUnO zlaJ}m@E3Po3`*jZNhrew{*RAv5rp_G8|2D}g!cg*aVbWU=PzDZ<)6mu?!2oX8yLvT z3x65mjKC}=j9|n>=(p##>IrppKIw6Tyw2i3*Rdg3<+hr(aNyDZ_-Owj^avvUISo1N zd+9_Bq}*~UColXSEVb#@$(BhB`tvl^c+;2T&b;|Kg1w>al%Ex*k7-T-mVq?zViB$1 z?0;UA-bJzN_qgFM_cR0>L2!?ed%$^33>6w>nmA%b&+FFRb$D1DJQLG#Qw)+>!=Fl8!sqMrHO5`O~7XH4b9OzyE37-@n8}5HC0$2|+v_<85Q6Wo9bE zsSZ{&GPbt1T0}SqGU$!JeQFR67J8qat^n;(?U1r5tEqL3G%oj?in)6(3DWIUpP{RN zJZ$}4;6PMK5%G`XrO+!k35OgsXWXeRr2S%Rt*HX|TJdwo;$?J>e@}MJk`tmO;I@?u zmHia4o%!H=U?-iH1R^6Ne@smUxc9cVGf%`u-))G#2V)DlBn3@dL!6E$j)aWxtGI3t zx7=vAy?pQP-Rpqa%jH!dc3Ekvz%_2uTa>9Q8}(6&8R^=!5w|*u-d@XZJf*yPirn7_ zAO)4Ei$meh-%tf66&Gv4+t2e(h{h}R#ryYQ>&hkO%lGy5;hEm&LVuN#lH$dy)p%-3 zA`h|AR8fqVYYR9Api=o=TwENjYd;)W^iikm{?!yTq5IKSkaHq2E$uo;00o_v64W&` zMuJaqaNGoxvAoh_UQH47L`hve2|8l{#mgT*etdos4$Jt`*r?*_>RQg%@|Zyq7(LsX zNiv}7;u;#{Kn;6RSWWrn&7`6t&A`CG`RecAzTE{)CU7s$g5a>2lBnqDR9K!_A3r9% zdw0^U@4WzA{i8R}y1T&Me5*UFF7d+$+2rJ8JkzhP=(~aIcN&Yd|CVp|(+r8Rv64Vl z1T?bXT3;D;%}={_{rj_|O&i_t*LA1JRuhYhMf;i0oT+UQ0Nzksmm;A9fg_<*{3$q# zr}P06#ls;g((Y>&bRYGr3Gdv#4d>=uesGW=f7lp#I*q(|_H2AfiHrGvY=bmMRnj2z8za*xK8_&(3~vy!1o|r`b_2fp&dki@T&J1Xwa4KgeRRUzakNbC*=5D{Zb{zI@5wUrQZNvo5cqV?1g6bjJG1clGZ!meeKn zr(`j+usRiIi#N^6UszaMXF>rxetELveSW?=*1^N$%^gEOz2L0JG_ukP6y?jg`Sp%j z0$SnRYJag3OHGyO>FF=EwNFRpMExmZ{`M)fcg0a}#>;D!aq3@Q?@cd8VJDo#Kvq8B zg$4Us4~G~sv)kYRyhixH)YJ^wlKsoq;*FjfRQfd>)G1fkYG!3;FQ`|Y_{%Mopbk)# zEsjYh=>OSM;#ONei;ixbNcz`zSR7gtwD7}4JeDdJ`Gh)C=IQ@L zho8O~ZU!yoKc48hKSq2Qx5Kur zy}jyxeDtTQhw(6m_ySS$UxUbDXyeF{U?7j(9Ci$FxVZGv!$1(i|Gx|eHWd^U{9hma z_r)9?1^#2WClkxaW2&GLuay%m&;J#!j?oe~-&KMQ>31T86Z-c-JAudk*JIsiw zB7cOLq2$4o2hsgkzHf=8y?pwVsoGNbxc|T(5BVO?>*lnxa!AMnV~9T}BO^n2H*SIH z@5l}j`+kcu8s-0)&=C+WqOqE}6~7ct&PjxQ2Bo*ttgKCNu2muJ5d`K#p zfQFfe_!K`pacHmG+vk7;e`GlgOk*MMsnkpND)|PXFlgk~^0L4DRke`$+@?GFjeg!=6fI#y@z^K}D$hOfOzr3%#J2?r_Ps4A;v;gW>z0hI)4qYY`ht~q#eq8H@O{tRTm`uQ zt#kna0V+Y81i47Tci^x-rNV)S@8Mh6+1c+I89mZoy(b4(!#R5qD>Hr6C3fuAP}Wq0 zp-6<=L@OrEC|yPpArm_b(Ba5gh9HTi*jy^;EzGf#gtM!#i zR~TDxPR*Au3pR2<&t304@Z-D(i+mLHF~xIihGk@&ErQbtod!K|mV({4Bq1eyw5w)o zn^hHb{5{?FxXfiSzAi$Y%agrm*aV z#WvR~?aX~+&+4-3CWd+4!WF}?@8QhdgG0Pz;2@$7AYfht4ZyCMujIa7tNjtqz=P1f zG;aJWQrMT;A*oexTEX*|FB9SLrg5mffD_7sN?!X&W%NL9%KMcSJAh9$mJZRC`2X5! 
z)3kR}wTeE%VNJ)y+-{kgns#U0@rUxkkL9!!H8yfZeSIdD@P7r5Bi#q+0raZ^nX(VX zQEvnsiG!x}kw4IGbCtG)^C5mReraq(d;_Fe$4g5sG*tj&>4~_X`nQ9^VkGyU_#DC% zk#gn!UXt?DeXT+IhGKACjGw6u3gkz}#;pI7ko-HRffxV&-QYj@$U{hr03ZMm{C^dx zl_0nMPh|Rca4XZ8`orJC$Wc6i+kd;~4=4|*5a?gfWa+k(h=+~cPJCE)HgB*Dqjao7 z(eBPBEL+8k7ri?w=E?rB%~tF8twp;qdnPqR%LnH1@(tJr)lA^dRo(;Cb=_E+ccP3M zgw)ZuQtzIb*-cP*j#YQ}bayM=xDoSzwRPq}P2Oo753+WavH~leB9slRqRtK=YGJt( zML^kzK)GC|fG#^iq)nhgkwAiFQEU+y5Ky^N3W9_qZ9!3Lz(ECzK;$xr#R%argyl?t z#9;RO;;v;n`yV7s=FR&)$M-pYAFv~MVcGri`3*Oedh@M(A|lH6oR}7qtUuk88GP+- zi>Sm;&oQS`#Y;@9)(xVn+vfAO=B0!%hbsakf{uGU z!vGe(ZO=>|6#yMaI3e&EA;`@bep0_$H51HOg$DN$ViJ z=}VK{C}a3w!_{NaiJ$(S(O3P0gA*K(FcMM}HQ|Y_ABa`5aLx3Kcu^<=!)wc*M+8Yt zxX&d!X$7}YhAgtS*^jVSMy7g-9g$OQP2yhz*|8_2@zH%hr4aClPrYVwfp^K@FdLhi zenL2z1z+tKVe;h1vop^c1i0I!XUB9B&~FaY*Y^5ed)tBkm-oZ4x4XcKYKc*=#(Hxp zp~UJ={A25;v#zFMz*lQAb&@IeGI$tYNwJ3TWax(oi3rMpNClWJvmEwo9Pwus=V#|0 zO>`AI`iHN!8G!_vHiB^>QO?k@Rki-CF=bmrmt4*|ZqT|(uL9sPS}shMKgXU+VgX|t z=RsX#`0=SO-@w3p_+Z_kbUpe+XFdx!S^>G!dvJJ|2NmEv zdwa*I3JBdYECmb8{gggdANChJp0Q=gquA5_+(ni{%lAN#K+m4;{mug_Fj&uwQX5zk zRE-2nn3STM$??y2S-gm%OQLwVq7Y{^>-`{4L^Ub01B4nI$rc$Rp_3$nW24bP$xy0Xxp+?aoL`F)FQ_- zm5^@b=-jgi^G1ygid-FGh$wRwaFJUx9>xh>SE5GS&YSG}M_S85CNoMUm$H_hMGD9v zuNF=v)5m}4Aon9j+pMQ(#LFYOfB>SBE-9%aayE`E+kbJfR7x)b`j3wGMJIfAwE>HR z-lZ;%G1UC!#>a`>-QCRyZgyZyx@y!*_GjbX_(Qy)H-dcQGOEHZ^dNfY*`57?{qom%vAO)zy9&SimDIFYoqg*I0e#91djXjh5w`{AwQWZ!04S+@RXy zW%`uW1O1iWORT8ULYN2#_9*j7QC$W%8Z5Dy{;F)O`n!@#n()G&MC);IAbHphvX=96 z)XPn+b$;cZbXbgP%&7GSHV&i!^XgA9x2cI%BcV{Bg9^a|B6^;$Bs0PIVVYN&(qrN4%%^)lpLt|D#XS5|rw-b-+u zQHKO$Z>nFdTrwm}9I~(yb{+OiBr2x((Y4h)`Lt$jt6^9`Sf_g5+&~%R{W}Nnkt(AiZqHZ7_Hr;+||L^DUF)iiRJ59H{a!!v%BJRK#I)H0@S0 zuf9+E)E>Q8w=ziPQ3;B<5{KCBqr+9yH7SiUE7db6RT!?nj|Zn?=A2c2-0G=BnC(tc z39rkV7otN$L#vHVNx%FPCF#o)V&+7n00R8ePA@blb1Mo83RbxyqM1j5oApM?-^<=g zsyBlKT;<{rh4j2p7*S~0!8x$WLqo{OwP24Ka($tSi0Izl?g=0BdVIykXH^YY_cVZ) zBKRi}z6pM3*img#{YA(ywJ>$FZX+UfU_iht`&MOUSn`6$7UTWyeTQ4!IKT_vgwV!& zQP_HAOo6$&@$LHv$6XBtvM1l{Hx*p)`q(@S_SQlTVMMr58(qz NULMEX>)lSI{s+0u4V3@@ literal 0 HcmV?d00001 diff --git a/docs/stable/_images/ReLU6.png b/docs/stable/_images/ReLU6.png new file mode 100644 index 0000000000000000000000000000000000000000..c78620cdc859f5317ff4db4586c29c49e6268079 GIT binary patch literal 24611 zcmeFZbx@Xj)HZs9lpv*q0)l|3C@C$e5@I2e(vs33-6$Xk0xBvfrJ$g6gS4bbOLvKM zcYo^!bMO7WbLN{lf1H`)42tkP&;5%P*IMgZ>*0;-QiS+a_y~d!N?#Jci6EHn2!hd% zhXX(1Y3%+6|6o~Nke0`TKhAi%KJfqj7ME^WAqeqp^b6y?XsjXpkl$KD*;>wA-`ei> zT|LC~wzY+cxwVO*_Ay(%yHT)aik~ROe2l(bR~b17zkYe(|NC!#z+q5FbILcWDy`3X z;o2$R`=#E*d>d)T#>R82LxGH^PvcYgVe3=|m@bQ`;jwZtM6JW#4PToXD6g;|JBEax zvng2|YkCqK+}7Wgc;>#ZpI?36WTfkQ)x=EYqaz+{>bcFv$?xA64+V+_goGT%!p43h z7kfQ~<2L5$GiRu~`}`Dc-Fhfs^i*?6VoiYx2|p8F4- z=ugbr-RWoA2QB@Zos{1@gk3ZKwRT@1d*Hc{3??GaBtv zwuyNoii(O4-dyw^{QQJ5XC%U@AyJi&jGX-T#`2WKx$rxUQ4;Bk(P?Q1tw!pwkptAc zHA8l8TN9dd--2X#$L2@sBLm^l&4OQeO*^@-MhG1_fByWnTxavb_3<;-x+vor6R|_HoTXO=CJ4&LyFbydc0<5E%Ike2-@=*m+h(;Ju_C5y3adFd4Z8j8aYCdO@ z^ADPKWl30AaNry`>`pAQT0G?78`b}^ureiuBXBv>xJ`R;v=QF?iGV%dJvP#n zYHVyQXFM+c)n?TCOyLHRf`Yjq%&i` zPQ{~7gXMm6BceM;3=ItrpR+zSzp&5~X!TL;Fqi&;?{(pR82fOvK3Q*6VYcDfntx|B?%rOTR{}GT>NM~!>M-2(rt5yZm!`X zF+CU#_WW7Q2H7Z2u`IKm)4smG;$~(^tNk9ta1fq& z`1l7ue*8FHsVFb+)0(L2mp4<`HN=BYKp+vXm?+d4>9TwaLku)H+^!?fc4~fo10>&X zbo9t!)K6_|)7u*} zl=SrUQE6#kdu+e&c#%Zq=BgY&e%yzYQPMsm+oa=Ww`Jwgr5EbKhVTfI3qGL6_@T8Y zl8Uw+15?>G3)K)3uxYm0gh$WGBP8ZUTUmLvW7fVzN6-1p4OOG ztuIaU23LE5g+TavtPkDy@aUN`*xp&48#;C7OzC6#i$d#moFEv!xxDRBh~ByP1Hrw6h5s1yp^m8>R)fp1yqf5^+Z_&;@$l?nDKM)x*h2lfa2Domr+6+ir@A6po9d z15FQUf=*m3k&_Yj%Fh=r^=UxFv1*5Ko z@`~JwDbf?y}?M 
zo3UGsH!~*}B_|{-=3rlg{a_$dUD?Si?Y2{$4rKd#Yryf!h;DIj&w7jYe;kRp2wqq# zRe2Va`D10P{E2I?JUh}2bV?r(+K;~Ro&{xyR)|+TEO7Vj24`2cd1WnM-=p~CLMqhA z>*^lI+_x`Pzm_UNH9JXqLrIEPP!X^3bz2a5l7Y-Pf8YF4S=$lPmq!6Vg{P<_DyL{+ zJRqhE7_JV^n=x$5G*&`l{Su9n3$hxk9|i>AAiBD`IR-}~$NE2cT)K40Vn;};7(r$o zJ;paTofjuN7Q(7Gz=m)*uUh2KRZ!DTfnUKRCbrw@hL75uEhT>c;lp_?t=DaBx$TU1 zIXO8GbKfCpP1X#2$z`BDRN;SE&^Dn`wiOjz+q|QZZrdNFHeT@Lj;%C+T%SA%{%4i5H~jh6h_ z1tN8?&}Cx~P}B12gC}k~8-1T^Ys+og)a~kqZzZ-mN*VH+wBJa6O(W_m1ehz;d2QY? zS$1VJm->37dV5&)Tj`+Lt(88VrSXupAv7i7r-qjkKA>6X_`DZs+#zt+

    6>tjP@kzP(E4@zas2jDP)3%=JJ-PyxLZA}7-Qk*0K4QCG zhm%S9^OxVgdp8hn*E`2Qz7K(|2=;>73(bFKT%D_=;{=NlCggBXHQTHl4lA&4YuXNv zh)8m#w;WsZPy8W18TSwz~WCUOa;E90z4?E z&A2>`up`+S#RRczvz~EvO;3&CIGA`8KnZs zP{n!l=s<$n$AiHdjvp#@AH+1oDV(_)#!m|P&Lis7XlLdbk+pA!;NvqsB@2L%Uk&9Y zyb{bNp|5{JGJs~kfjOa%?IlD7t<~s?B@s)S8Ogb1qDcNXhjx|&P&!P>@^!=C>!fa4|5QtJ<_q@=j&Sr zG7X56Ra{v)BnKp_ytlxqpcf|wT>etTjOk`(X6E3#V2y)-jL3LHuK`T)0S$c!Hlkze z@GkOazA%fgg2s?s13dpXQQ~Z<7!`yt-$q6lxw#jkilU!^&(5It6qAs^K4&|L=T{z9 zS)z>aUH}Xj6%~~+jRT=4kC>Sm+c~@G1RFl?PnapB7cO4J2(@$!^1wlzISITYkjb_7@NbFZTM+&PazjCTBTRpy!+W>7u`P*~XKGG&Cyvt0YUwRMIeyS^~x?@!)+ zizXSpXG89x?YR7Wnnab%3%72){2uELG4*JP#ZE1w`-3-er{O+u6%;b3eW(9gnGWxI z31-7FPm=d>ahSxTtv^@%V=1Eq|BAK=OSDsuv9SfD$LDMPUN5?$MK8Ylym%6_R|SK{ zoOk#AdqXa_kC>6L1rsncGh2LAQ?Zk<`_~IF?i_QLtAf4%xx4jG|A_vJ@RLXv7ZuLh z@U>@jP2C`qXeu22$IY~?wryz93ZxuT@IM_q%n4(urA+M+y1@;TzAt4SCjs5xhwk{Ti;A-snmN*+e>xa7+ z_~Sug8flGP+o}XH!kG2|V>o+Sx7e;7UvA$pk@v-4}|Az#yvM?isR8Rwdj@YujighqA&S32jH5UNbK zn=Hy{OUE>}mPz7_75bwS8(xhkH0Kh$;xfgshX^l0B@;hTXdW-Zah!RwGm`)?>@gEw z>gk*zqU8EpfL|cKQ%x4;;UR|*0da@mUtCIxk&lnEyu5t6Qe09p3X&+FzkIn%8Gw)+ zJ?alHnCvT}19ULG3>J-_h6p^BL;g$0m1)Az+FhI|a{R=Jk}+!rCZ_VOsxL;F49v{? z$;rt-*VI^xwwh%#iipsHU%7bwdO%*!fCcCQ5jHmVsZ*ycvWU@xTmM} zF=u&2MO02sJ0rClT~KnXoU-!awDfdv!NS=u=C+?^`zpmXWeT+Aha33yF8Q{@wQ}iGJLVAIbcXSEJW^|X8 zl|9AGY^f<8H%waG+$>*KS?NThvN9s$U$}mp#q)ize^FqS(uA~v!emD}?*9Gzr%g6p zt`@4ufT@L?1o6Q`hlEAs*A~!=tE;QP+g%{wHzT7sL25>HJ!4%p3_`i|giI&n8%y-d zyF(q{L_3nr&FxQ7VSN?wAtQq8;iS(LNGJI4Z$6PJ;Zu{3%Lz5`6u z`d-SP(Nhd0(?H4L(+{Hxx~pG*qG*xqMA33V-fG@d+LhVHEhPqChO)(uQGZy6H=XO@ zKDQm=?~ql}bX~)qXiY2yfcxMGm;N2lIavjAh(wmIQg6^334n|?hH$0Cvl^xs!=!BIm#vqcZ35l)1xC?k`rbmq@-)_ z7%R_kTJ@N{{q_PAiP7x9PB))?_!{ZmCp&Xh9ie&{bqca6(;K~wZolng*vqsvXHEL; z{M*D|;*ADUaQn;myKtU;Eo^umuWTZlxJ*-Wsfw219}NglLji zTA?Lpc131IXKE$3$jnU4`tXbEW8T*jJ$R;%@ZjwFocYsvtH<^8gB~s3Z|Wo6jD4d% zH@ehAdS`gQxLYN-EU@jR1O0zL{!)i?*i-LV-k1U1OafDt&Xm>G!o#i6>K>Z)mpTq( z7+~;W8ldvmDBqI9vEsHe&_id1fp}TH<^Oc@t+N$=tRh{2Ni9qM#sq<3@3Zg2r!q942b8 zFiYgW9HWyWX*{Axm4E=9d_J~7>TI?e9j|JMiddw`D%s+b)R6x4geYN#)5Dv@zhDWEyJna{HK z3q4e)vbhw^ZF0E^7dh~dpt3sXDGBYtx0dy;BTv>Vi5FH)wl6V_7J=eFXObm{#EmQUQ>5AU(v*LdKn>X z6Os#f&w_*hzNc6P-V1p7bLzYieRq+}I5GK9EtbfybS}{+tkP0H|SAHK1bY0T?{og?4)fs99^3}?0@08iTBNiVp)jP9_=Ra&cY51TJ#FovO z(=$r&^HgN51Z_(mO7Ts8%6i0r6xX^4YH#O=dt%Ln)OihVho-v|UIZ`Gl^yt(G5Hu% z_uczqrNz+KsrDDLp05!Cibq$ob_%5GZbHBxSwb6TI@MWKU#sdUnZG|LZ|XNtD5 z+0BLV6cBYr7434N1CGk=IB~i@{A&_+c-m*j1_|`i|yY`TYE)V~S?g_esQ( zFX1X)o$_w?d3<8@jV)Xo?yjZ~l0OSC@tB#eNGh$wK(f_s_ean+Q7?<_h@uCCT9!+U znsofWQoS;t=DtvQVqy2M%{@Ha6dT>z!*Hly=@ZZWh{DS;(>S5FdEuIvt`xtGVqSxZ zk=khr*NA&=&Q5s9E-Q(3_Eyt2g~-Om|`_@XN_(*QV7R6v>PcTj7cNxCQ=7AAIOSVWVUI<+bB^%EUT9jYt(D!;`qVTEl+JL`*X0VY+GF(}OTKYKzm@ucu( zPg7Ht($v9_l`7Z#jiF@3{a#~Ys_xB$=N^Pd3{M2=7{#r~^~_|_26ep5LTVse2bH+l za!So&vuw=%Dv6Qtn_gyP1Sv9pu*Q?+kAl&y!NMNxW zz(X1v8zF;PxxKX^tDrD!7U<{aQ92-|qVlRT^n=|@FC^_QNlVX-UewlR2)lj~G`PD> zNjXswLa;YcQJ8=wrz<_ZHm1E@87cuzx_co3nuWHt<@F@_iOwNX;kTcC?Ia|5IEcNi>1D@Zy{m-kGE!fR zQzu7)R^P3Xks%iuuMhOP4xFkHac@9&XyoJ)Vly(hS8R3CJGhU^NlN0>)MW&TLB7Zn z%3UF4dFOs|0m_H8<%6q-x48$b&rPVmAD_b0f}+Lc%a^+;pBOn{aX?r#H#bMS)w+J@ z1+7RA0sXk%-ZF10kUK3;qA5DTAF++8DeLlNhX<581Bn&3h2ZXD)6*fYp~{UA=?wQa z;Ejz=>-3r>q1K>^#FNsCfcMcfd1Yx%hCtH&@rxJV>m;i=pTBs)z`|0~93*yC6@hA$ zy>l+&4ppT?KMQ0~99SsCK>^%LIXxTJh%I|M-d&_5VWP_NGR)I9fIu)>ymN57t1Q<2 z;>C*-z_V-nuLmKyl~q8`cLI~DIDriq@YzHSoK-R5t9Nt#O9h^ z4NW7VqM!DY86P>Flf$M#=I%c`_oqFe`NSzlZRJ*G%z}VOYzDMLF6bSXrua=(t5IZBl zmoJ4y(euiam(`SN<&pEfpTHM#+n*c@MSf;58q+&Ml^%q?aPgM8NkF0{JKO9pJXdiv zSmVZ=!J-M32b>YO&>ITm0s;bI>ciMSd|};t6nC};lpfB$i%UcoFCN?aN!e|NJZEH_ 
zps2V0ea9+hdLyOAKA?caDA~s0$he01vN{o9AZ^P=fnJ9;UfU2v%rog|qa2Z#7XYYU zPCT{zw#tm9LS94LLSRQUT~dXwduI>zLMXre=@+0ecxK{BVn38;l0Vyl(VT-7)rqCE z_H28Ux+V8hvf(q6SLl=LFwL;)0B6q2>;kHbr?aMa2oa9cZ}vsd_2DMo--m`GxO*ti zljL)K{m38iU^}9=UZB!)=;>5@?hapj0)@~@o_#`oVv&rms9SvuGbgpnZHzl4adAq7wvs=EY{Jsx`Ewq}@qgnAcmcnQll_BdNI?BOTGGsxlIq zcg4@rBf~(<#=A%t%%#QNhsQcsg`rfKCvUbw)bHc=_4t>}pT@#g<^UIXwUFC4i1c$a zihiR`fh`-M6x5)ZB<@3=?dpUllB z{jhT#*W`3SE?;>7qTNnaCOL9szn_=EQhmkGeC!q8Og8dl zliD(0(;_jv*9P7<5^@qnb0t0Rw4N-`jfQlgCO<(ADZd{eBWi5DSdNJvvQ7IOEQDhT zvJ5pLy(zf!&)CE1_JX3gI0i1Rpg*qJFub-6H(;=WscUX%;2^*$t&V4|A#PWqc0+(h zF8yfs|BM0>(>_>!P+oPw*`AxCN5n3i#C;(%7RWZh8k$KrQZfa(;k|_H*Mj`7a`sy4 zXP>&UVk1|J(g^hX+7F$_=%ia>oR>mRSvX2NNQrT_4F`$V>&k=Vg6m{n7u^-ot+e0X+WQPcPS~ z1wRs&`hp;C`KNJY9!>67Mf`A$gg|8o+95D1hhiH4?*Lj&&S0UV7qdIZT5y?$`C@#( z*k=9IzPZJa-rW!YlHcnka>Y21g9mES9|4tdoJpLKn(}P&mc6k75%PM6DKXW=$j#d5 z=w#C{H%Cawd>{T8_UikNLX84=X} z?Kfa8QNhxlOO!s`n;s6La+dpts8CW?{&6VI&#x)?6D){GSLV{W0$*wTE6$ae$z)HfI(-g}(}%=vW0PTr2;@`is~_@4L`8?IcMsCf zt*Gg9?eJZCCMImQ;42eJqeMTRX0bH)aqnac7}A~MBeLS+xKcR26@HTp_t{6yWm zYCocyK`ORyAoTK2b!vNTa$O;BcJh0H{Z1?+;(kWEOS|gZ@eHeWmmcA3mU-yW?xxXS zseLym&)Esr)uk22kibBfgezJ0$w)S1n(7HNii)F7KzRNjGO+(;;7U}Sq=fh#k{4I6 zmpoT5&4ibHn2YbHIxnNiBxmPwEw6`2V&LRB*Uy9$8+T|>or+L_Av^DM-M&Av2hD2g zrncQ?-xiBzv4y=?cN zmP+Vw>^NN%e_svf5_5n0^XJbokj@OlXiFKkh`6{*GBVzAaeowSdiT@MRxDo(S+9`! zEbNLeYkGg~Q)E&ZU9xYZb8HeWd>Wj(#lg=zath=SJdxlK(?0U_#7YjdJQofO!0E{9 z>S~NIk+l3tG4w9iPsT!8J9uKOW+h9uqB&$q z9>ozR-G1y}-S#2f4S7c+=z@I|cig_T(N#8+!Rw~bB>V8jhv^lSz^X}QWV;A*!CFvz zMmdac1OH#IDY&?JwHQgZaVqD{mh{z$H-k4SM2e2e$tA{eWkacOJrs9<0X03iIRyOfkzTM zB0ee#$8t>xSY9?$CPkUp@ow|R#Hh*e@bEx6)y4q!0ac@x=H_F&YkZ+|nw3=os>UCZ zF}Q6oPI(sZBNlQG6j{dukbhB4ty@AklJd#HCr_S-B~<88FZajqlfpoB*2T3yL5ivq zg>9i>PH9kpnw|!=#-9s!zE) ziJC9!ImsL|~Q3(2I-jC^+w?x3rwTkNJ&NE=_ojRGfIN_xOVyx%h)JJ04Qf+Xk(i zHIC+d0gC(pz_fa24suwE8LY^PGMuH@OH;8;CnfDwL0>Xa2cJ%vjK9jVb@oskGT7;* zg=W+WT;4AV=vT$iqv`xcO1XMWGZqvdyV+($;S(1G-#>b*5<#+wCLev|J9(@`Y(KV6%WtTijfbsI(#(ytBg0P^lnx(@3N|ugPD`!T^tyIXv+go1fBRIYNDB_yV z-tJJp2_^V%-z%6LSe`&(m<yPc;nhLZ`R^9uA)kEyJjSm@73s^V#}ArEz8y-+cDrDx zN4Rcl<|RXS_`dAJCnBm3iu9Xk_O1(!qjX6=GCdDjn>L%Q0#vwp+u{J4!O22gTTe3V zLgfSf=>^!U3%DQNzmJmu00!=avTeyM*(Mtjq`zZ^pK;DYut``ZTg{HOXm97seF$X8 zjd#;jr7GMVXSbNBS^qu~m2+y&2f6yLRrKcHMT!e9>ddt-36tCzX4Z_d8rTjE5JI+I=12JWR!!VvPsSi&Rd-=IhEiZg>XHvDZSWMLGK>x z{BV>3#X0l)LTyI|J_LTfack;A*9+eKvjY1s4l+PKm~@T#&!9mHvu-zNfI7Z_!c<9E z?MBLWtQt~Go@$z^3&k0M$yZC#>>Z<|dpGzaerkSp)-ApmpeX-p*Zx!Zv;98&LiX2a z5-55lJ>GNfNeBeYkTLodFsm4(55%f<=WmLl?-BRJ83-5Ec;KBpk>ochGhwp$2t_?E z1I$_;Bo|)i=ZF0it?Th7*UuTxEmk_EataV59LL#c{LHUi5WjOHeR)VN2Zc_1AoZb- z^`O{o0koo)5EYbisyMoBrP$?SUP+Wkw5zx-3mP?WpYSYp%Dc;Hupq*3tYB@?o@#wr z^nkBnv`w4O=hxPjgjyyhEa0mYlMA5Wo0^(RwfK1lTsV~7e-$%$lz2~uWri$i-~-V1 zNLMURp(R~yOQ6qX+y1qhTz*V@Aep0V@hQk70<{ADpk|jU-j>}=ubP!^I$!but~REj z+0@I*toXD=Z*Zq+55{lE748BI)Iwb&D6S3V%iGym8;L||k<*dtY?OtY^H;3LzDjsR ztU;YW)osUhalFMZGO2FYMytvFEI(OAPD|M&`UWIUD)VNF;)r{%Sqof)A`6gpKog@3 zG5Bo(9>j{0qx53hB7y@4#Dm$?Q-H9+n)}`E??MGcC{Tc%{pS5)RY`>&>F?B-O103Y z%P_mWFn0IVK{QH0*)bALm?_g5V&G5PGmR;U2@gGa^NyCe9K?CfSegkl5kKN^$00P7 z@{O92D-_oi%FV&y5EN9MU?U;rIYN)sx;er@VbT#u6)ZNA0=1z|I%(NNfaRnwpRwT= z135i>VWnPZ;VRcsncFQE3dlRo<>K!}{|gGdde3=4^POgA7W)u{aY(S=YrjjwY5oSv zo@T6HE2Qkh?oQSe4U9B~v|M<&KQu{LPj!-4RaH?fO?I!U-xs%Lt16CvIyWUq&|=~H7i0|*VWt57ecvL^vLhMUOBm2 zF*d%6=0ZBNAs5mT{`ris*Ju`N%G~xD%MEh%187wUZUT+%tS#}|sl7|@K_+)FCe*AX z!>}o1IzY4kM*#Hkfv(l2E)Q)_3a;i+ethI`)5(Jb6-*4x(?zk)1p~Hw7RPlo3&C;+ z8ZwYA4SNU~q6pF^EcVkn+wtLliPIuT(W<*P7jpL07ZRy0pU=wHvIyElGh{%I1yC#n zIR1~QcrHh4kdmq&Rv=eix<-NKP}mfX@#ZrrcDK?MFw{Djl9G-^vYogG%w_vc6G%Fc 
zlHLQ3vlb*q@dyc9-W!a>#QI&DFiSDa?sCW&VA)N*tKZ=*KN*HPsZ}WaApI{~!)LQo z_uo14)~~+p+WlR51oD_;r<<_^rP~{Oy5b7Czhft;pg6mY9!7WaxAQL@sX{|d)M_-A zv-EQffB(+F$EP%5wtDm@@o8FZx5TB&#ku5GSlNhw62$`GZ!IRotkh>CO@Ot z52V2hfa2Ddr~H6Rc)Q{C`Da#l;Du z{B9Nh44yO!933)dfFlG1e;!~Azta6U(Ux2}mE9KrKOqlRw>^ULJWSeCFpeBKa;Gh+ z$00H~`<6HqbQI$iCd@{6J^5YuZ}~~%+2XvsIUQRl0)BaYWp?sf!EIu0JN;ez^^+C` zDsJ(;=B54g$B};Pb3LYD3{l=Or`7kG6ROkD*J(^$^I>N*Dnd2kD<9WX3a^+ljqThK&z#gMryMLQ~NVe+FaR8+SMPOH{I#L%iOf z@|{`#TB9@Hxq|a(|AprIih3yd*y7!TXaZ_lE%g;LXbH#~e0t)Bj9a*>^V@w_POL~| zFB9R>zmAKm)M2HlmdK8e8&6dOI+}zmTkh86k>_c)@hvLJ>}ksjOFfWCVPz$RPziYL z4-TVjD^t@%DPE|Wz_-_~`AhMl;T~u;^f^!sRR*ry0T!y!S3L?!%F^MxZ(KH(X{Ugg zUwLmIZoY^CMEW6uc4h_3Zc|%?FJAke7#oq;$hl0D^eJ4q zKhXZ{LU-fH(hBR@{_`uwP!|F+mw~Z|@3Two(^e^##k&~^ zSy>d|I)pGs-xb8f;N;oOd?yuScS#0b8SI8F0D1AF;?stV590MhOHT?&UWQpuXLJED zS>cK0pmf6vdJnw4ON}xIfl9t`X9sk#;mt+dkdq|xIBSV=50>T8oh2_!4XlRU+3+g$ro>t9={T+UEKo` zOh9rdHy?H|upJJ<1|1TwoEcvVSm_7#8l&B}m(00Yk?81X4EL2GsD(2aN#=%NZ!UM4 zM=q?uwK$=egV`4|ucvO}^uK7q5|rugr6naou1Rq7dU{Mi;Z(`EgQ~y3qpcYlO^JVV z%h_qIvcsSgb?qMNdwA&N<0nt`&|XnKAsane`|?%!W{4 za|%Jd9-91Kw5rb$KtlfHN$)l}$3JTByU~H48G#rA4SZs*u7#~V$6zOC$<(i6cH}#x zSJuNb$jG$T=7*tMB1N(Qe#vn5ggTi!2;$P}Em<;Hr&gH{sdC1hCz1v8?7mUZ)cGrR z{k^QhQEdu~@Te#TWHyIBBf@f*@whXvt%}=*O1yT(qa#q6k@6-wipnwYwRFG(oML5d z$#p_AiO^m1H#cW5z`G`c5&Y~~zwenOF%1n`v%D!@0Ja1O+Q11}ze7N8WU`%Xmz@#0 zCm$IX2aHrP=-pg9>Su8KA0%y%Aq(i?g<>`^V=usqCbp)b8>TYfVPST@n%%hhBD(RR z#znGO=#ELT7$|Wpr$eA)00a6D7*Nt9p(C`Gi0i7oqHu?*wohPS@U{p&JqE~KZ9C)6 zyHofjrKNu}=k|ach)`*1sU-mav6=Mr^hWsj?-P&dz_*}nFXd5d`LsR%UTvmDDIs4! zyW1K;Tzve*R^6EOAaq`$?H@d&FLwr%Cik+zzZoLyQESH^gC%DGaxU733GR*Fvlxo{ zsdtAj^M_wgP|s7WW?T2qW$&1Vw!BvL#-l&-_PgL8g`BWbLA&WkK(8Hyo~ugW%=WC{ z+$6(>R-mIV*fj$Hb+G2`_OA24awAs*_9rxM+y}?P$i;OCL0g_RHl*N;n#H-VmP4vC@pL6u)sxm_5DVzd@Jj16knU`Rzj*%g*P?5l;q+ds?2^C!XMVpWM`?b&j z3OXZMKzh+&jg(Y2h-;XoAAWQIg_h5lZjr-pge=Mh$lopCgHa zuPOEP^zJmp)C$lIS)T*mM~`T5aPU6VqzwAxAhW}!oPq#M zO>B#J^~x6-&JnS{VAP5Z^v*XzTRYmvL?HFQz^LA*Sz>AQNgCMYbO2r?VSN6BGv_MJ3E}KX9DtJxE zdMvxI%<&Mw@Y-R8VN#{na zdn7b&qvH!koNK^bHNvcbm3qjHpxxdN2&o^mX}WEN)i2p%aX_Ot6BE<)#m|t6hj<(G z^>P3b5ss4lgQbw5pdbgVhUyR_W8)`bVKt2I8lCmOIsKmi|HH4Z^r12S)2C5>jP7XX zCIP_^vsSeIVxtr^rY%~DoZ#hc-P!?XgtoXrPbHHEv@I?ay56`!0zGHrnQf+@4bqhJ zWl{>;Z9B@-46^go*s!p$?5F&qPHAY4>{7f4$D*^Bt@MVHb8?H2%U6i37oEclV=X5y0!hJw&CX+?Yt>3E9iy8w$H!|4ZM|Tzwj{Iq1he zTg_h74`1RVb>XB(j*M-OB>y1v^8VRouxnTRmo9&k6JXDO;h6t?=6TS-|8fdHnE|^P803p%m0a*wH`@>Yy4!W-T9x(Nnf#HR z{A&QrE=&3I=S#{gYVQnot=OMFCf%L-UOJ#spG{z;3t`?l{4WB=kCOv;^!hx6$r>7U ze-!n9oAiGUmf0l&2LyconF0U(J9_6sasNe9 z)_wbI&dFy;cY4Npe!cPF?LGI$8N5~cE8E-eCj*(FX`-aSX=a0`Np$(!^s!b{7UKneq3{ zU~1&sr>dx`W$Mw{Cwxn^)_8>Q5O6OFghT%+JY&KlaZS1$6r=gc+K)$1wXZ& zaTm0mY=A*5Q7!RGg6dyv(oI#jwzlRSMCm9yLZ0{^`Sn-c+jHvfa_0mEGqpZpZp{U{ zHM3@SpM5*B2h9{S5_#BRrJl4CVB+U`Y_Y`dvn&#W=kcJ^df7Oq~vOKq)!tn8EPVf=5B zl9EO_!OEOIb*iDuyy&PY4HJ}^F2QJ#bJnAe9i5z_hrrEzsjE|zlas3q|zb!c->dT?dy4OZ2VU+u*aHplE1Fm&!7gN|#<1U>X-%T3(KO z_m1ft9eWM)laaAOll-%>;^DD=>2qJg4q5`lcoo%w<#AqW#QNK3(zWo|X$&>!(N|1R zw(QUWeDxN*poH6MDP0E4kbye@CdW14RTmBlyU!^i1Ox;&km?;3#=4$_@N#pfVBS&2 z$mb5#jg(_hLMA3AZecv)ioJIaqou7a9rLik=2w$^4)pOiPLUP7NWna;>U4_irjAbZ z8AYT%Trd{c0GfUKMn8}jSJFCNg&woKNlw^cCuG;5l{a>AaWN!WL0~buzW5D80{60zotie7Fhs>dvP(Aje#NMTsAqA%Dr+3RzbHu39d^s%XU4{KnaP^5> zBi0}1;uD2gSy|0^tAx-6 zFFd=tx_F?C9Y!Nb0BwYd$Ef+ShPS#nf*hwy>Z4V~pos(iF4y-tuZA`C9;w$8ol-1rd zp8F6aQX6|$vF{+lC21^ry1Evc690aSdhx>&HUYF`E32iqw{AL)|DPMhX<%e{;&blYxpsAO++UVr*Jk83AeacUdVug4g1FZ= zAda4Tw?NbM{=Kdew*HCYKX&I4*#3v5LT#2MC6!oU|HZ7@e?9-l+3i`#21c+t(C|i% zXK|Tb;*Ueu*BxS;#4IG5WWYp@tyyg8V>fWYy1`s_{MWi2Jx#mDr>FJ*z0?c){&9r& 
z#Gu(;Vlv6s;~!SL9ozY6y95>@Hb1ali~rg{oPiWI-UJYBN^7H># z^v}Ec@l;w;JS=$?l`sFk38t1Y%*Tgc=&jkFYom=g@+`g0!^?Y`hDcD|cjBMF zCw%Qadr|LCDTM>0{p;DzWZgCvO;S~AtNTKsW{}2G-Kf(2;LkmGKl2Yw>brg$yMd9V z`XrL-3{{OnK2F7;ve?EzA?@2=wI2Mq|iAzY3*Vm7^d-pCk1{A_{OS+R{OQVGs-{_XEp_cLbSB6tEYyI^rTj6H1{jC+JTfvFxKziN@Lz)n%*+H6 zZ>*hR_~kYCWKpva+ZpoX(l;SJ?>Zk8`93E{*|;?Ux-FiosHkvjm-*z>gQ=9$)YM$G zw6n{88|WJSF(tk#jRhMM0~}t6`MlV%W7TO%j+gWDmugySo!$yqx$ChX7jyq7gs>p;xGllN}lR zX_TQyKjgQ44)SlZnirJeafkUIExzPu*qmx$tVYzyZJ46+6*^9iOH=cjpo8c#92WG2 zVfKw>q^goqER>)KsoY>18#nNQq;JB|EvP72LrV`>Ac?@L`g*1F=kLL6l&L#Ir=&p_ zK*w9)bDO)msE@LKx!7D&h$BJ>JD#}2>$X1x{!{!96wTBQd@}SbTUMaf$2Ig5w7AfAk^#Nv< zj7f>E%XW`csA90Nur$Mbqv#b9{*RdL4GqQkuWw_|el>aOa{ZwiMnZD30X9cn7!D4O zyryP7zE;@WmGrq16wO?C^O%Z?N*Uvk-2db>;ZA|Ur9PJ<9dv?OSXf;ufaW2u` zSVbhc;wjUW3>Y0z0O376|0`5G7lVs7C15g;;`Qrwk2O60aGUv5&@_J&dhcPrK|ETZ z0p|*if8}wgXm+Zps<5yy$ox~iIRQV)-?(&7!}^l$#k_lG2qTEbCnx1Wg>0w0KR{Ja zK)sd6?|*cxUAquD`6pi9wL{Q3w4zY!JJEV?w)f7tvVmN;nK8l!5m z#GJFIr-w+10SrvR&h`d)L`A50H4W$B|LN&JGKUhhnTjS$enm9A0rg7XynXvk>2q-U z@u*fq-a}CR0VC=T!Q*yzb%7KNuVyVzPn(fbP>h4kE$nI3z3`{sGy77JWxBMu`2O{4 zv3u^KGY@pIXU}0_V)#?>d>K2d4O~&dZI5BaUyusG)%O=Bo4NCL_e8ri;7aJCcdz8- zv(C;=l~_M9ZAibuWSGkzKIEsnG5;9`F;cH+9Rla-UO!90v4>Os0xf(0dF^f5SEvpP z|58&U4?{Eb>%zvYrIWt4wZ+cQ-@WzXT}6dtY_+Als%j{$<0w7Mc2WUS6cv~LBWMh3 z?(3VeHVjxTXLo%A?gQ0diwfCo2zr{D@8j~*0&0!_KnJ-zOnnG6_4At za{~o9P6=EF`9tx=ZmasS;(PnnVc=FAT3|z`SB0}p z#W;<<3tk+J4vdS5k%VC8702!SFe5VwrmhO8zlvU-?umz{zk==ciO!E!v0%MKRzB8` zc@T?O&mwH0l@gGs5^A-cOBJctj2z1+BAL?DDqhET0N2MvHQb151Eg4in_g!TKq zE-ZsdwcJoDT7Ta3E~iy>T9>eB*+hIGMal~fm<)@oh#6&xQR(%`r*ll zl-TRxA_{_ymjVNsAaa?rU{!*QAePHu#;R0ki6u-x%3uPOs#OBEr9izgx(Y3e z7aR^ns5KM0dE?SaWfYKF+Wk-1B_{i_PfgpL)6@4n=Y5|4bNN9+mR7?~vrR|(yOG7K z!xCX)as=g?#W6!3Zz$M^iYGfa7gO(RxJEPBbdc-eu%H@9{0(F3Pxe{?dF%web0~^6 z=9IsQu$}Oe-oa9fUUtf}%^A~^)fNj6N34r2P=2)G;8vvWV>ldz%{HwwO8TwmsVM8F zJyPEr>B=f5ug>>~)od?MjGfXsOz#ivLcf0v`|@5+S9R#a3-Lma+g;B)B(7&}6nW`W zf!vf~>6xe^M^t2ewaB-k@bt#aZTRa$z5tf3+;O4xT>Ic&{4}BHQ64zudBd3y+&2oB z8V~7SpZ50l#wD__MWBgFmu$by^E0pEA)J=Zn*)PttJdh|rVZ!({!K>k;*NfHh5>8{D1ORqFF z8Br2@0M6_rD17|`qmNdgs|cBK_?zR)Ot1h{=3HyrW5Fk_2Fj9+afI40iYU(xtZR6Q zyH8ZpHL&)ogu#p*#CLq#gjA_gMdB2TqlZsftyXbL39UDKy7!@ms|<1;oJVP;l~z>9 znwy*HR0hzFH8PnIKT^4#TT63)f1v_89MKa)vvu<#Ux{?~_axr)lSx&@e~Hr;R7emR zHrUD<8;#R`!d+vE)|agjS4lgnrv*fY*wH>Zs+#9yO@f@3>AnS2uu77VxsS!{xl zif~HA??|V|p}}3QooZAms;UAZyrqVQSS1iZ0YDob()es<#l#q~qqPu7PIo$;&Wn7C z5%e;Q7i4hnAX1uf-}58x^D*$Am$y_

    hZuUjGaBbeJ4q$Va13JX=dV&a9{Wy*MQiZ21hFH=cBK;I-Dcgzj835q* z1rl2pHo?3-)uS7OC-NK8NwV;oC3@3&I}*9PzdiYXzB$~5Qg)Se=*VTO<$=!7<~Maw zVzIxSlk)jL5_F>qJ`H54V^o3l+oBJ>0@2~J9g*R$Uge1#5K}9?67oaN_NaymHJ5iWm6wdX~R~Y z{H(o<2Lxjyjut0q?XxB*9yV{HgF*b|?2)jzE(HB~W(yn@u@GfQbIk?q!yEc%?i7g^ zl6Er?FKMHqqt91XQZObxs*IbhMHG1jj6rb%8no(F=)|);UE!~A14^`3F7L!Xhg1H% zCkkkBTpb0-P*4jM@z?Ttk10s4T=@9`2O^zVJTWoB9n~wu7b69hZXtCE7g?3kcaVFC z3xOqPrn1XFF1hu+)NMhQ`EE^G&%KLLpPdjtCJlUwQ{>n)!FZa_1giGx@=%njN~=X8 z(eMKo7ncOl60&gNch?x|EhIG`I`yT$gpqCl>iKMW`3HC+_StQ-XagysjW|hPCV0MNr>BQ`jXuE61wNoVpiZG^?+S!4oy4B6D(UPeDHCX;rTB*tY0 zIgnnRNMr*p+Y@d#JCZWy(Q^nB44N|-MZ{8Mid%bEldQSU0r=wI!1aH;nel(Ppu{)# Sl6n#ckwSwbHZ=t9Ir=9FJz{VG literal 0 HcmV?d00001 diff --git a/docs/stable/_images/SELU.png b/docs/stable/_images/SELU.png new file mode 100644 index 0000000000000000000000000000000000000000..0fdda9b3f7b8412fbad46af37fafd19dc0c7636f GIT binary patch literal 26599 zcmd>mbyODZ`tHzOBHf^XgkbQ}A+3~1N|zuF(wzd5A_^)cpr9b#-Jz6#bcX`c(tVx* z{q)wU0f1a2oA>cEXo$P%F2qG|q|A+KmG{YSHlFv~}(^19N)X~N8(F4fZ z(9zDy*3rt`h~D|ZBL{O^n;TcJUA@LZ|IpFVPJoN+&)=`wJ~HDXG$o^eAbRMwy+ocXl!yEGgVMN_>?N_H1DZmHV~B{y}!NyAPG zP3kMo)iU})n){rNDV*=bt~@4vct!VN{LN1ZlXPR>OKt}9tjIsx-4qfkin;wg!G$@| zmf6AdB10fV1pe*g1`OcQGcq!!mNX+j4G9TpYt4f$fzLm$aX?SNXHQ%lG4PR*uJQke zKS!mpVkde2ysZ$M`1Gmo6_xad10Sm15J)%a<;RDPN2`y;^(pXx>vvWdG7dsatMt z<&LlDJlByfO4aN89Dn<{(UT^_#K2I`c`SH;(rY)-RBB?R*m7eoMk%$R;9ktPN~6MI zZksVaRaMogcX4_RGtFT{bac7*J+@vF5hV(VW>bkto)$W`)Z}>D`MO12$5pe39GW^+I_Yhh zA6`1)juV<{ur!bj`F{M!0pHy1+qb#nI~TrwAWm0^50VJR=TF`Qn?7@TvUUnVPo6yK zdayQ8P360^IqJMJ^4Na1CB^G-2NM^!Y1k||!nXFfWLT*7=$bu`hSUWo-gaN_o8j zhihu3bW*z1W}J|a(6>GS!?-V9fitPrdK3{(CrYcVto(2=hsJ$t!B2+PO|q1NsOQ5p zkr*S*lc=axuswNBOX^#Tee;2vqgIHx`5iQ{GrE<|**#6O2U7tAgw)jG9&6QP_4W08 z`}4^gd$Un#6%~}-qNi03HT_u{##;+LAy1yv>$;3TR!kIN;Nl`+*T`vtKQ8uVq`iHM znx&E30t_#zvO_sdnt_jxJlSjKRiUg!k=5|^w{PF_73x&G^T6BmK_w$qB}1|8)2I4T z+uDeP1nQ6awY1>YUt8wX@2rgGJ1*X<+Uk`H2@gkwB&DRvbx!Ca;Is7CcdTbmV z?G8F{GG?Wu<)h=($~97`39NrQL0v_K=>jpal(@LR9UrwgCWH6>H!>u#tsnhzn%U~} zRk>9^Lm~Qt!b07hV&&OQ(bJ<%)4p!OH6i;6xB23iK0oI^bq?G&G)#f$1soUX+1S_; z`f7obB8ho!cJg+aGs#3=WME)OegFRbuq8rQ)V(7lM3J@@nYlMw?rJ$IaJ$BS4gH9Y z4*8B`QMx!?X9^=@{nD2{;DKvayE}#X4C?z-qi4$$rbY7b@UU`mwT2Ke-PO_I&V3Hj z=W%hf%Z1(Okkv%heo^f4xXYwr+fxQ|5zjIQEjv4Io5~f$GH~zm^778_$pTJn^2{-t zyF^UV5Z?ibxi)-mkoDfSbFs5GuGOAKn)jurSPtfNCV5mWwf0?S{E2pi{ZiHpoq#s{ zTYFquN($1eSFbeljnMSm7cc98@DDPD7>FN~b zfVd@LYRde*D>2NrW*;4n01n;47|>m{{^|%DMtXY4xFx(kl!V=&bTr>~Qsf9kmd?l% zq`JM0*^Ms2HG`IL3e6f%e($3VnWObag6xT!^l<64_C@iKK6l~!hN~7+3YHwTJg(wY zPlRZqpbKukw*t_SpgVGM+TXURxL6aClh=+avOlbhmL{)3@)BKu=kx|E+M@v=JQ7bp@`EyjLE4uG`(sLrB z;wC0vNdfR^YRiN^Zvaa|f(JZl|fS){r zL=gS8Ya3-ND86y}-sR1fiJuFAhu*OoDayt8)YgjTlz*r6JXp+l5B37A;LGyHmoIn2srfH#&UXiQno59- z04%SSRE9PF>S44DZ9O;=!^g)5zEoFg#s>^KPeQr4tKdoL_<{4o`@aXQR9RCq#OL&I zGTU1w9QgcnGnszVgE-wFkQXR02xyxc9^*tmd)7N6)Dg4&BP-WvZ+)7a+W>;Y$t7lH zqutfJ&GJVN3f)ZN?9Ho$+Clo$ z33m6N$6;uCc3DpH-o0nsujCmyITb!?72k2EcnNNzrg>9$+Xq;4*GF)Mg`Tuv*X8_H z$3(cKmBLI?larK`oRf1MOZx>bvN$byXRk4#+My`3;a1FrL2q`)Nef%6t4}CT+zXS5cRmPxP%0cby(xHn7TUY z!OqH3)uJ#>v(?8`|KleO$7;r8o!wIkStwMY)90~i|VEeTtjSy@>` zWMuPWQHPAAzGiPWEGL15>R$8(RTy}(-{=yJxvBU~nOc9H&Orkq$GcijtpDvzy`g03 z5NnmwtXM+-#uhfezKB;C8a-&&7L%I4*MV;s4C^;TzH%NeO*pfM3}2UX?c0Tjam2U` zxycFX=++8Z3TX*`EiiJp?HX12jZf)isrW9QRTRUAnFe=#53eryQ7~238?28rxtiS} z635J@n*NXpR!mA7l$pua334}R0;Owhxf++1O5YT81%-(DR5>3*xJr>iX@heo@qp4|%r>}(2a zXm_NE0MWmWDzL&=*S2eIjo;AdF@)+8$sAjS;}X!AxHft-_lTP1_^9zsOib#g?H>N; z{>-z*M?5AW-yWenQgb9{K=O8-Prp^RRzS;fG6F?G?nEc`=l1n`LkC|*5F&@2j76B$ zv0cil02A6r|NEqa7un_DjB1&y?nF_&E=i6}$GQwPh8_7{!;qY!gbZJaa_+zO(@P 
zzYIu?|8OI!`?_7z4>46)Xd;0R3LqY_>?IXmsV#0`^3@YpzW7p}`J~?754N{oX00HH zuQt`(zwbI5mk9aRV-dN|1|fyu^H`2upKMXC)AMKQ{>zmjhi_^qkNRE-LkNV`Du43R z6tdd-V>ugKh>uIyeG`VsVR*30C-P#ZnINM zh78i$Xej}K&^lCedg1}PkrWUUHH#h+q2p0H-s>J5Wn~O*L`v&kMdD)ad&N@hqOl)k} zpi2YM?MsDuh7_h>P!PJFd);4Fx!G@Z!phpX{7&1h?A-f+MTQHpXATS1ypGV@U*`Ai z7T%?vnVHFZ*u#iRaUB&D7iFNe>dDsT&5n200uj*0=Blb%f5nxG#y>HuCN?4yyK18y zU*6k?34d_-DJ4~qttpRFq+KYmz1G6d$lNuzIAv4LZO3-;L#;-RBll;Z!k*qU^MAs(Ol*wJa(F|7xa zL9&on$CygV$z6N*liH|KhEh(!#+FFAr7)7rDJ!paM#k=^#HA059;4a^b2-~@gKIt9 z$9GrD< z+NlYTwdO_w(Qp{K0UagCJuija3fkv8;&}-`4QPH%n4KL5L}AFctxXp6W@#YwfBW{W z%gxBh2+j}7W95DK`H;mtJ%t#Vn4;t2Fv6N=5|R=U!a)ExF*D=!VA@_CrvkM!1qDUt z$kW%av9tO^!^3gs_erGFhycv;{=ixdD8jj47yvqjwsz)9NJdZ$}srh;gkBd1xXae&hqqb2dxw75#H0C zD}sWfuOdE~^cE@Y@9(cWi3nmgnOryg^5&K%00|sMmWK=BX7!_XY1cOg9U7$YurLWJ zsmGg_?r}yzrVk!4T)zBn7^_6fr=hno!-GLG?AQ80_`h+3*y~`+-gI`+>(QWl!RSg` zM~5WHvxNj|Nn9=1Z5Jb1tlf1(jQ9$h|mothkeU&{Z5e=M9gXDtfE zXvHCx_|Bl^Wh*m;*3O550ua-PM1(P$%y8;JqpkDk(`DD)ahJqCQVvaDpOekxp;Ft) z+tuhY;A_#jdHkv!hc;9br-g(Nca_H_z3Lj#5sX(iZH7P0o{R=#_`%-CiH`&TBbtqk z4a(w$4orr-y1ICD+4E6hfBdZse&7+esA-xE)yasiUr%mMeakor zCf0hRHZOkDaolcTkDW!_qm33Tp;z7b$P_}Z-0B}S{eo%TF&*Z+q>PLh^}P2V$S3eA zFQtQ9G*`s*BN$k1>6`pspM#yye~lc61yAyxYTkeX6Yu&kG9e@#MH>OYM9Xnp5HNM& zWRUW0c#ka&72>Y2#Z5Z&&__kva~z&hD`Tz(gd(@lWQ{Pr|+)BJE-k&5>Re;DYSo&e0p$=uGDjj%!0!iH$1AYv@p1`cyIHoRTT-G zVl&x7Lqqv0KL5RUO;-4W2!qe55w0uqG?4F=#tV5{1BE@;zAg`Zb2UOtgwMmNZ~y&r zwc5-4OEiaUONKkV1`|9V$VY3#20Bv_buez-hT&E6fjr8)m;}$6WP=7d->z3|D|DlC zbJ~;WEV*NYLXZF7R}QrL=!}tcBtQnLG9*an>kUdSMOg_=!BchJ92S#VIX>(?T)T_l z!MHtp_St=`<`T^-kE}PWb=73ak6)2F22WpH`!T&(cKV&3Fqa`v3>2d>8s&9=;;?(y z_imbR_9oC(zar@0Ik2jCQATchNS2xlZoQ1)n&pQJCB4iRqeib}Gb6PwB&e>;Pj~(6 zOA+ovDjpjO^9Z<7iY8;@yBn|=)|a1EMarCpq@6M(A<9db;)h6;V;V6XrAbNp&+AeHLc#=Sl%67 zUwthO)#cgHtdDFT_b~8@&BiX`>`qdn&hWT~(fn2V_Fa!Jpk!yRsjR<$)XA4iCGr{} z$=gg%YIt7QdB>9qzqCV8!}T%+8mNRFTpbBXF=@AytTuP7b%7-bApJLm1_(D~TRO}_o0HD1SQGaB4 z$jItj=`3)~=CWRc0XID}Gdg;JH~#Jt+8;kDRzpYyTU_(9|LrF!?Cvw)r=L{N8n&-U z>;GdF#l^`#@uyZFP`u9{BBHQrSmMR48@^3bWD_eheG(raRt3szR#w)je|bp`59}pe zITMlbqDMG9q=M*|niPNLA;?8{i|11g)@-$Y#{Y zPKL>-n>1Qz^b~v1gMAH|Nc7rF;CaLaY!TiM5s{>{EFFVCg}%!J<6=SX-FO|(DnkNj zsv)Vz)!dV^0r_~mM_vKGawM_{3bhn4$5nMLwW!fJD`{!_%NNng4pR%cml{^N$+tY7 zkSR4D^U5H3;Hjxa$HX-L>(<6znHDwXOwUEFDG%S7{+8+-&w0%A0rrjgZ;)uo)x~G92#Wp5AUp_Mk8v@Sgzi?5zRrT zhe9}_bITaek(U^mkZ=h>$BA@=w)8~!nUhF6Q-7@AYF-?=ua4uB=FgIIr@kpg*U3wc z_a{CFR2^7vAol3d^?!SI#Eo81o7}4qHKHt=fAcS zg0HNW@kur{zRj*ox*$PaiWX6Ww&cETQ@$k+Q%&K++U>Ze?f9NXMxiOa*zL-V%w$h>`6k(1?sN51C>-u+$Dj`Qv)0$9 zDmuzhi*miu8d`V($U?4CN@D_ypUiIb^*N_y9G9x^5jDxNh9bv&db=NgC+bfM$+;G< zzu9g#n4x{wIzLU~DOT?I7rMp@DT{F-1^Y8w+qpokd--=i4cbXWJPm=@5k|UK8CE~3 zVDC&r{-YhcWUdp)hS$9jV0x00Hg{pi5$&wY9koP)V!BpCTcvlUed%l$r4P;+&js9X z&s7auo;hCB_}XX!o$w<_W-;J)Q}W&s_Fiq0*Uo*`Q2)xc8ttWYLZWDleU}40oz3rB z;Ok6RQ)!ZF@=#7h=+&1@iD<}VkWhw-%!yl~=%PR&3Db2j7O3>Y&7DeW&W_LE=Fcp2 zjeqVJ{b62~tHpz>Ac`SF36o6NTuQPMvc5g@h0#6j&S|B`rKFxGm{S1Z6I~)c;O+W*ioc}y>yt}~nuxxW3NPS#%pNH?V5J-b`liV#G1RP<{6N%5^yF5&{ zk?DKp4=C_6fwGR5m+aqN{k}V5$(go6EjPP9R`>-b&*Aj(l>oc$%VzZkcRfYoXAKbt zGkpPKBF9@Q0CA^?M_3Dd*w6ltMgiYa<02Y*08(Zq;_$gKH~AaZ&sUU0oy)4M1njo@ z`t$oDq?rNXp(_Y9MdfMwQ1U}NMgr<-R|Ls4g2iF zxh2mMh~w(j7MR}x>=)o|lfOEA?~D)TclciPGU>&O$5T@VkQhjRGqbbGIy&J$e?FQQ zx_0f_!dUqYfPF1;P$Pj@NE;m;-PYb7&}?L{s06C|hYwi+gA!l;%6)egFXmHVczC#} zwXA}|<9-V%Y3ZQNBkZ)EkfNfSU%!5R^X5$*f9|lA^qo7{_%s56fL5K~`trqM2eJQK z3vl-m3rkv2(ZzS~-gPZ%ZC~N!!~_4@Iy;>d1rtRsU%s4DRaJE#0q;h31|UN(l9GyJ zPIsJdl}Wy4p4XwXnNL-U(yES6PY7gZb$0vsWu;WWeX8x#d8Ahz8```P7`<2Bff$xHqcEv+T 
zhh5#1$BL6cUkHeVgRRBc-^(iMT^hf9g9c(oI&tuiGK5@9BLpuk`MJb^{5eW-8uIq45)w3mhjlG|?eZwiNCSGX(1= zR$ZutJ!GKuUib`%j!Y+FkdP&`cc@!v=xn9UU!eIhGbPoQ&`EDc^nlo7bC$6^lL(QM zt;PcQ_2*OTQy8zXVcIHChRj7WXz@n8SIdL8E`+AmSs^y7u>UE_50cNAT!QrXj!#xz?U6GR0`V zb>&~J<;QHY1f__c9{yjErs^_?&;&+XmX=Rd?nUg}V&*7!V>L|}TnP^qwf&UN!+l=R z0eetZCi*L9Zf;qrs!jDLV{>l@D7?=sP0`okgKJvDai;P0k)%WgOC!WVg98wxw3(^) zkJ_yj;2ssN&_h!}DY_d?YSlzjWYO%3hFv6w|D(;FCZ2C!{FzQsbu2=gQGWP z#G=t{IQeILLVxq-bC#=>ziaW$CM$KT;jrLXvqmG8p3A8;aH$}YZ)l?*$zs~4{xYJ* z?&3PdS?}Xz31afWsE%jZ>O%FTOcKds4d%BDjBRi2+hyg(d>jZv1_{OAynP$^SDEB? z2Z52vS$2}R-8Eyo^LRY};pZuJ&J&@_RGfeI10Vu_??<53z?wjya&FEo_a4K*m&fZ9 zbGOQNduh2TwRV?el-i`(fr+`|gDTz)z}geqDj{x@wgD0(3&U4X;jU2Jc`# zu4+Due8z?|vQM=!R_tZ(RNJM+8`hR~R_R622iEmQG`s#>qQ1$edPy36I-3e(=Hr>G zw9Ue<0@4zQ@Z98k*(dqGbFh|)aX$b0o`&|uus-Z9JF6jNExQu$hif}W?5>xM1EllM zUS#J2guT=!Au{{$%v*jT>|Z{+Y}2nbpYMj6n)2Pdb$?a3BiG|6OX8+82L}z7YAhsC z4`$QMCG1=G$DOo~>?;8Te9(~VTOCM<+~42-*CWc|EKpYSeRk>z=_5uf^DP#Z~?+hBIa^SsN`n53tFZui8#RdCp#&ooME~Fd?lKknFJcC|nYvsb~ zz=I;h`K5n%K|QZvhxuM9!*5q!SAc|lQS-#H<1uBuqoJYTr zY7smN_@y4Q*lE3nX*+r*CKUW&iKX-f{9g{bs#*ZL#O~aYpi$C{ zv=)ojsX$CmLCZ1?QXi=FtQ3P0`9NE?-)-~QCleKbtXGPR`-@_KMuJqj73vx_cKmC5 zDx-w$&dluW?#HfL+FrKNFQ5Q0N>A5hh}mgUXzjaZi*XLd1JDEh#@LNviYkXOMS`Am zf{##GaAS;%QF!`aHUF|2(~q&=50s_7#mHgmQ!bD$q#HqX&D8=k%~;2T7CVEPAFLxs zBNCDddiqYZ*@ci7072;oaLOZvcOV{|#{rGuWnrCQL6M2@kJV~-FHY6Sc}(qWRsiZM z`{>*SH&$=hdtu6G{L|z_c7BiYzpew=6_b^0amhELA_)ShH@Fs79h&LtR=&G}CvwIJ zB$BF;Xzm^2A1$?dzDJggz-#osjq>F(<6Qu_^Xj%LQbCIxgc*@X^c8g0$XyhF0ShBtVe@w1xuHo7;%cwrRV%hjVlW zzq&~tYGtgET&S1s;iiQ5c#w4a5?2 zSo4E@=x;Nox7~jc0p?xb?_OK0w01r11`rMZZ6ep>Aio7d>Noc7TaW)d5s_n(_594t zObA*T^_UQ5Q~RHk!hZ4}xb(Q{(%6lTq)X&VE0Sbwh^7*7N5wC-? zgjS1UAeRvE$<4_@x7&K^Je%h%rECD&flRj7v^qTGxOXppd!9ijsH2CV;X7FTF|-I! zb@32W+p-o!(vPi@)tIc3WW{8i2j`$Oa-_|n!ssFZpy%B!4kmVDMG}9wk7&AL7RH)R z8AOoOBS}|rU&SNa4Q~QPKF?`o#-MfXqBBe^%>V8PG@+A00O95X9o@(Kh0+4uRc@Qy zW?c!SeT4-DKt6{H0l}IiAOu3g!j$#&RufnNHU#uyEnSI%iI15ez#=eRzkUH=_j#He zU5lSSeF7A(l(VyQA+HKh0U2I@1^V`$k9Z$Ge8|(v2HGIIBz+($i;j&A0s^&*7ccV7 z^Tc<`-o9NA^gMS}Rp%0DDJete=1c(oms(Wxm0veY1aty`KR^QZ(Pf^|m+|)PB_ML* zZ9lL00TAg2^ih}$3BQVNK(ChAPOdIT@op`u4#11pX!${1wIxSHjl6K-M{vf~6@Q!q zM{#?5dq6k5Q?f4p#V547f9~pXHZ{Kf>gEbDhUZUeF~DrPOgzc0Il!(xJrM*tvUi*1 z0?6cKWWk}K*z^3TTC_+VpFfKO@}iK~>i@p_XQ@p3VT~{J^8tR%XI3PD#H|Fn86VOQ zEfWCE0V9>tf{J!FObh_srWs=53+Q+L3r*mK|3-<-R~!>)Dsb4w!ZiT#U)S6GH~V2qWP!p zj3@#^H{s-~piuU_zcaxGXCnC1olA?}9kn)7;;`9dd@t*mi#} zRA_eH%E|_qL&zFo$m1CFRE>^LiMVSk!wFb0=e^Yo2-4&$k)YR70+`?W++ew8g+0Lh zQUT^?FHYGovJY6@)RM-t-P~Xn#{|Z?StWhmd2F^=?WheHA2m3^2VF$lYrSkXofwav z_-JE3Vz>+kPHYDa$Y10@F#vkue^R?`JB*Q+{V3*X8`FNA=X|1h^8vT1?sMFL>94hs znG$l}g@5i%sRNcAeEqD`p;dwAgols2G=RizhUen3G5=(^h~)g_@S4;}fam{NQ*38F zB@V#Kk=C-Wph6wza5*ZCKgw7Qhf;g`8SHaj7PmFNBU77-jN+XYD+W=}+tpAP6_zxCf>vgA+8-`u~l4YK?4<2KH!roqNO4$R+d-0?`?w!&E=jIPkXn)?}*fZUY zqU|t1ddNTz-br`zm@i=h*3B<@)>1Zd`4W9?lv807<{1_2Q9bcZW6^Pp5b1clE}yI(M`ygV*0 zIxP_&=~)%#-u69gtJ>8>3h~jIM0XU!X!@<+!ub6|;(_g_5qdE6md5p)5YYB*H?yZ4m%O7EY3!msXBX~zrD8ukhcil+6+lC3C zGE*^*B-)rzXXdmPP9{d%^e_h~=Giklpxq_yV`pcNe*XOJtuUb|fQEw{`FlM&+9rjw z26!!18!Yfy0DPX&vQ=N(vfbx(Ud_Y(Q%^Hd-FT%-&s4+2!ErcQ zwc}d{LT(VtZw17<(E@HEE-qNF+I^$1|Jqr9XPN!K0oN=3vv*X1izp?@5ub>9gLF0SFcftiylE6|M>X6kQ81D)3e0deSuH0ds zc6Xzd&ZjCuYo`2M>*S{Q_?$j0XNo^S$DrN6PSws!N7Lc_4`jbEqqJuJk6kUy=+zw` z?%M8D0u`m9A;Zzp(HEd70jTc%v{)Ltwac=S*FEt24G*3<&b?4?d;<~a^$?dv%7W<_ zu#UEQ`_t^0CCFyD;N5U-wst9~UM`a9Iufmx&(mhd&l6GOV_d&}T}Mx`b?^+Jc3~bq zDp;YHuHC`-c_*ufp)K%oDQ>ehQ>B~2Uq&K@v~UODtKU0b1+>z7$SZsP1iHuwlUXlOfBYrD04O-1Bi#@rFUVs=z#{O)0>wZHzYK);W3$B2+Za)FdS6r 
z_$MhcT36Mj4_ocL-V7gm46mv@rt|ycqQ}O1sFmgl+ssI5qyH0>9u|bWKrQdonZIs5 zk4V~?)B;W-riZcKapm8%h@dh8@Yx`GH_vQKLB4X8V94oj&fRibWrN*8~1kg(-DDnlbH&O$pw z#=xN@#Kn;>T)0380mYGGl4 zSVZ48EdfN~*>yhAW#&Uhl7GL2lVYJ(wkuFF+k@Ygj6Bl!+>jb`U&?+7h7tgi5b+p?jFFq0 z5DX!BXK@x5W1i7FG+#H*HC+jW$m}$?iH9C7jLGD;HzsJd^j@6Q09^H;wOGCU|!+j!-sY7u##m?c?U+7 zaRUV3`8MMMaozb^*`1+Z&op16JvSpQ?)|26M%(_8d_~4SQ^4>AaLyJG?GI?o4-C9wGutd58T3FtcOWmB7!LZU*#CV4w*iq@Z{LWJbVE(yFS) z_?1Au2V-*Kpve=!k%2*Ky`X7nkqv2E$G?O)6sz@sLQR>^v1n=_dw2MxJW|6pAj4s#K_N20ctx| zPEIGq!AvC}90N~8%*H0iVPtXZ%9SfvK$QmW@JuBK@c1`*c`%`M#`%0?k$Xr@dvYBE zvNL~1QM$#OQe-D6;BJ>ZR-FX*eV%w{^q5MYRnVARM!1_lE9V;ShO3>4J~cbr{JzVW=a z--+s&`&?eo)EC=Yivi_sV+HEev9@&G-IUf)~KGY7c5_Vmu z2cu2eD@I?ytV;7!28s0%%UmP~P#%Lhy0C}OfG8$3y~!UM(&nLQg)i^!f^xzp5MH*k zvi^-`K2^f5oxoFaZolkwU=PzkT|IVler!x2G0=Q_iBM^rA!%^Y`kK@i*db_yz|0;P?XcQ#t#n>VkeSzN48$V73t%Ns zT~HtHuE_x9qW9i(Fe!?Jkl&e<#V$$v)+vlHbt=Pk<)rD0hROF+0A~js1t##`!szpy z2GF;MS`Z>qME=g5hL#8_FqiPnvjaGgfIvk*?=!)*szS9~9$CqQ64!B?s_nz8P#sX_ z7-^QuT2let0XnG2W@l%Sd)YtyXSvz%A}9DNs7&>4qbpETlz6 z9_}gdj(?Tr+|c=p3rpZK+q68n1sX4kOelwnIvD7+Y{A;%lLLbQ*)TI=+PYpOAr zfcMa4u8nAJe%=^NiD*_hei-Q7#5`+^B`~(DR)Mfezk|7Yf!k)^ju>VZ*p%^s5X&N;zROs$0B6 z_Z5yBq_MpePS&~`P`lA&R=X#T$oQqg1ZyQn8+-rpBQEq>mr$!4?6zzz$lXB7wNXDZ z4zn1J&7!Sa(+~9-XHKXD$WiazzyIXb4Kqf7z$FSeiG$Dp2XkOZ#4ldF(Bkbj22AC> zdkyEji*09mPJJZMy#aP1Kx3=vhOtui-sXVSZ@3z`fH#_sva+8;7e5A2+cq{mA?^Y5 zbr1;5S4H-wP>#0xlz?v5I&2B`o*d}gK!FRUedmNpqw1~Zs;j;ndB++hR!H2*4o@Yu}cw-$i$=cOzM zf~kq_>vxUwnXdCew&O5!y^r)PWI+?G_UK!j{tT!#A1WUq!GV$6=fv})X1;hpEKxK| zzM!g5tp%0M@dDRZ&1Le7bbhvG1ZEpDl9y5ziRwUk3+8ua>DKx(2ms(U8N~S$cnbaX z>(9=6nu(_hJtR@^zt~ZA>67KHPZOZ_#WVB^(6g2s&^}cTO$3Ss>o2ZI^AQ zIgXu5kpdSWe>OxrImy#t-Ap%CK3w!$Oj`3$1k(frOQ64o#k#OO0F&V<=m~izJ&Dz` zNW#dt1`3|grfo2f7NmWs2_J5A(?Z~@8GJfm6sl#T17+kM70H;#ZGthx^-n8X=z|OQgF3SftZdDcrQIB~~6=Ve?z+iIlA*k`eR2?BPF-pL#&G<@~ zHR^LfkjA6H(<3lt$fPa^pqoi@A%RRXqlS^4L%v{6r>!uR+xsr{)gL5yK4~X|D1TiRUhrRR zXreg+5ge*e$-b^KaqH;03sgza&rhdvaca2DHYCIZkjv4} zfs|yxOKqqi(3%&d%udklU+Y^hen&mX!&5@X=a25uu6*iKe7N7bPHX zvQPh2@W9aaV}P=PvG}&GE@=x3Hqg<@mR`~C1f1oYysCkHTaEji&M)Y08M<+@Em>Vn zCN{aAY9`MLu-zU|DuN$GR1=CWgA0~8H5KCtxPNSOixEF~w&}(`t8GX6h*4urxHhMs6GXXsi z7LEUxE|WkozbGw@-g{5rCow-!G>P6h+89w5O|d$LMfT3=iRad-MHr2m6COOP zS1=LuS%50@$LX$zS|T1^WbFJDjS?so1%l(jz2fIj`z=u0NXLbwiMC1?NgUF5SM1Qj zHP;08D(=Ff`t=C1p_}oYB5pP6fN%%$QA99yrNo>1X))_WG-)57q5d>7E}Oipyb(1` zYkbXRd`)XM4rdIGk`{9|4g|)>)8k1a6+`QU?(bsN+2(#OTk|96f~ve#@kbHZ0;p*8 zY;(1z{_#g+CR&aUA!G%!9SXo9|DMNGR2p6|Kg}kgMRE+_5&TRZ zyx%@Q;SUh`{$`%W{ZUJ+&~ko8R4e{z*|RzKXG?C6bd_=jPv!iCyH!4KK7VwW93!xW z7~GtrJsDaNy*j&}6t|zSbxpvrqi*zoszPK?t)N?_cg1%3;9kzN)s$KcN|Dl2e(y)r zi(8U@id>;fTdBx+>}+2apCBhH-+O;UiTx`@2FZ_(XMtTp*>}xiw?tQcw8hP0Fa9W! 
zJbHdSzrYuH;Yr}yDFp`6@K9TeGy+k8MM_ym{qnSumxO&rY9)YURrWF3HIf!1wh%$K z@K#kFat(XK(@7~b6rWZ#RdN;kTc@L;=*Pl#R<{|w-+vPPIz`e$)HuT~BPyew*wu^eX)6l-6Im4s4qy|=Rj)~Ys(ypP>G<9U?| z&AJD`D-|LSj!~`2l-XVwZFh@-{mze$h_JFUF~+%rxK-(W$nXr$0MQ=DHJGWERRo6R z)4Ua*ium~#Rf{}4e?I5mzxA0}cF?xgj$n!bjIMtLuhWQhl~+=Va(5T;cFXSSQoMaU z*@IhE6*0>f+HCYlmv?rstjXvaf22dlY=AzZl``%v9u?w!dOGuLWtOYZ)$Yx?3c6GO zX+O#+rz6$wE7~iqH(0tigG^%996t90Gu2oc%pH7wMWuIm?l9qzQxdvf>DKz8#io~H zxqwnYAyy;&%Y%c1>jmfk`RtR4G{U?4%8!T8W$W<@HSp4zqb9o3tGX4ca9i}|j^9}5ezbSlcqDPYoR3A~Fzfj9D*>izp4z&ET%if)gOk1uWj zkm^NjY-V{m|7@5!cu&loOBcXM+UNI%;5{Zk78jLOR8(4;GfPU;b#-+u%*}7pY3t~G zydA@A4&GNVl5Z^IUR6}23KUlswze7`9v<#vx%~YD1DU0z8dSVC@=-M12|yF^wWlXK zTA`-HxqxdN<)4bxXqUX6AsmNI)n)uj2~Y{0FeK zLBf=K1=`;f?HoC2xvCS zht(nQZksHC&u7FvJ=Q_eDYc3YZBRcY6ZZ<*!3SZP*5}8}-A1 zhVTRUWO5J8+Z&^C%dLDexjtK}w#T1=f`Jbjt#CS|;AYO!u~Z;{rrvl7Zf>h~bY$eF zaSN^jcwY?x=z3UcP606s`d>T>x;;q|9gbM9fbIN9ZQgc#TBUdQ*jQQHw8Ccj?oW2- zM<)N^r%{R+Zi#xXP}8+2JKL(|0p8UyVD-}tUd9tiBU!}0NQO>?Sv&LO{PV2M=bZ7L z$tsA}8yWS(sMdqywa2anKv=-btKuf#(eQR}ejOhE5VEr=Bl(y6FvS}@F+nMF-?o6Q z&2r#l55SjHm6fAUVsdo`)#T)G1mg?nf5#!%?G*AP3y>0Sm_-V8w;y1Der%a>7(#|1 zNeVmzS-wc(kjWfcNfTqHKgVFjwyh#}p*a&Foh`KxnVd3d{zF9%3VdU@XE&Ax+{szv!L5I9o*~&fmH`7DNeG&w%bENL1qCft01q|%pZ85!_jFxIDKRl|Ip?!K92@_61|Wa@ zy3YAU$uHz<$^#(ae}1vy>J)L^RYnNU)}Iwi_#$(QQStuTTTa_SBazU`^J(_Ye8c zjU&IXoKx^C+5E8_BnV;*LMH^~Z^SqW?QCtm2eC797EC#RT6+XGS+SyR4Y_8cg{r@| zHx<0xLl49m6>IAiWl`RTl_B>&JfGPpn|y$J;qT?J4x_FdtOU#ELL%U8Q|{muIK}^~ zxoZ!HI&a@Y)OOn1+RC;Lt;5x&Lgm=xR1#8Ds1Pb=B9btU+jcHlBt;=YP9vuUExp(8y{_LsZ+}tKd}ltN=lk5xeLv6V{vLWbhhezkAZ{0A z+8?^x@+AL#TM@V7kAqerHpjc0vY_2_s)9%Jj5c=WCsB;XA&pWUwsGS|8E&wi;+>4g zo}v#Att+O+_85p?c=%qea$VShCOP|1!|K!Xe>OESiR^h}rY;`c2Fy~`P}$isp0n&T z_`ZL=_iOrnJe87-sU0r-A#Tii`)y<*KT#`9Rz>ah^2&2@b*=ddaE^;lZ)j~-=W@CB zc3*y!iF|zV@JO7=E8SdyK6PW9H-9<8>qSJQq9r21>b)Xwl`dm=W!_K0!<~i7@I5ow z4NJgPt5+9zwHbLIJ9aB9?e^^#Fht>EW-jGtJ%jYjOjEo<#jW91S65e`ueWyr4v(`M z8?W~~RKm$0nMLP*8CY!;r*5vHp+Uf0(LMsKGBOP16YH3{Dc;oSp`eD%k zMjFkcv$GTEW*j!GoZ8S}!R_yNLC1`f({jzk%q)sbCa=$ZJV1Oi+&3IZ&5><>oQY)t z!ZzgE8&Wh6UNw=gs%fr2X-}l}GmeX|Y-oL_uB@t>nr9%rsI1Z%y8n6F&NnlPJeoJd z5$NMtCK}!b@a0P!?Mv0-K(yMUE?x&yIOU*`LF1BHYhTZKi*qIg!DFL+8bU!-64kQs z(h_Z3VGoaSz@U;;aN7u8wKob340QPdbc1gXm#(%-9X469 zhqORcEg?5ISD!sY1c+kkUaX7V*zj&0`oI=A_YRXrWdNFeNfwoAl;`lgIw?#uhFgq- z`Z{q^fhA;*qJ3+IJENFn_cv=c`l?55OG`*t;VO5j#V$~bNe)VHJEpGe@Uzx#6&^WN zDpbO?^VF$R=6R3Xh-D)-WjscdoXk}=Woz8xV?uIRy*9yh|iI3 ztgTI1O)c7Ntiiq9-qE4#@a$?5MeC!aAZf*z2mkx}Id5N*2O7N;#TPU(R1cP)OmZJR z@d)|I+uIq+@=;N1#~zPK36QAIYqiJIeSZF(JlQGf-cWikX%-5Z>ym?!9!SW$GAFQx zK(r)uin}r@rexGbkFD+%mru@=xH{}i6>O)%5z?}U)m}u}fp4U2Z4xQu>`eh766vf6 z@j_8Obie+KwA`y+P*9Nb4ct1QUqsF5?>{~rs=i)j;*#@bGd)eMtt-3I*Gz$?kAj*4 zUsu{ec&(D}Ik5>IGEsJ)I5l&vv~RfEw7(tChfbs~$#Jdpp`5Gm#ryGs=eAk@Ow@Po z%pDZEbKyHu$jBeu*d_N~V1aOJPXFGUXNW99^2^JM{1yUDO2WW$$)y+IkPtEbg?|bM z2>z)nMA9B@9(q3KfD;x7Mf{y_E}m~(YFC&0|GjwU_N_@lO+EdEvY+Ur_orLb!rwgZ zGnlfl`OUwCZ%f7*uUn?8T76`H)tLf$0`q<-yFD4V5w_nipBy6AbGn3tILUT1gYG+c zdgh2SdkU^3{7nn0l=#<=+y!$+}B`Ep_{Yf|F-2}VE`5lE4l02!mj~;C;xO;ci z(hJUL*QF!f!_f3v-Rh7DOgP~>!GDheGmx6lBxw-C zbi4vbPIvqG_)uVLVS%*b;_hBKcqm5AGZErNOiavr7s3RFLZKicFMH;sXJrACqb)2f zs+mhM8B=}B0PTu+n2m;uE6B;20c9Irzh*Tx#fcBN8X^6(cXqPStgNvGO@TocvY*`q z?*(?(z@80E_$%+F(dh-4AL$DqpeZi(kG$DW3FiBS1wQn{M>BGa)RzvpQGCEhn_ zvpg`LO|(mX2us6+Qbu-J`crxO_OX#REpJY8bP9V9LX_xxXz@A5d77&KKcGxQpCU)_ zYI48b`m~gk6zxosxSEGH#$Lk9X00_{9RfGcR~6w>T0KQ@z(X~n4~}Z28Up_Ky70(e zd;4Tw$rLWg?|)(|R_K03Og(N}vMJ2x3YIcGbIamw#k5#n=D z;m|g}_V)JbPtAX{s+Cl}Yo3y`!mVb=ty%L53QWHLfV=nc<1XBGR*aH!1lwLrway#m zZFyzNR0IikBQPNG{Fefm;8~ucOO`G3veBIaj{kuf>h$&MOix-MYn^}Q{5Czoe?^=L 
z_#v(au_JeSf~SWVljtxluh81hFc+9NADp50Qznpi7KfBC(|FIX%YgXJ5KXI4caBill`=fh( zzZtXzTY49m-) z=+pImDyV}>AtdnI#Wp3rJ%<<_cES?oJGEVXlN+t)_H2tyE&cpdCczW z04`itUGJX|rtrCS1<-9|A`JqY=i0nLBKY9r{OjlL;4w>BwuONA5FdGTm_61WVJN=~ zR>g5JL3L{^+BMMRpZ(;?lM(s(;+t1q+fIoedX-v&vCN9@-GUwX|8yT{ZT_$o|0L}P z-r7sQbq~`H{v|yv%welXvOZv@{&SbE(bqCc-rWRVR+?-WJ+#m46HK$1k?%_}7Z?=1 zNCZlvH87(PpRt>e?0hheIAj&k?G9tmZgljy^m%so=N$M>Sw$tipuh}@1YEMsV6sJp zv1^tMqp7=E@F3hnV^ojC1r5;IoctQ2Ui$TWWZzE-TUn`RgGf-wiDv{{6lwPPDb@7I zk!-li?y!Q9Gbab-837L2rTsBsQNmTQ50{ZW8ZV`ni{mGO97CHYgCpl?j8SejSYZvC z-`@T8j;<&@hOoVzKIRLx1iw#)jBF6GRKYq^V)WA)3SIzMO~Ei=Cq|Fj+DBUb;xK%h z1mo=tDz&ovsGuv>%A)vaGDcF!VC~63=F8iXk735s?qWPfCkQ3Z*i1OAgGR*DvBxse z9~9$x7ul^sOHHRe5R~E~&&ut=>y;U)a9W z*)s8q^kSbj6M_6tA!wJPW92KH>k3F0-E2Y6KEI~ec9w}lK53<;RuLfN z5BhweC=wbfthwW#EeiC?ug|oi?;&;v{#NUo_CO23RP3oPdUeusdu>b0Sn$1@m}c!D zM#CPngpEP>p_oAFxUsGbI25{E`AKT}iB=2})WQ2TW3RQJF1(j}=-0Jje$?zBX&NcZC&tv+scc3dk2(1_*>h|&;{j6@x=6CsU zpD9*-z{3ox;T6r7mtCugtdIds-1cx+V=;Mbq}Lm+!Mb;Ok+)zUZdZ{#mrxv>vUPbU zXRbX7^+Op0E=0n>Os5&@kt+%|@&+**yi%%IypBe%s^`Aygs@VLi8aa67(z)#AQSzA zhv%0x(0z^!x$Uy#(RhEwa-SawG69XIVanAT4@2OBHtqQ>} zQ83gDY>D%H^wTO?*`8u@@O%l)fQr)Jv7od{az|zbTZ&iO9cT1~yV0@P3s*pgs<Ws+69#8yd5Ajg2uP9KbuL6=%-B+gLEc-A zaeiBg$nPrYH6r>?sF35a4Xr-T;cdRVhS_3}@ZKf*L@pp8=;Z|MUqSrx4>9rEzl7|J zQFJsK;szK;u}A(=CHFp4H0CgcLRvQct+#H@h9s}r=Dt455(4 zdRYE5>b&=n6pUkncc9ytuTQ`|l`%>&sP=2Sk(y)6bOCdOlKv7N?x|oasx3SSel>8x zPEdBOy`7Y#gNf;}k*hOF0g@o$YIct!WGL{@HZw6vVx!ikz@3>;8x8ncn}hIk9~|D% z8Ql8xid+qRbfS7jkicB=hy@xRv7mJHz5W|qC}{f(l4f{|Sdh+K(iS9e%3s$Ujd&Sn zNFfsYCRCvD!iSI%NZ$!|ofcRq^Hq%cZ=>?x!TW4%*7t^8(Q8Ij4v_H2K-Xvsb@PD} F{{mX@q=o(;qVv`F->Q47?x}O8W|X0O?>9eTt@XSwZpw)5C8Z`sp-_9pZ-~mH zPz0_h)Q)}4?gL8$GgEGMPIk^S$MvnO%=tMu{`s8U?13J~ z{`-fIqEN?C;-Z%o?LxlwI9h28qf_Q)dQW3P_VKG$6Lah3xuV+|3Zns zV>Kr*G4~lc&)ol{DZ^*K@(xOB-iPN+Zw&Y`?UI*2&_u{$@bT=4zbr{jiagJ%V{Vh_ zoK8NgJrsPe`i=Mf;!$$($JgJ4&*^^a;9g%%9A-2yx}MReCmE)qj;*t+<70*qi9XXF zqlLdq;>rm3L+|beq6px(dnhlVP_EDK5Rjly`#GghJ5VTwXS0Cxq|i0ug|LYE~ek1u1`q8u+?K1CK6J@=DT{+ zbzj1VgI*%eRc7iwVRi$%Yu{Xu93CE~7O*kCy!5r_U_QluB`MTH-`J?AYc3lVE{V^c z5i2Px8#YCX=g(K2Ui^I0>!3rsM4fH%de0+ihA^jj@w1xwna6WX?6I}HS>|7_)CO{1 z#m|!C--^+rGvBULmS4Z*XJ5ysG8T%GXq-$bPOj!C*3b*Hk2Wos;t91VKiZjZoq*fy z!)pffm^7)I75V2Ga|QX$tg~@&aP-sBtMP{_o0+Asv9S%=_fl{OAcIv+?e~-UJW>~$ zke{DVp||$v_)Ru{c|p2>nedQ4`;>xd{v@@G`=2`GMnilmBde;bsa}@JRGFw_mWyV_ zU2ygIxVARs+7LdhU#|`O9b-aKHMMuPC>AA`&32b0PMvc7)%oF*xQ%S1_T)sb(`;-q z!^yM(s%Mn_Y#T5$3F+zS$8)CawLX|S&N`k+bXlWS{0xJfW4_&gsw30DccdoB7rW2y zHCKt%0VAkU61ZD)4 zl#~p53+;or@5qX5W{%Vb$5d5GggQ>=-O76~{Cv}X*k8qJJSxC&VXP5WED;COd~>or zr9NLOxdO#My3qfa+;D!VlKO(7AA9k}gCxy@rC1uFjoH%tm4TPmodz*TA4bC*lbx3` z!iNjLc{wkf40XY+A`ciE87<YXb{eHrVg>EPVu;8N-Jo7QQX(*& zd$qN-bz*v&&Vq)1{Y!;a`B6QVP#H-d+^{z8TDz8G5|d0wth+lYvI(?Tt5mqc10vz+ zv4G-@sLsw#t{fp)KJlf&F3;%Y&E-B0*qUwXW(HqBK1Ie;{_1FitkVD`-!eKjP9bIj zvp(&6%7b1+D{OQew>}dPnYiqxtv9yZ-#>@Taarf>v2NF-7I7}T?nReGW14%pz;=et zVskXyMJqtlCdkmx(C@~n^Da0iq17SZcP50Qw0f|!Bj3HdY*sk;tSwP3BF}P^uGu=r zq$?WPl+em_!A#p&W5nX-j7y)O!V2_l{I$AiV>o^_d{P!SB)c$MN`;N${(+ zN?GUkG-icz__Y*#yirOKk7Zuj%fpi}E$wB8wG(&I*s=4k0=A4X)Hq-gwlU;C5YlnYrY=-NV$D9L~cdg$U}4R^hMN4jo&a&lXT zUO4*6$vf(q`rfbut^3f+2Gy_6-paR%o6a5FC$!k25L7LQTNzv%Vit+xvm6;imgq~G zPS%5AiK5w$WckCd)vt7rJQv*6A87|?L*vXL%QV#*O{>Oh(`4E!j~wU94r{XcQ(JU^ zC9~}^$&DC{FT|IPWefk-Vsrk4S z|K$8tGuyflzUlO8b{Y$s;>{_ujPhfI4};Ge_^gkKiM+AS3&$0DXyEitj*nLw7c6xeXiw?9CCm(FJ>=9fY$2{mqh9mU zz0;Y^GBg(IrF*qpmZy8vx)}4;rwhzJ4ZFFJ@1vU`ck17%7K$I>zQ{e^dmBMb2oDOy0te~Qw}4xear0SA-c38DT9i@o zV{5p@j9u2Jau~&@rlt%{P1|5+UVwF~8LksvH`WA0xYnBDtUEmj?HEPLv!1<`$zmk; 
zIn=h-{!`jsFr)tVE`1m@zj?naJJ_=%)ij2TP1tHm`w8>m;EA0Xu*)f9$+HWKLC#b$ zyv%C0|3l4EOqhd&FOziBdhaqOWWBw$HOkX-pQgij6xh7BU^9$;O%7VHNCX>He-G`1 zSYp^da4rN#x)Ta|*ObnxXG;|o6|IfB;6@A4Xd~<7 zte&U+9pT`US4Gx7iwVC6kB43^=!4~5JYBRgUyU4anKhiI-PfmY_^soe*P6p6e4OWA zYLCr+EzMsY3FZ#yD0V4kFZjllbSq1OTyWNP>(k)??K;8aGbE3E^SvI+v~nKHPmS82 z7&dUyc4pu_E>DgYI>rT_PynTwAnxWub z(4Qu=v$GAsng>mRlM#;R<`BWJ)+1ATSlfxy$f8=uOX#(A-cj!REF=U;>N)I_vIC{G zli@UG`QuERJ$;*@`~m`C%VZ5ETH=t^;0||LY*oq9tEUs%TxfOxI~m@AvB!=KVx}m^ zLpfmSJBwXh+OiedtUt|EoDAaBd3MSGf;BNG%!j*%grYkTS2mPWylGO$z5lV-ovKCV zc64G&ii(1Quk%VN{e5Xt#PPu<6>rzD^sw(i z4m(6z?I*nieJ<2RptIam&oI?v)9VCHI?}Jz!)a?9PAI$gv**po)gre2$(D1Uwhfo)3fE+Gw#>Q zv-vkc^ccq1weUjL>0{yaVazPiCK$yge-*7baC5q0#3SHWT6&8dy(T{_zILN?xIw?x z;0BK1@VG+wn&Jp6F9aCA*wq1IB&NVmx@>ZsRZACb*R%~y&Kscye;{8kf-jtynCM-! zNhw;owK>&?OA=oBKt6#40m+&L3!yVkV%&OlEfCUi2L$t3Mo;IB(xHb>Yd1y(XvK@H zMTp2>yY}=ohj#J{N@l}KA4Z=@%X%Rd@Mz6Q*cZgDX9!_oKG^=6E|(d{zTnl)%6nXs z^~F9`;HMI@vz2ptY=VWC^Xeh;5}Nx&$;Z5i#A{&=6Rb$I^G3u41HN|gE4j0@k#%nL2zPj+n8=zOVi|9NOVVX^kpt!lzg3QhMrUhFPf$!)@BWp*GZAXQQ70XKBd-0 z9R6VNof}Lbak})|xzKZd{?qwLpm#x#nRT@rQ&Nm8nQ@zkGEg>bv7Hqib zhet%Fu;=+Ie5=WwXLq)L`7#D+KzE{AMmKWa>zKjl|}pYUex}5g2ekt;!54f_0WZgQGlg)=v(it7$QfRU$NVBe82o*O)Kw1 zXH?&2)bUHgTC^ZDVr^qHk%F5@AuTdOxiVA`!YhoD^RA0POu8@@;Z=ndEP{-u6~6j7 zG3L|`RKMNqk*)0aE9bxDxGyisT-s0Ai9r`Y=sdqxwFiajAF`;6OGuEUWR~%p5yNU6 zp)8?HMX%dP2XRF;6hP|iS=yGQe!+fz=*rCX%jb3o?tmDK|4+Y`Fwq(tO<0Pa-r{7)u`ziS0<5PI4d0kvg zj95qm?ThdI{NDN#vfnTR$tN@GhQ`MCtd;g{HTvT{Od56!V+X$4Yu!`fD*Cne`jqTa zrIMnM$y!=wG>u!mvt_jUduZ^3bndwwXHIO~?AZ28yX|;{=^NmASnCpo7X4w{2@B390J=~62^gCb#I;$*9bAY#Uz-moP6U%N zX&JsAGBB45m`HX=yIT6a7l$~s1n;hcpHb4(JSz^iSmEl`$JwSm@t&TZHM$d%lNT%o zL`^y~S-CCzA|YsjV`H$nwm5<00cOU=iVWU2B_$=>$eX`)WKelkw03ugWRYV>Y8AMG z6fG?+N9L%~RHYyk2X7}2InGGBzrX(*ZavSXZyga3>(nfXGQ2Ak3 z2aE#ZgdsHL>hjc+YL1ar5fc+r4!%pTfbmo?F-hVv>pe575iOnO#j0I_oFf`u)0$T? z2h08H2hV3p$Gw7I8|VkiA5{?cRDjM!OAFF*j0N%-e|a1G@*Bfnc7n;+Isw@fh&Jwb zjS4UGoz=!@K&pPdZgPl!=SAJBgAmj3Vvjdn3EQDj4$hW)(6lx*+D6r80WXs6tG`Vr@!|j{=9CvO>SbBSw9dv3?g3ChBP2aOh$3T(4E zxuP~y=EAA&Bq-N`(oJ4Hl;kr@8CqJ}#N?{&Js_hjIRM5K(i~My|D8XK@0K0hefT#3 z6){JE#Qga^Q(awZ|C6Q^Ra7b_;*V_~Rx0TV z;jg12=1BYLTE2hr>qo*=(OHJBE%)@(Pl4Iqjt`%q0Q?Vw(ZbZ9p!M9gB4i1!vZ$XM z!IC)j9^vJ;05U483}5;;FO!ks+p5I(i}eP_^&2Gp)5ZNcAOF%+mQnkkTTBWi`NUF2 zNJ!|K<|U?Tqwv`LIzcEA5ZZ{+ZV7b6LBaH*MdU6Q_zos%|H!HA^3V?iXL< z!Lzk&srkP(^K@G?Os+d|BWAekfbRjwdS)RRjz)-LBEjoSr_~2NR?X57Z{$O!nPoNJ z6p^p>MVTV1*`Vxh8iX_~5K;VGzC*8h?iAqJpL`DAA!s-I*0g9*d9zex*z@om`Ae>$5GHcIj*GfQgt*5>Ilm{RPNm7sbw;&`cu|eSG4UyGKSp+`= z1Go{(9xlZMSr#|Ft&NSugh-T7ZekBs5c09(B;;dkq5-*l=*EvVNrG}#bqNLq?_ zytCO$(;2M@);c`qL;Ya~DY?TPyZfP|i>vi6S_qWEcKWo}1i$3#GmQgfFU5?t!3Kk4 zVidQwwl>t)Z-B41Ha2?aNukk22o5YA%%jWX>C@8C@D>8)B<*4?*or|@9v&Vr;f4kV zm1}q22svhh+k-unv2+KJZwV$}Kzr&0F${-Z#>K^nYiep5nwZpTWW)A=ki9WX$g!oZ z?R7`kjZ-aOzQisq*?@!6%RX#~xH5oAZ{?V3-tU@wg#fRBiF`IxfN{24#3v=S0-8P? 
zQk}`V#5p{9adBZmA0VipImGJQhdj$>^+$fa*F*uPqrw8|T;_|4K_c2_fC3I$dutNT zf6J(!0G#f#!#P9#(7FvCZz(1*!J8M_kCs53qCeAH6tv6x(EOf$aM>61>OvAX*%!=I z@2pJq^6|d^?h;ucXaR)6aSD_{t~d^G0oF(147z zbmv+2EgJo_ceYRPgc=G8+;@F=9TxOMZ8{CaF07K#jD|q|q6oNveK*b8@0*sbUmmPG zz?1g3jed)9861hv1t3sELjfEl^Ck=8AM6@p=M~k|DhJsfy!|I0gfn`eL#br7gpC1( zawlKz3)ykzDELGxtLLj5H?~K95S~6iP68`j{)YkoElAxm2%-!h{`lVSh92BkSB{z9 zACdZxrh0qp{&>Z|H3b{^PgAZ`!tAOfib5TvpDyOxsmf*dg7#+`^0y~sd~~=Yd;9GE zo1I@=T%1Sm8#muRAF4!71gh5vQJ3bv;)1zd8BdZBWMyanCqo|^8u~vo^!oa@|Co<| zX$t70*Z<V@91JP=t6~E?~=5E^UK@)g15)sAiKALWukDiq`XFe{)!N|Qr7z`24ykN@P z^#eS=OzYD)>A>k?vZpYeAF+aEgL9AYH=k1c9>6d@YE|0a%=S<~)PW%PUgd$>a1m|D zc;B46^9TXSOunSPYbYRsQz3uNfogV_$jaqS1faM8BowUhI6@d9@EasT2EgIXIQ;i4 zDh#zR$8p(II`G_OKvmxK;(?=^94=khdoX%<@y;(9ta@PQlW|Y8{;z=U9v7$P zHPv{yJoJU=@>^(-mE_Tt8Hus&KAiRR!oDZh02GJ7+q#l5K!3VBkB`ImKRTTA3q$cU z>+^y$fjQ*b>}DnbYc2)U7@qDgm4kr$Kf0cf$u$c_q#$AtZCGi*ofaO z#zo&^7A<#K4 zlYu$A%N3s<$A8TiUt=R}C;Zwn#xekkl zwL#p>lRL7X;`W~2Q`{pkdi{rsfyD=~&j8jn=z9PVgTV+RZU}P0>b0o&KqP}bU-*xg zNl;0t%Vd4J|A@7XYsg zIIA8FsEs~gfAJ|P^&5^8aWP27e`d?{&6l+O7TAkAP>>Wq1w{vo`@+#n$lrGfR2c;8 z$;vVk+_oRR?axtx;fI{NC?N=A> zyg2IjjjW}$Ii@r&Gv9H&B~&6oAV}FZUe?XAd6Pb-i9ckby0A}Z)>+ox+jpP?S z4PHCx9vtF2_Sd=Fu&V%A0CwxVPWh|%D@hxQKN1n*E~!u+P0h;8B{~TMZCi=;eNk5Z z<|+J+YtU9ga+t6+-Ly-W|2L)4ueRB?iwmqut|F!W6@q0t7`7@MHZOZNEZV|Rrl4fDxI*q&iK(Pv@B=eOfjy%eO9RKZkUsJ=&mIr;H%-8)v%%UFsFLo z8F9`&-%)$_y~ZF~sLt1@32>^%g^U(q7X|I-1%WxdL+{wzu)NOk*J9qosmJX{&szoa z4j9q(w2v2?E{(3K>@btNTB=xfcVj?}HsaNu^IL}6>pd=XGILD<_wk|T=ryMOe%;^z-q+2;UejZn?{CeHJ;%+<( zmWl)GR_LKCoP1>7qA<~g%a#i=Ghnw8;tZnirhO4FO1kR{(L$JTVlyQbk=fv)|KWwD zQdh|SL@&W2PVI8YMC0$6bh)SskT2-Bmz*+FQIRj;_w7WVi&N}N3-^~)d~MJ@mRs)W zZX~6YcR*=4KJJWyv~JJ=|6vKm*KyJ5E?olQlP>DC9KJ3-%(|%Sz-OWu`!Ct1WKU1F zxl9QpPOX#cF*$6SIks4wvv1KxUc=O)sDI`CmBED5R=2FnN(fUl2CtW0h>n*Ma48l) z9PXH1uA~w#SRVfQ;ay7i-Te#W%~_C$B5}D|*YZqX$})&c9KeE-W$KlcE~CMV&8kLi znZ2{dCJ=jF3~ZWNrfYibX__^~7nxnu)0rNF-`wmenpwLLU#0U#Q9bH~PJ&{1Y|9hT zr$}davp(W3o;jOm&F9(zJH?8jFbY`2#H*0d>n;smBw^%gnAIAK!6djuWRMJ z7`f+~r}^vnAPW>VEE{M<4sy0r*7&$|g7xU-q`tw-j3UXTf_zw3T`j$Du@7pG zX(t6yuHmHotg^y3jq3fW@{aYre8gT|1z9;J-8?i{=5m9*1b{^jIXWmtF12vrjgodm$Kr13*f^FrZU?76golyNwPPoAdF4{?*z3D! z0g60;dlS>sxe%$>Y2`$ghA>*Se^?h5?;8glY*8VurEw2490bzwPaONrr!yP|uUA+r zqiY3xi?|J2>>RS#sz~uigivDTo3=OVP{|s34?blBC=YVRo4noTS9dJMu0%E)WMyXR z13EzD@**_#oyS>sI0c>A_|)EGT$Sp@5en+BG8}Y~&L><*z%X$)25W{ltQ-(Xus!pB z1#OwR)%hoP2?8Yt$8AA7YL z&B&{ybBqql($LmR&ExuC8QSm4o_w<$njlA2XgX#dwSl&LkG$CKi46gZ_^y4&8XoT4 zMOs9Ix{}O68*5^gbIX?Q*b1d7KBy>LsE3b3n{HAhf%jqujg!Daq87pgVNI3%=Ij_z zQOpM^OAz){jb321rMEYkIn3?~!d#PPs-QkHwI0^54!dgDV}BFbjro&rdpqrK1`F(C z_22hEOYe&G)BCN=R&7Tw1c?mFq$DVZyr6X@AuD6}3qnO*^tW|PxfgxP>EzriwteUv z2y96+^~4TE@ABR!{NcW?uI@ehfO{~bi6;AwO}D=GyxN&Q`WKwHbKGCM(qq&O2E`ZI zs?oI}HI5og{5|gQ)@nrjUjF__}fjO8gUa}@H zoWkdrXy-T=FQ@YFWAd`w%%32IQyO)2C&!Dni|57@=q(NF=w`ngB1keNfn`La(YJtU z#w;e8jrV_e>=Oy;1c-=kEce)kS5>P$-1S5p90=knI$9$_Lxnm!f?De8=uEnn>z>Tq zNU;_e%WSlePNGSQi}7g7%$EyVj%o8yzL4Ux1H~-Of^*YE;$!DE1N7#kc00(MPKi4V zdhlA|Hb6eY zyW~=sQlR2U9-TRQQ^Jv)u@#de>E=9VWW6-xY$_;uEhwd#(x3mNT-@##q+lJ61@ zu0*$Z%D<0)Rk4n`MDp}5)hr_T1IleYF{7@k6XU$;P=w!{X`k*Xkgb{$1`*5}km1QQ z=o!cS{zqh)R^#%USP^^%LIdrlWGb9oEX{TYQ7JZ;^r~vtR?KE~(ZB7TVqZyDX1)S? 
zLMFPkC%EmaV)#DBX8!Krws-f!^LL4aPvCPczucQKpP3VBj2{)xiWs2gb?EPl&6SZw z+_8KNGn=);`}cSgY28yyws$r$yLTek0-0oRYLCm|E3hXJ=}@l|R$6H3i%xv(m`l~O zdt7m{=m`Z#a9Yr?GvrN|L8I2Nv5H-sGILS)IY{^r1X-X{IW#V*z1CEqB_h>#>CP=J zmvo|s3jHAj%tUpSdkgo5DD2;3-?wNxPDWr&Kkco3NTnYlWts7kI9}RV4)Y1gWo;%b zu7BzHk(KVhOiG2PrFVS~uO*hE+zLf{;KR0Fx$nX|4@P7=m77*>But)vw#N5$#k9$;W~EeHZ6YT9Kb1+`>RrJ!;}f1_`) zq#mB@)U;l4XWHT2oQJC>cd3m2kizZcc;?Wg z$o0#!$z4Jag0EK;3bkX@b@-c>bdEOnPrlx#nWnl0J^#%!wzmhg{!KXsw?;SUxaZ%L zLP2Jn05a?dwJw>Z<`OF_E7XE<7bHs%7}$kj(Qm>Dm|6248u)@ z#RPJ+zM_kgQ}X9;X=Tm3#3{>$6$<4sO*Fi&WT0Ka?r$$M%qcsJiFw9Kc_E)G*+ktX z#ZHQCSqQR~+MvZ<^Zso!yv0d&i7x88_EMw8IpRHT`{J{*E@a?;u*ZOW0D8+5Co3O* z;05Ugp1PK$sjk-EUiL+a3I79g{L)9;-)3j?m`sm-b_!ivbATL84rKpT8%+%2pyv1o zrGW!cPadisrS&$=2p(-wvSf0mEzrnwEPI0Xwiiuzw(iw>S{5L8N5^N>EYoNAIQK zVVSrx$J~GA3y5Qwq~AntiG=4>eK506ImUq6X+~-#UggRpbGs@;OEsn+Rxwbt#wYmO zg|)jFcJcA?fkMW9XK|FBR+||^SPTABRw;YR!zo;e8SCSiDxtl&Y;2tw$5oLW-ReUZ z&WCj1Y3fR*)-W8El|(X+^}2Vt=romdWZ5pM&lf$c7Kr&kxezu}xx7)+QH`sX3>zhX z#rpZ%RXk=H|7@5T<6N^m50RF5e-(00ZG%`qL`OEufPP4H`PQ#``sO!$!o8cos}U$Hm5e7QAi|tIi&BYu4BDQ7 zrJk?n*t@~RK5ZXnla2hFXEA?SaD>dOW#08#!L>EhdMQ9+B0OJ>3ZP@pH`kT$_V z^!bFHgZ~Y=@=ld#!wbLSCAP6qA%dGz$Yd@9am-D@0*52pNsZlQZ2cexNc3_91w~H} zg00F$TsHt-jHol?qz)AY3RUK|ZpM00_DP0WA12Xd7dc%EkZTK&?8?HMy~v6juYKT) z0loFp+rsqxzaaLFTj^;{R9c#x3zpXT3g~hWy(iC<$Yceiw++a@kx|qjwC8nH#K_ zM0PAMX3m3PO34D05-&u74fu_$N5+d3oV<=qWPY-b`=^Xy-v=EOlEWj&HCP}1!B^Z! zkqiPu+jd*HEzR_4kM*3UuH%Ewh5qqU*0FF`5acHyGI>bB5rup_qWAO3H0|LRLaHDT zZw2wi78x`isOfK(0bU8VTP)e;Q5BpI{T3E!nvSS-09- zAy9p>2oY^k&SumLC%Dl&+kG06^??((XDet0X^DcX&`$V{CQ&FVJ155oDicOVN1^D7 zR=_6YqYxXE<-{c?d;ZHP2vcFaAd=w}DF*P{gNApyW$Gpcc1y80zxgO-@^@Z`NQe|+ z6cIITlzbdTxFnPBT0-&KY4q%4`ZFbARP+w=B_;Vpf}D5$iwJz|8`)1;avAsmB(^3i zqPbSRjBagj$M$#rhE_kB$Z~zTH-(X8i_#MyOd)!;LGtJlyzO6@yOINdPwVM1bEJp< zH-XI`)ARIqZW6FdsOkDQ0S)6vzL>BuxSoi{ z2S&?=CxetS@dqi^*?G&Pw@|Gv@26x?R@RHhv^(&qEXXgyf5;Xwp3MzBQC!hZ$6{W; zek~OYf}{pc;CwI3smQw!;F8)WMLZpm&4;b}O9^9^_U5I|sp4j|*fW;Y&FmbKb`SaQIG_?FACg(2FbTjC+wdf|~y z=SZG1{^*k?B5i0W%B}B+-eL_wVl1|`HH;j8h>K!0klmDbtUmBr&hD1AsqeDl-a+ES|FVHr^0>9B5G>V{-MD5$OiuES3OSQC(j?s{J$0NB?msdH166y z9e*feRxl}$#jOgvSzX*N)JP-{-jT>KpC*uqLLJdZ?gjXVb^R_Z`cJM1(BJ==D>{EZ z^^dvv-t4xfzzA0Vi^S&)#TNem=PS0UNkEp5C_dBDy#I`HKss?;M6mwlH;HK5UvwBq zC4qt6iyJY`pk!0#4`p9SezI>H%?fJK(8N1Op#jVn06rM>>gUX<%1>nE0)k?b=)hksVT?xHB~L8MTMr zvM)>{LBEx^Hqi#6`Gc^;0TLIa)D}c8w2Ri?k-5`{UQ#*mUiqa&B0Dv6ch{!v3DLDZ z{fN*XDK8@z*)V@+JN$%i)K4Pn;M}T{%=VBTQID3i$$U>vm~=apRt3 zLx{*QGp9)&C7M0xTOPLW{2>&$k!6e0q$k*ogPdx6=~I} z%9^R*W*Y5&{=>i@C&>g?9co#}_frP}Y31wJhhSj3KeQUpa(>7j78iB1{#HQT&te5N z3cC;GrO}W>>>9E4P9xk%N|&iy{W>DQ_y2uICp<9c{*O?74PlWk~d?CLkd+=y8kc)^W^GBruQu8{@V%pbm9^#t{Gp(B@o)2l2Q~z=%8+bWoBLd-}EE+5tnjI{+LmjB~N-bjlb6^WfXA zwT&XV0a81^24qhlHlK=$>SXfCB1@awEPeHxvsF4hrb4PVD@(8m;5vtp^#OaWx)jUA zF4@IEPd$1|6A}5~yR^;T+4Pf`fPjEeV}-4v0r!f*??nb#LMlH>3${F;2Gm28c0DnX zl9QL$LwU8$xZu|l6+VLSu)S49J1Kngn;wF7gM%81SmXbxK+(L5d^bohENF4Ae{lJJ zO`t`KZ*l7PiYwTc^MYD>7A)KCYRJyB6qonMW@Y*NOaGRUz8E@M@)fyJf&|BS4BWs$ zy&u|j5Rz}H{RNX5e)C%18bl!+Kf8mJNBt1Q=>DH9i23_6nExyG|LLNjHsSyGVt-I% zu7JK$9s%y+&%$u$)`CoFzVZ$ez!*Q7ysP~@$jOQlQ1%Vt>VXOzj zriMz0i2)k-~Z5zJF=;J~FwXtx&!E&EVF>c8WuZ%PcnA2Xk+~yJ#PPIU`_dtsy zNQx~;>b~&ww{Lzec$(V&c&N>WihaoB)|i%mO8*n05~qAJZKiI80{y2}tO>U^@QofM zynMfFTfN$YIuHHtW*?G8L{`Z+8shuI5kPtlqIWqEGybUG0*LfGx~r|NtqealwIhPTUK3P3R$R&pe_LlR@`*+aY&XoFD0BO=a%`2_`XnwnD! 
zcd6*<$rD=Lp**f4&Dq)c2(Rg}x=;Z(SV6w>$B!QEr=;BZvW(zm*}##23o$Dzb_)v& z{hwY?dZu*+DOgz{-3UpySmxW?Vo$nnUpZX2vO`IUwj*~vgm^+wGw%Tr0I;5`b{|6k zi_#4Ch^aAnTi2Y`VOJGGJRk^a%8TioNA}2%Dr#$!7hz+KP(UcZgrb6;*}$iYHT13+PV8j;*fMky;PSrFY!C9p&D zObf?6)Yy@jm^eiG_9fpY#Bs6Tf1aDnn0tcsHoe0*Ha>m;KJGN(p@Dh1kVatl^&=7C zu3eLB)R(+&?KmGKbuLJXSBU+@i5=G?C_wYBQtAaWF!e1=&L#Z`A`dDaeLz_Ly&9ZD@ zBL;NjVv$KYy62hj)_fUuD{b|gUp5(;(Yyx&mifXoNwwm#f`&x1%4bwlQ3WaZ zhbLH0RrNW*Er$S?CE2%cy4}+JFa-sL_xjh#9JeG?ulvW3S5{V5ZUfAj>$J+{3{?S`Dw=zJ2`Q)9?(tv-eMy8-EJ)IFdo3!*jRhy1G>XF+d_Op#1 zSz%#i{Q$@JUVwGiHyZ(#u%&S$67_eXEv9-mHr@JF-L1el$_w+gc8yn?Df z#`C?^eJ{&C_9(HR`=#^HtIm(ae?5Qx{D$6_?_=IWbBX*N3+a_BS91U5O}S(%`~F@U zUKNxOvB+JqZG$+H(Rhf*mF^LJ`q^zu@v1|g?8kJ@d26}~6s)OBfslJh!nLY3a45XY{rnHp}yyOdJ#>mG!G@8W&9AK6cP`Use|FYq5zpAB`{rx+PO^!3qy`0ybR>bk%A zoR2CfpqZPSyLpfZN{Q2}z2o7`a$r^%-oJm}KQrUi7$J6&gM%F8>*b$wA(xE#}2fHE*Y8?Pp-4zrRbkSjf1+HRv4i`iz+o-)s z*KcHa;^tP(ew7kvv)egcX+SL;6LCI9?=-suIF2W9ua$z0!DCZk)KzoMPXH5mQ%&s! z@ES(|2orO3Ecn(G2{Lw(2M->Y!+l(SetsqQGrqoH4l99ssqO_tvleg6lYyx1Rc`J@ z=;udmm(8bseg{|9)&^hz6;M}@3M>rRx@vY0#rcao!O}%=uTl%>BNS~|-z@gn_QioR zSk8-HB$?o5LVWzafJi>@vT_mc-`@-19AlCVOB@**x@fu#s(RQJhLOQw7+!KG>sBjxpsdJ9Q8e6<(h7G3Rl~%>MPCU(D|>4a=ao=hB8M8Q7m&Hy+1uZN z%S1@X$V#Cq-u&Aa&WRo&=L;~($ zs_!gf8Ur8mUd-v`g7Z9L% z@!~~#bz>q>mBwFEcYvev*Q|OA#d(*^%s8%l9veR|{9xz?lvrGsSL)P(Wrb}L85!9R zH-`9VdNLh6d^jRK{QwaWQMzQKAKagJ7wYz&zj*NhZaRDpL=6$7%B9&RU9fNsCHP;mZ+RnR0L_|7<_1$|+ zE5NU(8Md-X1)kdhC6Zh)_o)WWOdV0+`}3Q%(TRzB-1gBvYSg#N=iua|fIG34a{DCKgR7MQg9+rfX7lm& zO#{H8KqGYN$&)7~U0pPQa?1?DA0&JC`YZZp4~>l61+w*`c|QtH|Gj{S-C~}{E;~93 zKqcnbk;$p4GSDZ^Hj2@|gL4k*hy$nnn9rQ48P6g?m3z{@>?yF7jDMbdcjwr3DJDuO zreSJLl&aUKevv>_eJ-n_60)v9T;2j0WXkUp(Kup^izbsgZ)sP5kmON1F+D7mAgY6^+xl3B~vo zm_vD#-~(7#SV%fLIzCrdU%hwlRLy;EQZlj~sPXY}*n`OJc((bp`=LJZh>$~OvI;kp z*ZlS74JibD;H?KhRpPn6kUfuFTUiJ+`VA?mPe2u>fn-f1-)cA5Ah308%*?yA(Th|V z+?o*~F)@Ggi(~v&EE}7fj4I43w{PDLH#*_A=jfxarEZfr>Pon8W^ZL>rD~QT6(auw z%Xk|u|H*f9yKDpwGWOp>jPl|lQFebnzqfI5yFmM2dCj@sqO-)Va3uAIMVL~+Zg4jKP z$geMMAS+VrEcEEnquXh!@b4^KRoCqix1XYi6Q&`O%)-I~*+5cKdk}v26cf|?%*>YY zz=Z{?!i6SDyV(!B=70)hfRhOqJdU+vY4#99OmNWs{c#xByLa#4ZrxlOg%b?jmLFh?iY&TE}-Q+A-IAF~s4=a^o*dUYq__2lFpc6V!_Kn5=8`uX#Xq0!NZv@{C1S)ml}?g*h1fg6jE zSO|{b*w|P}p9}u14$LiY8O-gNOLYK;n1ux=3}F`TUD1K3o_lx@&GxB(1~f|y$OQN# z73$59qdz_e6Z^TYt{hh3Hdjz80IWxwk2B=6UMpI9bO?5l{6QkPCGqHjEcporpNGf{ zY(<>kcbwC`!b9s`Pv62B%RntbK< GqyGiH5m`+D literal 0 HcmV?d00001 diff --git a/docs/stable/_images/Softplus.png b/docs/stable/_images/Softplus.png new file mode 100644 index 0000000000000000000000000000000000000000..93b4bfdc31de26dfb2bb58597137026438dce300 GIT binary patch literal 26500 zcmd>mbyQYu^X{fmP(bMvPyv-P=u!z2L{d7Wq`ONHkbDsp5CjFKQ@Tq^O1c~Alupk) zps)IUzq8IiXPtizOBeY(``&ZUJ#$^x%DYFg+Z z#+p`UMy6Iq23luqbSx|lOiiw^ak6ncArWShagt}xD2yzCwEqYzn zE^>a*-s!Z?0nT#ES>wJou8$vCSwF71eUT1)N`3tJ*fWNFw=?gGVy0&gCM-(2v<1sa zhy@5`Z8}L_sS@f`C=*G`PRLH9u|BxN@{Dx)I)T!g^WjsQ5(Y;4w1I6l=j*5T9ab*9 zy?41~HT?E!Q=`BYU&0%1KJZcOb$t$-kcfy#dQJ=NmZ+%cgpML|3ck0eAa#RpMb*5p z&cL_9p_>06{JJ0<3y<^+Cnu43ED;L}OVdW-%F0UqYBPVt!up!1G_k<$eAb&w$_*-c zro(|19yp8~9E83xXRck73$cmxV={FVb*M)FCjm?6@nem>trZ_XKaa99VJ$5!vq9@_ z&6Tn0`wk_hW&5N-LOaY}_!Lh+-VN1S94J8C3M{9_LZpNYn&XPzrkC#A$zm{VkGpTT z(W@sdFCSO`T{TlFHHLOTf;J&wJ^bu%{yC1>f zv}I;B+i^@L?%s7-S>o@jtC>0tq!SYp8b1#9$@bm^32u;+UQ%?EiI=Q+e3>^62MbFR zmTI?DviW9UXlu((#ky0DY@Jf+jgT|nklpx+{GAYa85s;ZI=Zc`t?ar_>JBsK1isfX z)-^SWIXM-#Ym@q6VSAvJmX@-9G71-T+<51>xoEe$kZ0Z^xS2isNwrm5Ty!W!=YaAQ zQIzX}y{D(A#_q;~et&LiyLN35ooSmPt*{F<8XB5pHQn2Yva&MWhK<$L_QwV5{XOCR zQM3lHh=RGt=`~_VUxb8sOiVlur7I5(4wfwIH{uf59I%`oD7dn>w?{}v7sM20hfeBO 
z%^4KKtXOEpl9ZJ6j`=vTScT(K367Zb139^8o5QZdoFxZNNqW)Fq`om4Em4B`>s`v4 zToJ8ls{D;_uHjv~c1_RNb>Ff@wyep?5RC*r6#lyQ9!DVdLs}nlf zz+PtcMz6~2!~5N}Bzu3=zcAOvfqsTPgZ zGevh8!yX4ZBF~j>si;tMnGX4@7TLVlYl+f(yBX!USh2fQ7PK;6JO19Awz4Z#nbUMg z;IWR5X49K%h?`Es>(SJ_p~_t2{$*Gbf?yNS`lEh`dSSUchB>$KkWIKQ@&H{@_} zYssaOdI(Fq66rI*n{xU!qito@p6tV}InN0_XTE;MC1 z?6@R|iG?)^e=7Rm0b%Kn14n`Vwf2OBgz~3Yc+{(@%6TVa#RG0wSzXm_dh;wHff*J! z>c?JMp7JQmY}k48{kwO_Tvuw%&kS@SC8 zvsQiLP<59AyR}w{>b@L9F=OM@cBjO|M0B_OsVK+DIN`nJDs;E4gWV;X_PBfD4Cojb zRIC>{I3C;!zx=UvO)tt`93kW$cFJmB1D8-o2n`Gj0u9W~%_Y;Ne+*nayn3iU8b)j^3@w9PYHW8dq_`MNV#I0Mm2;sG3pP!$WTun-v z-TIMj(5{lJfJ=|~z@=(db|>mX#2!EXkkM-tXz=XWvq}vPA70DJ$QmEK8S4!(1L|-CUS>Ujxr6m@E+DmCArP;89 znfU1A$k=L&uJgd(HNzIT;yDYi^c{vQ8AvrBzKfMVPNPaWk^A zo>-k~sr4aZBs_iEqeW=9k#kr<-5Cq91FwG@_2cVJ0yL4>Dz*EZ#(h`ghxHo6MziPr zq+NY>!FSy{pOn0t!p6kpIZy`&7#q`4vH$i8(b#|XTdGbI-#lL~A2p`D+S3S?Vo19)+i8`Ov zBnmunSa7z69G9vgD+Y&$&s_bXYCHJNs(I>_g4(fni-ZP-VLavxH*Vhi*uZ|_LM0g9 zGf^f!2C|T3i}CD^d7i(|01=)@4Cz&kawJ8qgpx@zvK$o=z_78OKAjq`4YGCe`S$Hw zg$BpFyP<57Edjp1H~MppgF{1YTNiHKxnr?Y0uh*l9A9#3O9W#4k;e$oC+Vee`xW|sU_Zo?dieesY)e55{aGuaQ-rhYn66xK_{`my= zfZIR+5T>XUv~e1L`||46?b{XSLRH4XeQ*oKHK--zkmOl zW7sX;4rhJQuK4{GzoZ?ld2r49N(ayQy1&o)rA(>_FTN%U<_DzJas1x-)7bdrsLZcb z|KePRQtB4Uhb9eXhhD_-MC;T~@>;riK_1I_LV}8?Gwt!?b-|3NB*0d(_ApYw{^8D! zj*bpFml1heTN{U7^Wmvg%rl`;b6OEY+>(;WzQ*E>PfdAqYS(B$q=iGkV=;m2vi*%L zzQk2zupk7W2-m^)&@zqn+0Lzve$$mE?%`~^UCPUp0c`Pp=hZ6aoVpF~vX-68ze)9c z4S=BZ#kt`!VdPy#2BFBH29cax@Y}ZwSBEpTYahtV+o+K(2`XH-oitn;E~86I;wq$7 zzPauw-CS&zZWJ`36RPSp741q}cCd5tV0ZY3*5=~i)^ZiW$MiCAwwEtoj+AT;RY9=j zc+$bF;<%6_=}qN6jFa6F)cC64E4l-ma%MKRnw|Bzm6ck$o4BL0NEDAb`doKyxc=gdL(ZG3 zKreWlzm~xj>>$)}+RQ6jEe=q;di4qg9|4llRHt495F)Ox&kV`rY4{aAz$2HGb5o=% zbU4T}9@n4prxm0Gpq@XMRzyfc69DK_1Kdin$R?E$0}11`JTLA~juzYMb35ex?a-yU zIY0C9n$ao(VNp3b5;Xv_@wH95SG-Qr_-?OGRXxdJIDZ~{*kza1!qQT8k^qqs+CF>! 
z{u@knb#=H}``Wp99{WB}!O?3T}z?tQ;oyYD^7 z!pM;Ru60_QA!u{GTip-hq_4?f;q!Oz&S+i^7Y$`o{R|uV8Fn#pGDj&@3ql+NJ^l5q zogLqv9>o=iBK&O69!l9zgn>DH-rw~0b#p_|%FS;F?A$GQtgGt_>+u^I(IBFvybvc9 z%_jMl@7o%so|^AXTzUW#+?@Jf4=)ri`Svs1M%(md4t|??!Vo4Y(zkrpr(9fI7%pFa z=udg=&K@A+sm5tQ1Vo{#HsKHt32FGO%v3JmxS`Tz`q4JL z1$^NfALVeT8)C$jA-u3U4?$plU{5YzUtemn;d2ZOV=h(zff?AV(EkDWR@ab$oP}?{ zUlzbQ`reG-f2Pet$0isOl6;>&MaRU!QD1hx_1wqj7S#=0S$X*wI8bVidO*C%P+&7N zGgFD%1K#}28DU{M8h)FOLW(yUXm&(TJv`7lj74;_9<}H8EZBUAkB_xo8VZc)k)hFL ztTmbl3!u&l*5 zEKM$pUvIC=*sauyOP}G?Pv3YTa5{&JL|WJN(whM?jMJ4$Rt1Y*w&(@8BqSt|s;}GI z7fgD)fw78J#R5oz(woaeXPzN$$wgvgACEZ`lImMT1w{`RpL`epK#l}buvb!&61SM` zFIya5GRKN*#=C*zNvpV1xEd~<$5SQ=n_7O`HB;F=X&PGe4Z<6^9UUER{8m(8y+0QD z5Ei*S6z37PD@U4Q;~tiars$-szBWhPm(s9u^%4TBs$+enpuAF-bk!-s9v?At4%$%U zC?&YjxAkP;^1k+=bYh}c`~GxTrV~@(%ePPK^~+B)7HNJNu}d$_Zg{3~WTO`^UhsTx z#QJL&n_dMZ9WTyWK_*?Z$hhRrq^VLB3l-2gSmPVSy>`GMb)$u#m`+n~#rAu}-~?|N zBifRb1lFW4xb!GvBIKR?l&dxCH!_3?w$+$j#8wDxif911XY?=pmzh+JxcV*ba(>T= zUa1;l2y?;@XzPW#Le7UdwC!%3U^I=+afazbxvlQ*?0X`%D>1G z%PA3#7=DfvKC8X=k%sq6Uih-S8QFEIycdaO<#8aU3{&fA{JA-{rM*3J?%iE_+AEvA zgy{JnoC$~WQ|Vsx;czP}udQvTk}RUFfk;#S`0@%tAl_d#YG#ViCb179$ot>T2Z6EPov#I(X?C(Z*udZ|X)|up0=}+XHSNO~Mdlzhao;@Wi>-BIo zZNx&>7RjG>Gi~0Cw=q^qrE%EZ>w4O~h$dh@hKUlUMytHXfl8y$@u#dT$P`J|?QcYJ z-TV5K)EvtHcF-HR)I9Qm-tu}3`)0g|#_T_s0yf&*VNN{fb8_sB#~>6lXlZL_LKMcQ z<{^Y!mw&J`*IHnymm1M^HYthW%9YbeNlyle2mgu;n{p0l2N{p?+6Y1!&z6&!4!6=t z1ua&LNinJw;Vfsh2vq?k;?S(b+=GPY^Za=Qq~Rby9W8BBikC>?U+oe^4n%UPO-L|B zRhpO_9~;81mqhmoYz$(tf{_`$lcAyaab`lP|8bHQZlCwLJv~YhW zB-v=7M5-w#%B_$f#cRQ8Q|Q{1!;`I{jh8~#x*gi8h6PJZ98YAeycAMxboi81zN)UP z+51`m5gG0wy!7Vf(;l1aZ#L@t?TyjwM#|AHJ^F-6AhdNR%5LnK(Qqlv6}@K9`p1dx*iBQGz-xpU|08X7F`aYr-(gSw%qNq-y!r}S z^XFq=AuLq3OEQ+J@n63_n3$XdlhxG>?sllmCC0^JgVbQTR(pDAsahA^bw6FgWOWY0hY zu+Ha=h5k(YRo&1Q^Bf!wE-uoKA3p|$+{zii2m{FiRKWOx`f|GK{>5CQ-lr%I3F@w5sT1#z!yN0q2(J5cao3(FCDk=IPd-m&n;W$mRv*pD&*g-kVyyZns=`cSL*7xjZ~iH&#N8ANO&&8=7L4?g?7g zJ#)~aaNy7?=9|%*lA5}Ul zf~L`tAWeSl$;+;$GYkwCcON|?2`hENvG7gOc;JSUUWEC>R*sDPL@{peki9k9^lf6X z(N|OFe#fh3f_?w^-mkK_6B@3%$CFi{r`}LMcX15p#1+35A04-{2@J>i*8?Wq+k? zBK0`3%G!4$&w~e-7hj@H$?e%Ya4mXaz4Hbj0pbj`>NoV?pve9S*A3j_)m5~CPmfQZ zd?n8qXN(9PG3$UUd?dYN}CPp%wuw_<~Pb&QC)A<7iRJIO0b2B6fL9O)6kEP*{z$)}5Nx4Rw8DEZwYd(>r8;*hS`( zw4D1n;mMI#*1H-CSf?s6R(6b?g9qq8)Ths|^x7Q{L@&jq76V`%HEE}Ff>tHmD`3|T z4bgtcH$tcFNMF}txk0hA8sQ-#;6;cIqRrp8SK!D+mE{MiKcFYSj2x|0ype90DqZrP z!RxV>!7LQk5%f$-V!m`~V*0|L%TmqC?3F9dwqZY5Ta+h`&VFaOtKr1ujBBz$*Kg!) zGKpH_B><)ArzEeVD;r7pGxtZ1A>|Y9Z3yc>+R^1E#7LP-E-7tL*6Oe!v(ObMlj9>(N8=hHvOmwhIJ~cgBsoieift`a@kUY!5T2w%uCLlMv5aVA541iI)A=fo| z{wT!|6ex~q)2jMt{(kF8*Q@+AiCuX^54Efn`9qRW=dmE{a;iOasXQw7)dS*@A70iq z>Tonl#TPNY>mFJ{pd)Gc(6?cBpf?cSq30#gfpc7S2F#Z|~nuVh|och~AJpq1hNwHXZ4p|c;X4ZMPim$#W zs6@Y?+83BLY7@(Tc-I4pPh2SC*oWP#m?SIUWM)zojV@k1WmkKG@8V%m(&UyL)4$tj$ySGvOl0SzvcB!2)fT)`V>6N((Z}fmSaVC z3a~S|C*@z39oyB!z+H<%*|)s^gr0*jPa!nye?52k*7)mSrau_Q-`7qv0zQH|kSoeV za3B{v_(l_@1xJ<)jRJL_JcX-gASnE7w{R=Y@;5>&n5@%XSd6u^lH>a#^@wPG%7e^g zCLqE=f?EbJr)oi5UD3-Cw`ef1qeB)bYHCCb|8L}N%h}tZxL`Ngu9|P!YsxIFBeoPs zzhyD`idK3L4cy1e5fQ{4mi?1~y)@dO6Mjx{(S{*8107kOx_ElI&c}pf+pf!n5_ay8 zWZYuBf$Ox_uPLxPV-oqPua)hqXQ;`7$EYe}jtQkb&!Oi~CZuGJEk(~)#wTJSZWV9) zHAf+Dz1hEJLSc_FbbOIvdPe^8I$6)!=BBOR#mc<%zo{7iun~j|%^Kggbr%!8THlRS z%&}Fdds%z6juhC>8Wgp*Pvyx2J-W&4iyl({VLReZ(C@gtsM4E|R9;jzF;;E5QT~L? 
z$AHSM`~1guu1<;<^Rm~?%fBuU|LO%OY9(voE9&qww|eyl&3elk@gjtDVE@2oZ@l4> z;sQGNS&7l)a0Lgk6&$moR;oob&>x;?Mp*c+lzS)gs2RA|*Sf`vub0!|SdR^HwBh4H zPKqJb%1Tzu7NYWV?IECdV++2nC{Vy&DB(C|ulIpY0Mn@tkG&WXlU^>zAkZHq20a6j z_#&zKyt);!6nW1R# zWNfSR(&^q8zb(*glBcaIPjs+RR@h&FwY{yen6H*!Acd0}5yx9%#C=3Sme2(3zn?|H zjN<)`e%uo$ItzL4pZ63oHfB|9;ja0ey|3i}{J4#M@I4ydZGDFPX}Ydj{TKl_Pt6=& zxj}o}Xk3IzWlpzRvb7AGF94y&M#x)#G2NP+{aPul#)yavr(GhX-1+9|l@)dDSeYU& z%Dj7Ya9aHo2++4dpa!AsBtZ84;>kS4OBsrn0*5bNm|g7E&+DhD>LU?zuh0OkU2@n0 zG>-obvG7VZ9e*Zix*$~W#8GBv;=&P(W_q_6WKm^P=otORQt+CSspFmKH->^Uqf1e0 zxq|eV@=KS>uSDyNz3e=h75!AfJJt)7jm`1M}3~i3LEVjV&#; zpqyuz4DxO5Mh_d?LiNZS6mjl1Q2>@f+(AU_?07(d`6oI$2&zD}0Rxw}6v(uo zIE(4(#%U|b?7R#M^MQrL|AgsK7z&PvfHE87Pm1y84dr})UJH=EX&KA3c&}bNu+FEb zq|^&TebVmH1fA=#QjWp{UpL4b<-CL6C{4e1ev-KU5=c;w$-VO-an5~3w(P}kyHBET zPlQNufV3PRBE4W!G~Vx7tz}>plawsz1_B#1E30*+Xwo;@^0hUqvrJ6wil|s-8?)zs7UPH z-QD%O(+2HHTpa-80=WZekY=hx`j-v}a9_rv?Wwo0WEFvx5Wua*2Sq%v*Nm67B$aaz#Z%gDoZ5yIZnz9&^|?f@5dO zHRaJe?FV{+l@7ERC~>>{_*m6EQOH~_a<;R)dGhm2e?f_hvbvSu_vV}46BLIXMYpLh zB-S(%*fLxz!$&v_L@q^9y!A&yHf*!b6q2LBObqk#=H`?=$-i`^_*{gP4Q<7+aPPAa zQmV!SA(YTU-g|K;rDf%{(VM5JB?O|IoJ7Ih342K;BfFgtRmGC5#Xwq&v74D@57j?s zpZJ=u)1k7p-E{p7?M+;FaJh^qQ~#eVYpTBY!?)1UI5A=E^=3v6=5qD)63mMSPQCVI zu2GRq)mmITmFQn@Lb5L|76?qUaR2qX=cs7@;skHLPW}0bSF(pBpm52ev5@PSP|fO( zgtgB`Z}75O$)YfzrAloS-WxLmfI3<)a{82$ci2|R zX&=4vIt_x)7A5EIo?b*|*$?Yc=Tgzq=z<7gzJ7b|C%Ja>Zn!^3(IS z9NMiDrPI_zR?_Kn$9l7gbz|A7v7Cqw9cX`^U;a5ogRRz5<{hM5P~q%IFjZk?i+A_N zD(T+dz$~UQ)P^>=3NZCZ>R}owFJ?$F4p)9 zOO3ih+?e7sETBKGMVVBKl+*+s?o%m@yIq6%JV2C{16AMy}AF$nl+Af*d1scydu#3 znC8}w`}CNA9nT= z{5oV}DCmYU8j@J1_!o007B9I>n9S zZijzZ^KELC`}dS)f6;aB;vC3d)-wX3IV1?JC2-MwbCL30@>ozmR3ci*j+Av-Sy{8+ zoUNd%c~Ul&=Ev6M(?13saFMU97s%7^&-$fFSEPR0!>d?;3dA%X3bVAO`d57m%ducO zNh}8qR0RxZK?7e&T!h2}l*K284v)_w;O;yZKDW@aLA;0yUx((sLzzB|_qEvk0X7gW;td9b(FPdv2N_> z5}@f^qocMdG@vj-B*mft--e7j+m?F|-Dgw#JVxbHtXQtK|>2lva?nt=lavka-^C zqg)OL#Ryi7^|CM@Hyi%iyftTiySNxn`XY+Gg4>|#4!@T@Q2kg&J4d^suVibp zMg%xe5EW+00k;ne$0y4}GRd)7J^J_SW%BKK&? zp(S$G{fuzSHuF!5>g0Pv#R0&?vq6*YLYBg+pOCKTtC5iniC?##b|Y2Uncc11o;cg1 zgZ#VNz|d&rP^_TY3&Oo#$!an6Z)Q-i+NAqq<&MCfIp4$8hNJYh3L;123AWh**#Q%% zgt?&VaxuDk3Rpwi+lZQ49syLDDh`CgoGmZcYy_Am&+QP|UqGF%juZ=D$+6&|_Kfid zgKA}(p`s(tlpW~#&Wen`CXyEXn;cB#&0YDWsy#;0K3nd|v5%&km0B_=E(ExHIZ3&| zyePO`hRb1Ruj}|GIH8!d#^vy9d92XAinsPH+tV@{4$d?ah#ddd1t@P(q z9ryDGA?`q|(jcg4mHQP?3cPwdqUUVK4TZ<^icjNyU#)<2KI)N(QIfWWkv|xD0*)Js z3B;V|ReboYR(`2+!^7HfKW+L{AX)9ExIY=GizsMYP;yZvlq1=?Ty(racpK}QJPFFD zixY9q6Zn4{6=dKZ?eun=0t|_UfqjODhaALZR1d($;$X6M7UX)vA1fhYYt8b-5lYV~ zlYQw``!pBSy~lE|zg65LDj-Uy#d4}T!L8Bu;Wjyk1_Bx^O7@=b%fSl|4~IIiz|`+D zML>t@Pgc3t{6t|Toz5~tf8V8}Z*F5pkn0owL0x4!kqNhjL%q0Z^sv(MEc)_2F)=Y8 zfB(-x!Uw48c9f-yps|PprCK`o5ygxc{z%7G6`S0aIi-IN8A-a!u-s91_H3@{+8yey;tcTN@BTcSS(tDaYRCVU9+=$P!iB`!e zVR5#2bp%6CKge6)Hi`#G%G)hdwV4~gHsjRY{(9h{z(E=Q#4C9W#ANYwK)lIpAg(1? zJc8|DamKpeFf1T`Es=LvYCXtskjb-O3%7c~ATw;=ZwX3ay_QF93;%0+}Jz$$i5h{-Nl7`xhWAeANGYW-C3pG)9g*OQ z`iiPo$S0FfP*4OKWzWkzdPD(Csbq&*B%KX;Mi|DajSX#*9w(@{&akk|JdJ7zx%Is6 zG%B$Hq|x4w0^RxZvIr`%d7SD8Z9e98MT;rBFs14;xA5+>qb&;OD<{S~#NKa$BUr;KFf zM#j}w&DZZzEibYcWF(_AzUUt>!tvJZTV^9u-7B!n_rcd~`O>x&yDJhm6r7qWDyp-?MLZxuC zU3y$J%s2jF_V-UMQobm!DiQ6#MFA^rc|)0%XjXeWV`nbV@Q!K(E5U=UL-Xn8b3V7y zQNRgu7FWc-kOl;ALiAx>+A2KtrRyf#GHM~XX`ikSFca5%#mQTbiQZaG%g-^G~` zD)W_KjVi_}PJ4}b`HKffxcL_h`MV$i8CQ;!N^RSQ-K#7cE!mdZov^&8lymD~ zGO$Jur$%Lmy{C3l4 z{aHN5N0$sc@}G%DJp-_bImT({M{`6UtDfO@{()hnN=B@sazv-wrXGVLi~iEdzC;nje-|4z`w3dt8<=LGK>m<)IZ8 zQsVx^eYQNWH?&{nFvpWHloSM+g)BdN%A8b25#SI7yMP6Wp1bQb|`#UL)OmK&5`(vk_ zYy0(-9vfV_92-Q$o6vQ@S%(3-LBupJ4u4AhXjX*krfL*d7Z^_`x{mAa>wEkclK=z? 
zTdckjI#O~UC)1^cv!i7w$C+c1>a|a+GvD+idUMk=6bVAbkV^2Pgkw-RO&PMsTr%aG z&NswCRKke^6K|d}gRA7HaqtRBbgQx|h-Dgn}q>ja!Y2=y;(K|73j?TDRK zAheP^6BV6`^F!x=Nu~ZgQ;8y*g~+(jSFhfym$~qmjkt-kTG9c&faq!#qow~46CZntzp(*D zP*9%TnvLwuXSGoOP%pA!g*HCwvi=2|6!kJ%=&YyAZrlfPB_-vO)zZ*gfIz7F?0s$6 zkgcohxI(@5n7e*S)a|D$F*wadE}6`96A@JmIa81U9z;RCX}>tE5y*i^hx5g-%R0X8 z+Azi76BJYn!!Idf?E`^?=zowZYf0J8F+cwsPKB$uE=~NSVJ4TM24q|Q!Tl`b0*_OFFkzN*ZYnzcQ5)1o)#pwd2$#w7LHC^5y5%3o6j`6orbuIKIit zRZ-(xHk+JAEf>YAVx?63akNESnEe-N0zPb1NVcr68L?rbb_wOOR_5#@rOvy2`zFxs z017LNaiMx_p`r6@f{tFT>aB8 z_u3N@02B0>-|tG%ubWZWOuQ{`-n;ym^kvl(TOqmL;cmABbItr>OMaEtJts@}y~?}m zPB%RFX%1z>c0YUeEYH3P2RRI0y2BI&B~Te2z+vZFI$lRfGNc}Lx@|f|g5U_n)}}Nq z*VfzfLJ!>su}FGU9|Sg59+&Ovp=_d}eSBkB^8Iu-3vp=8vluBO+&w3y4Qhs;=c8_h zg-X!b#ZZjQ5wWK!_lswSp@Wb~(+h$SYEy#)R7vXCQ)>>!nS!17CT5~vp$JaS32eiT z)oT}~Np-}_B#m3y)6%Br$SJ?h&K-(5zr0Es;d&VzO@R0W1a#(~Tl}~flamPjMWfS6 zK#L0|zraSlNKyh<5_d7UX5p8d@xv<9_FeuLo%cA%br+#&B4hEOlh!|8@P`ExQ{aaL zZFpN2p!n`lk`g1sde@{|HQ$?+so%-~qYkGKLSkZ1iYvM!;d)W->FMb;tY^QQ+`xtH zH_wj2mS`~i$2Er1`%aR1LN>H%%XNN{*xcSG|A(B&^F!SkE8hKdiEN6um3`$JBSQ#7 zOc%sXvl@R2#S({WlP%Ff5HF~a3}`k&&37`huvEM_cNdj5^iJJ)*O$bQ{sE>@dLMw2 zlj%!I{JJ~x!2Y;I!<myP_yci13E`K*|snOW&^!9|6% z&os6J?yd)`(d1Y4389YX0}e?J>xJ2=3Et@RJZVvzqPJ6Wx28D(Yd||r{F8-*q@4GK zU4a+w4z?7!a>+iT+{^BJV4N6{_eJYwHH)(9d)vY~euK>SiA$4ktYC%uWE0-{S;El1 zL`3s&^9mSk`X2;Juf=KQ?c+AFWQXJ(+7n8^W3Q88jYRTVX6Uuh!qg5iw1XpvU-eVe z_`vPkx9ujw^g!;7hcK zq)M9|^SPNH{gN%pn^`7NN)gBX?mw&6^{!U>@uR)0`^WGTQ;y-Kpn7a^l}mt98V^dL zo8TGmyG6ZzL_HfX>EbfnH)!YYRo~H7qC2?uhjv|G@iv}$p`y717MAt8jjARC!kn9` z1-?*`KGRXi@ZROcc)UKxj~N4~<$*f$H0G+^31-5;Z_kI-MWtO#)je#EcL+Y>(ItuR z?=?FZR*x&i3fz8{z)z8K{Rb7Pb`mn5bbNhnJh1Y9hbr5L(lEoVFoznas5*e{BEob~ z&-hw1;UOYCJ`*jZKW-@MR_`VfLeLlHkGr;d{aqP*ql*yk26yHB5}u_S!433#`c(Q) z)30BzTCzUWNJQj1#oOiNssm~G2?yeX#+B{&_w&8}2-Il(C_ASoQ=Y<7jo zxeYs3xie&K@jJV(xu`Rz7g2qFR=PilG*?6u3l*xKmjeEz8NZYc7zyI`_1y-a_Rw;u<>2wSlgQMEwtUmLiA5x*NUv?^5NY@H>+bal z@AQI99(dAtTUcsbM&O$6=jl}X(qX3V!ap#Jr4QrN!Z0Q+4WoKcvqG%NZoysOy5oBj z3OvX0f{;#;b4b~;+19hq^z7^e)z#I3-OmF9#l26`{1Xx~zSV|)*j;+!gcwY;coGIO zFhLPlHw*L!3Q5;G$F*rRXw!tAj`&EKhYugXh!Y5AwO3)l=H*Lbm|!TJm$`kren#m1 z_2?H56cv3cD{s`UEiap)>dV5yKZanMTJ7qm&MnpZ%iU z|C2rg(j%T}yx+}oC1z%3k$d;>xs3ZJ$f4C9HA)ZD(-KC5g*mO3Dhp`-0Rh!7at!oO z%i89h5|f^_G4?BT);mtj{AI?J#dkG|0xO{ivxZ`Ie9fu~ezIQlg`~r^e|x|GZdL6` zS^=N@d`S*U|9HCO-CPV@T)B59^4~URZt(QUbPI_{OSZ6M&f0#x=+!=4+%del%@RMg zUm{i3qW~A)eHo}IG2Z&Si+cX;NKyw2j3CXwbybxmpEs3cL7e3iFai0NDivN+k#G!h6 z3{zP!vwvZEWyO52!XP0{#ANH{M)dw+;{BKP;8kKYnslHIFO$ z*^yf>$F(@br0}LK{z_J>a&+-?whe?5ADddxA-lD%AEr7PVdP3aIY;a@Ka3rr8pL5f z2>Ssy?gyBn3KMcJ>_34{4~;9URV}6gn+WKX{0GtD;m-jd^10-Jng&B>Fc{me)!+*A zu$($yh}V{e=^&GSu6%|k4Fms0i-pP6uX`VQ5+cb2{_%~+k01M)o^6kps)dO-Wh)A(K8iZT7@(1pk>Mc4v;IMe8B`Nt@Xedih z3Uy9MlqX96tIYlHR>fgyKmi;A7qhbfH#ZrSGE%$Ga04tL1kdugzG^P`-`g8!sQKB4 z$nJYRleDsO|1>8+E0}gW&OKn}fz4_u{C5NF(I&qkK%arpUvLTz(2lLd z)zxz6=v^0~Q>Ps*mD18`EzJ*>epo?7Oa`y2qen*&QOOZgECo5ak1&!u>p>v& zz|b)H?96~=%eNd?c-Rlq?Ge0k3^-%ftw2x&{fl4SIxDND*6e5K{Iy1$iptn1JF~S$ zJi_c)rz6@F)zDo+#BHx?)fRUMkGx>CFilBWnOF6j2NzdgkG^(nKmdHTU-Gc%)Bul^4g@eyTs54~5C3fu1vSNLvJRLT7cb+^B78wXDW zLEOSL(Qpxj@Jcbsz%kr*<$CHZJT5;!zi&K}@3fCm{r>Zw%8v8*T86J{;&3of*GgXw z$KQ*%;Qz~psCT`ekFt-XLhpb65EDUsV4f0okMnc`)(8xkS_4{!=fd29Sv%VHyHPAbXd2s@RVYicY!Os|hkmpJ8MzAMzO2f*^9f~=J(5Hp z+YLyFi7^HJAH@8&6YC}8%8fl5PenFlA9#x_JY?c4JoO`R$Fg#biuqpcT$R^vGvspQ z9BW^-eQ?}^Rzz`rxJ;c<+@EJ&+riPXv%f!LJ3l%3ZX1(p46j*Iy*c%{Q^yL@l0CY+ zo{QcH9H(AguZaJcGWVjRtu3anuP;Sd6&6#sDo~IXQDCFtU+b_h+fr{jUwehAW%Z;% z$dcukPG<|%!#nwKJLIzQcW23hLMhm^rlzK(^Ix3T5?S3dZMaqcIx*$xteF+9q<{E3#YBVpYnX{$~!JA&~2Jg6jP33l}fSz(X@~ 
zO+m+NZf?|u%IYPYyr*t2r-@<}Nv_o%px9!N<^O*-7a9{>-t=@=M@TyMJS zvS)WL)Q9>X;*u2~p)7Mh>hSwQ4vW%U8ag|kkau|=TP+Hmq+ zCI(Ob{n8eeP=2@j_wUamJB*`=j!7)kC$TcdDvbuD|CD9dbv zY%cW2L;2AKlbR%Nz`_q6PGj9uE32TOcK~Kmg>k9&jzpPtcnk>5${ZO9Nn%0*>-hFxRJi(FMSeJ2a3xQdfP@kEEYp=vt7*}$|^E~f3Ig+VwXxI*k_bQ;S zeHee|c4eeH$Q!cd7aN_Ye&_msBJeDyHy z`OY{TWc+<>KU<*X!xL|ehl(EonTuT=GKg#PYn~z_|6PV*B99m3MYqqP6iZ_W!SLK^|<)J#OSR_c zGt=U(`Py8J5)){VykkMgK?Y{{;(B87{ns9dUGYlv7Y( zsx^Ok=i^g&rW8_sn`~DGB{RkX@W_FOe59%tp#rMJet{;x*h`29@EEK$jvLQ^-*Ik6 z_Obp|TSBc8jC^=O$<~;kQm)aXuQz!$qoxwm)1^4oTXz4*cL7|7xZvq0;Gs?7S0qph zqII7^JWc1++QvpwULNA zFTyEWCT&rM@2$M0W!CKM?61HM5nL0M-!L#R9vR1Qpn^I)_AG_#dJD^(aNA3aJ0FrS>2b!9aS>IVU| zf=WxJyC%A$%(p=+gN)Y>sjs8GJ)}TQMke;fi&NLQx%Hf{1pKe&&ODsTyp7{0OIqGY zP0JY6#EfYYvP`nIFrz54w8>IxDpbgRbR3a3p{R@@OKI#RTOC`)gz1zcbd<4=98Ou! z2`6W~-$yi-uIv5lz23Kfoa^E`J?HoQp8LMP-~0Z34rmk=hx@NE*MJN2f11shq~}wz z*)p5cCRAQ^45$C8VoGf6CQPCOJ5wueBgo1V+-qVIhl9FDtaTUuCE7feqEy{UB6&pF za7yHEQqo1KO)(olw!!fM;w5|H7V!D}h>k_>q^z>Wrlt(@@0VPXdI)k-Qg*iL%Zyo< zkwn(!{do&KC8_u8kgqMEgsi>aso~_<&wL9Cr4WX091LChpiAJTOQOS)oufUXl1OY+ zul7vqJZpK)Au?|N!Gm!I_!?uCPp9V-mIvSIM8BKS`Fx+$Lo1XIUvP{(9ZhT& zW;82?H1WyDGp4t< z=4XQLa`AL_^6lIH)w^`2N6SgocnLBQYIzJa&3KEJFM8)Te<4mH4WI9DbSKI9=hYFq z>QhXNbUHX|xj6LFR84_l;YpeTpJdh5t%(H%mKTNcT$Lt?2`H=2(WtxTmJ)mKn>26mMua|1sUgUbaOS&k`pHr{77# zgABnX-yENGd*_=)Drr+i0;C>ZaFWzV)!gfttTZ)K z5UZ(f_47yJ`%ghKRzBGll1>9RSj%9wjYdER%#7genXGA zjSkR$0yQ*y_Uvphkz&salSoc4e((RH_u)fdz=BeK3|!g?(x!HI#^PKvJt!Pxl+9u? z*Ct=9VdPz0>(@6n3Y@gJH$?%D75&yYIu#M#3V1xPLP3^SG>^yoqJ%plK6U3^Y!C6x zJN|0bHjDdCaXv_Q4q%Sa08Xr}t4rJw`GXyNm7*ACeSJz}V97Bph6}6_aDv}taqn=&!4+A^a`d3$wqQ+Ha3~E!r70Yfpf6Q^GS=o^ z0}D)}snV*zGjr)$3)gZk+D-d$&?ih5Sug3{JyRH5dyXDG`eq$~d=jvz0P|{~KQ;+P zM#NL_8|D!c6B6WH3Zc({n6N*2@;UE_#73Wj(2977nwQJJHHu1XY_ui>pIi!!rs3w{ zk&=_6Hc*E!x8oP3&ED_dRv}s`GYt(4%#m4xgGJq3T@R;=s3qX9q1WyBJZ^*o)JRvC zy_>uHk(`eL_WF$*T`=y#I=mu2%9e!aXlWoLSF6N)M_(O2*xCGpNd(fsxYz>@;?0}w zk%yJPWJUdO^3TNkfi(^!pp?=8&XOEUNMtOH8+fXn07oS#Joqb=o}s3y%0iB08dG^@ zO_kjGS*eMM3|Olw8DGyiCF1n%bkN^`s@ z@j*>?1Nky@aBy(Vb%Ov~5vQgi3Z!Zf*MTIHBM4sK)*#6Z{TD(aG2eoJ*~l_Q~s*TxK^`%X40U*yL7xUjak~?mC zm|Iwo&29WA94VPxK~H(&_hi`rnef;y5ZAt8|s347amBp?WuRCo8w-jIpn=%&)lqXa&u4JtC*_E!s`@>oY{J5 zqBth>pSh(ao&EhZNpSoKyNv32#3;XC2cO-7=We~m+(=7)eyw9YEap^Z9Wf1wW z+h?kw=`LO6Kuu%g^Uu0;Y#Pjr<{4sWN-!oa2_I|s?k6+eV!A$SUh6(poe)-_kmJ1* zpN+zV6Q!fsoA3)d4!-)UU~y3}xq!mj6r@{5$=oHAvgqTQHxCEgqda8xk)nOOyH9R2 z#Xfu}gVirf&d=X}G|KrWM#&tZ1YcucHq9P&6mGNN#+Qw?wRbQ+ccQ6CJ6G;k`AK;w zAZdI*u&Qof-Q>+}}y3^x+=O4w}jSlKhwLjg-BiSUDVi{@X5g?w?23_Qk6M$RK}dgYQnG6-R@3R71}v&GLaM7@bUn#b63vgQDXC0%2 z>PGC5!rudziKr*{zRGb3m#gp|ZV$)*e9YCu!y8TIMo_g23>6D&@-e98FKyaPpeTDn z4Hf2+m-jvrJs^a?NrE|rObo2yZkQeE1^ot)f(op-fXA2*RRd0y-|bOcZI^I!DaUDx z1MoT$>umKHCF*xj8F0{b>_QT#v^11mL>uXU%0x?x-~77(ViytL5s<&Wxk~uyfEG}$h9KSUqEIM!DAPdObQtb$rvbC*C;i9j z+E=fVTs6`F$kJrY7e$lm_pHzE#-Yy%mT7{TyN=OV+giw=(viK zzl%c;XKC8r%q>_O9-u~XJ*PV~X)WizBAA!L+Szr7X8od%X(`1rXVDLpXPBq9y};B^ z9LCoyKn$(IR`|ZmnNwqCEUXn6)^@BO=L;+w8X9Og$nP)RzuGz5N)(eTzbX3k1p+Nk zLp8UUQ7(Q2o^mw=JqOn!qO zIa2q#yrEZN|1RV(Y(v(!J(Zc_jHcYats4qYd5F$MYX`0fA?G$m=>Cgdl?$mkLt%

    !S}@ejVG`ghJHhY{Ri*GazWU2jU7l}{W4Dd@ zDFB}@V>etzJcC!Bj3V;sjkVP{FR4qWfwmPe^B5f-OXgX$ zwk9i1*dB;9UfYV%#Nv^5MMO={H)>arW6lBHD$&dGQ#(D>t7|>q3q*c4*7Y*qLAH3X zu*a^eCW(gr1Gim~y3eto@0>K56}R{AvtJ)a8mxJ7#}JlyBB-)m7^T{+tLu+ecS%#B zkPSlI3xvBI#2ekEkE1(;Rg(``x;#^RUFe>OlzBz^1^{BsBVh zCyJKOC8J4};y=<;?*&|M<(>}jWGq~9Re}yT$&A+|lgWCa0AZxTsbm3-$OX2g zbFhI8Kq97-FSn^%;IfIhM3>DFEg6*Z~vi7^V9j$G;Y&AS;l}DBhZc@?m3RI4V*nBKVLfOgAP_1tONR+#L>Z{5$hlJOX7McaOv5+OS z+k2!1NShB}EylVT#D3cjLxJS0)Lg4XYCS78)m)jbJDgS Ljr7vD9Ps-$MAlF* literal 0 HcmV?d00001 diff --git a/docs/stable/_images/Softshrink.png b/docs/stable/_images/Softshrink.png new file mode 100644 index 0000000000000000000000000000000000000000..133ca0ecb3d5789d7d496923c76df3ae552f6faa GIT binary patch literal 29832 zcmd?RWmr{v^ewyqX%vudK?#u*q{ARoL_{g+knZjfP>~R^04W6(rIGHCQc6U+8>ErW zcP{iCJ@@|K5AT=v!@c`?&U4s%yVhRo7jup==9uf@jqCD931|rr1UY);vdm2c!Sq8A zjA1++_~cw$|2X{bh~vd8DtPen!n+d$@A2&}-*!Y0VgvL)jQ7&X=J272ldOi*En5>O zR|5xQ#M;2g&dS!w%G~gTi?M^Fxvh-=4?hn-*9kKxCp$4--ap^-*gBZi zip&KSx7fJ>cMTQe1L>uW=m-pBGHexcPeB|3Uzwz!l> zG&b6CXwi{)N?0cYx?81o z(ym*-S7P#7!hjF6jp}K=`kX&^(!@#*{s`!#AuT#d=_lj-_~pxB`>{G^78Xn?DJipo z;&V3TTPA7RMIY5#H|7h5Mn5FSU##%i%W|35JE9dgE^AJ1wumnd z2M|krz0-I0;K*Sggt&^nygrTb~?=aj+OXsgD(-CaJL{z6R#dfcP5fxEk2b3+vsb>ZoW zDV*@qKO3#j_kGTL3~rL&ZAss%Hy3}bE-clVF80EUqz;RFwT5>K3WUjJKSWyB*VLR4 z5h?7womPBDOa0%gqcw-sq`tP&$a|8Hk7Q?k{!;rp#d#msAKyP{N>0Ibel4^&(xDH< zcvV{3@A7qRdAY4vbgj-zu-Z(};-fWpPDh{HQ~Ql&`8U|*&skF5hK3T!$jhsr*+)(Y z3VvUh`ts#!>E>w2^w-x|rDbJp89L%kF~Y+R`EN=bLt|VRS*edw|K1{mul#S!`X?r(fwT`lT>K?lCO(p_zVYK%uE6>jj zmT491%F1F-wkCw@mU%G4&6k&#Q}gK~KHD?N7PBcS!3BdJgagIS0pa1q7cXAqGpxbe zU9M-!_c9jM^&lR8ubc$yZYE2yOV3x*V`IU3$9=vxkNWH#yo;AEjm*!7W$2U`uKnn6 z2{-S`U^qrE;+NpN-;+OoApi&C@z7Adc{|(cj}F2xI$=3GJHCbT9h+DY`)ZrGorMx& zA0HnP`_UtE;fzP6rKJM{1NB>CMYA;1ziFqg*a;3kVh}gksYt5l(#Wb@|LKoS_=GCKeXIb+6#F=g(C=ck7vaL(9E)5@9g#2nedfga?CcEB4e1V95)> zlFuV2CA}+su=_KGCtYfDI3P;eSIT33?xFCY8x|Jsv6?5W3U*yOu05Sw(@Eic`jv>A zr{@?pH<6H3jGnVp@jk#9vK=DHA?oZv$%?lSkjvy)2Ci?px6wmw$W5jNInV?0C3 z=bvh#zWWk3CG$csXLLip2a-00BRDg~tgNhVi)9;pW?xT~ZvRNt$kNBSo*)%A^6A0J z)2Bb%4B#@lA>mTn9MzvbE&oW(9E~=OzY8L zs=|%pGr$a$JNbg^w*Lc054;#*Te*Y%J!KV@I|UgX7PU(wpV~^@9ZO+?s735fyx`Wl z57VT+y{qajLTj7HZ%}>Y*^3v3pP#UzCxR)!;?gesY7=*lkZLsEbK!1eBo!sMW+1#} zk&vL5-rr2J5nyq!^bfL_=(DY$kV{gg4wJLrTk8r*j`z6g^|M(xF)^{)rtHm|QyAwZ zU`AaUr<$TqB0)^PyKVVau&=ATx(a&py}Z3^zkj%u%1ul}L@r>`RA?oRdyJj~2_lx> z{hHroB@_ae8VrH5_r>07yAq~fP-rN*v9YmBtMXb&g3tDc&ejrG!^)bPoH%fte?Wk& zq2Z`;A9oytJP6-kONS^n(>#Zrua7hK*#KwXu{;&9x;xnC?hb z&$HI~(&pCK)I^Td^yXQnrW2npU8*L`PtbE4!N#bx(Zge|iQ+es`8nBaEuEa4Oc+pg zqArx?WSUy)Krz)K(_u7s!+HGv1)3D*dU9}KU5Gz<`ZU{V<|Yx76n(l@L0!IepVh4Q zT8BE~2RpBSVHs}d2%-;xHO~eOt;;ZJ%AeB03?W2gewyAxvieI4K`0mo!iGCMAV~i%Z7Xm=%pXuF*Tv z2V2CD)d+b@Z_JBY2z)qs;sjFA??5PI(~oE0CQ)>hPH4DzHsexXzO{8DPfJQ+AvOFu zx*Q)dN(PUPj?&uhgiG&NLb&Ji-gYY48MG-|Q*&DA?-_A(ACHiq^I_!E_xt31Lyz9N z;?*kz_8#H~cY9G)wJAZmqW9FJ-Z>`qrl8BV8p!dE3kg!*S?)hgU>D<59PEovw#0>e z`*t&wPWTu^ZG=Z#NRXOnx;r~*z(_0K>ft-Z7?v|{&kMr$10MK}H)srS58R5hx7xcZit z;QT~5Sj8^$Kc3AO8Oq6c01|7!zs5$ky*d+68$x9S!7xjMt2@{H>3|qyP!=+_Yn5$D z^4Sg(R|D_e8-av>$;rtn&vWUHfcCurrMtzwIlkw-2gMf(u`c43u6#LjLv<}B{$MOz zx(*T}EC;b+y@PG4^8IZ~uZ;m$NIDIw((={=E`8Q!-`?P((~fi(+Mk50staeb)nY$+ z5=(f%iEQQP&up(P2dklSh9U0_21d_0e&qdz5ax>a4Xuy%9!2zgpZEj)>1j zPC0x`p-C(%DnfX!Q7Wra%*@QV%*IK$AD5Mt1)y0t>ZqIHX4R2Gfrdy}4X}~Pnq!3P z8+j|VA25CohdUhG+Zd`ZcAn)kstZl^XfjRkz7M@d52XReWRN|AebE{s*#D|EJ@|cBujmw6Ok-WHeBjs<;EU&FOi{vbSQwRm@fk#M~ zv*Si1S+p_enQd500LuwSNqTRUqAOF((b2x8VsAZW{-f1%2$>i0l&vRc-iA9r8=HXD zMJg&Pisq`vv9a3oE15~_6jW4302VspXDtc_N?dIN@836UetFJi$G-j%gNWPGuxeOj 
zF%X&afDC-LCa6j#7E6C#Pfu`Moo?f!Cep|?Q_jmt%HOK zKrKz(?87G{c-_9&6Pb4A2`<PrCP6I~YvHzy+X z?8X}+G^O=mVwUG}T1RWC^lJPtuwd}f!mvPxUp*r0;l!6GE2R{FC=+$Eer z#O_tlqemI*u`pE>Koav4c=romD14T}dB@Yb^Zk|*K4RFBqS_Ik6r7S`xI9`5{O}?G zxe!K4k09QP-H*LOfrU#E{F!CzFVRbe(PcSxN~YhTk$bdN`XD!bj6STb^+zSs6H|xrXbAzN;3oAo!ersiAr3R**b9ZkK8|kZ$6ZxWs$1G&k zeZpgPni;^NY>r8D&9l?DYL>_9=Q|_9B2&Umi;nkxR0-f+7e57E9nUFpJw3n&N}EmZ+o zgrR{8`xuXyxT}~3PNqWG)6)a{BCNCd>k-0AF_;xxI|Bs!$Pu$%%G8eFXV1u;76%8` zvDch1aw3d`9Gc`F^)&F)iAoyZx_Q$dl3bsCKUp@C7Ld|dj)8$u%z*&eBOxJqwMD34 z8x=&Mgd+gGW2fvDE(}z_4!V)uv8n_nv~I-oUFGR|8s!eFT$CT0$cL# z-Mbt-Je|A8`QShu2F40^oUhh!r-1CwtE#5QB#QrYRliSCo`pD(!Hq{o2CzG&{&m&p z_pA5*xA7~+52*5c|DK~I!@=+H?*6@)HYy*I+?X5WwMM3Ab@2b(%;5+;jrLWWWixwZ zTo?ni=JM66{v8$_#ixru{e2_XiF@#vvkZoYs0&QOO{~8X#$!00+zt5e-Cl3BYI%8P zx&MWVmExDX79;X)OGM}-7v<%~9`7%Apvq^Ps5HC5=V>ON9`i;t9s|`z3Qh%9vRCYd z$?Cm(op@6=^*mEr?02j#LCZiCBG0IS!rLOXB+DaMy<%J_EX-yuX-&C!T35=^FDVKO82MLD$#G@8QJ zp<&_L;SX#=?<~9G|GJ+@b+1AHWNC$}F5DFa`Ba==X*p&{+FheC_F{(YP#tnGztSTc zq!;NoX4>+p@xA_F15e@NeHV#YSDHhasDTxqc_~vT+ReD0StSM2I_6we%4f;1Ws0}b zW8Qf(N)m3?JS1}iW(*Pd7~~l=vZCxA0I0WWcvl}6OKJx}QrmlH0jGz5uY*{DoYS1C zm9dJSH}{86Bb5y{+{<6O7x0#fR#^4Q7%K9HvrUoKSkVBVy={Y~;i~RzBU~WckK^MX zy?S*bJx1-!MLZQ1m8B$Hdi_6J$mHZylhu9`XHApy!-ms54?aX-w1)o(F3GfRY_hC0 zGR3F>MhH8#9$yeS%xICpL#ZE(KJgv9KIq1|EpXRKS*^Pv+)XqDtNr^g2>+JTrL`sQTO3#ce#_QG( z*7g)CB*bocq-=XD?l`?`@@E%fGn$HMNBsJXv_{^x@#C3UrKl!I#MM*!$%ix6q6u+x zEZhhnZFhk!lS%VS?5Cdt0q=`>H37xXvKy(kXoR>lJT--j;2k?ACnIwNKypMvf~{7v zYN{H5AAq^{)9sG<0X;r~i#rC18ckQ+;laTd&!0zb-nuofdkqr&z0Y?p-n{ulhYnyL zPEJoK@X)?JpsA$x_Vys?$a;7bTk9|}F@Z{y1XSYo?c4bi5dMPTG&VLo4vvo2jd?$A zX=~F1gn9hpg^8A0jFhaL9E-H{5YMlHxdmk4tbhP>V3zUN4{wJ~A5LE0dH`=$d-e|w z$0|0gWcBmyotaUq{zpmI zUu4g7Z#!)+8Mujs#~m>QJ_eu+g+)}9hM%85|Mv$#iZ5TkE>~bPFgK`D zC~=@9{BJgyEuB_DY6` zlf{f)vJ?dc1v3XnLlXO4H!BQkkZ?eppaF;(Z{717`;5SbRbQizl|^%v^!3P>%iZ@+ z&27(CN0((w_6^szwV4^F?9?hSR!F)XzYLmYN*tz?!#Ey7O7?htLR^m<{A={-P13*&P`FiE`>s!&E6W_6JC>ZFSj-=vdU9XlNEuWt_H0=wLdWKVCA`3sr9#PqZ z$QhAItCUE=7~vsFa$(gV+U45d078)8nP@PV$Y;jKp5h#~JZrcdH()2PZJ8c3(J4`k zII#yOE~@_a+AXCs=on{obH7%Ab9#Ro6OumA?K{TC_LI?A|A|^Es|9-(hQ@C|(i4m<2q`0!T#Mb|mrWn0Eas{dM7S8?D9}zb)5M;WS zTX|SH_|Z{-yRtNHdYVJZ_JW_%NiGJe5K?~bJewEtMi|p<!o zoPougy;Us63Q_zTYdq%1PoI8@Ha*5iC;IQYC>l=vuH9ces{V$aSFW8+*KKidFiQ9% z;fm6PyO)qh73Nn(2(116+AR`}e_;;^$ry(aidNf!@R@{DufA(P)Fr*pEoF(^cd;H> z?wLNih$4VQvBoCp2b<3LB9CujB5rr|aV7ZYXx(VsGp>XmVn*dSD}|P-{)0Ee+V?1$ zy%aU1m?;Cq?pX|Q6%(N|p-c`j(D6SrF=gKKOe;-eQ#U|$${WxHddVXsb{OSJkS zKO@;V$u$;R8~1QK#{bB?WlCDXgYjVH>T8bkyTgy7yl;`y7FDw9{Th#@*$00Cx|UlX zdt&YA3o?7}U2=bmxBjzEenT&W{l0Ut)$-Koo(z61l6+N`D41p!dJ1U=u~3Mt?lgYqw29Xfztvl&1~>eAgmnTonHdZO zm*S<=YYtb%n?RE8)q4A;p?szZN{ZL}gKo1KjD_Ww9MfauoGW1xAi?EYbP(WRW&Y3$ z{u3+uxnmqBuSyg+OXr`8M-ZYQf;u{~F_O6{ov|s00xN9>Q;aSQQ$T7FC083yU)U`^ zLf|DZ)w_8^*4XmJj+Uj@V)s3 z-b~8hiAfivnvU80Vh!sF9Ce}2{{l5Rr|N*clLMnqcvb7sfoTW;#55AmRa&}tf-{X1 z#bwW*dcHF41Jok77KHsO2ADd8gW)+&+&^)orQ_piDch>{gtx=9Snh~lOi6y_>>*Zb zoauAOv+DmdEU8z&@LWWPMcR>3R}S0JcE;a)ulIs-|G^X9@S#}jIUyC*S|G9_C13uE zBaZj10!iE!yOO9YOoSgLh=wnjge~`+6dvB+pP$X!gs&y=c`-`xnU8#FY!v;^q3xbR zvc?oqIj6CQm?ZEK>Yj8~U79`y45^lianI9breo%Z1LN)|okPIysKeqA+-Pu?c4_xL zPvK56UU=cSLI;ROu-%PPIF}YO<(GSf9~h`&_nx!$SAFWq^3M5Hr{c+@*+4LG0x*=7 zm1A`#kUzX&hC9YT$3!CgG>tfM0uIv8NO!;1)g>Uo^%K+n+KaH{Um<9@JNlY>^&d#> z@nv$O>0*<1g5OhC6F1?r`c?C7g{ew~CXrri&yLztX7&BQ@e7XzImoViQIARn2Lj+= zC%om_spUPs54Kv0iyQ5igjRjsaK45u$+G^nuQW{@i@&3LYNTt`3#p8g)EcW5Hylo_ zV%3uuv01&ubkZND{KuUQy{)J+X8dG@X0titnndwL{&LsC&SsW4LzI|~SEr>^1~Yxj z9<6u6!_aQ8Cbm^tc=(=jUsE{DDO3+z9oJ)NxZPA*hEUM{^F^JftZ#h=4xtiSA}S(=gju?<_@Izg0t{WAjIq`27+VzzX|TwvHXAoa zzXmqPY# 
zmA+p}ip!z{=)O?%U0zX0ZftykMK!3SQ`~PRcoENF!#~x+NHJ2_DS)ru|;X?L-3ZX_oshu?GE3e}{SmY0iC>bY)zJVHptla?>R%8CtDO~h|_ zSRc~P32|{cs6ga9Yvx(p%gW-rbLUPsfjj~ek4m1$7$t&MRvh|PKv+0(;9|!ad1ZhdBs2)It;Z1UKUnl767o3~~2?z)tKYwnl z#YjSe>J#IWlPto*UHRInQnj_UuM-n7p%jsCoHujp&Ye>L#^yiT1JAOQ#>U2G1{wPz zo|aI?AiL>&GF%6}s6$OBlk4!)3m6o8blH-~b_W%5S1`Q< zcVC|G0GV(y7h3oVSI!<%>kk@~mXLZodroaIs~%IGyypQ9vTD^wZmSq7tI=LpJPkDS zX|3S}3yTzPI_@1H+JV5j!{d*PW@GD{n|~N6Aw;5ttZDzL(gK}P_d14Ry2N6U7rw^? zI$4L$x7^E7Atny3i8Y8DHeTJ-TAif`A7b>v?;aYe(2SUxoei06j?q(HP8L2C)T)Pu zhFpf|bpAq-=RTWgIUJj`zFvFVk@e-)%PgylTsW=-)~{X&Nj#$S9?>Eaex1Vkt-amd zDD^Ms>v|3aTu~*W6gi#jqiIFQz5C96QqRFDbY8}_>z@v*2fgUdR7;$u@ZF%GevP`B zoT%k1n8zlbUsU$ntYYBbdB-6*%Bp9ff2e@aR=%hib8~6*d8VpCI%ir9V1@4lr_8KL z!~?@a^ov%H7gO#2f|~Tkj^)+CTM3Kx@1(=Akc@DvM+FPPm&__VypKee3>UUhg~wsf z5glT_NXP#fXl3DjI}X|W$f!^tNkeNZ?(Ei&Kfx=5lMZSh%i&i2HD7aggp+H;&F3%9*icPruTtNuj8^D2BUO^klC;(}ep z?k`8-+71EW_$k1k%ilM!8&2m{ze1FVW&;ng7o$gK_-Y6$_bD^rGiU8@SaI!+B2CXZ zHF4aGYTv|9_mxLOHg`aA&JA*GAn^am=G|ww3-kwH>}|4$;v!!Q!gvRxJsAA=BrRVo zK8L9x{0IBF=kKuxqe!@(;LF#BSHr*ymcII3)?Mik{hJy54d0sY5lQts61kS=(4hwv zn;TiVZF=mE)mDiRQmy0!@<7 z3_20sI_VGubW>I9+^9ghStZ<2pefm2nk&%B!+T@n4Ej;wwnuyW@>^1iwY zePAso$zU44n&OX#-AnHveZN=J02xnKnS`Fc$@TTWpi_c7Ch;RdQ_nO1MdJ7)NdNKn z`tFb~HGnJEsW;CRLeM2TvE#BRix=8w$ zQvC`9BtB|4)q{W4HrGbBgEX3O>&w-dGh2wCxMyx%NRrX;tj^C%Z%T~Et~JT&i4YbZ z6A1yFoj2doP(P>i)UNnD5fd*Ks(;rFC4hy8isx zSliJn-EqnY67{v@DCHnyKpmS=TtZ(*&Icv9Fcbtw)Nh{K6J+dr9Jrf9VP<%Cbb_Cs zx;+VM0W21FXgFz@SJhN4-SQS29NeM`M8pbK>p@R#$T|q^e3UV(0f)TqwFIM45WX)_Z%sIV9BYPG=!rqB*{0RK6LH!_?3Xa6Bu9 zb_x(b=J9AJ%c7{Ko!k zshr^C4F3D4xnt(4QRzU&Vf~&$Iq|A(I4I`#KPZ1HP_pfr#?NV^`o+e><32mF{-cUW z8vk6|*Ou72O>njG3~6^aVP<%EZL^M+Stp~Uags1PhxbeHp+*DRq(-{h-(?WVJ4P9~ zqxV`j#v;PWZfNR0jJJ+4+)3MiN$Kk+^|9>?%zy;ewA^OmlYeR>=F!dByN*G634VL* zSHWrePbU0cFM9M5?9wEr!}Vb775yZ@bpL5^&g9i2=a#Ei@a?JlMO9`^NPk6@H=Yxe zXDGd+?>krQ9YvIZO@-R`kOpK zG2*j`H>H5omCDzpH#rFSjBdzFhzSyeIXQgZFr*iD@c1I9RBxNUM06Odp%(d*>N(>- z5rp<^4EMwO<+&Go9I07}MVa`vgA@-il$qDc2YN>fp#-s=SW%#)awb;iUm|1~WYwas zvHacivzyhWvtKyV3Z*`ZLUwsIJ4!rUSVVs~NWcqgN|ZpM+1 zqmt~C*vP@rIV2!NcV(h5qFq7AL)^!*Dv7XCAx_Gh7OrS>YbwEhEKFF}-dpnM2}#Aw zF1p|HRcy{j(u>3G-aQ&w^S{#Orhe@kMl@}bY4v4^*o?0}3yX(Z(ekuHWh5sL4-O(| zIqBi+1!A72CasF<-_RKK)!H8nZzPj=o%8flpz=lPn)TN`CI^Z!P{9;F)V6E#z)oF7yBfSXvSk) zyK)5xPVT_be9eka-#*b0{(mhxKw&|}p&Zk(V6Ohr9y+@w6Ok-N$S>-HDWm(NF`>&s|HJq1-_uIQ z3r8F$)w%89LOR;TexhVaqBN8sp0SMv!gl7C46>pv;p4e^>M_cSQ7aR$mf^+4@X}HV zKyD+uTR%00)9V6B@)fx5g#Tv8u6a-bUfR#N7OIs)*K=>GjqN?ZkfZ|^BR$DXJ_>;8 zAp?Z-AnIV-M9p%vf_Ukgc^~)svJ3(`6;t_DRU_i#Tj;bH{wW^nD54tIX1%Ft+L*cP# zcnU#j!7RrMA8+dCDd%N=vO!fq34ql1VsjZM{9oBg$E&#b-+)VRe9J8Z(@}_Zto~9m zf+Q6eR6h|fsT!(RSQoT;I0i0(=@9u=%#1TA&%#`1I8&3h_OOBbu*ykj6H@o4d_>~w zQ=8?@`4K=r4@oeANS9`|{i6YDs|u;51{e5l6S|g_P!-iq1QD&*P6=!qtmIis=5LN2 zhVO8bqbTkYm zVY`i%V8)(S9~m>QOzbn_D|e*}mzhTgBoHdsG!C*=IJum8kg}6Ov$pSK|*Q!FivXqOm@2(EoGa zwn19!ZVUHbHg~yn-KyFAa!xVKrf&lU| z9%8I|nIM25Z3Y+`=Ze^Sad#~G!89cFYn7lC1()Z)2(G)PkSL0|yjk{}#ITUUjcvRp zx*lvP-8zd|4^5$SRj8;}ZbxayU`GjNhk(WQ>xM0+pE)>lDu+r)bs4%CPL||-E50qW zcK?@zz8lQ&4`YZ4zDmIhNCQ`T7faTd(#e)AXg&u=JA3nIpq0g8weP=#p~zFH=bkb3 zfwbq+V8%71a&ND9EJxnveBY#E#-;{v2_BTO{vwp)2IYeZ4OB@CoySqyj?pKJZ?z?3whx6@VCbh< z90at;lz zFC3sTOIcg3q#yK%aBy(E&o6!d9uNMe&23nN!V5@gcK9@;Xz+iGbl!Magpk*B6*BNSslT7!R7bKE1P{7y{DA);-0{;_9O zUTD(S2btnl3*7V^OMhrz_}XYMZ{Ybyb2AhxO^3Y09!7o62M3_Ml2B8d?etjWje__8 ze2L4vpjq@RRC#fU8D%1QbT$`$96!WYz&)m#p1bY;-0P0D8wqnbcvR(PWJrH;w`|tA z9uzD4^)CEC%{GyXRvA&7PyH3RuEZ@oiuQ&C@23vm?Mf~n|1?q1FY6xzm z4k?t&jQcn#BRrb9(;q7mquxFms_k`RmyZSR#&I!%AYz{kg@&6``<)bW>5 z844uX+$^YA9*s1A|6YZZaykGlHmS=9K(T53quXxh=P87ZjjeaDufkUvOceI*iozq= 
z^^EPs?mRHrEYK1$Qa|G+cDSEbA1hud!d{*|zP#JH2dCrDRp7{PrG4eToczFQ^{|Eq z3x!o*9upH2oFNzc!B<>374MmA^GIPhnH2C;+l=?~!wLz>7(#233F; zN6@v&Vibb4wj|*mNkemUE!tzEWm6=Zq((}5E^YKIVf-!^&Ct8JBM4Er&E2EjI&3Yz&f&V67n&ciQV2{>16|I4OUK5a<3REs)k;^NYs1+r4U~O!|P<2-G;7b#^i6g(DB`X64g~NLpH2&CgcpaA;Zr zo9Z!e^Ip=|KLMQ~q8eP3=`r#$<4(TxXKETKDn%v#%nJHKj;!xLKg%bI7ey0^+`fU7xMg! zdZO)6`C7gt%Zm1vX){!P&iaTt&?0h|+oETgjQOsfKie2DOn7g>yubv3ktK-Y&yZT+k@Wd!uNwV8b{ zrYSz$>fko(@yta~IauWbjg&49SFt3{m}h}~58bG!4P?w%-H&a3%UXoRg}8iDQjEOi zn=D|gwT|wwssq=0=;r38N8bWipO?R0A~xvFHJ|RyI|B}&05-*!!{C+9EU@mRVm<=S zq*nnRx~_*pt)rTdnyP*+@l4ciYnh0ug2-P3&2z3J~l(uX%`p(Y6!NDuF|iB>Ji$k@BsCf zxyV|8V&zA;XqIAf?sPC_Koz2@I*(HXm7he8bGkk=R;PO$5D~j!q}X-QW@F(CTK2(E zRkdzR>D6eTI6A;CZ=I=_5J=MTyWsI5B^YJKLPDJ{pcXWf3^2ynfhUcsKwkM3(m^co zWFk26JOw7$^35p8@0|HgE_)a83+c-AIQDs^Y1i?xl+OrVSlzl+*u4VRdkNv6-`2-> zSjbvfa6(s!z98+EN3of^vGH4-e~S`^kzpte1iLFt=3##kHz6@`W|#Hk)YK=7yhPa$ zN|#2hT(i62))W2z!<=>(N@FZMJjdR?eG58bLHg|2SitnO896!mi8E&!^Ch5{28z`X zjsO~FSQn{zLnylhorB-LeY0w`XitWQhViMXzTXuYs5@OWF<}E5W2K#^M44Lo;+p=Y z(JLaVF1m3E)qU@WnIy6J-p5g3vpbGV;TSD8M4Zf@8lL@Ju0&-z#^dc!=W|t2G0)iO zY_#BfJ1q5)8OTPjC>SWaVptYup7cpBAF%26qtVKmN z2>2cAMgM&E>`jP(-ye49Y>#8t@R+%~)Lr1EP~O4EpHH3~4`r03MVr^!3ux%+iNMPL zlzqOH63p<&xwZ10HmPlGZNZSPuBChKF|V3)Wub~6*83oY!`t{;H)bbMlOs0?Ttg`V%;IT&&s+dg0D zE#-Iqp|ZL#K!^Sg+H44ohG-C4%WvLh2|RCZej5Dr){#lmnBad+=>g0WLX&W6_=fK3 zztAU5+(yQrYK}SHZ&eGLnUK(skbp!EDAQcJPxN7Z2O2s0T3R|DJS1ZuU7cx1J3i6S z4K>uviv8l=8WQzaht)Rg4YjWa0Dm*0nmcRAq6%0kj49LvH_nZO0U}&}Ns??^PmyEJ z@z)Qfz4WH0p?L^Rnhea$2OIMJcg`{k3g!;tUV)yhGM~;9q@wj(1ZOC%#Ql!?-hql) zh}C@x*R}IzzksfTxd@fQLLOP=;jyt#JDjLVDC2SSJL!-72S2eyY_kuQMa9rhM;QXZ z=g^>(!A1Dj+s1iOEkz6P<_#SE#|vOOw;w0zN%?@732g(-_Sy4DkC{4tSlr9p zS5>dE;<=RBL;vMW)qJXpfnzm3HMu?%)0al}yy-R9?H*}kSJpT(Wxl~`}zy7k#o{ZFG;u(NJ1ukO2-vs#;qVW7g+b1 zY~+w&Lid+Oo(0AB+HA6NYS8=FbJEJ8sBKQ|1`b%Yo_-P^VZvge+f8F4jYtNJAmXuV zR+2y=d*SkBy!%8n{?OxY{++Wck%IuB4_9QnicGN&Z&Wr$cy5Fw{l8w*`@I?-RN4_LQW`^Ut4Yp<8@O^~b$TKl)x)a*+0A z+u7K{w4e>-QN1n0HO$kToYf#OLIo}qI#|&*d(_YDbe7X?p_6BGs;taeb=Mx191bb-s%*PLB_zN?XyHW(6Y3MuDR#m@ z-n@M~0^R{> zm*xldBpH>5Z7UaNO{S0MM5MM_wZx`YlEpk(9(#QY@zW|Ps6GRhuZnQX*tJ8Q3JAm@ z-Hh~u?eqw`2Pwh#+~H#?6tfJ;&!{Gdls30%ZRv~)EOBjbm-&g zBUu1kVc|8@4)9B@L4^Xrgd^X;-VHm;0J?k8X1PET=(#(AwyHy0q1S4gyrxi_v5NQS z{5h)>c36!qEvQxOAolLW5W`(n3RF--i+Sgo)}%~Begixx^zcITvhK~TikI|6yJ(x@ z#Jkpe&IczB152(;QB5UbWXv!odpONGk#KHoIqdW1M8iE|Cng4}`;m1{0SN^><#u3z z;Yr-mn*=HYES*8bagqpZ4;To8xXXB%uME$f#Wu_)&4i#^Z%sDcXtJi)sY4eB?hAX8 zkvg%3X4GP@rQ(rPN=PP0wjfr!wRI5bNdmITdqJ#yvs&5&?Md%9v+fRke6xTg{u8 zjps5l$CfwK0~RsarXVIkXXr1jE!A2Gq&e_M=|KI|uTEDE4J%!B^~I{o^}iVr%JMh1 zEayjn%bz|y5^06%e-TMXNxw1bQ1ZQi_sUVhH|=FV8N;Q$A4PAE{sW@XF@|E= zbjmnIq)}(`!%fK?u$z6b711r^rj^p&z3IxrqK;OjGt8z&P`BAxf>zbat~)iO;w2-G z3OhB0p`L8<5Q?www}Tm4GExAV>zt+wQLSBBe5AjIy1KhiSNBz=T;g@>1``Yazb5y0 z;dOy4d0N6GL#Za=Hn_Vtw5jfz+Rj&_YQT!y?j)Cf0XS9QVGotREDN`Vr0FFdMx61f zg?~Q0IDof!ZRR1Be7&2>f5t->)8$vw)h|(M;hPmFrz+RpHpyV$b9`iakc}jjZA}LB zwp^=sY*0J;?GQPvyCMjPb5MQ^8lPkNrOnM~iB*+EOvSF9WLY7H@xmLT7kOwZl$`jc z^f-o8*wXf<22qI32egd#J|6A&f>O}GcFdgjmF0 z-Kq54Sgk3I(9zh{2}H+z_h5ZNk|#y%fN68KsS+XJz~kg}P;NRy6nxWy?nTV)`HYf` z=$9qM6DA>%U-GR=-s=e7627%zSs-uSH_7VUe6^%-TVSs^nj#dF#h*&$Jh{sGX9gxM zTYDp7dUv-t8X80$TV}W1R^4hI(75^Pn%WKi6rB<{S!Y!f6_#&@eM5dzp0&=3C)K`i z>B}V6=2ZL`;)lU+y=-P5N1VOhyK$Hr(G5 zhuiyE({tf&A|j%|Q&&Sa#y9q1={L^H$NBbJjx?2?!7yn* zn9%=jMH_H{u2x-zOq zkTQqVNXQWVQ8Pky@5?Y>V{!~|*oRlr8Knmi;om4a3&q{zLVhx0_Dd?P z-=teAM+7?fu0ToEb6Dw9v9JcLO_sCcr-vcRjf^~Cl7oI9e}Dh4=~}|*(?as{@+>0d zzTXK>)NdXf+w2G)5b=6P%Wx4Z%i>FVlB`Lz5T3O}_|qTQWdP 
zX4F|@0`%D)(04DqJ!NK171221-(#g#LZtZ3Qiy8qx(+wIS?SUxeN(F*AAQ9RdAJ%dzddvT%MHmZ5TD?-&g`NsGkJPT`>uKF zS^L~HT9c=N$qVJ!;rm>S@7U6|XZ}9ZX*zB=)4eWaUmYkTJc}gLovviySjoV`e(}eZ z6?V=-ZJvz`wkuEIQ9dUc&t@#7E3%QDx?-{7wl{Dy(H?2|{P_Yr-y%+3-6V$wnD`6h zjqc~PF=B(?99L9!!kVrqKf~$ST!}^WJBZnrSEz7r%sy+&Ry;9C9}{>tdxS6grCJGB zBa|oPpeIY5F7MIPPDW3jNJ2o7V2oa{^76huD0Y1rtUadAs9e~^|w|gx+M2X^s6x8*(jM+oBMk^q(I~jEt}V_5nYcJx%=_s*$r!W z_{i4}x7g{Wyt)TOFwetRP{n#26BF|CCGCbA@JDz$P6RxdW-9yxG+4t^5wd!$oGqri zGMVOF#I1z1;@HoCF(V~~6F7B=Egdy=5EuZ!kU|dtY+)H%yic7v^$JX{@Cb?p`@G3J zd78>vS|QNST3By$=lb&UG6y_M;%QiB%ZFa(u`+L}?4Q%vNTuI<#0QUuDur(GuDvZV>*3*>9N~8^{yF2MQOa@`2t; z(R|kVRC==nI<@PqG`B@`EVa|epqGvI&e*KuN@rp6@6{Fmad}PD-QlETN@?#K zp5YDO@~bb5VI4Y}KE*A65thi|<~l_h>OLz@9#;{VF_NA&l3f)gc&f#_ETLdiQ;7TF zpMYBX_~`JPD^KCER`mx#-<>jwDZ{-sPwEalAxusiF0bM6;y2>hb~aCPF_)2fbF#Si z$i?wS9|;%Ac6HOlH;Fiu(qy`&6~b(9T-LTzn1B+~osWBb>I&InMux0rtK18U(!8I1 zFT5P_cy3?Nma$BuP6(BXv)NBc=*&?j`xB!>3|uihv0O=&9iQuDjeS;HH<)@YfcWX% z79Z)*+qh(h=*5td4sdzx3r(N%^LWES@a4-FH-;o!&1RF<2#5ZiNuD_gCmakPHZJ-B zVugB5t=z443n9%)G({ecU2iqc4fZ>?pj_%Qy?%rD@m2OAx;;WHr(vX zRK0Sbj>uxz%#iq3C|;J^SZ7Z8?szUQr|sd!egC}@Sa^G0 zXa(P%T_4$%#l48RyVN@^(;rFw)&&F$nWI@4-|??Jfky>a4X85q&skZ-ou%~lCRtn% zBS5CRijJyX(&aZ_%w;?TF5b6Cp;T1Vt!Y z2m8n{d?Y3mWZtNH?N73i3=GT91r4NGmA8oA+j%kXKVRb>h_gL1{}79BBhuR|WovBg zH!AssEsnmrzH}+SwlpqmV5Yji)nRE`Ui$UNI{g2qvn!9sYW=sbgcOye2pOW%peU3K zDU?V_WF9h=jAe|F5F#Q`hQ!I3vCQ+B2uC4ANK$03W9F_+r{SF6y`OvU`KLZQ?`!X8 zfA{mO@37XoWTkErVR=aU>*r5F4?6??y$mN$t7!S+kiDaHmW&i~*@0zVBx>6(C&%63 zv=-i=Wmni@`cB)mmHDse>LJOu%JVAxTd8fo^+zr)#K}s8CMftgkaYMhxf2RM8Y=c! zbO+T^jns=iCnNFj>8c3wGKIHM+CP<38nIl$ao?j_v$(ccQ*zq&!%K=kQmQ4*WVlIP zr!IDkl-D~HcfNEk_VjdQcvzG}Eh&WHcXAR#@LNQ;nxr#yZ>(9^n8O{k!rZjvrZb~k z?C|IK#E*VIfg%xo5zCq}B79m^^#vZ$et}Tg4BnSBB2<^04v>+K`>uMPZu#QV+D&%* zYR*#p{)9|OUdd-Ha#wz4GvWk33s^44*37zZ=!DC|+sbpu9a~82=H3th&i&L=%NN~8 zBrQnyZfBgoaKx?60Go(CE8-IM1rP|jhjy$<4z8_G+omv^7z+VA3bU_lO-d|m}K*Hh-eeoe(T5Uuky?6oh7Gh*?zw@5sD}D-cgY;F&=oC z;~v&h8z56svhjTjsoxE( zb$}D%IX-H3x4ohDP`JN&yy=XLffuGgV<&}Z^ zDQ`b4SRo$hW;FEK>zJ9}4X)9!}EX;?kk+x zE}b&F1|RmUQZ{^YKfs$u{Fc$UErXcZ-`gzSQuXX*aa(4ibDzAAB0pu`ax>)$4oLCg zrp;@=OAPx1+`{r2--L++(Fyc*9^!*yeqE;*kTH-D_@6>-Mx zQ#OjqUOMBli-b0x7*yLv6>a#M@$Xz_Y1hbeu`a!km#D8=vhZblaf|I=k@$=yT~gZU zsT+Kw%^{Qv*^IkGWB$hMOIw%*8P`Kar;^*-#9MEguL&n3%~rQjr_ATFxsp#>Y=vej zjhJ4*sj2zn2MwRRki7Vq6=&Pa83Q&SR>oSzmAK!NI+KdUHhHY5&EujCpT=vhrq0%< zsLATpYtM5yt#LDB-L1V2dWth;~zK9IFGM`qlaZolY)fjQrz3+NkFILDT3gBd6Nt#|Y`rizrB3Ui zMK%jb&e2+-P90zz1e6`yn(hH7}AyV%~7q=0G>@HCzJmQ>Y!k z`*|`GAw%Op3l`#JE;>VRzmx-Ic{dRN-7C5 zeV6tYVvKY-mA08200CrQZ_sa(k9_jv<+V`YH;A@Tqi$#;belx{6Lc$x%4|$brD%k| zzY9_ckm;MI^!hvUgm!I^y)nQr^KF0%$ysQva^H(;v?3D{R${hp=q)8Bm|YLWFnV%K zSND;F$o~E7F`I$vm4u9pmqY&JKC8b1R>>BL`-SaWO)4JAW}hu-+tgG+QMJQ#It=ri z3d+jLNTzNFqL*cAx*O%8uFZ<$2awTpe`<(^L{7MGNG{Zz4ny=Abf3VX9K`K_XEPLMqjbTzvX=}gQv&@ju{*;;b8@&{) z%{m(o@+>|?ZR$(CO$FG{tE#$IM&3MlFkBu?JqwCI%k1WU7}*FO)ZqVU{-T0y zett`xwpFBdpJ0oKUfi2d@#2Z}i!#6Mo)!K(UA|RRR+hRLl`35sYnPH!52AkSHBNGo zxuG}ur9hOf>$zef_PgEJY1uR4Qm)8%*AM&!Q0DdLVe$O=w_NS7v!WL<1JlaI)PovI zQV$A}ONXqwGxn@`f1~3$i6qltns#1;KdOv*VrzTtqi-gS)j1;@85i@5rMkKVO<^UpdJ5 zF7N2AH&GcT&)j~bW<+&wWYgiX+-QTQ;|mAKsZQ_~wf?0CaQ9&m+`VDOvZxhP`sW)5 zCarLuI=3O=k|qJ?wAqdyY1=*6#ET-bXEKraaLF9Ga3Rh(FVUD~5x^x*5JCsu0B}@+ zD8fYU_@;4k(B#=th32~mqCoi7_V!*My zb_rTSa08k-w}zA3MqbOzo}V)N5Twk(d4R+=$HDM)s8C9=_MuwqB$~mCCCeq`8WnDI z(RhREJ%ux8UY*ZNe*S%IY=sK<{@Nz5lsrh`W&3VzxazqXyDOR<;zVl7#Np@k& zePZX2DXgEpa*9KGuHKA=CsF$OpNF4J^&}l(U_}mWyb8*+Y{JObSY#JY2DeFhO`WdOE{K7 zs&K*U*y>a7KlcH+!B*`|r!-MhEe%ZoIp*uq3?sT;Z_RK%_K&hOnX 
zf3CLES6^@a@Anw%VhvG~v&zbXS8bc7Ze`dRcZkg%n%Gb3dgsJCiVPDe-aJp|^n(Y! zcYcCABAXF(q5P`YttzJ-Sqep9a6Ji_}8j z0u2_Xv9U4R_FQ;;dXHCz;H6RAsR?ody!_BW|Z-U=vzHPc z0e{!ZZ;j@Xkx!nIU^0A?_UMZ%D{x!>*F-VKz;#KBx#}NuCM4_f$Jy?7T(P5-I4RO&ETR3myAb8ul0{60ugb~+)cCBT*=kX zpKp?w9hxiHo{}tOqAqSS{o2%IzTYxO1NBCW91Y{(sDr;Pl>1H>v4lU~FIFwqJg*}o zRj%=_oYOR>V`F-9*`Tf5+2WUE1|h-a*F4L(^%~yT+ZpxIkq&p(JyPLI7C*m5ZCr`axfIwWzA~9fZ7f<}J|fTabK<@` z_Ol`+omX8dA`a%KmQQNF>vi~^w|wJa^DX)Hc#@NCMQ(!{riEHk^ukV#ygV%WQJr(&5I(HC;mq zmwwi{7GL9MSe;GQ`LmqZb&BT$8AvY2JSwq?TrGY7JR%%B$b;v9c9diKk3bvf;X)Kx zw=qt^ICwd!B|ym)MV3;03Vk@g38!G!zTx58kHo%{Uz)npg;~&~7*d*ho#!3f?fb$l zgu_woSI&sRftX@eS8>c?B^hZhFSBB7bgm)C)IY%*>vpG*@$$yh*PkU#Om-36IE-#j zdN|(}vZZ6)omlxyiK&Vwc{xes>M@as+?frNq0B}7eV21|1jnB`i~e?NF+nETt6Cys zUH0+wpFVqbId-@YAM}aVI{GCENhC8=MCNgtD{6R+G;eU+YtJ6R+L|a+`!H&ITvC!d z+04|y`5z_0rKPIyk9F~5$(fa|#`seOWRB03S5^}8=*MMc%OE_8F8a`@6H+vrypB0* zgWldI;ozFycmK{}>g;^W+=GSK${(;{c89XkL-#Ye%%_&EYtcpt_=7U~s0ZbjK1T^0G*r}BbVLkRR))*Tn<+jfOtSX02|sZo7psr#xF z`%q|CO#%IVvSu^Cb^6`P|Apr#Hy2kbC}Ao|X>#FZjM$Xx5jC^4dWU~X5PaA58A1BvI@3}HoX^BeoWh1>bm9ycB zT3VZZ zb--(*5Nk2_%~gZ0yH$jny*WDC1O^y3kn{}=FH@XOU8W+=j4Ks7M?xt7-y1J9*JvY? zl}TH+Y@uUfvhR3H3$bb>IaM_^SA<+v-y|o7VdUg|Mw}L#FDowWb?UwDC=eLm{mo(H zrEoh-#nsIVR_jO>>W+u~!k3s-f(JBp+hA0=o1Hv!kc)*9zzWvtiufxpr^V5Dd zX2M@*REDZ8HDMg>iMZQv>wB>JIxMC*NV$24d)p?&kJ!9mDlZ*{ye!5J6NkijT0YeI zQ4uV`%+d%!JcObS)E+N2)?cXRjXL^Ri2%t{v3rWS$o38QU%zN;bF;PFuN)RB#xm^` zPmu7$5F<*nFe|6t9u?Ju0n#oODlg5h;#O z5634Y9Kni$3Is}m;4jOcovIfl5FLO7Ata|(=?u=L+UT~qYR78=mW~I|NICV3|d|0_eR?1 z3i#}g?@BQRuIH@P!xt?~ma)@(?ob-!$Km3+$vxJkAW}Ox(>R!fyW6y>n&J4rzYGYh zix*$O*;&?So=ob;77E4TkN@HK65u<17lgvyP~rXOD!J1VqP=%YFh+sz;%EGkLS*Mv zlm9@%OMMdc>{Z!J*+dprHGvk*PM6#gzm{KGDBq9V?`?;F-oEal?w$VL*_YGhGUGx zeh1Xmr3d-XmY{2Z9zMpy>DH}_n$G+SJ2F&GdmzN=(b2$F`VaK6-P;wPC5lvA1Nl-s zdRFS}0|vJG{7Pz&zaOgQN^@&u6MwsU%q2u8&uZ$v%nT-Oz8K|xEEFX;6rV3<`(rkydGGxzmz^P`h zMb}|iD@0<%nNat~+;JnXY@n9Ipuq6}HyCDU44&El2o$$9qF>jdjB=A0Xak|0m>h32 zVA_mYL|dvZIWgbyB5<2tXW_M$0tX>@9HiK&J{W_XKOt6c9Q=qHR1(w%>`(~16Bu}d zFtaf)__kqwEVw96Q|*>Il#3%A$7G0E6ExSMBUS22x5Z-m>%|ON#U}7sZb6hDr8O$( zOu3hJ7kAip`u6u5XUM+33#3E_{~W<}^z==qFQ@FEB@RT`GA0Zk9rEvlfH#wplG2GIM_dUtsovX%hE5yS zh2e`596sJRx(kGqph;<8%y5{s8e^mi3UsLL!lQ(afx#Ubxtxa%sSm!||LEDX^@=fa zB?Q3=Q-blDw+)rfo@Eyi(HP`cJBu16Opvxg=eS+ekpKQ($4Lu9(rbruj40+WNjllj zt1@tvJ%uwviQ|)exe)%R;o;$7+-FDwL3=+Szr}{=A zpO3|%!Gr`c6j9w2FZ*kC*SoFvg0h4J*}Xm5w{dA_2hXUTRaIru;N2)}_35-xJ}4oC z9{dTVi+*^x#KgRQ)mmcU5pS$63=~kOEJlzu@=7V*W;y5zCK4wH2N^MBPgq^R8i5>- z?@`-hB9Pelp;UG}z^Zm3S)+!6R4+Qg0>33+GIC17!OMk9MkUKEy!vVO!A+sHU<4^B zDY;_+Re3N#T~hB#jv5vgz-==IjVZB^3h|d;r6Xqajg0hg9e7C8fM~^tuTfY=!CC7x zu2AZp!PntVw(-9DW6w!Ps1v_p44S=z~V~_9}@faFZ@akEhdEh(K#*qiaBd=0;QqS0Sh2b0fBFZ z_u{-OKQ7bInnFGk;!x3|&;O2k86?c`Jak}NGn92Qz<>#VA*?LUCd+GLGz_}igvp7` zmzL|<*#|OYU-=M{ADGm!uH1Sm!Utoxgy|EX$v71W#D|L`HWDbVzZ$%9XAh2|qhOv9 zZWz!oAlN>*LLCfWlU(@pD&sW5b7AJ9);9pLI?`c`RMQkZfS;2am>b0Zw={I(Gvgqw zK6MqI45Krh3!91f-+y84Jzh<3>=belBC3P0&IUoo9NI^^@UU4yO>L=7-x_n0c{v3| zJbnY(jhi%9wSv?`xKE6{IW9I@PES~x5sqYpd6mP&Pz4aKUe!JtwKB@nv2cM}4kSqU ziGWyp1QaV&)9WU*IQ=p)^bIB-#qb#F@@{ebg&iA8;hWRl5mGj2mhwOhiV>Dse7BDmJs9p`H zH`{06(MUOOqigWh`F<|rBgZEYs<4cq5rOj*WZaJdJ@eTPwF_t6;iBGoF1Z|}DTp+> zO-|Z9(Jp#7H;3`)!vS1j;?D0x;Vj24vkqM%5ByMP>;u8D!^@1WR>?5vyogpw{T0{+ zI6%AcCA@>cc@fsS`k5Opei?P9TCjp5s7fFUm*MUSBM|5YW+{j}<9$+rK@rB+P+N7y zyD9RmQBg>5hoodJCObW8IQaPJu6%qcfiAMsL|!KcMEnr0iC01qQC)YAZIn$MfKH$IwR8f^SVn#~#G0`7F%UpN(y^|D16n4f zwvY{lzh_l20h5k`+?q0;fqOH^X_yeY&%1CXe!I8B%Wez1*aMtV8;Gm=R0QGj*Iff} zbs-~hZrMUcST_v~zctAe&cd%pAx9|Z1ZUJoJuLoLZ9=OX1|ELcW_97VM96rK%=601_sC?4ktw8W7NhT_1Y#s4tu%?JK7R>P^D0jb 
zX>m}F`Z>>%C!R}@#%4yQA?BWc^_ww^h}~NB7FDt>IJE40gM=s85c)7WAsB(2*iHy4 z(x%t!oVmO*In%Bi5K{qM)XYL=CZng;aB!X>$Mos1=w&iqlSFrC!-z2>C;z(SC0z8s j2*xF-`JW>BU#~BSMP#e19}}&@{-9%bX@-nw9ze{ literal 0 HcmV?d00001 diff --git a/docs/stable/_images/Softsign.png b/docs/stable/_images/Softsign.png new file mode 100644 index 0000000000000000000000000000000000000000..a5f49a5e46c7dc1be6bf53aa4b92bf1a0d959e4e GIT binary patch literal 23584 zcmeIabySt>*DkyWK@m_Cq(e|Cr39o=K@cfv>5}g55(!BI1xXRm+P$(3h#C1`56bjuN zg+d!Sj0Jz;`ZzEF|HHJqDxr87e%ue=4us#2SYKDQL!pl8BLAVK38$LCA9?Jr-LhA( zGPHNnwKYIl=-OMGTiKhN=+QVD*xH#`SzcseXJJ22V{C74&Bx06pZ8g;Y>ilt8xoVE zP&6nB(JPA1v9p72?u4JKg%{i2o{M>Bar!t5TEoN3OezwLdYwU$N!Qfpu7oSQnIA10 zy7JwawXY`QYNn!`q?vm5s>)2hc(R&R^|P^R9GnYxF;CE34t#Lt>WQo9x|b?1wk?F8 z(dX`EJzk(|=p?kz&76BXA#TtkzRkQ~*n^`jDx0%j)ERi+mh_ajDEzQAqTh#iUm4s&oq)H?agSa`z8UuoeoYct-gySu$nBCxu-GGntj zl}dN<;>qZg#1}7KKJlaojUy!}CpvohIhvxfvVSW}C9cn9lu*L4B=PX(jmd=Y0BX6fN)BMXYO8!mvgwkP8{wJ*3VT}RD8fRA2=2TTx z%VrB&7vaA7ZEK|X0lLxWK7IPM>8?(%T4YS%xg0Z0)0I%9!xk73LHOdu3*1-w!i#kn z>$_{U;TahjT7~I}HxF0j&~b4&T&7YlaF|h+isn2LF1*W=Q?ogJaISID7 z3a!O5d>nQ^e)h~YQH-${k7VSH59J(TPaM za7$<3%5Rt!&!VU>a_ZJ}VoZ*3oiCpxM6JB%U-<4CB2Mk0h<4M!z~BaBGPgzFi!bkD zQgLu_6xGzC$=LPos^#hUm>L`BS=zyOo7`tJGMJJhE}0(r;UWSo{|Z=7Tu7PRG5r+%gM;f641-Z|Hd@c2=BsHzxCqKhjFY3y_9}hC1)P@!G4_*2Z_2hC?3Ei9dh- z9NlZHf7p8Y-KFu0nH&)diwnxRTA``&uG4|x8=bkHL~zX3mlO6v3g7cFPggu92bo{ANd_Ak@ z($R@8U(iwhc;u&+mxOn|DAubzU~_T$pkN|NDW(wj#c5k zjkd%YzM8e_h0!`6jIzDmEt^gymC>Ydp%X36gAU!g-@kq=TN`rAcV5zqwSB`#!mjs{ z!0v&-&i8g>3yUU)ZcS|Ic59g6@lTShO*xvCS-s<*KFP z)@QW$g5Zp|JOFDLKS;zNSKzv{(*3w)T#hPSJkGJtWExv|-N$vVxLeW9&8@>x^P-uG zwugh_)>_S);GxUaD_!c!>fIsgvQM8rtv0+FA#eKO*%i~#20!sxqqe6BwcDRralE-I z0mJ-t)9l&=FMYwKK^5CieM;Nf-kucBty%PArC73gET55`ovqRjLr>-4;K=H2yME?5 za*Sw>-`rYT2=D9Sl%ARNJ4(znB}-QE4#kw*aj9krkIQ|1G3{w{MFn5-jBCd&y_E%+ zbU9Rq+d{Qb@dfP~0aLNe%J@R{(gE9G^k=&pZC+{?aG#84&n}0OUy>p9PmI_!cQpI- zLV}49N40L=hzgg-4yc-&$FjQPa6zJDHu_ZmZ^x zgHhgi z9oZQu8;U0dv3Vr9kxy7wQaL+eMkL>7dK~e$6;ep zF77(ef6feB#?|_?{FW``Cpa}QuiD>v7t4DRbyGzJ`^O!S@-3-#R9950t+-(N@D z+1l#8N2gwpveVT)?H#sbcjjIM!2@z@toz2^RXeNw>_#*dVH-1>#N7z&qw0c)n4g!p zeN7BN*=MNC+>qmTifX;I&ZskgFJL1=!=s*&^pQCoyb`4;9`XJ+{6+9-frLKeVIo1SY7C6x>JH5Vphp{S!Oeyr&EuPs{20G&Tcg%Ah6n_-&K39J6k<~ zKEeHj-Q@cw-pN?Ij+V^^;u`NuL(Uk-1iv2_3nlZra|a#S@FW)vaS@L|DnW=6@X;Ce zK%VE0s{rh`F^_j~{KUxVv?v|)r{s6k8GCsTF?k<4IxuNG_cDY4ogO@TsR8tl)}b?j6Z)2Z3*|=n(vu)6fV3(?Y|_dJK8)Cl}Ng`^<7xNl|qOSQ@>ja2BA_ zaQXarY&DdLf2Pv0eM(ZhA%ygzH2~%FXU|ZTP7BS$d%I3E z%K;}D>1b8*%r@BN3e<~DA8)>Qs!4)%Ndr#>@orC^le&3n(Eicp_jcK&61;#$)7KBr zA^??~l9I8#y{$!mNBM>j-rKHh_0HMeLM2z%3J2P+HyK{k0BmGVwWnsz&YC}d`ZPU7 zHX+9oT$mM7a(sNENR(4WP7toA0Yrp~;q6<@B$rcOgl zK9iuJW+)lg8$Km`)C!ziaf_|pt%c0xHsN8i*4Ok2SyZ0ux-*-A$CedS=eWR(CYwY% z(|(-?e<>TqAll$TYyQ%qzZF>8-4_8l7ET%~iDtyWdky0RV4w zujmWCUK*+QecU#w09)mRtnI)uCyQpP(Ud1&{H+)L%*h?_VfvYu@0*&MrU7#J$L>9I z3*y=ZIqdhcUIf>i^K$T%2V;^~>0q#Qch=Pw)eQTNg!V=OC<1RSGoHc5^1zsYbH z!6vh=EW}v1j)ey@903aw=;{K&;}V!uy_PV_!&tsiY`W-{WusW<)rwxt=MWIcQTz(P zbCT%~3dGBB%BZE>5>M+FNoOu!Qh_jX^m!z@v8iMkz#FCOz8yg#rPQDPm zhlxl_<28qn>N{OBuSd_LhA*Po3;1k%)@FOK<6gn0+ii75}YRv-$0xFJE4m ztEs8oPe`z;4yzj!ATS?>ha@4wZm3htWVn6$Vm$yu>W{@da}@Fc~pIV zyX6u>B?RZAq)tF=c>HkLs!kW3E;olEo&K+v@$=hf?t2=Wm>`g9JyM6(zjsIq9;L-U zcQ_VJiY&!Mul30VHtF{H93K3C?y!F^BVDwpGZFGIc_Vi%t$rVIvw5uFzi14JtKANS zL_{_YO1OW2mi~^+iRhGa$FuDpe}4|rLYq@hMkN~9a3dlj>ez5AE5~^L{^}u}6Qit; zLwa9Dh5qNo?$_G=B2U8a%wW@dL`ZcX;r0-s=9DfhW|e9DXM$m>W!^MP|~h4I4|CtgKPBj>v`C*F!2k@g~?(lreh)?y6Gg&fxsHvuY! 
z!~=2bne5Urap6sZu3YW#reI=VQS^qYT-dD& zA)p1v;)RNvi3uavV-yN#g2R~TY3b=#fL$n-*3zOv&@YbPNzIQAg^7*^u^bXQ*6VyQ zWo2TzGrS-UFxbJ`KJog|=!&bHy!?sI0F(91j*$vCM3_%dWElbW=r5_C)2KZB5*{2#fm?-!p`9fVJ7#IuQkiwAb)93kq&lM zc>HRA3{RywLg2%U6!=L+MICR>kv7BX5#k5PDJvTq=ubvP$HagyFg|^aCg%bQ!w2Zskez3pQ{+lwLt5Dh{of*tRx5}&&2Q~RwQcRu~ z6gN=I!|2Ai*yd@eqr4?0F><%!42bj!u&h;J7oSYz^KMh==HIcQjH4)rVZftbsk`d~W zzdTNHaj}Lb5yk;V8gNQelk$Km{TG`Yb%s+~#L|-GfX^a;_s`13T=bUkrOy8kj{IgK zg>S3>2fUDfHLD(QAA3!4IEnUgl|}Tw!4X{MeS=cnPMs369QOi#fe^B)nVEVz|F+0? z<#8@txNzWu|3!_1X?LLXx$J$wbuqn{4j zs{`LdM*SOCf9{B8x16Hl%YT3N31KDw<)uWEXfqWKO1!_S(0ZH6*W>rdjZ?c;RXCzr3ZbEZSnCSmRHH^LVFjj-r>?QLGWcE-{E>XzkA^Et<1Z{jiJpkpDig!|5n zcEIpL3j`v*aNsD~=VD8OUa{eV>t9(pdi{?vMVQB~Zdzb>x1p#6oR6z$IuIe@?mpth zji>A*-mSK^zc-kMHi|`~?6~&Mw2IA;%Vh81V^4PJlb=6-KG<`S$gjk3bY1N=%GYhg z0*kpDRdUSl>S4dDU7w#kIfbHB=+kWNk$eQ;{V3UWy*F7h>`ryNfD4I)` zh#`}~#l;1zP+rMv;5@;_MCgu8YfSIUG~NRP8bIRGkl?D#UAuN|achTxnK^jVAX>nM zqe_eQ#*G`#H`y%uIPYFP4W87k78RIyLSo`1U}+vde%w_8DH0^~(buN~_AA@5b~ya` zbJ~v1PC&8urln?5;J6!GS&6&2ly^D`=mWDZDk37H$bC;J#d$6@HkJaSt1FBK29LfX zIDv!Z+f(CF$rXQ_WFOJZ9~ww4^~nwvN<3k2(iesss`ffpYpJ&sS@d(4#hDTNAX%5P ziHDNU@x{1s8(l9E;P|;xrjeOpu>m&(fk@Yt7mn{yPo6wM*jl-rk}b+7Cx-@^lU;w_ zkwf|$vt z>P8>~>fufK^2HE#S(z(AAb3w*U0rET#3&@PmWt&eK*Dl%hv|-_A?$zbeC4`!U2ku1 zbx_sYw^F#2eBYyjgj3~iB*LV^pSx_VI{r4=Wu8RV1#HqMVG0EDxUl{@Xk&e(b+=&e zQdU-81}nlj!h`Y4fIYgKVzRj5E|5d}?=LCUE&XXMZ!mIla(b`0|Jx(ic?dV}?Ckt+ z8cR+*&)S6ljauB==0b) zNW$}mfcwrDT%AuN;`+E6*O(@ z`a@;k-Z0y|VN9htac=|(9k?~c8a97v0f4mp_RaRTp&`}Es*L)z+ay>5RN;ez)@Ob{ zrgk^DW!!d3d^3;M#e{5Z6sW;)GXJKZ?IBqo%cRAjYcf9W6`6z{W?RfIQT* zlihsJDIF0R34HwlQ7y7SJbsWr`}{=R8p1!sTV*dj@?Rm;$w7J*2hD&K6A#OUat$q0 zY`!7Q74tP`ftxq3?KC*7ce0838S}+3dKYlBHYrzqs3kU;_ zc&){54$SNa8}YTWn%(_^=Sua_dQ2eq(^KVgamZ`W!zRX{^@%Rse9;C3(o;};OL;_SA`#t24*4?R7Bz|m(8 zF03V`r5%RoDlId!uy2k`!N!KYufJbTOUtsDvMmZU1d#bS{_{Z@>FKe7n7(;KwfD>> zf&+KzX-ceyEko%cd;xUc4=^F_THA-I9$85GDsw~p-^FfIPalqki2_3CFl+5L2a*AS zAUd$WDH`o$p~m<+ zjee8S4`H`hYu0HIsDMawWolI*Fyd!C^*Q+RDHe@D;hia2XW+LU#KnbfZn}1KcaJZQ zHaa?Gw*fz8V&W4#<3jM4%W30)w*HDPd<1fnJX^DcU>}VF9i6<#L>T?Rx&0%UJl|W$F720RLaUb+#@a6WQt>c( zoQ0v0`ed6}X|DB|J9GT2iF~nmhvo`rBJSu$vq^nlAF`oNnclYaTpbMjXlP+(KWG$^ z`+jvK(brMjgnu8?>F>+q=q4o@tf?7NU*2rtqTB`KS|-=3wrE;}#5y#bHi(k1~LNiuz)Qp%bxr z3j#A5Eu2Jm2rvTr@i6JmUDd^?&%1Z?#oF4<($@MspPLichv|&@tdGZgdFl{33LY(T zq$}S&H2sn5#aA+UK|X;Bf5mWr!s_u!!f>qclq=m_&YaH#2}P9+u>>l@2&uXT@1 zZGNiQ8AuXrO1jeRYToa%H@MKcnd4AvMM*{3wFb|+zCh$iZ^SB2=x?Y ztaAuXDo7u6fTH4FIdAi|BJ48FZzMqPy|-eF;BaDK`w|rS(;DN6Su=DjIq;Z2%dQ@u zv37gW--gxKfYo<3&SJDJM|hxEx`$*WJ|NshQW~R*s(PIHns|n|xM>hpZtD1C(R=P0 zzD((0(`CmJ7kkR|3V%Y`?$7TN_-j+`PZ=_98g-|x8p|%(Gk@h{71!+UOfhTO?yUje zkegfY>xaY=FDh$E$>8kp1IQXMJ>mier@BPR6dxE6#ZB%H?g z9eUYtkz66kCCgYmK}i81bby~V*ww#s1lxXYho09lD|JEdCf2yn{TWaT=x5K-8phI$W4Fo^o@K$h#8 zW@L195C|k(^`l?zuoC=Wm1y?i{O1oNXc z+udg0xvn)2Y!O&?*8$fBc0vCXG~Z7O>^y=*z8qB~5%D;+xk*v2tI0IX*~_hw=(Vhe zR87)fO`9)N2sO?~1gdhB3Rs0<10}A0v@XXEjfjv?XJ>OImh2pDP~pv&b@4j^OEwDR zrqO*j`&B18sSi7z@(3WnrQUjemr%3ugA^-=09G?DdB{K(C+_Q6hG{IL7ml85de)Pl z>!-ACZ^@7GwTgkXDo`JlnUOI&k4_-hz&Bv2Gg;%?Q@%hWWNF^mB5k2uuhM-&AVtAx z;`R%@iD&i`c;9sI3ga~vd}8Bd#w%AfA&LHc&GHE`{A+PbJ~86n!-ppz&}987#Y%SCfZK8Y3XeGxA@`U4 z2F`*c{AV!qZ8_M^(NofG3n90ipwTv}pY&y2={d@q92*&`z_a8sP6*@(#gb}W!rt~| z5=%g%E<~oM&z?O5f~6Kv>zGgXK3FQmyl4E3lLA*)(&A`I2i4d0nn{%7r8o4slnfrc)X-f!~WGR2? 
[... base85-encoded binary patch data elided ...]

literal 0
HcmV?d00001

diff --git a/docs/stable/_images/Tanh.png b/docs/stable/_images/Tanh.png
new file mode 100644
index 0000000000000000000000000000000000000000..3ce1355d0b4abdbf6e58608f4ea8e5efc57a6877
GIT binary patch
literal 22229
[... base85-encoded binary patch data for docs/stable/_images/Tanh.png elided ...]
zzc^ce3FB|p69@#EEI7OEt>~aI8R^#@+rs-?Q6-8hFZ%NZ=~6_+;rv=Pb+f?(r}EsE zi^LS~#vQ|So2-@j-__r4%y8>E_pJy81<1L(H&XH2(6T6fER%g^Isv>(Q_boX7BSz} z$=n^-2dn4HwvOHBsMazD)6@2@9grrs)>^kK;Trr%?FH-7X{a#HB;nxcwr!pU`}4ZX z^>XXpJq0fWm3MJY$MOx9A&wvo4?D;Sil6Q3FxRCW1m~CZlZBAwOO6QD#9bD=;c33=+?s%-vhtWvADd8#1&;jiqJQRH9Nf*#S{Fuk zsxV&6bU6F#zD3jd1KG!QVvtX?A;kjwaFA{jKH%|}5BH?)HR1KPA-$SXqJ>e$^BgIvA@J85H6AIyB0XH?<$Qh2*u_5u6q;^mL*FMc=kSMxni0*0M}amK zEyOk)Loue+dPCcF(IPL~dD{kGj5Zv23LT{s#S3SD80c>$eBq=$W%w@3Wzif1=FWl1 zgazBYP5*ayfRt&ciW%GAaLUw_zB?UHgZG#&JOb$&(Uo$vDU|$qFTB}mecuh0>*+mC_ z!HK@HTTN*inX3aUx`_C)0@s)d1qFq{U=QZ&(%wSPVDn`BCKYsyM>o9JMS|_!uNqy_ z-jkL(n_B>xQ#YZ9oKn3QN-yEh>XW^L`({m%r2SxL$H%P;{+6{Za74TRv)&bt3F!!O zRLiJOM~#+w9^4Ku&;@wAA)VF4F4rX-N}K_MmkId^{?W@JhiO}r#bM}6mscS|aueSp z-Yh-_0D_-E`fbE~*n5@SI&VP$o(y%)G5G`qT}8Uy@Hcqp zZM`FR+NoSk;R9T16ZWbg5v}h6rsmX8@Yz`_L&6Mn!TEG$k{i~^rpfM?{3H(SeMi%~yvtv;O%DeRzO#~MoUJUc z>Ezs+Q|LEX_w8ULiqO)Cx{h{Rl$;zWGi9ez2$S#n?n;JNCbYM+cP$%q@th)bx2%=y zg<+)UM_@|`woC?E_V5cQ9&Q2Ih0^eTC`=VzkCc)JLaxKq3~~-T8@xYJUtfA&#SJrL z*Mk03({!`?lYcoMknk~=JwS9|D;bM8a)F!;zNOIBbcz>S4BdTBwZpPa=322%N*mvs zw&cZDBV(p30wh<9H6udiQ7XMCNWdzcz?b9$aN4zaTPkAu*4vW;n&sS6EH-n}F8P+& zjGT&P*28{mz%QOaoL9OSX2WY`bvjtDMv)I#T;3!v)=8Lj5exG_S2$#5X;_TH&ie3 zVFGZ|u;{9iV|La1;DkEqwHp!0da~TvCz%Vr9;1(O_Y&Ep8MBF@cRW*(#{kOEZB2r;ETl(EoMcGp}gEyCbhGCS)mT; zDmx-Iq={&p@^E})`aVat>+2j2apYV`=vFO$#GH&?LgPZ6X+kK4tm3^6q-di3<1udN zMT4*@I<(h$(UU2J-Xjo)fCdMq5%YtQ}2k%g}#0G&ZJOk2T0UaExA%lr}J$J#r- zHLZp(%ImV0uto-FS@6uSCnH0ATs~8cU(cKbtk*zHg^ro6ke(yQ=DG9|dLKR+ z(Ox;>b2_1HkSpT+Gc9ENYmkWWB<;W}jC-6xot3)xA3}Ne%IIx1*!Me>y(m2wSD%SU z<46SLEOS>*Q}4KYbM9rt)lc2AhKlL6ku0lR$o!7x67amL{_x$QH?=h6?8V5HR+Z^s ztm}!vvVFsYE#5|iXzS^v?wpu}ba$VDqH&P@%YPwt%A}XPdFQZMDelLiTdSY8d~)AJ zjo<24!7KIRcq-VWwJxN}fL)7aGKT^FxM8ogsKCd1V1RMi0^!xCnglX`VO=6ifyJyR z0#ZGwA~;2~6KS4->4Ob`(|vv26bQg6{fjYyD={P61M}9BuCA^Z>ddW+b=GNftu(ct z3pnKr4Ku#H3~}JKnggOr3-z@U)-nMG2*6`8+Z|&G(|+?U0__IUCIJe%`>yiirS9ZZ zPg+`8QCUSiP9Lm2NCbDPbIZ~U--_?#?I)XY7YJ$o1j`Zrb9}t!1G)%lbEJ~EVl6!t z@*mD$o+rnW7t82Tgg;Y{*0m-8Kl>(>U1}3hU0vPzOewRlW8V3FyNZg+h-b~M7eIVE z-W*{_;5CnB)2SWM%NrhsF)8;&Q1b0wQ1V%QUG4~)?YFvN0suYbl8X}HM0SbG3*RFD zDe@+B_29c^>OZBB_yZ4=Fdd1UoWmxbv{Vb4LS&YASXSYe*k{GOahdtZ&&Z%!cy5?` z;I)0r$o(zrj;^R)AOd=O6XomMFF=@HuAy7j+BBDhuBPcITCa|1pHF&z_s}cBh zhF?BtwY`pTL{+&<5Wh&Gv^t|FPLO=U|27U%B6Qy*{*lZYGe}?&M>M?gVCC{3a5a`8 z8MFq>=6oHO@)aZy?-`4uqob;tno(I2zilR)e#@Mm(~c+!*eYhjRn7-(h4`%{CcxfU zxxSpeEq7Y^%&oTE5p6UPFu41Te%+|6&~JD^EiF{~_9h<#9`<%4QRidEW;#=%Btoj} z<@@ALy+-Y~R4T9lVD)O3FjvIpoy8n6B4?(cHqKvIJ})VB09Iv_|s; zDi?sJ4~%t=>1|X&)PZ-@q4uT8Weq7wm=@*_#4AlNrS@$Y_C&zMvbx%EJl>fZPmNU? 
zqZ+rG@y&ouzKy>OQHKB<C+>eiFC<(ik;&R*!kv1f1#{JhR zBNN$tZF@1UQPASA%{V2s|Dm=kBz-e|F)0C6G@P6G$Wj;`5fBuP5k{n#?Swu?jsH5D z?jK^2nPm**NEv0*r;@i80#K6nCDNgpr*DB2Mv|AZkb#-{kGqjlj>Ahg#gm^gcO zI3$pz#PrNb_*CK!pO`xE>z)O(xEB7-q!wYbnZQUme0?_h9WIo#NO+A^(vaxZ;el)d zKpa_G9W=Ip+wFIX8+-j9_w}H(z8%XaX48Tx?zeFfWL1ZZ0U09V@I&N_?vce8mJ?wSF-Nx(FGQ4b>OE+$ZLdCiqX1JW_Io(HCk(mj4B6d2+eR(Z>ylw)b zI*pF`Xj($BLzhcXWDS}dJOXklmB+8mSt+f_H^ZI1O3;KK&;o3Gdpp1+Q%l!{)+5|C zLXVz6LcCf`4V5RZP7d8eT5!kUwU2*MeA1mq1rJx-ja(HYz8czZ-b~w@%{C#6Ox<5f zC`-2M`LfOTo?dQqlPdY4!mc8@c}u9L1Cp5lBe857QDAKJjK^3uO5vhE6v!Ro;^Jbu zx-)VJEh9DfqS;0Jd!SnNj4 z-Ba(_teUWk2yyw_{-EggNz|@u`qentt96In97n9R50sY;H^J#Xi{ z1~Cm+UP+~O-xb`WlnlN<{g zJRqu=F$#U8Q{g_}mvWczPlPSTvrFy%u=xr5YoL<2A7>*5>_#s}16=`*$#!LxIgSru zo%mCR(VrmT0qJi`U`;|=`6bG){ex_txF|Ti2c=Z!lO|v~NYl%u-H1R0fDoo%U`Drz z(fjBxPF9q42_BP5sN5RbauILPqO!0I)V48g@=%W-V)t#4^Xt z*B%FGm&|{50f1I%ZFc+|B;cRObjtW%kQxr!IDHFDELRShLQgI@`WEr#`Kc<&f#kG5 z6E(Ij5PdJ0{2R(5VMdExp%qt&B3jq%1Jd0ZhoTZ%q|gl8RWEw()&10(bVpV^)q94I zZ_Gnw%v>J=V1xhZnTVD;72;@+r)hSMRcVwnEq9LpVvJT^TJq~S{Z4_Y!!v_Hh~!E= zp%H>m;0yc0cQ+uZC4IKL(A+q@!@0G^JTIUKVATApW+M*K2CAD$A zqr(m_UZOB_(-}b$T7qfzW1$T+LRuU{n4qj+Ou!{ z3=|z>*(-PB(PRzU2+BEVx_F1o7T6s$Q1XTwED>Fgls&PFB7=^d`7fEd`xkwx4%9Tm zl=xjHLCWhsTqB)9!)c;XLg@e<`Okji!ER^p&SVKpB}=+%emLYC30UdvZvjB@?Bd0Q zVq>4pL*Fi23n#$`2DI3DJRIs1IpiPX^t{T)ctpnc5h#6^9O#`=qX6E_n!MR;n0n-x zqsnYJ&h;ev^Kh56;R?M+Y9g9!+dWz_(*+?o+&nofNZ6HI8kVOBXH>PPY!Di9wF{5L zDRc;cPZzyyI>2JnsiP7wk!P}1fxlB%@2xQUk)59}3q0WvH(Fj*@!}uSqB0P+_)0GQ z76`yJdb&}H-Vx9*y=vIRlRR<8MrDs~7e;c>h$)&bh%c?w0D?TFq+AcNjsNw*7+71cJWOj zd+)?_eS;j4-I6by!8J0h#iN+!-usT+%E~HES@g{pCf*bTP}AY81`LuOQZIKjFl?WA z3{Bs0#ha?7^Kp?t|tbzerp!a`hhS%!2buMr4naDi&M3S??$b<7wno6iyhxO&%o zW@l&DSEEF0**%Li%9P}LjE`@c&)yi0Qa-a{KnAur4N-Za61@k77w1c7PyQ=5oQC{& z12^%65&lvx_(hdAemD+WASxw7*NTh8vvij^CR~4!(#Ycqfuwr z?c1)YH`vbHR=Aj;b+AfVqG>q~g>90=lP7f`Kr5$$PP>{~RgZfwgvXylgdJY+lROxh zO?>1X0eWjC2KPKuXOl1@u2s{}rQt%s&=t>`k+W278%?;!>4;3`n1Dh&$2VrRTos^63S>P~{$@Ra#l4L3ffiueANe?JlPKBW z^i|w77UD!|zF6Xz!O5FyEC;t2u1!2d9xwhAdC(%?6PA~zRhBTOvOzW;CC99I;&XMK zUDlFIlX&9*tvLRtr8i-DwS1}4vRHCCBP{{*Lf#nv8Fi5viI6d1`*8nj|9qk_cqbUx zo@BIR;KvQ&(csQaXC%sDOLarAgj#A{TnfVwpsV&COvB?F*A@IE(;A}(E%5z7Pk@02 za=8tFuj*$M+B$ORYIm1)4Tv9?XU5v(m;b1^+e#JJsF>&jt--JJ; z!|L0e2k=PkUUKqbz#{vescP^`XlrY)M80E{W}2<$O4HW?qLc3K?tq(`K6*+_U=i1z zB9qd5_9X7H_5!z$>4!O9e^?Z1Yyg}@&5Eu$jR;*v*IK(jx?IFt?`Vy8??5$Wycv+4W4|9k7p_=DLMZskrDlQ zws!C=JDUXD?4?>C^UzzitqiR=$&4Td)lgM`^;W?~77loB9Scj#Hz&!Trz^Y2=m@dp zB_;iF0QmIEdSRD^6N^~m=w$8RDXO3VPgT6+3l^_+*92M^K2$_CxxC14-;SNgRi(zhw z7ed<;@Z329iQ1ZV82Ae`|V zK3ppLu_;2%GyXr#lQm^DNWnB7hrZ2Ux~g65}{8+)id)zL^;hcr8&1>FJ(#z!^LQ^cbU4$P9OLhW)pEmVA0JXtffy zezv~Xagd5b{jO9(%_$YKPkCoatdOU+TR?$9P{t(GKg7P*tbqZm&9Rn~JnG4RVR3;P zh)-ix7J5;<(_)?JInFXVGD1hpr1W*YKVAl3$aW>*L$lk2m#EngRf3SSl|#}PHZabi zWC4b!VI}E}e57E#tDutAF$oAzMB_2;iRQ?y-Z#4FbH61hs`}dfot&K9-9)oFaurB` zIbkI8oJ$s!mDRO|^BNE1lUCHQH463>G)!3{L>F!+8ys&Tn-}dsV7sww{1;>bJ1vDM3%Z z=eqy}*Z9Q5r`JC`vk$(pYV=(mtuk0Y^9F){yNi9@5zanM8R{Q$xi%8F=*8Yw_`mQ; zY0qd5o82#Vy_*QTQW5LlD|dj83mkP;^OexEe3h)x{U5!k2XhTZqHJL0)ZI1nF_CzYxp$C= zXF`+qn2jsCU+=^`4wrnNK{I2Xdwo(@S3k$!Gxov>r+5KgX55PU6GqPIG+pYao9A|_ zb#=8<)_BG66_MRvV*49l=ba0v!Hwg&p>j@tEDlJ(((aKj> z(_zi7|w$$?)eyQ_!3ERe`&LSPG56()DwCn`ZU zXRWIJ^@1H%i!IGzj5+w>TGJ52SkmHVXL-X>p)?4hydI2bsbO*a1UEPtlqN zTxDFa(t1%uh@Lqv9P2BPt8qajq#!`&L;Tf1f6t!}5(NIxZJ7}QL`s4G&X6dhkQr;7 zQ#Q|$m;Q&s2taCxX#aUIyTryCa32Pgt<(@*D>OsGNMg>Oukn zWMa%r13(I%%CC6oTymK4Swb5DTn*m$Ex09h0X&had#lF4_(o)_aIYBK6C<@YXm4W; z2CdUDamgne^al}}@o1Kb1WGTEuNm959UW^@lKT|EI}0r=H|G?S=E#aab9(!D6{@F_ zu!t2g@{G}>kr76zTfq`R@{RTN=>(kX2||1@ 
zGEt8l%GGTVbT_tOJPC#*?4bhiKjxF?8=V!td?{%7RT&qdUv7Z>M*i`B?c{Vh8>^MFwvnVjRgTP3)v!oambROa_E>Q1?fGo;9^#89 z!B4Kwiv6XF7W7Q&&pnH?+n@LvgySHx-%4rRzxIv#yQ*L*op{jlZ+Cpg;~FbFnI? z3KJ?SkplUALZVyu2#@7_=-M|8y_ncoqHBe_&58%0t|61(1`)#F}LX;0yYjE`bLI=L+4eix49;0p?t$G{7i$RYxG~dWbyg!$# zQF{ka0JHTazO>xU;tl$#EP4z2?g`jp9_RfzklQN9Gzic%#qZpnqArJRsiwhNCY-8p z2d3BscXs)l-XPPVS__u2K|oH#)vsaD}a3 zWjgB0<(cdGtN=Rcv^QgNa2}?A<5zr*GTDEQP0sP_y(KA#1@bXTTM-~eT3%=D0cggF zI9``l%y0$8;q^Mr{iV$H0I<6R^JW@4+`dBHWWzuoy@ z;Tr%tXLl}9bFrUl05pbrKfq`t{bI;H=n>$J75Aq5li?*}JfP>H-Vf7GuP)$M*@21h zGAL#}ePI8%_D^{cyOQE4$zm>>PVm6~*(aIuwL zr?%&3hUB&DE9wOr6cX@l##EG%IrkQNlL+2Ke%=eSp!nH++76NIuTi#( zvVCi%)z*3;t?-Y}j$?#N_N{wv(N zQ-z$#-Jd;>vY3KLo?_eMC|!D{^@Ux^o;l-rpxOu;(FG?j^@UUJ5XRLZ9%zWr+?G+S z`2H2^-C8f~JihMk`|bj-wQnHtP8q}5mSMlJ@5Q3`;@w1p+nGf{etypl=3~vMM3~H* z8tvTij3o9dG~Uzixw+l@)d1K}?H-v^&B@6@4`Rk2kGbyxdrYY zpq{YjeB@Oj6A&^)xpTvsC@u+=34cNdu={pL#Xlq3zRcFj0VSOHJrCGZUrlV1 zlvOpOso8~XqfAkQ)0632^l<63`FdMPFa%D(C!=hAYp3%Y*x|;h=Blq%;UFmURtkM& z!j+}2ca2P<^v0+9oY45KvAq7wH(~Timx(C8viv#Jqw@l9z0C{xRf04fM~!!Ae1peW zUC*P8F0v+6bh47GyRbg#-glAf{~~L3+?kxtIGm4KcQ<=5B>h5euK+w48qW5i(Md6q zM%3ye2d~t`#KcLDm=rwYuaZL7Bbek$)f8mO;^bSv3M=AwfJzDC?4{5<{ zugBDcbm?cQycAFRpGyTB@2iV;T)C>7xv*!Xr&*HvY3qbBKjQh((E3^hej0C(_(LY3 z7bsiooo<;sb##2hkm7-L7gv%`)Jc^*7xW&86G0wCCuWX)G8^_A`jg~rz=%kJ@pe7< zOu59}V%?~p3i)hyq$l|om-C+Po8CZN$|n&pf$`hz5Ja~ArAzq~M*b9@J-8YP=~4f~ z;XR(&)G@!=Ra%Ki!JiWt()IWIG`&px;UKFAtXobYeZ% zm{WWaAjS`K3IBkI2Y8x<1{AvF;yr;v*8^$;%3V@E|G&xqRW7teh_$j7>)6J#lxDn5 zP8RguO74vMYpRAR4sZ8r#{Jr!KHD7RKRaj8m^u%}zrg{~A-8Wa7HrQuKfW-p85`C* ztqLkr>J1CCnW}m9r{DVNs#REVvLG9+khcK;aoj)Yn}5)*2pBNdt4TjH{w|(aDWMGn zP7)A4z>0sq6hHKCXnGsZ+^TN33Ww zOGO9xIR?Jic<~2h2YedD0Bs=Or!NEQFaO|Hy8tApT;!ar-s8XevW9|;1^ZAR*7W$_ zP=~085Jig={PgwMI9X&W^FSEQHyGZJ&u2+Pnl3p137N!xV;HN$jjiCjA^*`t16qP~ zE(#3igelp-_Z9&LvjU^+GJHUUz^6?Na85h~`#fd;VqhOM4+$ScClg<22k2i>`uYOH zHKv_2(PQ{~r^^7b_fWZ7gj>}_neskG&_9S3n3sS8uv8$OuJg(X>!Ae`y(j-|54c@r zE0X^%y)h9XYTEbdya=8J;kq6vAX3Jku_}l{q?(&}c80+tv(waR8^k(8h(XOtgN<(uC zvfk;$U&>4C-Q2?EL>dni9{+9WArDNvGY`W5`R{yak>i2zwH_@s@4x!EDd&U8W+E=7 z`;UU!gaA@eSaQ|=Paoe@z-+kjEI<9vKmPwkX#a0AX}d=cZ!~g`^Q7&5c-f2G8zrey I2?M|X2bJud3IG5A literal 0 HcmV?d00001 diff --git a/docs/stable/_images/add_image.png b/docs/stable/_images/add_image.png new file mode 100644 index 0000000000000000000000000000000000000000..0b675524b45925ead44be07ee882914aaa632aaf GIT binary patch literal 47119 zcmbrlbzGD0_dkwUgo=_X64H_iNR5#0P!yzeVyx^=Xst(sDYmPRjS)mWMpJlH8oV8kdcvN$jHu1 zP+S5^TG_Y|WMo$~9F>&~G?kUv4cwvjjxKg&WE!Dy#+RQPwlSueJ<)V{L!m)g7y9I@ zHf4tD^^g3c6q@WAe?DGUSr_i$uY}z==cQ__YVIlYI-jAYgktZ+fH5x4>*ovQ1zoEA zHN-~UM(Reg-)V~Uk^;BKM(VjoIl;Vfi<#AA^O{t)TQ~0J-^+Crd?NXX?2_j%hhNiM z3r}F?=HjnK{ZDIZtIicyGM^R?drJ0{TUvf+5;^bkp4tPHmdXeAn{o@}RHxj>A zsOuiOUt7QRLTR0UI!H~?h03jxOOnSltEZJd#N%$Tzy%>%CRe(L_1Zz(#TTx=if}RV zU%%h|$!+xZ)TJ~X24hF0X>{zL;ms*p%Bn_EQMhX8z15TlP8m|tm#o3>6Sq?W`O2@_ z8w$?;xia2Lztv6xv+vPqaVQOvMnCA7c-J;t#`CfjGMVwydf13p{&@81X70>b<5KRn z0C^}%KGkCd0Z_l@_L?p&vO{^*?grOy{j-c;5V<uuNT|vsy`}!=*PMHsHH{BP}el;3%Q|$`38f{}} zWF;l*d*WQ*=ItvXApASxwO7OhxtRwUv77H*Q+rh_{OFT)GT1dxsKu|y7rL*5SXz>P zO00jUjmpoQdYb1TWm?Ohey5f5L|rzGC%YupeZ+UfawKZCiu`5t_A!N2=~oZiS;5EW z!mmz|lo*X0*g(Mx_|-+~*V~{>gl?Vnk9ryk!mL#_iH__bUeRvYN+#_gSqR(tU*3d- z?jvr0S)+5VW$%hUGW_tW+*TrcdJrH=*8JvDR3Zb?zY z-v~(2>NgyE%h!`xMH-yLQzWjEs(0=TEpFj zTLV01sO}y1XpLsBS5L_9#sA@C_^74H ztm&uGqG_xl_Z4m{RB~}F^^Tq%r*^A$E5kf?-ez84o_Ag`Ot>k2Rz36^1)m3>z`gN% zLZ3Q5v9_~6i*nMx`i197X>NFK+~B3vTdQ)b0zyJU0zy^=ZKl=*s-|(K#IkN@xj|q3 zfgI^RsWM0Li{Oi3Ufczo1dbt950!O!U|{*;4#SSj(Y>SdM<8(U-g9ta#UsBASUD_f 
zj{&zNL(gKyQqg@S)jE|b^}I}i*OBMFYFPChi!?kr)(P8KE$MZ6=cX^j_ouIqZ^tfd z&7#-uhkie5fM$ScIbj((pzv*~>(|ZUE7#uj90dPNbqnsZc_dyeIepuf`4cnAwinV1 z{SK9yygydlT8!EJ{*sB#2J@h*(|l2D5wUo6G32p*)@W8>R(yohO)tAav#dU~-)c%( zqFHrnBN6g9OFuMz@c3ZNEO~26^0#h^4o`BY#F&eF^H*ruZ->)9QlDMcepJ0pSdvU) zb8`1XtUbnF=7l|k@g>j0aR*p?eou-ct4kWVzMJ&fr|4oMN3W90&+ggErVT-5#^kt; zAGWFXyJktIYNq^xDANkFYo=pn+$KUlG0t97NlrMMRL8pR>Y?hX<_7ziF4<(?WKadY z8Jk%&?$WqN1r)cAd$L|%>RjUI1hwrc7pW*hp_d-PDcoD(-@PgqH9U9RI^8DbM`r?M zd}KmXexwS6vbrt1t(SDWQ*9`0{981d2P3OilssP6&{q%E$lmF=!z>4D8_oXN(=zr! zn;GRhQRS9yA!p%XfvTDG8h|G+SNthx!hNfW1x?jqj*~89IVWr~)Jt2!x4P$o<|L!g z3vHVz^M|Rg@|Ji-pdyLJ?~IF35%6kw#J(=u`E;rD1^J}_%KE$Y)B#Zk_4|Zvfz9!a z{Ty^SGV&C1nnSnFOTp`s-plfW;TQGj?OujfDjgcq-C}`O(f3SGnRoa`Wu9NJpz7ZCr78oB?5wa4(9rP?njsiv1Lsdkx5w-c@GqW2< z%=KFgRWjQ=BOf}JhTVv~zWON&Tb}en{C9uwuRbm_EEBA@qm^--{aPN2ZyIgZuuig8 z6qJO4*N>%$m84259y;C@-NH1I)T9u?l2mm;t)VjzO9`b_rZwi7)OFW*!d1?*#-*=a zsEtUbO)BB}=GKV|^-^{UR*5=_={f+ZVrO6pQXZs#H7R5@Yw! zT^z`NonLNJm&yJ(=(~RB4dYNNTXlPA8zPx5xm4^GpCmUN$|@})w^6KO6~iel*Wqu} zm#>#wTJ{tZw~>IBM_Vy+kKa|FA6V$Rc*(VKGTYU_KhOL5&F9Z84j{1Y_4BH}sxg@r z(s~~8T|Q6lSd!+<{e_W`HQ5fI@FlsFo;0-Axe|68#Wkh1AZ}_uMpZ#f`R8h>S6fBe z<_Y;G+egWtpr%@#T8wd?NaihYKrSpde`gVXG6gmX6JZbmTP^-{nJekBds`B|P`JRb zz?MKrup&+3i0vC8Mz)0xRx9Q11pZn6wja!S?~FD~vryUIy89SSXXcTg!*KiwDZ_p3>LCCYe)GX zD3Tfr3T~J&JL8-LDsG?{Abz!SKEx`v07U-y%aSbrmYmd>REwHfcxoY`FmD`O?Pn)v zTJ(JbD+bnY(a%tLC|N1)g6)_vtEX8vTqhnc?W9B3qcBsxzm^^~DvrN<7;-bD{dRP- zbi}c4W_ID?;9_?p9Sy4D0CnfcZe0Wv^_(?V-pSi@;lb+97#ro5@F<#;k7>)GM6Z$> zFZ(sDp`o4yNbb;+nD||6sV6vH&MPtie{lGkpe}r1A?4q<=EVL{Dx`Mo#6NOcHdCT#Ac!XzCQ4(m=8b2d_5M!N-P2ewMA4SL)19+7E`hN9L?h z*Zy3U^^V=Q-vp0==HNAx#9D>@y9XV-O<(45ei{H>-VKK)XP(j zgX3(V|Ni`Io_5}j{~HP7@$Y2;3luqfBl1A_zQ}+32AayAmC6`6dfT}?S8)W}K|Fvl zK0DC9XO3Ujw}2OC9+8n>e@%9Qz1NtW?m8x5 zLDTl1um67Yo`Sv1k>c-?n`Gw#_sG7`a{t}>I;QM@JpoOxye3ofBQpr4{a4@r>Wl)6 z0mk{?ke42jWyq0zSE2m7X`lr7-xK;*#q;d1ds)ezW(NM#m|2G z8$DidIfC`N*GytIya&Bk?YG`!JZIzF4ax)U+vGK)(WG?d320HBV>(%q7BCw_SJ%nJJ)8d zdU<*qUd!eM67K(W-f_e&47J>QoNm?ACD!O^l0)=0_p09MTWWHfEW=?Sp(c~8r^4HI zgxDh2R};t23%^)d!W8#Aye4BiplI3D{a>Tg@!>IMLH-1awJmtrTZ+%hT2+uJb zWFs6Jo@we{acg2X-6YSZ%#cJXi$q|29yy@08l@>7eR*=h_XdW|!XL7~)D;iCx8ZF0 zqMjgm>T*MWyI0Qvy>rA2d3(xxV<eKrNcWWB^2{pE?1zMW(!^z@(zw77V=(ZM3AcUVP}j>jY2e?%5f%oCnr zO!V;u4Gt}_LHvIO;L}H%UqZ$pVAN{-k1huOyO=ToXM6gIfJE?%f|)F z>ies<8hzkQ2}X081C%T~KZ z@={}LNo;`?(bIImb$@h@yp96Qy+5BI;bfvKJ6`k%vTn=~D}1kv&G%$MgrC%C~lADu9V20eDUM{S&?VvhhM56mc0)%DIq!h8L^CPDHy0WCH zpQi!}<=JMCNVp1`Entg5vL_?fn6~I(@U~taHe=vN#HF~e0vu(?MPupxD@M{*2kkst zDzi|O!n!=;r!`Bo`}dL#O$W66UN&)gmQeYvR)e_gXxudB$lGkK53VN>>szqF$QqZF zsIR^8*Aus2^QT|eaA=WeHBH8vSo8TG`Z-ydg)q~Wc9nEL{I#? 
z@Xi&zqv^+_(g$63K6V3N45{n&zSgIAVk z``KkPqBo3y(-+wHv54Vey$EXNCNL;P}L|I>?sd*^-6dsbTe^IdHZ->`(o$ zaR}1dNOs=FCJJU0fbLdbg7~;^TYRvQuqfnqN%{RJ!BrJRk6Fa&BN`5GDQt=F_t2nw z3M5b0A#jD?Wfl8P`@++B?<3V5kexexqqr~`*^H{<5ENt~u5*<=JbcXBvj>kGWNKfD z95#+OyYG~v*;df@OTVzP@fcU3KHFA}DJzVC9{FAgDyzgJ7gFn4Pe=OgD>g^cG5JRq zJSf1Nzj*<3@`n6E7zH@=y4gd$usa2&PEC*XQfn_6R~a~19ZbjK&!1EjQl_*jZ>C?0 zB`DPYNS-!^HVkd`2ui1eoU-=S(h#@rjcj2bF1VnWHSq4828C8a4m97!KP$L4;bR0J_1 zEZ2;OA3DZT7o9WflRz&K*W6RlmevZXl=>{aVXH?iR`D{Ly*}j`;R;p0)MBBhQR(tN znS#g+VpaOh)2VH0)et~Osnp_sX}BZ}SX?bnmlbvaL=Kcs(**xToWj*tH#9)j#0uSz zRS#3qJWXPLV&5PpUl&lRy~BU3<0QaOM>e+Do=@s-4O5};j4s|vu;cgN7+ELfcR%Tc z-!c7qe=Dss`6%xcLO>u3^sBS4V>FdW!Dk5N;u{Vnvv<*dGR>u4a~mtc_h3+Yph|;iHXRUqT{JjJh?(|a%@|OjzYtb-eCYKoEb2UpQp=aQ8<%W-5a(x z{#i^N8DxF|s4na1EN^a8-2=8-{1d!3qFhUlhj-(&qv>a{{1|m&V?@fGRL zB6oe<8b=hhu62y{y{eLf;3Cx5*s2b($-r52=!owuE|!O4+eTno^Av|bHiRn_*mh{% z?2zBjSh8}wkyqn;?m$fB0-@5t!MWTdFQ~#PppVxGp4TboxBx?UY`9MCEmFB#nYq2( z9}wWRP(A`HnT`Cyycr$ZEiaZFxxtzIc~z#D>&g58o?Y`Pc5je6Ou{{Be&t%EIbWIG zwVemZB^90biji)&=e6jw7Aa~g*5OeS<4sI#olp_J$H6ca zc2XFt^CSPx_A7X5f8d@~-RNr|EZ6VRZWMTaQMPiLi;L5avQTj_uZJn2OiOUBI;WRJpW zE$DDQ7JEgkb6R5DMND#c@X)0-INk6GfF4UI=x<nCT z0-UxBj(ekAKG_-bgIws4x7QyoCwFj`Ey?e!GxmKcPWnoMH101l65}mLuPG6(6DcV}`KszJWG^O`wS@ZCcNS;&SKWaXf6{gJmUabZPY;** z`+j^gdd^MOl}OpRimc~LV_&V7>ZY~`A|6NV*Wl{d#rMA>dm*sXFBW`dIP+~orQ^}WeV^*sGJ?>L#l z?b*zCuMECTw@q^hq8QMZi%V`QvKKgA+%CS4ZtD@uNX-X{c??{@pXarjEYZk#rEA-$ zFn%{yH&@(k@F5TJF!jW%4%CjiC|zUrHFWVAqJcA~Et=FG1XuHHQO(peEQHI8 zw@iP=7*JkbK~Uo6wPNHBn3%Y>xTAX04o6-!WpZCZkS-g!nh8l5i)Wb3^)R&vef&8`vv-P)+f3&KNn4_9uHnleHmiPYWayTmNE=K+=@dr*JNEb zAy9x16H*Ix#JPr*qKiT?fcv0OoyUdLB!o}-4HSY&RE|=P!|#59gvHRc>JSi;x1F{6 zsXq-dj_|ct0(~@iO?b)2P&8R;^|_SvFw|ofNvLpES7>w|?S2&yOt)d+5CUJPub>Fe z7cUKj@p;G6r0}v`82);APo%8za#17JdJuthUHMu=?mKOqx3^d}>b74hQhhmq+3?Qk z_6WRkE?Vd*ds)ONg*})^#gmyF0I-BlqB+k(C`fnokS0<*U@VY;D@nht1Q7;;r1O-OdnNz*WPW4dMj~JB0VWy2jdwi#IQ?#mzi_s6$ReO; zZs)@<(>Fe9H`N64iA9HaZ?hUmPz9y>SmpS6uU7|}IVy2mMnsDd1y9shs|Cqd0+=Hq zkB=MXo@TPrq6#xPq@KBrn?bQEV;UheH-k@bet_{{D*(~DiyD8ls4PINj;7c04ZMrY zxEIaZN*Yl&>RTBNf-{$ex8WxRn|W?;f0|(6xyNXCE&G*l1MYaX^*F(nd9VsW3DzCn z+wFn792d6H3 zjXgg`3DTg)4TT+YS#crCo;8pXNYnNLdZU)02k%w9teVY9J675yF;3yG7z12lku#WR zrg+Wxo^lPi6na(Fg)il_r>Jub=eA(cU|vyR?0dtG1-Wd+p)FCrSk`V!6evRfTE`0_ zlg)%3?PL6JikctG{Z_alJ@sLrTvpa`>s3H|yy+qpw)(5%pJwX%fz)N(FAz8)Ufsbt zjEP^QDHPmqa+QF3d01d#aG#8wT# z^ac2rUacP(o1Bs$0ZjY53tHZA)maPp`Fya#PS7rAva8vQ(M>vxCNnrSRyu5t618-@ z6RlI)@NBE2!jjW8Zsbsqi3&I0_|cSTDSLlPG zTe;EQi%@;O@t4xr_7Q5g&Y;Z6$Q8r90@`=(nqt@Gq|rO;u`n}@YX%7iLke;~*(Tj1 zI2;L9H!@Nu?kdw!6`U-Y-0I;h8@H8|VW7&>L+Cmn2a-e9Plw;dRwy~YEKjQKN<1x3 z?`zC0tu5Eq^1r{J&1azVD-l&>z+&05u*l@(v3Xi8lepIuut^Vj9Zcw)^*@zyHequZ^6qO#8Vba<21Q5R-Ss`1ZoJ1I*3x8^Nes@ zYj-3cxFPqa#;2G$ltuk3%($0rx9 zxg~Qifgd<(*0XDkU&q7nx{q%U*T-aYCi$-|2<>$SlWv9cy@)(j%19MI=g3t1&w1?X zI2V$-6*WVyN{TAL>N}8GFc1?RZhQ!@gMo)%10(nPxjHv#IgasAi zcQ6x@<(Xja-uB&6?7S;EIX8!aio-Zr>K)Rvd{OIJ>G zOX<~Z9*uc*;fbFw`=d2a*D6XWUFE8}iC-UERZ4aUO$M}62x0Oh7b-2u!=;uJ2!!Tvwe``Z9Lns-KoFF_8Ux~vCY2jVmQ}pL- zP9j57bacz8L!`y5DEFz2L64dHukrlYh&v8XS^xbUi|r%aX_FaJ{sP^nd*{lLQd?u~ zXdiG*$${x*2FbBTKGe3LeLPt|7CZrCq10WVAJ)n8Y?<+R6HwD#w~b7$eq|ZZ)wPwZ z*>hp}vrg&WCiUH3M^TW?=`XxXL$1D1r zcdH2infpNqpT&0kojAvKy3F-?_t#7h@`UjGOLB{@#MmDe_tv{Vr|U`E9a!R|fqB!cME^V&Up$42^vtJ|M^{CPkRB!ux%QQ(Tn zNpkwJTnXbGvx$2_#kpJ+Fco0GP>n2toi7ZYDqTfnhHSlGDjxPodiApA9r zGzt@SK<81j+9z-#+<|qtu5Z7Tk0rTJGPokN|5{I%)Z7Pr zAf28o8n}^IP2N=c)Z2b*Z(@4Wc?A9NRXM~iD1wePvOcetN})p#EB94QnB&@`7r)MU z*ml!O^6aUMV{lTh9u=8fYj%Q)hDIRGG&fi=wjxxk14hvY_n2WFlHu9PsIzGd2yIMd z7c}1%yYn?%_KHv5l%-JF?`I_6RRvt!FmvZ=_wqGGQJUlHm^LXKobMWVp}*U)pYwqe 
zD!RP-JCjMPc}pkT{J!7Go2re2gpzqTQ-)7Kn>1 zv%M|XjP1@X=$xPW_}RapqTo`8J}c8?(n#1$q_)U(Gf4sbl4sGq?b$wTD_C%$=YrM~ zjg^$Tfct=7wP@uBrMq=#Y!>&TU(0E^6E7|8_RL}h^I(82h_XvgDMJWNS9ka+WvoYC zVe+U47T0$HwmZk8V30SE^l=6EZYJK9r_;;Q>#K8X3g#q<(9JlOKF(j>JtnNAnGq0+ z{PMWy1i@lPtsG_-?N85K6Gz9oBkp?OiUz zVzavEs3kS}gw7}HK}s{{KgsY;diaVZ^-gHm^OYQ4>uR%A$_g>w*rHJl4){9z@%@O= zk2o=ws?7#!Y?5aI4QWT%! z9u+Bp4qptUju$aQspEOdxMzpuPGT4zr#AE4JDKM-=N{N_eW-?B;9A}hjS-*uLb&~0 zcstMe$YrTPvHE>){PP%x_PGY8t-up??y|Vcy6-(h?$TGkoV10Lh!OD-@N|kEy8wNB zK-x~iL=d-QNAgb5)Xbj0=cv%k*)u3~{dQg|gcNF>rl$PTlMM*}H=O%wtxq&7MQKsf93Ro})h+f>T1}Ix;0#l7hL9gU~ot zgE%G(tqq0;T0UeHZj^qyoq(=nJ3VN5X_B=;+(0ACtOa9(0R<&Oo6Qq{x*4MI=dBn$ zx&~KaRz}OJmR;;zR0s2P05pTwnHg;yun3?TXgBSvpg2=W>MQLRlw+?hbZtWsT$ON) zV@|8Mn!UL1uYRLS6c%O%AAGEyXv|ATmzejNuCYsUN|v@fCmm5uTo6ItN}OG;H7bab zKXM@44p<$1&~k?^AmOIM=CAYEc7dXGR{u>!c>BGa4`$!=sBo?=anFoQnvU0BDTH^L z+fHVcSop4d9dAk}PApB_P`}d`BcZ@_gUgf+uh(6( z(BB9V;JYnlYxM)G(K#k$NMymlR!2)g&^rZ2<37Ysq3@PxX#%P!@N!hwipMv*fCl3`Q zoA)whc*I`Q2+8CSE2gX;=UHH7LaA}}w0eP`>W!8<@7(=hC?a*Zr;7T+qXE;yErjHW zuqN&-e>T!A_fX?SeV?*1vDWnq-&g@Ro-SBHS!A%>V}3(TneM?Y;{h}#?db&$Yc-hC zMBcVL)4_8@QVu~T zP^&@9#YGGyIFV6sA>DDb4}DWD`-x`GEB3N?x(*$}3Q3+U%llLbx?cT`9Kx3}$eso` zhpSseE!vuHu|kz>@r+PE7vf9aF8c<`@I!s#m7?J z!A;SN2{Mx4zxlANz$k4{4YpgWOea!~6{&;&cHkc0qeqd}gg+k1VeN`3qj?_r>c2?~ zN*tTI8WU?#r?b_NaPHn@fDzI-yb@*cssXG?ND9~8A`A$c1w8e(pXah@ z8~>6=1i*Gsag(1OM^||1XE&F#VkNWDhl|On>)*TD>Sag;b4fS-H~gl=M1=F*6Js47 znn4a15~5VR9Mn&?J49;sYnSAX$r|Fn`s?-= zT=^WXpK=QF`5{^G%+O84CJ<4m*p3K^!Ol|{cN2lhcj)7h7sS7*h4_oj8<{!SP) zOmlY@{hG?NlbPj;oX;<&y-a&@JD}W5$32IF@)6i>w2d3i*C&tLKlEQVBQ>SMMr; z8jT$++1}(B-l%kCTR#0pys?OG|E}bKxS@LNpG|WNyqv{CAnB5R$`;tsGSdd8>-h4^ z084$RhN#{rH3#QsPX&rJ;oZ_pQUwwnj^)p2Zgk5MI)x^c?pFQ2#9RsBq4X`Eg3i@9 zN>#o7D%Owfuh^SrKAW+umB(P`VH@z-|ZOTUB)OT z)_umISXtwwL(MVo;xxsQc+>qwD-z=GpAMty8G?qWe3K%|^_+IGtmapPDpV@QAbG6) z>n+IrnkiMD2VprQlo7MP6kixzP`g(~h+l!AO>@m~>05qg$=Z-x_AQqW$DU@$X=ERV z)qL;88z22Se~vb^`(bWK(do`n&dP>{t_Maw~GVScgftYCdK( zoYj>C=-oTV*9Ll5t6<{Mv57%CT)u7qdutR{Zk@9D2`R=#8RW7onIgVCqm1j1S>&47 z-OPcV^y=MpuNGscqnE}tixSsM)u-SSZY|5_w2xb9+hru(X8(9Up`24i7ysm{xu#3x1Etb<_|pN$TVxzSX9`N&*# z-hBzE`NhlTWJ?L;r79g_Z-t<%BdP&MODkr-|NEYLE z*K)~wWp+F-X-mY^4fUeJn#WoJ3Whuy zNH5XzxI@m#ZSXKdsPuy~Dc*&;uAF;ZVWN^K7oZdJ>V(q0F z`E(to{5I+!zb`V0i&V*%H)XIgHYtAjaA9&{PWi;~$4JNLaz1^Gs)}FDD)_Z?rEwE%B7L_)$0l!1PbS(6yz~Arc-g0B{l7qIg-?>4K_D zS=YSkild!{neZ&T!83B3B~n*Js_)ZhrTX=E&yr}qs9fN2sZUKV?w+?;2(vAqRWiFc zjv(S%3M$*pRcJO>hL-E9A5-BB)`RngjCgPcnU@s?Sc3;wQFRf9AIg+9`s;wb?`w>8 zq;ZDSZZ$`PD;ru7marX1y;v4Ewf8xFGtUz#iho#SW7tK`HkV)P3r*e_zmHvD1?$1U z56E)aTysVU>upwkQotuAkiW*0qMQ-_a_PGA$M)PUtbX|{VU{XlfGDY;xjW0-p?Xm0 zAoaUW$B-L15mpQ`q$ZMtgCafH4qMeZHp0{eRyAvIH5>Kgnjf7Nz=;XAV{goo{FPpO zy^!Ypr_N^w@%iJ2GFpv0XUPablp40kMdt1<3FDri?;m>Orw7-Dz#rwZQU8iQHtozRUQ}Kc5@ZkoSQ?i;SRjF z!?!sZEx+^VH=&qV_)ym#G5Vi;KA*;4~>?q*8%JP4xT|Pzv$}I$N zJexApF>9uNmb?}R#aR{j0Y-$QUS6Jz2rq#u(IimM4`SuB5I2(%Ikiih6vSxCXWCRA zP*q+nU`Q#AgUT&@GE|$|_4TRa`AM6b1v#Eb$m;=1$ z@fEN2jDhx&$-VEcX9Rwhq3Q48ymjS$0Dnr=wV%)+50pu>cT$ZH&fH{hNgK}=mTZc# z;Y%MJ`#=f+s#z#M%(#S?(n6ob`LQ=e=#!(l2DiVG&Dc{P=_`Tc+JCcLoubV}sjn_b zWKiQG8s~8JY_7k~MLz;Bz52YgW7JxMukKhm*Q&}0gtg0YKHC|gZ!A}YX9_kk1*#$~ z(J$hi_hH)MRw5;RMsLJA@4qwio(Zxz*wY|1o+T{&JKOD%A9>sj4DH7f?Rs3r_=G>9 zi)l=R_u?1H++_^nAV=XicRXMpJ8sB-Xo9^Vwfa44f~kHpXwdlknYRW@C(ZL_Jz4(U zzWKW~hACoQ@jvT$(@qPB`3XQh%)qci=noAwXZ`~|d5W|swT!uJ%5w`Vl?Yw#3V-I6 z^tzQxU1WF6AuA#it7)I=HC!nrx-sV3c-6*(w&3W@2ifwe2W0g>>3>&(ALRyBz!;*t zwoG27GK@2ITu`cUD{h+jdMQJ$ggXVtuq(qKdmG%8GlcZJ5g7@sT8P)nJCXWtjv?8b z-*kNCy~FLhD1eY)qn2hg2wIxOAPQP}IU4q5%8LxTkuGE#kJ8rw6 
z#!*(jUpgT^pd%*D%Y6h}&CRLS4_Gza(za~Kcchl96MO}DQdejQEPqc@=t8h?S-Dni z1C@YEV(*b_+xr^#0su0_%pC(t*~ZR76%ZnknBWz_mDg88XZ*oEbxgLMiblUQ8ugp zN-VI8*J+oNds&&8Jpf1WyWh#-5*Co#rEGxYy`OsEC#gg%AF&L#M9DW^5vx2SOCz%e zn8uze?DQ%}%k4fj)E#Q`^FP@@p4Je1L*(gChm+~id*wx{Xpx%rlK6y$^@?!_eo5if zXFcH`L-L=Y9qvwFbl1mMEu}FNicVRR-I{N;`2X3O;?K($g#pFQZX2j6#8y>rdi@44 zgP#fkTQnSzK7>`zo*Fb{u^BGN3yLNd674E#Lo=-e= z+?+15UH)67o|2nT$@Xl~=}FzSoWTNwVrJYN6F_LM15z1jTv^_K46rlJeHMTocDo5x zqh%XmIvlr824@nea=dFlM;ZOB`x4p6;xixZCMD84H)p!LmxxurJ1%U^E_ zR23nC;oIBjvTF930-(bjfpzh-B>2`+$oaoS?fUg|Bhe68{ToZZ`fL~@fH^csa?j0=#Nt$D`dCC8qxp32o+f<giHbu9wkp(v_%O$&PNk^6coC&{jX%OWKC25ufs=C7ko?l5VN7Y6Ha*r zi;xE~e&ICh_R&t^uLr%-bz2-ZtPTb2P(nv)BJ@&GAxyB_^ zs9+q5#tKz0XLh@k#qE{$$b*DCfndVInPy$COut#F55VI&(&!tnVH4hpt0Ud__NS0& z07#$@nnbFnPWDh+573#o>{J-$K=S`wyB8OHX)$5*XR(jTPyg5kyL89YGy8xA++${i z%I`L~z`fhf`7^@R%N#yy%VpR}z$#8Wb|8knPqq%bmwABLD^0H|LhR-xx*<2;v3dFm zd^N{3Mgmz5sl?+ABwOkTGK~5ENR>Fb95p!nY0|6K`V%@GSY{*)GPrRIInCw!b z>Dfstr^8?xP5n$n>JMh|y~;G@?78W_D-nAm__EA`XZvHX(&fVDmb0Q@Coek&4?GPx zNWae0eCR=MH8cOz7*WI(y8}X^wp3SFSpyW=p#9(cS5X7dP2&r1MgZVc$Uu=Sbvn(R zV)Rv(;)GoN0DjtuqkyS^{i;wp-c{#u1{@SYNbVkgsh-#7JJb2eQ14cdl+T35)z& z(tbCetTcQ9Sq9zl#^|GGqEhI8Q2M{k z>~*?j$B`S*cP!!h=-aaP<4ahdrPSzy_~%1be6G7ZnxI15xpKo$oK|M4mXWFBw*wNu zr`_1Fb>GLrqrdOsEujSiVUTBdR z8?k_=j=y+!bpp^f_LH23F`FnI3ES$sud+FRzB=Vjyev2RJ%pFZ-1T>_*(yA-F&{u% zKO=mSc*njV=Q-IlK}`?-wyZiFAHV$~u#QlbTh3>qM2~LHDID*a&8R|QI?W5O>4IK_fgc^;k5z3%d)47u-hp_*kh zyCg!-24E4BHa&N+BT)pALO0**ZW%$0S^-l1MrGw;V4{Ox27c%Mx4kjF@eV(8rCXN; zkD09e`-Cb+t%b(v?JvyS|9eOd@&E=*){7*bGWBSh{=fg9;gJSe221}m=q*eTX?r|t@`;GLz&Kx6&IQ6iGAZx=zJ`kA6u7t36qo*<2MhpAxMkP>V;3({ zvX@c6<-Ga#A}}}pliL435J}JVx>uMY^2Pt}3BKvS?hQm>v1d@_%$4b%5B>1yOYQ|r z-xcnCAYL~B{@KK+tNJl-f&yp$dGo?Q3&`Sf1x!NqRmq9of|7RrzG?aP+K+nlxv4BQoS$cVf27o{y zm;_o2Z{Gya5Jr>N$gPUZ{)5&4SSIW=p)FMY^p79F!gZ#W=cM_zry@@d zqfhPPO`6YQ(^ZY_a{$g4(9Xm&Fu7n&5+(A@7e;0K?fKRJ+*#0kEkTFc%M}lrwvH zc3ZPN#JZ)j%zzN5;W0POB!H#x03L|AmNz+wXZ2fu0B_??;Hra>3aX*-0M8H8LNuy{ z$f3p{Cd->c@1A!moRaj8n^*ISCMCu=99J+zV2_zCmR8x$5G`K;0G+#ar58Sd2ed)C z&(;?btR`1(9|4>WzW!*a=vv$s1A&HWy&xqV6Q&64&}~t zQz9>K&va-Aq`ZN*Iz1Y}Yg--lv-hDQh_BCIb3Kcz8X~vXOMC2(hQ3Y%9xS~2QjMDv zfsR+tv)b~EuU3ogtUWf8T%q3;sLOFS9a(^*i5B~S8Adxi-MzLXP|v$0GXJw~52Ibc zyW2xu`4eC}YI4K{i=p{8MYY@QqX3L_3Xwrf0#HooQ0C-d4ip?60)WMQ?uo1@`6Pjf z`?806C~oLoVyvFbJvgI05ZZ3pBCQ&u+gbE_+rDjn)^Zb(ot6p! 
z3`&Vuvh&39?bIY6)oiy5v;dj6rHni#yPx6`Id*wrh!^iWSSuJ5-hc3!NscpL+yaie z^#2j}=HXO-U;i+tlaBeAhr}_@LqdjdjF~C(Oi|`2v&hMmsbn6)QKB+M#}JB-Qb=_& z=17K;xg=zWe(Titdw=iedampF>$!jTUtI6K&)V<3*IKVN?6vp%E|2@I2R~f**YfWB zFWkFx-KRInHNDS7@I+bDIq$7x_h6&tWxM00Gw-S-FYfJ92KAexPybl%{+(ZHQhu(= zz`h^Pz)f=iw*Jn_T*;( z^$)U6Y&%+>bqsjyX>7OcFG>(DpFMeHkwQ^9>5Spay_5iB2YbzwT-z?axQjh~YeS4> z6Q+B6o6Q~R5q7*oPh~rPtyQ17q2^M)|6JylUG(}Lz9VF(^5ridhg*E#Bf#B#c{eY_ ztu%Tw`Pl=h7)2$Y-@Qtie#R-X;siEZ(aNQd`3q1K(h2{1{*4*S zr+Y_o#OCF;tD+xhSXY3@Qd?C0v-`r$bs#y_!jOHz`TXLctkLA>;qxhJ_3m5l(aoFI zY-Q3xe$m)1?-iq2>Ko%>8$T|RcYdr&7u6KGB#JDr_(9O3jy;v1M|$Tt8_1sfEv;DUQ8+H^)(}aueOkw>N+CyULYU>Kb>|SogJ3=5LWw z=*!opE~vb8XT6X=7pm=e#$dl&(5Z1~>Q+vNvQDzB-mAm4ZjLTeD=rD4$2Tg6osDgs zU|Z$4Zdd|y_wv>wgS}`-i>D5nne7YNNxANC`o3JBoz8U(NPc#2Zz*j4SIu{?hYjI- z(mz($mCK~Rm-RZS{TwXaS1_~uUPydHbJpLSr8nT#am&=TywBNbcX~GX8us6WJsoTy z@tHXhB9F8Rct=Ma^E}hQtY>Xb#P59UschEv@3^Dn0ChR_%7x3H4!f^?dYaVi zy?ix5l+eg5^!=;<^_ShlZx6WJKUZ8(K-ko5eTYNg%CwKKOPRmqp1rnhRc>Eg zyv4kU&=P~eK(Ek0Ye#B13y2>>$NgUl>3TSvY3_J5V0*;Q>B{c-WTs(y#NmdmcGX(5 zTCWIU0-KGbt^>UWgr}c^$Ep1r?}myf^j-M4Gg#g&(%Sk1Z)nOpS1QCeS7Wd@<}6)M zEmXXACc@aPFEeK4C%I%N-`&QQxwbvka=zD8Ce`Hj%R!|yEPtiQ1KPJazfVU=#xDT}1Ds#@!a?39gBJBw65@E_>^I^PY(eLy) z^e%UsZJS)07%HVNeDH{{naSRJtxr5hFto8H{8@I^_^{A=T%s93U z3sn>k`ZV-O9bP3C5Jyx@k!+>NfZV~)Zw3fY-LhdJH)Mav=jhCuZEBWnk#j3&SoH=Q zG?Y|&>#wbt-KhO^;*E$e_wyeu?PT`dQ4lVyITF7&3b{5WzC3Yn9%4a(PedrrP-= zv3KmW1_lZDS0PKJKjdE-uA2-JVt{(@AJ<Xvz8l{M}=FMjpyvDw}Od&TZ)yzHsOT4Kx9E%9pBzm#2L`Ph(t zU>5bd5Y*CB4WnCwrdLebu63D*D2v#bIwj1lv@N5?;XGZ^_G=%vB;H7`%S^u^^_tot zny?9VoZO|4+T%;7@t!k!bAzfHhj|q8n=gs5vYKCPG?)&KdbgG_60y)uP3LH@^xMAc zv2)D8n=zV)aPffqk@6hZeUqMGhlPfwjKWB_Ay|+veGPBq;wttq@K4 zd5F?a1D4({lD-kq>ai6kuHEo+dGs~hYh!FHU-{elJ$rkhkNy^e@%Sv-Z;?F#9oJ~< zN>>Z}9gh5dE~>#)k3L!8bnTT`LTLW)fZE@yIQ@dqc$r)IiP7Hg2-RkumW5yXb1en% zS%VwC{_noE5PPG7Kkf{KR;W#+si!F1@&b>uK1jo3|ID`U&jQv5DcF~!gm;QQGLq41 zrN4%b97}ba(OhA$mQUNL0{+pB>oqTH>yIdPwX8&4bT^1L7xR1Vx^_&Q(8iV5JteBS zZP+i7<`|*KvH7a3bafY>uz9>=`?Xl9Lxwzu3!l47=A`hK@p@anjNl1^Iql)ksVpZb zDAdHrcLA7@6yHpVumG0&b`xy`tRJ`9g`7H9!|vEH?SD2Tts!}&vrP1p|AyZ8BaruG z7uq4PsTRLl(!Plw#yaBOM2Zc>XW6t`vHKFrh`B3|kK|2Ra<}G`C%qytd(HV-pKcCX z&OcXNM*Cy5m+BqwHIy1oPA0lJct}O4IQd`*&AGhu`@8&r&1b(SGz$vXu7nS`5ZAmI z7s?B`^7*1ku^GYy&p1DC;R{)gCq@>6)l>+pn+4V`YJ4$YV5_4;{ti(Q&-S5wsbas2 zT2HJbJzc}|v|EeqKy2e2la%7u0*NCqaEuBHhR%th4&Iz?gakYf{H4h zBJq1^xmVJw&KYIq#KbsXAHu73} zk!zA8z3vWA?>}nFIp&-3V2WIo`fgLpyDEiSOxSCqQvXP6AjQzKLhyip^T1V*hzrE7kdUtbU-Flzo<75nC6C1NN2`XS|#O|}Y)8gm4H)~fh7GbP_9 z?jbX;@$MX_Qv)?TlEy70|Cf<$B<92shUa=5j~)jOn^fp}zgccmEno^}CUKion>3Uf ztD1gXZdV+G<>Y%8HVvolflki@S2W|BCU-SR8dhQNm)|0D zImeFf5>~{b9`6s2de)#NOeY)|oNp4k)O>m;q@=!D#1kOd>udeQiB=moL#c##Yno-0_hw8~1|@!d_*v z_|fux8M{7qo+*@HaKheLwJnRQb|^9H17Yqs0q^NalMs3{2u|I5zg>@Kd?vbaRm6PL zulrs?(?l7=b|0Zz`7TJ$Vs?L2^U5)kL6PvC=cXdWM?@Q|MYmzMLDh>PmnZxLE4KWs z?V1C0Mr&D6dkK>|WztO5Y>F&*dhPQ>4*BG_&WA81H1}$k9eGjutJc0lF`%FFek5x+^W+tUUlZvwuD|%j%F-w80rRScPEhSkz+ES(V-+t7_-j}+v zBwn;&;j!d2@}TH$&c@jNcJFB@@6}tB)$nMi4}R!q(fM4K^SdVl@Av+t*87g4V{ zY*{kwH!G^Jk+Di&Tx;2w`q{OiCP~|ouFKhL=y?91?)x2i^$((=St7G-eA;TuFVpne z%-h;5)^}|88&&5;otRi`!K^Yyo^CMyTQn1i%1DycA%PetCvtA(Y6j6jwI;yl#XVZ9nUPJ?evhnX-z|@JN#FEyqdoLRp{%CCN96u^{fM&kw;j5}SGKUJV;3aN zew2G zIm@*7ws%;yWyE0DrT`&zsMLm7UES)N89W_Qw)CKQ>ExbaPj9HzfjYlWracljTC;|F zy+cO(vix+E>`2?I$I_0kL@Isf(qQGv9Tn+reZzL4l2Ygfaqz(H#cy35a&M=DqK`+t z1Yh(kx7}|csK`rLoFVTGeUld}&hWiSfTHIS@7f|Ze|1i286U|AXWspks-k*Pp7>Ur#feK3m?_%h{$@^_G_ytP#P9JjAyYQ55^np(mjqpYh9^k z38tO+S$@FL$iyGqL^7zzMcGRf8{+3B8M!@PC&UGc5uObzs^4>uYwAjfGd=AkYTIXm z_M*LWLWVV2=>Yw+gw5V_EMB+&QB5-|b@<-5)Rl8+EY9(fb5OnQ^0r8L>E`%*8#)Z%Spokt`K8 
zF1}@*PVWdNoe8%mW`-)ie!1@w{3PsR+5J3SbKX%yFAlD#hWG$cru)AfIyN7_jh5+q zx_qnVh;e!Gr_xh>JCA~nO72PVIy@O^xz!%UwKMv8Xw5w=Fyv`5QKqYw<&Hwxk6o;rTiDRcI4f`B&X`a*WKUU@6^iolBHt^T_k!lAXjr8R=5ID`mB=hD@En=0oC zS=Y7)^@PtKsJ@=mr1j45_oQ5WsB%X9frp_ra(Ti8Q;cZoh;<8O65ue=Mk@2Zs(Wqhx5$t3$>~09Ay-`J{rbyePfNYO z&)Y$)nRaqpE=sRW`f$mGpHGU`D!MKItZ`tVnc0@q6_J#NfYibcNZ?IZFWEJBKTEnG zZ5Ex#h>%PVHTw>sIDO;z*uBG0;=U41k@EAU=6=uz_GigLpr00pX zT2Tfd!zqJ1Ux}=uCa=CajrsV8%WSLn>$K$GT<6&TDnv+4Q=e|kZhTUjJR>K|A9Yuf z!)$rE_AIB!N|eoxcT7mXh)pr)xE-s^I)R0x_nwW3G<`K&9d%2pDxJfBt7(j{m^VE^WuIzH?EF9~%7bYis#d6`T{SN$u`eplc1t zacu9+X$o`;5M7p%iB4?(n8R_2`=0cXHZR2tW3eJQ;@D(V27B(yywXnt_4 z>>u#!?IK8*v`doMZ8rR?yd@qrsq`(SWFO}WJ5@Geyf=m^3oT}Cn5M{|+ot&R(R8n1 zzL$H|->v^qt9Xf$*n|BB8+J}b#TVqNu1N$Z`Bv$xsC zor_6(_U{PVN6&@blzJU_Jox~Fawo(2n8fj2&w^XR2ONuD$xn#?@qSmiPxIvlPxtw? z$TofDxz@ZW>|bWcD4O*4hR5AfleaNRVgEGrf6{I$Td-?8+JtJ$I7RXh^4C-I6YX8H z@TB9&)+Wn!lZLRdSJK%R6Ulkk4CC^9ZEeD4zBfbq(%{6|lPOiS}iMPix&YM3nqn{kF}C@)4AN2Yhud^H{g`NNfC{7DB& zn!de@Q<0C#*vos}z*fgAPv`8V>0$m(M9*>Fb@EYs(;IK$w`+B8KT9u2mH9V~(A?pw zQKg!wpk2udmhrzU_o2cxrq$WLk=D|^vGsPUGsi8vrTqQOw)^+0?ca2Lj^;egXk%ae z;B1y*y37Ef!y-26jDzPxO8pbnZ+=u)6itC8P|Tk{c%*PO|Gh=Tc)?>K6*lP$@+Bv< z9cRqrN>qm*elmW+wjW@7rImN3UALwn+&iZy{93eEbCs6KsK>=dNE{`M<^_n8{N0jc zsHI;ywg~FjHGK|i6g_OhX0uIDFAJf+Ryt50o)zc%{#(4Z|1r+!~Ua_No|g ziT@^r_2m7X;n4h0QIljSPOQ(%0 zWVFYd2fOk+n zWlsKW?^5+!)NW>|d(AC-Ns`_RFyx0*dLHfx1Mim%UPXxw3~(-#*|EH|UvPBtF7??x zrwwVxFUBv0r?f`{(xkg&JciPVBO3*NcWkb&!tFPDFJ{fO9sT-G2$>w;cJ@nEUMf{f z;Hx0osI+>Q+I)RjtG02Ly+XVc>N#)&(lj*;<<6Xio)s&DRg^lR=I(LUN!9HGDJxXH=f+BughnCeOA!0{ww*!69G>9r z6R+P)hDKUAniZaByRVsig1XM5c&XXOHfu)4yg(RB5Gaw#vNLF@O25CKCBr({oA*(y zH236pBh`%yDVj@#O20=$dw%1cZZ5eBcwgusUGOA?-kDsyVKOBA!%0ZGyX;`b=WzUF zF}psL97@=PD)pjK6`J3>Bw}<1qv_Dy_)m~%IB1GPr4L)@I@l^R_>jC$$Bdki2 zd)479Enm64Je5_(tnCSI{7v4q6P)AHY6Vi9Z3@TO)iuw|WksBd_NA-;v14_3U?w}r zxb{}4n#_2yqVu7ZTZ_k4+_RhB*%bHLR<7Cy6{%dG37CVEGIe4{aweU^UyAxuinm7v zy!ErUi+*78V&tn_(%g?GbJ{zH^x0fL8rCQHcFiH#2A(Yx{rJNFW|HR8rPqpiMUZWn zV6`-PrxY;xkJ^>PFW?pgzW^wA^IsY@+ZpZ?>sOoHHX|Q*4mNq#ML#px(L1os=Fv>l zrhQy+zC^{A*=0*bcBgo9q<`E0;l%{QAD?9v=2l+Zwlul|m5C-lZ2Hc+%q#z%VQPMV zCiOAwT?b$y*28r4NlGcKeY6^I?~Y}gcSS*k!D^HHug=+D$Cv^jJ4-(P9bMt-{LJSS zi-<|jx@GzPuktbRTebEZ^m~D=P}gjmJqa~EHnmTq+$}!u*oM_bTByxe@TQdCZHREZ zF!xQn@ikOVCZv$}8r3?qm)i3K)s`RXlqnA&*tnv)!rQL}lTR-M_tcHFlFx82 z>};f}y|b%0Dt*IJqdhhRHj|r+U;}*(xj?#6N>tU12>TZPQtfv8{TtqMs_FGl8hrCY z*KWCbhQ-`0m#hHY@@Vz$P#mecKAdExIBezZ;u9uizPq0F&%K!Sg|O|5AN{IBc_PIv zCKEzs3%s@MG#>@SuFdZ4+dIPy%`dop70TXcGeHF*mLC!Gl5g(y>MhreRNZB`q{u?7 zB<7G23u(_RZVO{^5V8Lzb{t->xTEOXuZeL9kBEB_x?YUb%{FVlY9i-;PK54Ho6q?( zP5_U_LWkiBSV1mwamZa^yqe`lsb>*C-o{P9DaUG~rw zlcjr~x@Xz;i{*X<#a!vPppUujL)%BJzo1Dv7}--Rd}pqmUSk|p>3DL>@qBSeVurjS zxnSkqA<7A+53t)`d<8SS?Od(zBG&Qf>NphF-M+kaX#ItBMcRot4+_Kqk*Q`x@IG$X zYi21daGD{S>Oy=dRelWL$bo~A=w8j@58v)3Gm@825H*i;Q6Fk1`BW+1Zf9jic#7Z; zSt~AVv^D3b6bZg{bT51Ix#-*LHJHKS`Dg`z|SW8`A@ zXDb$JmomI|2fLep>UcGaI*vXwU)Nz#SvZ$4T(a6{G z*NCEIZ3WuZ$px-gU%h{RmH+xvS0%IWy{Ue7(@8UX%P|K8b{T6ap z2MVY7u0Kamq6qx4#DD`ymLOE;q2HUd)l)|B_p}Xd!By-NAiHE;noWh!5ryk&RWC9x zGeRlV6=h~9F(oF#2=qK=P-#^Rbk&Y^ofM}|Gx6$}(`))Esh8^Jx^&v=KtU|llNu8% z;TYnD z8oSF;n(?9LQ`B_-DZ^JNfmQr&N0PFT1>ffG;m_@P)%YlUUpJvljn(`Bj zrJ~E9=O;QLSms>|9k;W25PKLb-+*HG;d4j+2ZKl8H2Z+aj)>~9EoC6Yx)n91c zXCgTZ9uB17{vJ!oqK+&y2D$stTKO40PfMv_G|KtE*?dxjkQbS`ve#NGjTp+KD7KAW z%FWbr<3zq6jpWQWJb-X!X>s2?+{VB{l`r?jIUMsXEI!ob^p-CYr)_&mu*bpxg~c%9 z&gcY{ z@8`v+AYOo3DCu-Y^ZWz2Wr!q--Mv5!t=77NUv;nj0QLP(QKboafM{c^SJ(pq?jTX^ zTVQr*KpZ)cC*{B#Fa!G+91+@WmTd2?sWHt!5$k`e+{{AZPZ>=zqe;aNuIYbBM$hq6 z;I6`=9{h)i4uQ_srAR-$lPf1*_GJAI+t|mtb!!8vL)x+G-iNN{81NHW5hk|_PY-6Z 
zAXccIj{h`WQ4dyDbMJ3sh&gR8NU*FAd?HTyhzpQ&0quE$Cpz38axW%q=qo zb66NuL&iQK;|R)%jJggRdI9`$Vz3-qH-$g?z+oBNw*4M;q`nE<^F2d@i9lt?LpLEd zEhQs;Akjd>uZF(I4lHS0O#Pq@r+Lw&I057KVHl}4bnj*qDlH#a_?!*9h8ClSZiW(9 z{j$@5ga?~;D8@n>M)Dcvc%0CQM**J7{#ecI?z8f`k#)w9i3Wl8;-uWkOSD$*PQYHF zmiuQ>_O0sk(3Q)Aq>qec&PiW%xG)ySfI^|QSU0XSkc{}R+Gy_c6RIqXKZC!<$pCT} zsGLQE^f7{$9ZXp%vqP=$PBF29bHfF1^I9T%2J6;BhJsn=gdsG^`kwF_hz1>qMwId` zFt&=qFcI(WO$({`P{vghoG%(8zx_Ejqaci&6O28*R;%rUT9FuHj(~dLM6fpMJa5Vo zxIz$sQNo!hv5}tm%_u&xmM5-^3Bq0p)DP=I5+rX_>)?^deBB&8a}NT!Xo#OcE_Kj; z_+VKknigRzL=_*(JXIX;;dj;D=D68R+z!v2dVwq z_+Xg9-702zB(4?oWc02`krP9FXqInjgAP6ndVl0%K7hnzz#QVO?M;Nlp; zx+r-H+#1MLUdQzwexwn|mGexWx4{USM1S8ag&Jo>#_rfETg%FUI7|t((9Ti zBZmKLztab}Y%5yXlbBe3KiWDleDVvwoDTvNb(CU32mbCtal3gP8QGN|rgsK3 zz0BseT=#<~x(6|m;8u#kdYv>ZCGcBZLL_7_xaDIAhn_X=*9#DqTwi9R5eJ}tniu1A zI}tkwJ&VukwawU4YPZd>8Eo>)IRGQxt%-)9h6*XP50^P_Q(Y(4US>N@fl33UjLKd& zJd98T7e(L8_Gz*pLDDsWl>%{xOi?&`AQAf-C^C_9F3g}-`fT|R{y$#^q&{##oq1V^2Dd{N ztx6ejLm7C(ztz~+D71@0p(1~sT*SR~gp{|13cIPLmf-ljWpp=_E#4I`Fq82>65n0aq80K^7PM07O zj6wxHfY`s?f?+{2Dt7rBTivipzLZ8G5j2~jh_b?AlQ)ZN5He&n{wzxLNWa+Kc9K9w z{VNAve|prLuFS~y3&8rJgfm@IAgh7oE?Hh`Sh!3rvekdgzch58i9x6OnSh77JUx^NW$im ze8Pt8QTH{T{0N@%-^Kp7+3f*-6dz!A!B~*xbHIk4D-O#Bl*mY1-v@(O*Q&1p-2VCW z!#sIzhr*EwSO)0Kld+1x4S-tbHTm>#A|C+zof8}cZdi+u8Mv07YFps=vi*3(+n(!z zt7Ku)kEOP`@C!wcOJu>`p{t{GYVE()1OX9aU8W)R{~NhKr4xD4zTkFsGD{;dQ#{&5 zHe@PTiu8d4?J{u!gf?XEs9L(RtoSeR-X92x=3A{aX~eHIwlQxNW`0$#!dEVW4P)6>WN_1Pt`kL#Kc&qbw|5_skRU6GXovWyHz0QZms`3on68p}%h}eq=iy`w15Ou3n7U#6XaX z9VLqR&_9kS1>zZ)Bwlv!0UTBu^v>75=2RJ)v{B<|L=J7hF^RG(H#~%BB3j&wm84e~jIl6=UJf?CR$~8O z0|M&XcKEc6?+!Q9h})|nSl(wf9pQZgG6Pr)G}H2)+av(vp1yve07Ez%}ox#Iy}EHuAA1AAq|A zz~>E^u)K&`|JSoS1Yq*r8g6+ioDpcD?d`%Iq5tUY9vOZP>CMDqFTsKpIsW_;s{DQS zU*xs$%s?aOo`^g(C?M3-y8KAddsOyN)L-fF8!`q0jHn89foniagcQeNoe@=k zd1kv2e;#my7IG`jon4Z!*4HVbdz6KE{+nOC#_X0Mbzvgo7~n1tVDq`p4e2i1KOQ*T z^$rOX8MkRRa0*UB=7xuv;Xen5uo2X92D!}vJzM|?u^QzxjaKO=1PE29%s;~iXXdX` z#CdQ$Tm^S4O%Tf`&p>_XKMpm%9r7!$9FKpdLs&y_b~?e~fj%!pIHRqE*PAiJZYO0@ z(VS<$y%2bhBHtM!5KD1e7$zGhAa`wl_SW#XW%0KCXiRrD3t7{K>7A+{TQ#?ThN~v# zC2?AG@zJj_H-YCl!qEL-ne*5ei+5>YX8(kwvlzK+xQ9H#f*c@T_o(S^UxAEJt4&d^ zhIX5`qFDq#e1Rv96SZ5YB>)GoB#hnAd$ihZ4X?{?bDjKGblRuohCbk~Vx%tvp4=%G zPF={;@P^bVNk_+r4*J|$)5g~!sg9yI72|Y4_N{U|F_0Gs^(@lSI|hD$SNBbSRCZh^ zhN|mgcA67R>y8M#r>BQ@RBXfTrp2X}%Mvs$Y7 z#}c+-{h5#mHnL~?mtF!$p_HPQMl0yoS7y$S5Q2OKscJi~kAk=lB#psswb6(S@(-lf zXmBZH7wu1|{Vx{{!{-c-!l>!K!GJ%Z-^4LNB%Cw{u{|WWe-pVVhgO2(0R^N4 z7SBLt27=x{!yZzynKD5-_IyL8+9vIX8rYA#* zSGVQzIWPdn+xErPk&zINedXe3QXpds*M^BWKM~xziK}sZz_4aA!Pux|s z3&(Y~!chUjfs}u2<3lOp>Y%eL;KU}I=uItr5TpxR>n_HJ{_LmXMwF>rwCgh9^jX|8 z{DLfb@cLcn?s+{J z1DU1set$!u*Z#rTmm#yY87{&Dwy~8A=m>Tfez_B{c+>SF!YB`5==s~zph+1JkE$^W z|H8V>18;Ornq5V6LbgP`g!&|)hzw{;TWo?YiVocGsPJlVk_w<(ftNBR_`slOZ*-_N z(VVI<7V%~l2KfTy;>P(dra$P8TKNOf&%iYE^7@{>iWLDaig25OKt%xE9~NDV$V4+Y z0GPo?59*)r?K&_%G0nsvxplxxJNuyUsvF>I`>w>y^_gu)lU0H;}A3p}Z z`Qd|wHKxijV8Y00!y*dCmmc$u4?Sd2?G;Ax+#B9O4p#AR5V5$pD0qq?@xK>~awh6L3xWK6&V*0mZodB}K zu747X!gjLY8O5(00|mJ!2!ms9!Hdy==t!kqk44AEih>#0KSy>)x4CF@OBq!Bq$pME zUf8G~Ou}A4v6607X&SOX9@v5xQF(c)q(uG`$K_Q2Z%M?j$!fSmKjxJKPX0+UE`qb?{FA%lGP(ycsl~J` zC54ccnILdiwxj3C`N>io^AUhHVBcUCNz9uj;fauD{KUrYtXwm3AOW(wpP)arU(CB6WVr0j|#_kVt{FM znE{HQAee;}Q3NN0g1na#Ym|-lLW4$9S%<#4EkL+*wd*jf{uOB3s5@(f!f7$%8M%Ze zd|s#n&t@k%?ui_|sLqTCb`ku?>^AxyJ^YkpC43NvT|hz1ZT8+rcO0tTjYrLlipxUN zTZXtqF9EiQqd;bEbN3*TK8AQ_FvPWlei2L890H%IdP_*GmsWrM3ra*Hsd0No?ea0v&TV+*#HDs{N=Z@Uk7?#4`a?RiO0W>^6w;OR+6^0gyK) zoT&XDX^9VIFbD^n`XB!#1iF0c?)yw^1s=Ovuvzpk_ZYJ9f3oSE3OM-<*(>{7e?!97 z0(zx=dBvH3aV$ysv#7D-GFTh)^n2%V(KAqUPD{-Hk#_y_`5BDEk*u 
zI}Lz&C$ARk@>?7R4w2=&7gFKHEUX{lc7PRARVpg-mU05%20)|Re|)YxU}AU=h781G zqk+PbWnQ)rA7F%r+16DGBv>M)*Rbh3Pj&`5BI9cWpoz^l{e+I2E(nPr!n^@}kDS3Z zmB>8v8T^Si=`=Us0)W5Yaa#nNT| zd&di)>d!p?<)CibpyET_gZJorFk`MI<~Ot2m6kZ$RMqPqzbk@NeXY+mlt-W`1T z@QJJYA~L?y5s5yja1aa{2Oco;wMM_=|zXjPzCfOc6$a543U6YLc z%ugh{fU@f-T1w!m_W?Ag6tuW)Gpiw~AmRJhO(Kyk)6*33scS&!q!XnOBhCeSN0ptl z`&T-y(~Z2LdF9LeHkD%o)644=+X60*9LZWZ+F% zK+P}~@M(Fca(x5*s9M+J)3^Vx{f_Ll?A?4e@$o{}d%HcK8_d9nyO>yI z2`C<+h5!=GuV+>5*%yqvzv7PU5q?Z<_#Qo&N(@NKA)CM)sDQA!-_#?C?1LD+{Bp=Z z9S=WYS1&Uh7X1ax-Oq2>G7z<%0AyU{a^;!hL&d8SOkfOafliG5U6Mc@T8s}+b#2(N z<05!%9ELMsCAf{j>8rOk|A(K*0lFXkk6oVuWHYMSqj!$a4HvTebeV?u9 zULW{uu0qL&V9Q4e*7UFE&Atc!Cbf_`c4l`>zrNd>P;&eNA`@Jx&AMyPwoT z@3OOjd^wv=6UKrbX!s1VJdVfOgQ~r9IQF?p6Z-E>8%UwOK>0&;Bumcd;LVYM{$Z-* zjK#G(8rHJ!u=oO`NZPw(g-2h+6_Gn+?;2_kQZFT4UMMS(+xlMp{#xQ6cN0)W2?+X6 z1;Pf|JnT_UFfs97gvrby6OMyMs6ylLeaVY7IC*%OS8wy6Kr}v7aVgyiQqBLZc!CU% zf&wQ#1KPf!rnvyJ5JH}`rR`@{geMt-&(>HSGym&xf_p(CYbWlRIE)|&vpA_6-t%8Z z2e$}~ww1L#;>bSors)fZ`{znvejUL*Cx(UeYoi3PbHlJbpo9Mo{N|oSQc&Pdj7MZ><3g*=np~7m zJ$Dq9Qbr&vXy*AXRiyP}JGC(HTupkck2d;0f|}`z$YzVWQJ>4 z&Hs?ySoBy*W7caA>AEV2b$bxYE1xrX28rT9GVgWcqbWA?ISg2XG)cC({d6%Jy*hDJB9C4q0pR6L>>M0 z_O{cV_$Q|)KOzfJEkZ>+1O@7NX< zoUw15e;LJgwqO(X*EY7*k>AxS_b!}1=EeBfJZcQDo<5+BbfB?nK{@i?DyMeB; zUGcxhVjE51w5?sv9_!BioFZ6{T0x3KZJv`^fof)Oo~m$=2WfL}ylrbr+q zj%Lp@n7B_#LGk;y*Q*9E3JMt~@|oF{iKw1XR|k&OpE#7j{E0Wp_SwN$v?RQvHY^u=o?#Y@&(KERWU$LlKV~*}4S%Jigvs^e z+~djJiJwn4U3}|&ok!>P=6-(s`Q#_bH@a4FbmtA@#E;dj^||5eny%|1-zJ0d93yE@ zEK9n6^X9xkfzvz48(K_}DlF=0e287zs{i*g@e1!Q?70rIG<(dkug2rn}Z1T=;R_b5B$T(!i`Ui#XH}5V`Dg#5R2ct`s@$_>2MIaVsmtq zgAGkcW&haVON$~kE?w9_GQdAc)btiQSYqv|^HoBt+E`-DKhvO1HHfRBRryP)Fq9tc zg|S8;K-y@IL?ppK&*G`>@ykhj{r+0F5!vDEXhP9iF{6e+Mxy3r)mhWE@U^kOSJF30 zzv~%e2zwRh!Te!rrh)Lr+vSm8DS(K$PUd=2t|G)oint+G&W0>@g(r>fldJ-H7QeAKj%UrRQ9X@S#ldSX^`9!A(PZ z;2B2DLt`xQWA_&kMD#(P%xLv=eSBdo@9CPEk+zgr{1cu@Lq_1}dg5E3F zA40zu#&~@@bBP7_6v5E@v_xOf!W7GR-zI3uSr1EexqJFcfkP*ML2EnvNw*dNqFe6| zpq81@gs}R@$yiqy-_`sM(>)PE#1GZvkuBiLRl8k8e`oL@CvU_Lb0t3qw63Pboj2 zM<7FK=VjX#J{ZJyX2tR{UpZ_-OLE<@_HF+t8$K=J>o={Dkp<@_i&v=KVhJqewFjqK=v9 zPb`8TggC?V=>0KR^D(y7%LrfEd4Au~x69P)u(nZ#+@d+aCuWC>v1qA91n_P8A=aRorxWH0~iev0OLy$(xM-;>?UI(&4`#A_sq z4jpK~fK?R2X~P1Rotb=WrL}7&mdAw2a0O}TcwuAURu0l|I_!1*!4<`&b0h!LKdC&p zQnwZN&DpZQx)SB(V)VQ$13A$_Sa@zp{6(HYG_L|bELLok@EU;)9VyC zG`#;c`FtcU8{xR8M>MlvyPQfQJ>RzR)kC7z*@-%lsoVgtqkmdsYCe~i9c)qxnolm` zp+xYLjg+SxHA4Jg(@rYBxZ(3Qdt+nrkD8Gb6nb77pR~@d$V-W+`_sDT(5tCK>*qg1 zWI3*aKx%`K@b|efBTv)#ZE$F*dG`L0_VHzzbF5)w!Vp117NmVnAdoJ!ej9n-TDy`R z{Y!f;96g%U=oXoW!|k<`(~e*V+rfT*wA&iz!O89kX1RP5^?EDbQps#**vbF+bYp)G z_5>_}i|K>c2EcBnu{sya&J;-CVqxB$w^@ra@y^v#xyEk7lu4hBhUIM$-(gbW ziuaWSwb~rIYw|j^K?D<0JRcwt;M-89p7t}k8d`tVQ3jU-QY&fw%8B|p3sGlgxSW)R zT=8LRc0tfmD+_T*q@&+N9QTjPZaX%B9FxL_#fkXVI>-H?&?o7Gqi8IkJgs^T8fiCC!RpcU# zZlBoI(23}9ui2YQ-^&tO3MgQ>vG{yspXbv`6IZzT&tNKCrZhTzNX)wkGmb(pv$c z2=87P((HpckFxqIdv%jbZ8`qD)yLB$01T7v%+!eo_fX)YTGbWfjEv4B6KRx%lGeCyVIx7iqLZZq8=&Da1+V$kwOF0C zYb8*h?Bshlj|RhV`F*GFrqmn!8~5s z3Y(32Qu;+P^b>b6){}DY?!}Fo;?5vS_0KKSPXsT&8~r0n(HD~cPjgouPu1S9S!bz@ zV%s}oCvD@dj3v=PdlyBFZlp$M#A)O?Wq0*?422wKY7fm#X3`wR=(-Cqi zD&anBcX*!X`?a|*1w2oMd!{OzB4!}R=53pD)3w_4 zM8T$9nztxNR(rleo1dD0`{2K4_yke)hqt#85oJbNc9@N(B>I! zda>h*O?YXFmN=cqtoCdwa<_>RSYh1LckkIn_pw%Ggwijr7N>s*%EYa`p99$yzuyK=^vhviIwvQgr z=rlc+$Zov@;tU?+OWDHDy=OZ0^dc)|2ahUH|%IJo2j6} z;_bIL^l=g8-rDd^CzB(OUnEa*X}jRX<;i7cvuLy?#q+$0nmw=W5n}qjJ&&a3$R3Wo%gj@$*!XO{8z1uMK`xR2-rCVu5LndGGxJo6r3~wGY*bL5#c}I{9D8^oZ?#-zec*tk(6#k$ zxLKzORN)=w66?~*XlxHx@>*LS*(J8ur;?;?kTg~=h@cqcbui?vmkk5($$?Am@b-WI zLolUUF;JKde(Z`dEZB*E)uy%9j~@RS;e-1oCkqKIO}oZ0Oi? 
z_g1n9|287zZAsoYl8{Vzs}~*87VVqa6l;_D$XkPBDnTVtSrH{S*?ou)1I9|bMfejD4}Q99N79-n0oF5avd$t7 zn#r+zaY;U(Do~L*+P|Kqb;k{~PRin?^Ukh+|ZM$|lhYq{4 zP_f?0S;U_pJe;c1`<;V~A>XnjIeffG?)V%VVAxNI$?NjZiV~&0xxy%5l9j$kJt!Md zOo;4*91tifJFE!&%|TP(fiKaVNVw>blA8+u0B{5Z1Yd9y1BY5yTJ!X_#3E+pI+ErN zRFZS`OOn717{^V${a5B1^Z4Z3yms?5^_dY3Z*u~r@;*-G6t@t#Ej0bDM(|B=fXasb^tTxQ{e<<##BIc7Z?S^rbyQ{y{JsS=*zTjN zk6y+!{wwlR>Z@fwW#l5f_ITx{Ewf7dGK7aw;wtUQVA)#;R9wzB9$MxF;4@1k|6^iw z-$eWWWpa+FuzoGGRpd_tNUvg_QeXTUpw4NdREE-tWY=WFt8sxMUxDOb5Z|isZ=M;f z59(Pl6|Lg{9*rU%DdqO>^;~4|vr5*sefEZ${CPNfp$wtJfBZE=7`p;fZUmp0&jw*c zF^5ioa@V|rYUX`FiptX57$1`ZtS-cqJ+J8+upw45+aKNJ{d)w~jrJClf-BCkH zClchiUQl~x?K6G9bI0e_qm)6-e}7beCb15S7{y0#RaPmin5fFjrgQ>UkQs1?+2djo zv8PXP8sT($2X^z{^?3obt%At9l7@j*x`H=KM7q81XZ4$*52_2|FeN% zR;^z*^z6nsFz^^HJO@u!clxV~5XYkDT5}~G|L=T`@$-z5NV4GF4Vw(BCCmqK< zN|*fMC^sMxQYI+Er=o7FGQI}=9KMn{P^jC5J?)$4+ia2XB~vy`LX%Un0GMU4Xx30<0SCfC|EX0LQ5*&2MeFI-yEyas7Q(!E>)5N$k?%sT zi=D=3MKx={V0Xdq%TBn~oDujA77Nk6QD?_e2y{OusZ@C{Cl5h^IfNJ+g4{QstF(tB z2EX4u!%dC93Fih@e-}_=4cIsBpKx;%fJ!&%r$ESp5y+n3u#dcha0!O~^9oM&Z!YrJ zROvtBW*!@yhk0r>Z6)+K$Fo-r847HK^lOH})>KA(9+}oK@VS|h?qp}PM&>O0gMHwA zN9!oa!zl)pdV}MI)`frWl47*U(-N{QqDupXxs4ZR1w$Np+_;*(;N+j0ru+sqxD7q8 zcEoMYW#IF-JZmRAzi3J@&H_ zJvi02p5Bt``ZkyjG?n2YJxCIM^ILY-(pCm|_{H;7NKF8FX0J)%E`i?AQsH|3Ka%+2 za?j)eO5<+a=l?5_g6vaSR}cbJ{weY0p8nrHNDhyP^!T%fLO6oQ_Jr^1Jn5SJxbgo`IwT@GyyUqui+XiJ zaBS{7AMUNV&Y`^_Z`k5>wiAKyoP{ z$jRaYgb9MDqYwpB7QZ&WlV+^ma4sc{5euQl>|*>}nMRmWPr+t3zW^RWK~53=IK+C< zs?($#!lC@Mr0F?oP{y(yeU`CKKpDF-5lo@~t~~_hRm5FAQ5k&+9*X zGe(3Ti#5eM%w!Xt2s)il&u=*+EI^4PO{xIyiu9snpo!VV`JdY>)^)ma-BprihJd_; zT8T5joJ+RjB?H+!x7+S^5N;%~4`Q+VN!n~Uy^XuoDs7&ggYH2M4i>U_mbb%$H~}!n zy?OcTkDSYTG2``iAY6CskRB&oos$I^76hDp^$^%pZ-n6ao9 zo1NRWjqbdn&3DIukVN_i9Io@}N_4;E{o0Ck2s?E_*=MnrAxtShJ}vMdfMZ(Ov>q6*R1YR(@dRS4_ z?F-0HP8SY;WyER#P@Ru?Mt8PBsR%>_W8OG< zCTwzdRM013RZt23vP3_%6zdqeAPl~(F3g44aC_hqdUY2W4~MvI#JT%QvT4f9iRMw9 z35dQ4^A3yg9q_1wmV97KbVyP9VHuRw5wY>zA+}klQY{bv=~wrXtQRu1_9U$Wq4dt# zoi21|5FD5M;NI6dyt^0T6d{fL3&SA2mws2A4Al5$?UEy;^1OX(6bJ|*G|s5lsqn

    PIZ9nW)o53A@nALw8@CInk?SJ@(&HCDcaKWJ)vJ$EM?180$-Nt zD$m~xE8m=HfMvBi6^0MozZ|#5B3>8mQi(*K1cofg~k#7aE8X{%5= zE`Dk{^VwQNv$bORKND=Y5PN4>^Hm_ltEUY%{gdq7W{O=lWjo1O{76r9A=Bu&8rN$6 z7oH%(A3#7K$aFVn{aB>D0llS|*KVC5kU?CLG3jT3Y&ZTyb~C9uTu1;!g{^JV%7@V&^%%TjX0AtRQ!2LIFGy`E_k_-yb6-`pa=b zW!?`s>DRA1M+K|#@Dcm!0HevyDD-oJII-{#E(5G@A<<$EvuQ*BSrt{kkO+5tYu?G|BO%yDJR1qtx{V%(BFXX zGZaTDa>qxU!w+n}t^LW4ee;$!fzXS8Pu?-`M%eTFZ--aOV4)Vrbq)$xa2!gW z{~35wnkYQNk;a{v6EkT~;cT7tEtPAq?kk;ZLd_wpW%Abad1?QIBQ zf(y#=XpMCOymJfF+OI>E>u(+VR?;|Yq49iCUfirSb-o7Bj7F7D4cS3^zrDvT^PSV- z``M9?g=t3ida5CK8vHTv-IskL{0tbC&5397;NmmdSVcb+(b*X9!vIy7R~_H}L($$8 zYJW`Y_~^cUjJdQQiMlwv`@1P5yySb_&>yPyb_f^k{ZuD_nN-;iT>|0g9+4`QrX2t4 zQ$vp)`I2Z&W3#r^_s))kh87<+%y2keQ*Y_z*2W$j5ayc#$(Ic`u?Y7acI?ZAu-@%r z507!CVr%!G{3Qu7>%PSLxuh)&z;0Z>F~P;@WbK}L>EHbbRxteGU_X+{M~T9N)_XaG z=%5ByMu#sjgG{R$S-o_4*RZj$++96owJRwBIY&~ywMP6`ki2;;&B=?ZZ5{2aR5$=j zi-Sv#%onGJeqJVhMN$_Fu#!QG#5Aq8MD5WEi1fR%!|UnX`BcAw2wx46<1g;_uYaDJ z`X?r)-V-cp+CUEtef6;3aK^?I@(%v4Z;kstDDr*7%=ZJO{R2`m0BNHI;Y-~nD8V+l z&E+e@@}9q{c?(@PpH88$Oo1clXn9K2t{fb5 z!^-fbQG#N^ZG3z2nj61~h!PDXRr8Oqd9W3K4Am8+F;j)G635%EbSl=0(`js|30Ug> z@Je>f&RHr1=`Fs&FxT0@a56CpikJJ__6*z;x=sF^^g2c(kZkkG*UD7U|E)@{#`n#T zKTWJ!HlyTE>8X~#69hR232Nfc3y&bfA1iyWf_4>|AdBu_HcL}&2A>ie7(;W>;&W7^ z=O!g`Lqbpw78NBX18t4_?U*g}K)cm_bOMs0vHA$AZNM#m@!?Fk?Bk?%}N>1_(Lb5I04tHKhgJA2K6aaaZ1zv#_LAs2hxxP9>G zbYB0p*$jR>aX_jpeyHA>bxlb8eOvr~YS83KR~XLbquwr6Ml>I?$VwL8OKsXdD5v#* z^&HjM_C3O*$7y|o+Xe}KjMI>oWBejll5a^IAga}g{W_W-)-3Ehuvm)<&ws&~2MDBZ zzJ<->*TG5Zw0J{fh-4Dnf7H7m0cT`|R|SG-7> z%PK}0q@+U5!Yf_K>+PP0L`-$x#_Ug-tpw;39U`m)8jv%5CvgW7ri7Fk%n0mYhMGh} zC*#gXC7ja9C|-Z`de=A88-KM5kX>V~0*|KsQ>#F9$XKg@K_w)ng)L|fs|_g{pDlB? zxu|B+lSMS^Ngq5fYj%YFD$bwU>nViLx>riIgU1>)l91VcsN9oXgCX6Jd+W5g&vjY) zDCa77f{8E2$x(@ZOlrtPm!6$&)Lo`A9BS&5&3d9l^mUW}T(K!2aWxXIqh7wu!0bUyPbk zRZW?!YWQ`xhTqTVydb+i{-Mj0zABeDLgjl*uS0}024$Pt;a})d9neuY?9CmB)N%rm z(`;+y6>PhS>PIT_nAQFR$B^|4QO@pyX_;&UMBgq;bd?BY(u7&C*0kkr z)A%Y_HQjM1)F=wfv`&pc0ep6@U)Y14;-}dze@DwYytI(9@e=d`X|Uh0h0`w&Ba0Y7q<-K=^UJx#^7mt1{k@CxE@ z9!0v5$1+97yLd5(zJWj7wE4CAt}F@ZAEc@^p7JY)2C^EK$mlNW4cc2$OYU%Cn`6L>O>v; z)JmL&mg9?bC@&+=?ScQ7$C-@21Ip3~SlE?6>aEAF=z}RJ+rAgY2qTZk!Z_m{3Vf(B zsViuZjqalwWXC3Q7N_TQvC#0ZsZBsQj$wxVH7kxe;;47srL?gt=zh&Ip}JqkW;FwW zTlPW#+RR3;lw;8L3gxjOSoS#W9Oo_mnqyZKbUzG>?zwK_H1}E76A}Dan%kP?Eb@x} E50IUtUjP6A literal 0 HcmV?d00001 diff --git a/docs/stable/_images/add_images.png b/docs/stable/_images/add_images.png new file mode 100644 index 0000000000000000000000000000000000000000..5fcbf36580b7e781ed61701dd7ff422c05360ece GIT binary patch literal 76694 zcmeFYWmsG7wl0ioDOTLu0&Rf=_uyKr_Tugy+ycR^I4y34wor<@OK_(Yid%6B?iSAM zTKnw1zq8(b*7xiC^>NKy2{ZF~<|AW{G4A^wiF~E5NJKzOfQE)fq^uKs+V zRPb}ODx&-Q~69_1(AviYb~Z@y=1ND3g-hD~x5=)d-4r z;Rh`TSqB-g`%IBdNmkc`EOe=YF!rR4yhgM&WdidfQkLT9g*KdTgrv}L+}f?%myg!p zc{ z#3&@w2%eV0ArI01-lb!!pVviWt6x~D9K9*kjElw_)BC{!{nNsOvtk)I<=R*)c14jK zb28c|*)ZXF@CrV2Y>1bt;UXt%m`6LyDq(hUo3mxK)3KM%6K#X2mwmaW4g4uWoA>s; z_sPxr-A)h0gsmUzm^W8iPS@#)RONk4#c|Gg`h<p6Y~v=Rdh+n%8vY=566s8uf8*J40!{^t|)BgmR^m* zrEJG8+~_s?g6$l+BYqTZzt|=0F5yKQfkRJ1pe=>2h?9z48Cc&^R!EF`Ga7=)7$3;V zj0K_{E?0bStAJ64|B3n*J%OZ8@mA(tmCP%=vmAx$og%`%cf*w5#a-IXB2J_p^@X-k zya4wrRhu8uO+?(2W)Zs%hsugvXfV*(<_XYtbAwf^rW2_Z9oQqn0pLEcT&(Bh1WsP9~!}I#cW`)<zvBqivp$xt06@tibWO2oLOYe%T zg>{=>5U?el;)1Y_Um>xPI^p<%v!N)@$aIcxb$fInkR?d-rtIlD24^%EbmmJmb5rXN;YXgJ(l(UCf) zpj6@>glPpoC;0~x!gAhXjG}JZa>`3dqwdCX2_!_li$<8xGmt}=Orztg;DHG1X*rJko&Z_u76cRitqf2mBT3{&b=)>49e@igbE#Gc8b zS66?c(x=i#v9_>gw#Ko>z9#)1)SbMd82J_Nnd>u-=X1}wJ`R3l9AN${&Q^oy6Wg1r z!l=Te37j389moy`7Z(=?mr+T-u4#$9Zj$az^@ts0!bfAgKx9m~+J+z70o#Fn4{J|w 
zk0MLG?lbQA_!jml#i{7^^J~m&F^8~oZHLlYDOj#YjmPJ6ioH$IM+|xlwIdI*OtT2G zFhx_`uic(EdNk4-Wbb1v*e-N73c2H+Qu{diRQP!N44!%H8vKBL*Z5gCPCQPvm9hmL zm;Aak+)h3D;9>CSWmrX)bJ&=f6o0wUGOan?N4i_{AC5nu-=M;a+%x5U<+F$1OsL4s zW_cQh^fz8^z&D6C!e3i{p8g#2IXT9b+TCJ8@AH@fLP6#;@8=eUsTc|Bs_4#W*Jv#| zA(|y2gj%L5TgH&!jJ-?G7icxY`hM(o%;NJ!T&vmpbkVe)j1hqa%UMfNV@pR$6E=Z4 zYmb5A(M%gg`)r5Sk=s=7GVIRBKV)BhGm-|sn`c>`>80x`=)T~r)2-Ee zs5_&_s>4+=Yv;a{Zo6leWz#a!_^WZLr^7OFSUkfgL#+0Z9+O_e9?qO=Ep!jL_XgQo zWmgHag_@7nfNRU@+BT&;@m%^mzq!|MD7l?F4>`}TP5%xN^%jlH{GJ68`#fSeV!Ej| zl4XWx=G&{(GZEXcE#qp^^r&&7Nt}L=o(|&CKb>DO+B*}iLRaTA-{72M05NbisB2nu zANS1Is_iQ2-uv40L2RjI_AVWF;mN#Nu3}Yh)X~Ul=&Dd$+j{?D=Gs-3RnaCp7z$3) z3f3yCi}7sqjJZ%_!psrQS(n)K!*6A2CG?BCY`r)+<~W=?xF~3g@{hfDye}X}vg5Ja z=lozWrf4Ueru{+DN1#f4%TmtK#~VTQj_&l?w5T>o^OH;bxrD6Y#ZEZm3hiS;kw?eL ztG#KXBBbv_6+)6jYs0t0SwsH{h2Yf@j1rU)AH*F-r_wn;P9&kBXb?SSn~EOXoOHfn z_tD6dJaT))^@8R5i=EfiZ>u>QEyzS|R=(7H*zcb1Q8G<8mF5)ka6sM(-_+mM8?lkI z_o|g<-wH2s!3}R4uoOCmLKg6$WU^#hY>R3xN^hRlutG-IitiUB9K8}&9P$j!m8?bvq=XjZ4zxp zl&o_sifiNR!`L`Zos0QSuY8NVw5heV4K5u$j**!1KJtlqwIb@S$-~}n=nZdI^?gD9 zj=SQ6-cg&7%+c&Ncl1hTGwEHK-B4CS7^OVtY)xvTu+>qSihfG5&hd4|N2u=0p_i0N zZvJ#f4t|9mg~g{Eo~R`Uo%di0u!GS?h5c&fs6|j^)OzVU#X3{UNs7_!;@-``LAa)Q zskPB|jmyc4l^6Zr>576i4|G4*<$I}J%qrW_O;t=LmB3oy8`p#ss~olVpB!J=Z+(w0 zwO_QZa{kp5_gCKc-Cr!fV0Nl@$-kNxo=&@2xE^kVaarmO)cIaY-*%Rikm}Ld?b&kF zlGaf;!kQu8Hw{dF@Zvd>%Fn*N1zCw%22CrTS*0hXMRN{~Fbjxo*|&oQJ_n6nja*3q zp?ZG%g~55fR$}B^{KM0$@WIY5og9{Z;7S3S2np@?k>E2t8O3$xpKQn z`^L998kPfbkM-NXyb3r`1YH^k`;KjzjscgBE?%z~!%zsa2!8hh0SCULP(j4;^O2 zRy~^*Z<-}9Sk^l~AZD19nK&dx;I>!vXIG1_7JtNlWM3_;#h|9Qwq!#Cdtyjwgu=of zqJ4Y&Yg0Uld8ew7An-oqmz8vq7kW7#b-)*SG`UX5fTgfKOFp8>r6`;F!7Jjpgj`bZ z{P3pqC>#y(5KmdzvLQ6)8ni=YL4N*v8|$rck5r@sn)+JNXv#~Ljw%GP!pjMdw^Y7d z_c*!raAmZ~Tmb-D;5#YlxT2v^vi^CYE5CVq2%wH^n|Io7+G?tzW>5!^i8<8N0_5f3 z1YC`VChjE)d~~pIGhy~}uy=G7^@2S9#}%T$=Rc>xkD32*iJKkdv9{VPW?86<1+xH% zAH@Awf`FNsS=`0kQuK|S!hbXe{)0TWc5`zQ1%o|3Jwcv)AgGHKm`6lJ1kB9~=H=xA zuHbU@c62lG;&OC-^3P8GcRz9#u4XPaPHr|(N9I5MnwUb}-5`%2|GCkB{{3^D7G5_0 zbtgyH|CkmqLGYh%z&s#s@PGCVG!_4IR`ivPmxaByoQ;EpqbqO^2_8XSVex-7_?NH# z>z4nmsm^~jYzMuK zoW?ug9plg50DtGe>)C(21D|uKC_dnOqoGNmDa%Q}^Fl{v;Wj^#pX}ak!F_K|LH-_t z;Kh59PXQuLcG{8#Q-3)Rmx_c7l!_FKY*uf9QmgE6RH=D_F@o}fz6IzU`nm5oXNcW3 zS~bFaEPX81uRdgNmtK~A-xs<#EwQz(MR3QX;s4v?DccVz1S^ji+f#Hh=6`c21_Tk} zV*L9)A1Ma}sU)DKWnw7>{F~!hz0-q#JH~ja1gvNEkMQIBmH%CTO280%|6fe|{~zN2 zePjMVUB!2-flG1MvR{0J433qGyol}FlCNYBx^=T8cdjmmEugA>B5i7A^24%X#aXVU zkBmy`P71GX#t(dS$||g>_>A5LdtXHzG`q5w&Glwhz>N-?P>1tt+m_?M!Mkra0{%+o z(SkO*j5Np?S>7+&Yc`*W@WFfH3?~*1W-|`_GaT|!mqnhAhlzw~n;tr&e)oqN2g5Jx zAO{AWxD)p=g?ug}S*?aft^)xQBAds4BA02N!!=NUTI%)n1)TI6s$E(yHTC>%^Ilvd zK4SQBlDVDe@KnzYCsV9pm8bRGh_831$lccpvR@40j#Z6UC&vq}BS|-dtYvXRz0HTx z>J>$9yY`FoW=;ZMni9=B!BJreuP^mJwrr(@}^H7Ub_<%SP-ivY{~>yq`)6~ zy(9rewzW+uSYBa^J^HJM>43?BF^gN^4I^&Z(fkaxu;HdDU+?7& z%LMEV6^@&UW0Bn?=Z$JAy$yr5P_cB+^Z9b)#TY6{eQxzuyhr-SW4{Id2Tu?8>ZvArSyD6zsknH z3VP3Isb0m?Bzc}sBXLf6Fsl*n;2@u?;95vg7+Y(a6f%Vpoi1**wmMk=w}~7n%+}2C zE)0?T>uTjuh?l~9@%T1&(*lnOXs*Z2^LCFW?uoeRF0_Tq)uq|*aJ(Zwge_x`7CV@I zPFp&cP9%9+&z!>pb2Qe}t$(YYtflvR_Z3ILm24|q>fSS}twd6J`C@60{t{IulJ6Rj z8j7XsBmzAo*qE+K79hc0%8(e0X`5TR9~!)n^l-e-$y$~Eog&iKH13=_TzwHz?sIh` z`U%E$P#lL>r%EMjznA2h@e+jnExDg@X(F~JG{4-pHQD9E;FkMY-(7?#70<@7xbd$y?B)->8g}`8My*{EEziaf)#{PUS+UFhT zm9U4~+)cEaC;60AA7#*Mej;ICXd|i{y3;~*$sp0jCUKyO^TRc;9-Yq)D z;yP6QkQ(6?E8=; z+KL;7n6;f^X-03i&GPu=?J0773)saj^E171)*VTmfzNc$Q+GlXFtDju5KsQcNdJgA zUd}0yN4J^FamiM|>cl};=+TS^i`-CHiQeOd%in$6l*jIe8kf<5U{i5}7 z^Gr3^FGCMDh@62wcO6VFYP3duO*^bbDNHgG1=of^OQ@pYpAG3U4z5`{LX0j2wSH!Wz4ycqxC*&vMV(a*a0A*vL->u&D609e1;A 
[... base85-encoded binary patch data omitted ...]

literal 0
HcmV?d00001

diff --git a/docs/stable/_images/add_scalar.png b/docs/stable/_images/add_scalar.png
new file mode 100644
index 0000000000000000000000000000000000000000..a872b93eca32121acb79ca5780af2b8b38176ea8
GIT binary patch
literal 45941
[... base85-encoded binary patch data omitted ...]
zjwRHC7)u{MerHsmD)q_9V&2-M4IQf_j81ZWGb4nR;8blKHOAgJw1l9J(7b^DQ>X~4 zo=6h4)gG(ILqZxkH>dk#0>;+>zd*HbnPsqAnz!Qc#S4}6cgY9?gC*O6Cc(AHA3^PU zCGj@Po#{XrU;tF&TSRZu;i7imW$|zV6)LFR6ufNZsRP}#AfZv_duL-TY|kb{U2z}P zhi1P`LukS&ZbA|-j78~Z1Chc6EcKPso7!vXWS`^V3}b9!ot4U1acM=mzMGhn(E~Gp zC>N`c9&3+FK+2*qosjQpX!&0Hgf&FV1R zGE~`16qH6L%fglHK`3LQeA4|uaQ!>Bm7e5U2{)7R%rJ`{Q^Mskh1DL6WQO^nemygL zPmY8XxBDTP9KT*%^g23L0FjkQ|Cq9%F1Lms5}j}B)p>uIH%kgNzR=54Oy3jVz`CyA z$DF5UGrCE1@y-!ir53So(`Q_Eo;Y6|5HC}GP+`~IUXI>LGKzaDidw4I%viA;#iX(> z8>mrX%K*p_G}5g-K)&xR}#t8Xtkqhye%FCK4LYlJ#1j<0?(wEHP;k!}k9J?j=_8ex;El-b` zYUcy4(i^6w*%8j)kKd1PPX+h?s^D=#7XM5~uH-MM^hYsLksuO*_~&wsr+hR0s@O4_2`;>tO z@bnwOH*MJaobA7#tE)#v3pi|zbAMH+48um06Pz`-XMG589J}K<)9Ow(p&ZW*WANy> z53i~W{H-`{)A>VfhPEg$7U9La$3jn^k1z_QN@dB9*a4AZXq4du;TYS`v__#=+I5tY zgq=GE$t<@(fVPBLR-o_kR5e{>ML_GBSKGdX!^(&P6N zF1;pVHH~|hY_>w;c1@%LkM9I6;FubRG7z-28V0+(*V3HMXOZbbk&SjBbdzoAKN)lz zLY1u&Qm)3y`_8*cM;?t%6J8UuN%V7+S(vGo|4prD>zexyr;a?Zu@^BX-`}iF`$$nC zTBB5dXFymF{sqP+6dowBj_G_jMwGEb>1MpBDO%8BdimV7&*z061z**(j$x4oiM^nZ z>pH0fo_fXD=2a{*VIaL;uOSq^F5>j@Z5C6AN>gT9fTJqgBAb+v^j03tnoXlgx)DAt zhB+lT!8@u%F@K_>8nbu0ZvRz*{ek1FZHnsCnvSp^U@=LYO_cbvw9M3jg+i= zmkB6HIoRNh%A5IW{aS<_mFPp9i{{Bf6K0HH`h(1V6C+o&?n&PlLc?s95z!_mtkEMX z7vIBUdFRR41ND52wEQHN&=lY^v0pAwrDg{?zg>MRTDos{2;sQqN_e@ax2Ie?4s0i_ zXNr}U_}Qx%*lMY{t(yW~le;rK7pfrELiqXHvE%9C~%*E~7QJ%3~qD4AU8}BQGYxWCVQmt1MdrXE$G1k!FM0 zz;sgNPL1Nr=BCN%YL|Yjf*q2)SC#VSWK>51MfjMGs3=6(F(38zT5qDPpnU6L26H1( z7Z%QgSvsB);OHb;T0ZdOc-)wA&cwW*<9@m|EGxin&2~KCg|ZDAT&b}ylQ6af z6s1Z`(E$E0xFJiBv`etI^vkX z?s@rBE89On0DP5LMOIQEwq!{hOW_p^>p_KD;sD`;S?xMOn#Xng!8a+`VtL3DKNv-7 zzuU|xzFEdm0C)*-w|=->CsB7Tn+_kF&O0MKy~ev(r71++vT#q&A}>OYXJEM6GGFdA z+X)~(%H9b4z=9^Vl9`|Y{(0XP_8rjsz6+_vJxvGx$5+!QFAI~^S!YRk=4s+qO4O>X z+%G3ebZL1uCHw-)+jo8T`@w+BcYXXq!P^pHCF{SARhZaKLcx0lL#cOSPJ_lPH2uEG z9NX#>*C4qp^we)mBea612B?8EF?G}y%(Gv;f#3?qa@BH~7^Ts1vxzPr8+duvo4MYq zCID!|dNmH-+hjtURSn;q62AuNV+Xx~)_ErJgISfx48LFnASp%A*W&y}nhZE4)PVAH z-p0jG4^AlbiyVtTSD;Zg6d|)8rw^1=>O}C3ir;fy>sUvEsPosk>RUiDdZ0{584HNO z94Whg;5pCgcIVNiKOfD)Ru;ncg-Vaozs@1W<_7z2!|=Z@mp6AzkODUg%qQOeem?I! zoOKk$Cbuh^@O7AD<`DVaTT;IaTWOVhutz!hMiLt9Lmaz!==&hM{P(9oTdOM2Pk+=K z%dgvR6^xnBXgiPr+bYmloK6NzDA9d`XXu5=hcvUD0glbS6JCCyr=NSK?WO=b&Ievg z$*@!<1r2upS*ME#G^5jzCqFU=OwLR`hF9gIvxR!E^g%yH*@mHMUo$56haS3u6A@$% z9{Lb^sE<|lX#87r{Vk5&(`;T+-uZx=KvH43DfF#7bqWhes?das)Lu>IckjjCHRL;F zYtAk6`T&JvF6a~x&0YesmgjPTuPtC3lY5=Cf%SJlq2rmKqI*wLYbC;eJn73@UN&O` z4a5CMDp0Tj8$dYw0l%%l)`Kl;rt5I4WIPf1bnsc|WujoH2~NyZ&~iz?5-I!SEN2L;d#4TD@OQ(>C_^%R%Y-*t#EN0{mjgMl8)Cxz{WzgdVKhz~2D>U< zhei9AIVEYWZR*vo5;)~q!?xm4!k4@{0GP(&+=a;sI9AW~+(}@i>sjw7=X+h}sz!Ys zcFiX)Ci@XhW;va|BcVn4z1bi31-xx|pjy6dtb(A1e~fHBVa!;D%-`9?{tvSo2rG+P zzcHt-`Ax<5KiY}4Jz#y1URn9(~Tb@OtnUI@E3@> ziQ*!o(kP{%iEu8v+F;PFB!C!V`M{DvYnr17&9J7Xi@-b2{r3z?v}>RW0CmoWrHSm| zgMqF}UP<|2w$WWPowya+_tF0hD4FY|sxk>q{DUEj7Ze>nX*I4xkKU6!V1i*8=Js^d zTbSs1fdE+2?q$}kX+#k!8|BPoxeX*bDiuf?iA~bg2z1E%fFed`3kZjuLPJ9Ex;~Jb z4-MzQO-6EGashX#y{9M07G7?ht)9a%;kXzqEme_Iko8tTsW8Bg7m)xYn(>0Ftb9K? zS>}Xyrt;{znNM7<@5ISrDDa1`S3bNcGBIgNoyr3I$B6M#%e<1ew~Gr{?tOSHO=cJR zVSGsa$HfTL&H*I&2TLt=i^I$^ZkHsWtk*NXhMkVS?V4n>UG4SyaGNCU94Jcj$Uyq) z#l=sVc5(M0_Hx-1{Z^w#(%=^C87%B4OMt`L5%ruN3qDGrbL;2ONw;p3~n!~Tg! 
zgUMGNvrr6_Nrm0b9UwJtB~49Y(B5H^m0!hrav1P#A`thvP1P2qXC0AWq{TF`4c-j^ z>DA&NhrZN-zJqQnIbv_ykCgA!qHet6vzonz7*mmRV{%aL35!=|imsks7j#H3#fa-f-t3k)8`xs#3K%TKGsdgpOpR)#PH3X>^zM?k zJ6SZgrluLtgPpuwmsyJvag-8gq8A{rB21$r-OO(O7+rEjz;+y!JW;^LLC(w5MgmXZ zu>b%huhDm1nX~Ky!ew+USd&zhF7K99)v(mDXb7}><6lDT>d&D~*yK&iY;!lffw-Kws$kBRrx@K_s5C?L2 zW`k=;yu9T3dnF0pNj79xM%ma5Vl3^@GO zmT_z(-*Y*5c)sDwW!Xeh`>O8xm7zwdz<)Pj@0I)j+zslzLb3$;=&(CWbkK8I>$bc- zVb{9!aiFuS?MZxayCFb&>g&4>*h)Z^E2Drr+EN@}jH7|k;2-JijB!JM>&bZT){v^mq- za=R}~G|hY`uMFWq>5c#4BcIYUb&$M=@5LcN@V?girZYZREn$vJ-SGp%v}0WKCV>0R zm3;yS$CN)>Xse+26aHk(dHBdr;Wf-pY+Q}n4ec@?&m+R$&Hua0?a^ybLc@Tze%ME# zZw6orL7C^~HD-ZjpLiJ_V(1rWB>WtcI|CN*R&vZ75q!BEvhU$?DqZ|BCHDjEi`(2$ z6ea={%J`sA{Nuu^nwC-RzwzJ3luI2^UeJ*T?G?F(?N3g}KWj<*aFj<&+pNpCQ;3rX%{t-Rc7<7FSfX5}4CE;@7>i4Tq3Vfn zJ)6L?1#pvBk>oZxaK5|gF07vLX74zg* z-g$y1;8fs|>~`D7tpIm;23yfQb>IWICpsKx`4BU0LPQ|ki77F96UW&OMmWvMp-T(` zo_k*Vc1-sk$CK=~2LgevWcJg}w4&t0u~6h# zDodS7x+DSpb#qE=9z~y&AkTM$S)17NKZ6o6e#?j5`5&{7U@j8A_Wo#CYm@b97T@efMlwe?; z_JNtQoapzzEvU?_RQBu#Yz3P5B1tj_2|Wfy0J-AsE4+Fp;=@yaY$zSgGPTX36>{^W7Q*iq`2FL#E5k}w1PJPk#w%@q}^Zh zva0lSbgeu=@gSTOMV>E(k=d!k%2X)F9fQ5W^t1T~IKD_1|6!(Bpep?_ZsnVnwf<+w zHrCj{YQc4LTq}O=pIu3C^{42meuV13`?#DrqTS*ZkE?vo|*xAFVV$utEhr+!15f> zPB1>Q*ws)(v#5C?HUBj+mlE+B z0EMAINGvNijlRlaN?<hg*o99T( zhhb7r@Aa*Z^YbjrD~T@!RZ6-2GY6KJmYw!I1Z(%7j9*T1W+?cU$@yeuF;epR`^NJ8 zE+#v8fbbHmrT2IGVGY{EEYE?$I0LmeqNyyl+mo8V3WTXA6viHndOu9O1gRa!r31bp zUMeB=Y@DNB;W3jvX23mbDJqP8K0tg(N$0o9>q%H&H@z=-7!xWGLcPA;M2OA$8VCia z)sYB0BV}`qA6A3%inM{{VQu%5rDy7M>*t2&8Se$n$eWF4bf%U&5+|6+L!w~#SW#urwC9@y26!`e~z5!)|64m8M z*ORw4XcizT7i^i0^x2-OjP_ubtgeS(2Fk zenPOvg*Q+;jAB^0s(-Ev3k{8U(oGBwm2^rKjqd=soYL?RXFNmGj-sVDxk(yxk(nmM zfme`BPXNdQflu=p7;aUEj->NFV411-F}iOjPf-shCFxb{cb^#-YJ7@1zR#lBgEX=1 z2Qo!#-{2NsloKJ?6)WECHOPP2q6JEl)Jl5IqPPbYO6JilvvLN4og4jY#0%1 zTIEi8>{}a~g(26PRoQ~NIVb1MT~UT;0J9RcW}k~)@^Mnl9wb+>}yd$R00Uf z4Zy{)I6K@*kEN+JmZ^WhGGPe#WaY5>J+4g8Y&>}LPv7V+DzrS{dIDe_O*{W=OC5p* zV@Oh8nE;Ql%!ws+z~pKw^U?ECyq5+{U|lM8=#87z_-HW>>h3nr(sSI<0*UB<)-LA% z&Ha5<^<5Myom-eU!M3u-Z@&uxLk!aXFHAC}O*54N@TM(O51LD$`m#R102$7(Rgye zKcv*sJ?P~#sm3+##@JKC$fN;?yMFjzjWQ8ErXB#*IK1R`vUj3Kc!XH(qE?pm zp+~(bTkpQYfAks6ya&EL{N?)JqCg*UtqkUe)T_kmd!(w8irEYJDDB%A(KZ zABWII%{>)+X9dg0vAmPVEYI1~wdF5XSKfCsoEC~)Nu?|Mt;X%u2ActpzOFTtKQoa;mdKsXyvhq^f>2U&K8Dz~-_r1Z?u{bXF|`^1(Ak4UzvA+S{U^Wqj70 z5shtGJT!cN0MXR3KiMMpvskR&8RZPLu5w{z)Vo?x;HF$xm}q};5UG%!Q<0O8J|vNI zum*h;lbIrDj1u<z-oGxkBOy>{I$6a@xIPeV|0}v?S$$r?Sgr6MLIWzh#*2x{dW|bcGG##IJijr+E!I&^ zi?Ip*b`~AWCLIs)n}$CUhW5=aLJEM zQuF#xpQ9ZApef{k&=gF;E#YWhmB&X4gSjsS%@{z~x^tUDovj!|T`&*@BP2yY8(P=8 zy+{JoZn{SlP~xZ#>&x@>fjhmVBUJh5FN6~DAn-P(Ym47XY?Emj*RVV0DJ1J^I!GU` z-6sPKCt(5gqScr|xUzzR0@r=R5v<*YldpE!#yf#qT(87_OQFy$*Df)idg|$R7cfb- z?24wHa0_DgiM zEO3QjYSTG@On*BT_+%;wM2`HkCxa~kBqpHwhHd{-I{xKP&l*5OMdKNfT~h`etUu^t zs>!bX;^2@{Zp6ZHR)yc;_7ouZrVawE(R3h<#ZI@#SHSYvPm8AZ<>OG+9pQ))k0V>g z4Ng9t7pfHIqxtn!i?j8SX75IJv_TIh@MkbpmPoIa>>wXFd;{bfuTnm@NagR28N)!h z!h`OHW~~KeoAPIsoT<*xN4-_P@3xoe&hk$Id;ie+E&yv+oKXlJT@E)BL3e^FZe4s_ zA~qelN_+0jaQo#*BPrRsp%K_zNzhxy_W6v%%`q0V6?cIuLpHmY)nBy2wgS?Byc-Xk zGVWnmQEvXCZ=lv9lA80AUU~3|fdLHyG!Cq=+mNbu=Dsja)fE9MZV6MB4g+;w*BL}b zXDoMVdwL+aQOJmG{qeXsK#iLzWUG^>wY^zZD{|Q^6#(jjLFcVNm%I9XzzVnmTfyT%jSh)k0zvDdj83HdFry&4#j!EMv*B0Ev6$P!(D z50lUBClLKBfc&9vX2Wa9=cS}22~iovBG7p-GT2%Jqdz~mjUOZ*cYq&A((mzG+cyLHK}7t9wx4+0$V<%H|lMgGz=;e z!D;x%$nJ9teQJIH5==*MSb-H@_Ok%C6iN@c+pT5 zDByeuXtp3z-tnK9a#L&z=IO8EjU@Qf)Z@_t7`eHs4vZqpYU}LtsHQU_T0ycQH6NU? 
zP>iG{<$>%01g-&eL|K_AeZVB!%;1Uqf3S5Xk3jaOL|9OTa~C|+t>Ez}O**-Df|9nC zclpta^XFPWPN?kfprr4709u&(u=E@d_k0Hrv*7;^7sPd1cl?SXK5k`9-i4ybH@=FM zUyg5}Ls00ZjE{c^8MozP^zG=!c8(LEvbFQTQZr@Q^sY0JWU!tmM{ngoc5L+ z`_%0Y!M+G@PrP0YBn_MRY#11?ZNY`0to&SyOL_Ts<}e7Hyw)hEDsjaM5<`~*o-&5{ zSABK}01WgMr>P?p2WcK=H_-S>YF7JpGeZFYC*9>Lm;jDgPJ;>K-JL%PeW1fZL2|`# zNP9>l4H4zEIU+%F6^g=*OkP}%DIp`^wuTTl^+wJDz(o=3q*GCL_+F%2hpFv(Hb{jY z7SD8)-PE&2=Dxi%evzxb5BL#QIfcj(3;ya__jCjx4A<}|A z8tWJNVtN!Ki>Att8Lqn2*|rSRo>1oqnMlz!E`sw z=xQ`iixXL)(PTquv`r$g0JCw$_fSf8F-tEh@82}Rx7eeC1`#4L=mssJ0@*-Ql&$^X zrx9gqW(e6q?-~?%1jE~}~^*oYmvdmjl6pM(gmQwYor?&!T z%5T1pu8M$`?mUE(Mr^Mfbg;*%ySP*cV>5 zTaE-e|Jqd87(t|s)Nb4uwRG*0zkd;Z{_5B03P0%a99| z*Jc@m>nz#HcE`;@X`%$Npw8iOiF(=!Q(Xhih!S`Z<4p^Vq+Ear34ek_s8W9=gLmeL zksevY*jsaUsSc$f;)hWWo!oZ_c707R5biTzZ-~bu31M=tpQe2a2XK@tBME@T&wLEo zQA-Uv)GIDZDoyr6TQgo(R^czJrYj`R&m*|3=ZhIk9#cgHcyfLQBn#PQZC;#Xn*E17 zGj`5P1kehJv_UI_t9--&g?n}&s{*h$I*{(UB6ARMscv|(q;Mz`fV4o9D<`p+%fE6g z$7QM=_3O&~TL%2ugWU)t30lAJ|C{x@ z=T*7cpS`wAN1g-{k^IwM{~p-;GK3nqQn)of{?lD$H&CUuHRV=g>y0X%vD6Ma@HS<( zDd*`O(9($tveAHzr<1!w7X)!sL@!?J8_*UfycE36rlIct4_iyaBu*ExI&A&`7P;F7#jKGV1YwRB`3Pd6f ze6RLo&o{I8SMm83cIey!PD;cJ@Wa6|wl{!rs;S98#kH&8Y`B-4`wNH(DgusPT6SK| znaSs6q(&Np*HJvrbj0$nmQ?Q_dR!xhH%DWo4t>J>KZB zjFgo3TE(VIQEXVBgnsXICZXFJoAjT_-sQ1y&@s}QzIZe~xQm@-F(j}c&(qX^bxGvk z-04AVsT6hNk?tMFW%aajwbez?GzYQe!}6X-xfSab4B}9{A(=2YV>^P0p3mJi0YF;f z&hxkEnxd#Lw6vsnXIzPd_vKD;%7EfjA86ej-1%V16*L1@gTte7*{FFeCS@w^X{r*% zXeE9Xy_z9B-5q9|&&lpKVL^jaoK|dLkJF%$qW+^}vL`vJ=xJZjgV~r~=cwyPr?;|l zbJGg+d#tl)$!jg+H4@iwfuBh|UhTpQC{JB9FSxz}r_RiMNB6sX^s|b)DM>o$$egpp zKI0g9rektYhVQ%eYTTeXLtRqR=Y$0`!9>)^Ba@EV>f76&nNC#{7YgIpxjz~87OD8- z0{w!W9vkY{T5T4zf>c9f6Ah7KLV8@BM67_FZM*o<%x`P}u-zaaBwvYWH52FQ@^S%P zsEqfQyOv)7e!Afdg}QaF zGHl56l|E=MQLD9_Y!-JcI!OOZmrd4LeKIPW46&f8)f^m70#MX@Zab>Nb^|u5-KJ&)>jDA@IkpB9&p6}3o zv46AsiT#)|NZ)!AJn-)h6cDq~_J`=#_RaWw0FJr$a@-rby?EatBye{Q{|N?F+9v*G z)4}s}gx7cq>}2S|OSv8n&n?|U?S`1ai-1nddQY@e1-e|z0WA8${T#@m0}Ork`8huoyOff6nQ-ztD^fx)*+^&i1oW3VyBI zNQ6Qipi#y4HLl#Izq}<-*wyR$W@t?L%=kn4cj#Ibp4*GPSu#r!xq|w~Rp~D3duRy6 zv{DieH7-Hr60`#UubBi0WgTb&eU-Vt+EHlB)K>{`SF!!z=YZ9kOdMATz?OkSDBn8j zZTTh+2KgdEID9=)ArUyJtid0~^`XSKYDbt^2$UTHoO>JvI5;_=EaBe~@jY?Mu7RfA zS5ZB*Xn)yWU`L~LNvi|qXpo^}JH#EGz+;_>dRvF zB~zVgg03_mIca!Sm{<+!ehpo0Y-}Jh(YI`K4FC-|wQU7BKf3j>hnaouDvfsX$`Iiq<-rifxR~B%qJi3C>wt@BY6eObrh#*xC$#iyVnya zu1frZCE^IEE2*hTOq5#o+@~loALCH%-RK3VW1!)Dj3-4pXLLRT@EdOmgntBXWHJ@{ z1>ig$+9l}buOw0NOq>ERs-^M{%1B$oK8}ECPll_Jg!C44?#70d8wrJGv}TB3gS~pf zc#UEX7M=$RIJ3lv``FMLmNT(W(G??54cbdU z6qUQn^Q4Zu@Tcqhz;Wo;`;^as4G}A`8_Y-VDIIo@g3etHR-N>5!x9rGnl?2nWcT8N zelGm_-0lFjKg5CnNAxqJlGHC4M4@=^UJsEy3Ufe(kXaQluK~Yoq7 z?tOL*Re?!I?an_QbPaX=CPY{CyZ`tf4-yk&Lc!$Aaa892$Ahk6X3zl;KaSylG^pTA z>!WSqU(H0a#=Ataps!n;t@zy0+G=u8QcO&Y401O*?}bw3xjkSBK4?8092pt#Bgi)C zOKL{CP24%Xv~KOc+3FErVYjocA!PuhHu<|^PDB7F;)iRFDiAPR75%+$yFO7Gv}JHi zz$ROxE#)ur`)k`Hx#gL&=>h;r#v$B?f;WZ{35udGB}j%XKNpdSoZFht)KqV!CcU8s z2b4>{#RXuEG7}|+{e++{P@ck#tEc28>B&0^QpM{Shovb-|wX9`HIx^}?o6muF z@sPchp%B>b99Zprg3&Y);rPV_K8iHqbmQBU>9X z;Ni5#%T{cwiZ;WtY0x+|XgWqDMC!9p$mN1)V0sR`bbv0NIU!ZHChcj^d7Oa#=9fM~ zR!vZ~R(VJmWR4aV>P+nE-J1X z-3gp7DZCb}>vnK^%46o8Z9V!*zjdoec!m9zc(!_OVy*DeU4fGnivHB_m&TcPQ{%U7 ze_!0~CpL;<96zI&m$Z|JcQ5>6_$lzRyV4?!I%x20d$RU?$ug~%RPd0Lk^Y`sr)6If z+uF=;=XX#^-H%5F^~y!T>G78&7aL(CtBpa^a^`7HF(&W`5#Y#`+6%`XH#UTpSU1gm zBvJzXNf%5aOMIYSB2Vs$7z(UY#M|Q@5Y2Cy?P36>yBe{TI2)b-oZq-l(#*YlB(1IA zVLmet5}K~94W$*`cQ{?~59vDMw{cBF%;kBwDV%#DKe3km)V|MU{GeHneH^(9ca|Kr z(Cw3c=`i)Y@MPHt{9yk)LOQT(?y!rU`=i0p+3w2)Wp~y7f(ITU~-G15MJgn7k zysdYH@80`;I;wE+EBZ&vb|*p^ 
z`9T+43$CVBLm>p{nA##qr1RQ*s)B`_?k+GbuhS6IRgzK+zhk&Js26I^$dNgqi=tbL z50Un3U*!}>=Rxx~_1-#gzX(npZ7g3 zVfxK}&l$EBxk?6_c5u1jUjhiJ3tJGS7sDEr_SK&`&X974u=D8Jt>KxLi4f4Gfb8hL zc?ZXval*9P-I86=-xq@KG-)v`NRD8jLGNx#8WR0w%>%3pNz#nv>${;CiRC!Q%?2a) zHj%Ln;I;KTop-3y-yn7;`6sO|a$5F#YK_&@)$ixYxpgVmeQJH)^80c}H4uU0AsI#0 z%l@Pd&0?eRuJE=7EM4?;C4FHnCVU|V6}BRJxr>v&*t<&NRz(lZlnn&^e)u~QbG}t7Mc`AKmik^v!xh% zlVT{501=cDdI|7NHf%W;=i>Z;ljq4zo{Y8TXmhUh&N<$ZwwV~R5BgZ^MvIHXsS`8( z$MH%p(|(HB}%}lLCi&Rv1hgd12hCJRhUjh!=#g-KUN_r=y;h9ei^W6P=3+z zKPPskZL_m-$`!Os9J!AQ!gM%G+F-FTEzK8aak{CfE8}}e6!o8EI4y(MPYMzVY4}k4YxGn5JK5XlxJ_f5z9$!ZC9(3zko}#QicXouQLX-&-i*e1SbMmDl^-Z7Y}w!4&GQ zk6WOhq{-86rl9*ZOCh2xqV?prTr(i#WK5prDi=Z^RRa1J#!#9~ZSu2;^t6 zY9LZ&q0@8c>Ld3Vfpf_){ta0~Sf*XN#Eh+k7C&4i?_fTzB$;FYw(wM^r#}fW9-oz# zgf#E1=^Fo?JoJn zv=vtWrZ|WK?*8N=v>(mMIR3}R*v=KhcPm~Hc1`QbCF)1GwE@HK%cj=uwT(PW;g-U%g}#yk%T70)l47yK%UkF#kaw z4;v^*SLoAjdkrmIhUkBwl^>Lw1YBfyI`3xGTx)0-=+==QV+eVW^kznG*9im1p?rR1 z;)Ky08aqt+bBiq@AC`HZHGPs*2%oa&U1|>5YiIok+SE;LH?7mQoR=PT=)zrIE8w#Y zJ#m6o{(!o(oVrtuXY>P>2Ey`PU&Y!fQ6W4@7@u>Ox_&ovB8}&r@bkozdME0qCW|KP z^R#i4-Ozv~y%4Da{o$Cl#jf|I4L@d^02&=INiYCE`W_HW;7f^sbdGi(1 zwT{es&OL9C#iMaJ*J>P@M`XP4@kvx&gWiQoBtA~l;?6X6jxEo*S0C9yqx)vuYSIcJ zqp6-U!@PmJspOGw8{k<1&P^nhehRStrsd%%ik{a<(=$F@$6kukVeOo(@;6wS!iMMt zGNUZ_x^U%vGlnT)Pgs-|YE`2vpVt<4yv@DKqpLIgIVU zF#^0SR*bCF{gE6NKZ-z6Z{Z&y)ms=Z;aYQ>Be?~{pg`i+=EaNbW4aec6;VQ4PyIT& zxWFt1#}}L;X6{oHYBv<8`u*@qS(S@+f$_OsDZiMEw!#I5HCR39Sd|;DQ%vR8oYEst zbJ$lk4-ey*&G4Din@?p% z5*Y9v&sFo`T;l@n?}Gv(N#*6DUY7GL5DR>-Afu8l=n~q;+B=B`OO73j|BEz^#l1OG zX=YHbTTlAQvfn;>KC~dqZK9xxT(m9GD`WI+VjM9~f6BCPl9kcWU=~tUbs4rJX zap>La#d0O0@0Q>hkey?cI8J0&8whNCuQEHI2a6SPy-i65j5i^fZ1Pn#L9@jA=3&*ryXv0JmXkJ7kviWQtTE%uN|E}vkz1vX<860`y2em* z)5R8}x5Rt}$)~B~|M?4=JJiQ#SPbLbHV1=wq#w_tXH_OJzDd3?5E<0FamO0GZ zE{615xoyl0#yxs^d}ZzOF?IXIczVt7rt-} z*@uNCkRc%3g*pBCs;sUNTO+TCMeeHSKRBGjhd&H5fOefx4^ID_?qz4>z;egz%Ml}s zY9{pP!*R%$xYqVne|vh2yx4n*>DU1Tx4UiKwbl+z#KWI?>8^)myR(rg20CJ_HM$G0O)!w+?;~MLYLpW15z0r(uN14JnYbbe~4t zal5n|>(Lt>6-~5N2lJH~!ZCgUbKowxxgRyZJZMn_r&`PUl%>+U~cxXjTJ2?{wl4L%V@gCu%fmcw7n{2!=ITs5sRqW3YZ(K zb~K2Y(+%-ihYWyDWzJvUCf&>1nqQeM4cYZnu<{yQCGH2M4i--+gZHC?^gl0vDyqoS z7ZqOwOOVU~aP6&78hetIETsc@8NeC+3F!D~j1y6K%41Vg<4kpRMwXsjBe- zX5;IBUr4~Uo$r1O{jmf|si2Lba8d{h?LR9`Wn8XE~NC$|4Y`F^6vD~eM6Q?YB zU8yfaj&X%909vGva6*pdxHUnkLIMU7LwO8sID$cRJ&2<`mW$^FTku$1xO&(zkRbs8 z2}danir;c!8UXExnu4q3{>DE*Q$m1=NU*#P{Vk`=3r4E8jLyUzTP!X9?UhgQy;Xur@%=l*^l0=+rZb153=^OQL+l%i zP_{t780=4SO)~7=+df;7|KVJ2tkcc?AOrSkYL#dILORe4V8RpBenH09YwOep;R}&vH7-wHN^3)e+U`GnXgkh#>ub~a= z**gYS@h(VLtszugL5Zq&mWH_kSCCIm9qx2^kF#Bf25!CB8d{QRw7rm^ih1p{g)8YCyS>FQ&w&Ft?lPDw;XJixIPv*9DmP$KvY61-*kGj&@%A2aYR8i@*m!>Uz&Qm zh{@ZRZU}CC-VXR;?7m(s0e$O@nYZaFMI=>?{>Rm>e@?r+Dg=(_ z@jdf63iX}}5Y?$KeP5TJreK=IJ!&~hC*|}5ZxpEsxm-4hTGqnOn`F0GOcIn$Ypv6I z5{M0?@rOKAzc%Jq+Pbe!$h){WP5m`kMLPR6l5z47Av71KD5fm=6t2U_kg|!=sijJ? 
zDP7yl5lj_k6nGfq?FhB;AGX<`74@nlo$=?DX4Q0vF)lRk$u&4sMwsWIzWga))bkU@ zk45@kfV!FJoMjfVnPA-e>k+yXt#@^qUuj*;Owrr29gs=^=U|-rRUn5-b%q}JJ?+(z z-wn@I_QKHFF5@Bv=L1=6BJ(X8#L^qBJbs8eek_nSG440Ckwj<##*1%WcPI|sFTr_m zcYf7|*}%twu4hiA^M>girH}^S0rNdCTwBYcDHK+CUx$+UXpYFz1P;4?b-~G|1I`kk zZ{Bq6dnMZICKsdFEu2A*+@W6DbUlK2FZf1w(YW?Rw6@KJP6HNQ3q^0s2;DEbn8LWe zGPZVI^5I!D?YVZ^=|fYdDgjgK4rvj+zv9TQm>ua=Zv3droy=5)37V)s1|-yD4)q>^ zfr+!>hJ}%(7Fjeg`@O3ONht}?P4MXq{5#=pp9L{>H%&p1KI?gpTBaoFV z3W=kLAH1D-*Ff+{z5uQMZg6f@(g!%#XlaAAUlbgi>EQza;JassrrN z#R^Xz>TiA-ECDBc70irI6>@vU7gEXW+0r?@=)a;i2ARJgd94&&pK$NIsJ>ri?XvH*O4Lsxxu0_1{Xr;B{-mB#Z`R z%HK~_kAf;41eHX6V{jenHbv`Dkc-ZFwnDmGci2Z#|9`Zs&&p6=NIzOqLN%3(LaTq~ z-kxlYL2!NH8ED$x_HE0a$QTq6yS?M?8T7a+SnmdjJy1MaG3R`YY+u!f5^a~a-9x5d zdsvE!2+4sf;juGO3j|u3BWOR2Q3G7kr9>eS_C%a?!&}(4kimSX^6E~6O&UXU98Z1^ zlzk$_Nl(p{2Xuat7xa(|^8t(sTAr{GMr2MlA8>w2H zlKdU^#!FThMvuy{ox2%ujSCe0sdjtL5E<>(%MmgaDxDO={H2m_`Qcv zmI`iaGNCBR{_pai#Rpbwmq=f>3Fd!);w%DrWxqCmlki*4{QoZnwqgEvTaiPu|GGly T;h&Iq0(>;pbZ?cY-VOgR`Wh@+ literal 0 HcmV?d00001 diff --git a/docs/stable/_images/add_scalars.png b/docs/stable/_images/add_scalars.png new file mode 100644 index 0000000000000000000000000000000000000000..2a31a4b76cf90c9c2c27347c57611befaeca5040 GIT binary patch literal 99156 zcmeFYbyQqS(>IDUxVuY$pn>4-5+FzjI!JJ5aCdhL?gR+Ioxv@*yL*t}?s6yRJn#FQ z^L=aGzwaNHwPx?#y?0l4RaaM6*RKdy{wRZnOpFW#1%)OjE2#(xOGiYuD8nAC-%qhjs<_KbN?`z$UU$k6G8E;5XxsG>%nxj(zy(nR zoFL7TpIVl{{GUXYz#lboQG24Th)^8S{fQ>fxvQwxW#T^gzb4z^t4k#*lc91YLU`lY zwvZ@eg4`7J)|lUfxO7r&qhyzL*qcUyF5I+eG<73C4CEO$ai;L>y;}FbCbt56L8t<} z=G0ootu>|#jXHd_`Cn5J>~m;o=PN@TIp3L0b-?;X&Ohz!J!7&n5Y z#jKI-!L&Sd+TX?pa6=ubLYUxKun9moTD*aoTE!B_uieJ8dZiHP3`|6{k`r~uwR z{6O$L%6jc5ud}clRu}>)CbFg|v0v5N#G;8*<*^( zCVO7uSr{m5LJBPOsa5jkvk>z;B&ROGW_tSj_A;Wzz_M&N06Ek>70=jDML!f_a%sbD zv3CvhGY*_-Or@{sk9J&Mlg`m(y-5M8(kcOO(e*to1uQ&csu(;UOhRXxOrx%n-g8_! zdyNrf;dgq@%qK6X9znUF3xw--#uzC21(WrsS92c~W5<@|ud9`s^J=#;=8z81yvPV< z(-qYuI?Q{_>&?d=G{Jm{zjy}l))qJ#Z!xPtN20AiiQ{W@QwW6YoE+?;_+1F(`zW>< zmb7C4F1PfXpKze=lEqBs^!c;7ph78NJKZi*#va~y393Vb>H;g{@JgJ)7vfNAcitRO zeVwG5u!x$m0%tHUl!QV4;gtBa0c!7HU9{llYf(oKE;|jCk%zh%tH{;-Sqw-P;C)~o z#c+6GJ_gioVDCfCcjwp{2X+$Eko8+jJ__zYEEXH#4jjS z9Az-2y~I`ccOv?|%~irjLLYw;@uIl;Gx1_8b>F)Qw?pa2Xl%BeNk4J=;Q5B#Zs#C5 z1-$J_+mPA7F2bw<)FKrIA$Oy9Q*{&Fab9DtVTMc6O=j<-<@Bi;a@(`o^V#Frvz^0L zCoU+GQpU^n(VD43y-of}gBSNfjzrE=wqH&|R;a+$n571OF^lx$M;iG7`2oCNtG|qX zG5w(4RLyUAN~sO zivQv51MGuEaWU${uJj2r?<^6&7`{7 zCorp)t3Ay;&WKlp9=0CU9_}8)*Di;;6P_hXQ;pLY(**k|`*zbJh3li8gmb9qf#Y`} z)mio-lSZPPRXiKS#w1@!o{T4KC+v#tc-P)7Rt;1wofUr}z%g24ZyEvae%STdMcWNk zG5xmiE$CZvv?Zal$&AjoN$DblHdC!Z!wzTL*Q8%3n^r}brwajlvznerQ$+Twqq<61U%87dpkv>><6wrL-G zN_DS<@1dR$x2_)B0{5OWOWb%insa~>jUQf3? 
zHp;SS8*83zUhnHRO&Ar-@W>Eoz}2D9i9beIc5JXaJ~~!CYOl4b@wBuv9sBG-m z6LkeR47e6MgLh?}uIxwbSAH#Q2JyS|hi8^#u?l<}(;G9~Qyj}O0vLJq%l6I0H2oHL z{L+HkJkugbI!sC;!{n00^9dVUr#VAjvwmI;F0D9}32{Hq`PLu`W3A_Ph z1&k-EDy9LBFakA_D~1JrP0UuBJEZ0KtkJa|AM!0?Y7{=)i{$P8v~fPHh+yfUOR#mH=AJdTWl57C!X{!^vN2g8;UXWxY!&$^FD%~!1{DJ^!olP zBSEW3cGwcw8gy%l4zjAW^>l}{O7a!*J{j2QHFSkmL*TcU-+>>vQ zy%__~8&^AWJ~pA#DoOnW8a|<6FZIc?k3VYb)RvM?Q#yn@^zq*;Q_1|A-Wi2Q0QIaD zft0;U-82a`HFfW7T`rDbr97k(@*DU*Iwp_0tCH$HZ3DeneQgf~hutIhgfhpoJDj0w zD2>Dp#Sequpm^d-F)!7pHuIXDSIPrZ%Cs&XGQQere;E0IpXB6Aa&F`O!{tZW)voKy zx{X!@8y=gD{%*DPcFnj+KuzRM#SY#MMapH0{?pp=i?dVkfqzQ<(gG-Sy8%d)9h&?q+MBjy87Nmsb48igo_8vX}vnUqSKCX^y%pF zdG9L6?kIL?-J^3)v`1_?kSmlhbci^622M43E;jWSN!Q_ZdAXbz7 zhp?ra(+>MVb%K#Ze`G91W?c5ZK$>$+i?iwB>Zea1bp?d(ogUMQ-|vlw^zp0duw#l{$^|^k!R&LSo+%`EK9wlohW68qdkJUwLcDS(IpRJt- z&$H^rHm$kS{O&8yn6`CU#a{975V*aG6bN=jIAv{?0c*JE-}>~zP9xU8Iy}v#E8tj5ZO= zYym-$JF~MKo99mto_Yg%sBOTPp}X0!`EC71^q**gZi%<1XEuuh+paBZkF6rNR69M1 zU5gZQ6igy~K9={S*Y|76YZGx_>9>D0zz~v}o6Bfc*+N=?%FO4=?^n5p9s8IUJ>%ZK5|fVd0ddA z1rkVB%Ml6+|IMEtw45sK859(BmW7(8lcu5qzmc5{>lb4?Llagv8xZ7cC@4WUe#oPZ ziPINKHydkPM}9XU>c76=hdlqOW}~M3>k}s{A!#sy_}SQ8U0qpS-?Q2|n6a_*@$s>}<6z_9V1azW;^=Pc^u>+E z){*9KBL722(!|lo!2;xDVP{MEhwc|cJ7*^$YU)27{qNu3<1}%z_-{|Pj{ji`!XVq9 zH*D;z@7VsAHl(TGpIUxp3pW#MO-TzI6I(|}AHv*x+=737|9`#tZ;$_IsrBEMTpYar zZ26Bj|7j`6_J@IgF!Z;y{;GwDOBh*@?SDltj6A+*pa_`_QVU5XHOL+2&(uIB4gk3@ z{C)rPJZ|bdg!vN+N)$>?QcTSa`bZbi3s36hwU8h|R9qa}gVqUG5*klw%t8Ivi8aH) zkM9O8wO{zI;i~N!^7phhf7S*>cmx=pZc)jea@E0MPvJ}r`}&fYtsfcRwt3sR(x0}y z+T8_ZEv99qJ-@i@dfzs7+$&0!V*9~Ri2^WTf}sBAhDisI#Oy-Dk(;vlXSFDx7=YPT zi0F?=2?u}{_xt-sS%yg&K%%}?kU{m2TFA=)`SJgy_!oUqO)611p0b~EJQDxvP82RB z`+v#(Mb*Db+z()@8J=hx@~^)90IWi={~hNas$YVkuvV(D=~q$zXY_ybf(7(I`0pV9 zQ0+#BrUrj`^Rb!ee}?`y2h`vz)PJ)Xga;!D>iLlA^#0#{NrD15i0*b$^mD7Kl#d6s z%(CDoCnv=wm=69W!Mt!NEFgtLX?c0D$#8Pl+wYk$)z#I)A4tsX7OJ2R4i08U@+3mV z4Gr@vHWtlt&~m(Q(mYN!x&q^pk_v~v!~Kg9KWDhGjdX*0!(L>!=j-{+Sq=T*k`l&+ zMmvhDgW2JNy8GWbJ`YHJF=VB9ck8b&i1eADfaE>^7Lbhe>p$nI6Cc79-yJeCvcYC| z@Y$3+Ti4C;auX>aY$Jcp~!rf}fepg6?x!fIn56_EIlOrY>2?PU_B^)=L5LH!oq{@ zlCUtqz}Oh~k+YN(JSm@p)`^WCmEZYC-TtCZe`ZWz8Q@&BgPY;q{{U|HJO66Zoy zM`GQov0ddK7#|PK%p@h`G>;1j)W&+2`Ip`rM2VK>h*6oBe^5MFUw^r2oXfCnmXqIh z51#qK#fb8w?h;w+y|u|yft+?)xlW^{$sM=pX!>l48=?8}TzQ6ia=q^u-(DR#FG#dy z>^ER;d`T5hv2(CDlJ-rP$8mRLj>N82MSi<2T+IFL#D(k8!sO6)_ma}bkG9@BNg6-p z-zxrVIFyu7;!9=;^f^&p=Q1<_I=Z?xnx};+dL>u_?*ptPt?zH7qlssirlt%vJ603@ zF;}t#TrF>%M%er7AbZfA6exb+;+DQIg;m$`wqf7npNpiM7+MB!a_V#2GS^S;S#}*v zpl8nV_N@x^HW%0hz)IuRxZ)sxNTBbP_p%D=LF32X$NznqvOzs5d|WqO*Z#JmPmTF$ za%1Z`hLo@7x@j-lO1Iwf^I`3{@N$G?+NPg7?mxAN5^e;mN)fa-o@1xkk_b1yKm9}# zE0`bO9!lY}P=(>5^dJwyLXnm{1#hF5yZZu8;;1CF@s^E`wWAnrM#@`F8{6CUQk0PX z$pHWw1Ev9RLh*VhqogQ4@;1P8@J$MUtMN^elnrrO>%&1Q+VFzVZ;Yzx+{(}F)##~4Cx;R7Jv#_rFx)RanSdVStS;Bi!kp?Wp>x102KsO%J~1Z zK0efx!5hgbDI_k#5fs4pBr68&DxFdwb)<657(m6Qkr@3y=~e~%t?OKGCAZRNyXAM! zLg8K@jSF5z5fi+&Yy_ZeKk03BQ6~8t!Okk!CT%$g%`$kif z<6eWF^X%tlL29s>h1&%6dslL&lBEAOd3mMWAnuX^+}l{i|MYmIL9-O1IUX03cvCV?ce+mqGN2oIsyh9Okg)<%_Y(!YmdDMZMHU+#>qfhsCk#>U3`7wmbd ze4UYO?Zw&p9X>*&-dB^yLlghUnu6*;vZc)B;^geC&~3@Bs6cXjX2=sZr-1jLiWMXpMgvi;q3$q11#`gTu84SXdOFr#ZwK#5wlezQm)-BDi1yZkvkh-%;dNd<=X` zO@l&NotUgn!zGh@J}1}gIQg22h^5=|d(2(F&UP74o`DOm^&;HKt%HKHgvXXHj1lAX zVRp>yjkMSYV8Me9CVB_ro#zqV;G2L_A^sDoHJ?YD`ipQJ`sVqtNUrfL0gY1cp|5YX z1;0{T%~yPGIovgHz=@RzMTdSdF){h!Yum7+zDf(*7+0^yHrUCpZkt3)1P(5{_Gla2 z7)VBIj=)Al^#^RCST8o_bd9wLb_9n%Blb)*3eW1Ay;1Q!gW^m|v5>kfT1T&U7pUV@ z$U$N6&n_->oW`Mg%c}j%lDiIzhCdB)A3vFA4pRmkk|z7l{jw29sr#5VkJXhF-e9jo z3u^>4&VEBQAh`@K2PKQu=BJe}x9GH-52O;x4v-ZTYSG&DsWH5mW66u}7W#&)efn-? 
zQ5xSjWoXVISd7Q^awo*#@rcKw>oBQLb%=}+FU&7Mluu0)wqXfykHze|j)|pdk_3_Y zPpZF>1le@$4fN8&${5srMjGtI_lf~3aDd>Xb&^sCMQJl92j61O*DAF%%qbC~6nmRc z-G(>CJuKI1PF<+b&deUFcem4<70>fXGo~+5(f6ptKFKT6R7(X!Qf2CM%#b+ki%^5n zp?{YXQ-jwg;uFod^9C(eeV91&7W*e`(P&{?081U&#s(A;EnGphsFI2P3sJV1iP=8_ zQHLqJc6y4FJnswi?bm4Yr6wuu+VV!fR&z6wqdN@+iLte##YdolRDrm^iW(PTLTgc< zV8&&>P=ke`9H=t@6*vq_rGbD7p{Go6y72~F#LWRE(L&F7c&Qx1$6WJD^!G%$&sHdn zoy_gyDwuDW-!TEtIF2$d!0DpHMhti0~pcs9;LQS#ok zGpY5_i7ln!s0pl;{o)#Oh$vd81&;;HfZ_-74R%6f z+Ng_hR3YWUrX-@0LQR_DNpoNxf1%mlrlR`p+mfsvGqwB+-TM+pR0+i&Ve;g5VAr|o zmg2U@|EEH6bWG^o8ea(+>t(LrF(!mFZU~FKi;Plm^M91Xnlq12wYuPgxp*R6U5cqJ zf?j9*S1FFTKZWkk2+_U46Cki`%jG`n56qO+Px3|uma`6{()iQWi$Vr>TRHEDL73sr zRz|SaO*b$L@mo|==Ai;d#R_535o*gw8bolY`GdidAVoj>3r;K`{UmA`D>c}EbNAP0 zTG+oz31S9dK6kvqPB~84(qUPIYs4GzPoE3bzgU}c|12o9vWUtJ4eSoStatpq%@&aP zTgmdbD9w}j`!b{39@n0bZW?W8@6#+K`m9Z@1!*{qz&@)WK!t`|<{)jrA!>|^C42hB z(G2~gcKU=d&04!qf|#d*aSj#HfOM<3=O$y|Qr2>=)MR#z-!77QQD>-PO3t_1X|x$3 zLVfza-e6PD^>hp?U+qG?L&q*jLeI-lZGNCh?rN?C*1y()44ek~*qSyKk(-3fNAf|@ z{}!?tU^!8WRRF9MKjBTKPr9Ui+Mx4s^_yG*!4qZX*T{G4Pa5@>(=9F`SVJF+&%GbS zhR*2|&?-WrZXrhH^o}<{9&R3s(B?DsNAm{8L%+!1(QUAz1nIOy06!Z^b(?R^blK${cXd$fKcgjzbEpa^CACJ{?moF{xKv?#9TAQx=T^FwF^BGC7z462co6M7j z6ciEbK4Za#irv80=Jj`WHDBf>V47@5cZ6>=`Qhkc`X%Jehx=B z%OTk29_VM$!oHq4d~@O*%C$YU6-^ivZ>2pOOs3KA8Jo8FP@D_>l;1NMkV5?xF{UrD`Ut#pb{2d_S_& zqLDgnNARTZ^?So50(Q&yE$sIaXF9W@y{r+DI29<0{2nOGvdxnQs0ix!1607pd2>=3 zy}REJC_e%{sZ_(uB0_BQKGZ}oog4xTC0QIQ57EY4>wd3aa9DA%3keS;v@`}XGwMz(V-O2dG6E91euU zSk{Fh^hOFi9RsI{UfMu(m~5+L<4*rcu}M&(e-gZDFp6G?wylk#_yCpw(2q%(-$m;_Z5 zOJ|HqN1hbmN$_gTL0G8AbO=$9lKWZyS}B3ZB{qI91}(<(+f-sj+mZ zXYzXjU&j4y4xdDbOx;k!{@(=44K!9>UqK!edXSKCL~(V^Hn+yLbliq`2uqF}vWA<~ zut7LT72rt1hYl$buouYFU+W%2zdJ^tVkbjwySQt#m?C^N<>MzLMv9vI>U@q<${fM%S@REylAq@+Y#4pMO2aEl0!Wi%Ufk zvO?CHx?1o&2#j<8vG%j84ism6#sMmomjtbfCzqxGSP`WWrA$!icgKi@Xm4*F2PrdC31nYS#UIwxU z4no!M+rm?WZK;N5<>69bzXj$)v^%2_|Dy<_?MHK3*i*m+?^#T2MiV&I+RPr!dc9rI zCIo9Gsz7t#L(HdfqQzf^eQ{+4xmsTM9~~zrpJOhXq$Ti#JA z2~Sgro=s}SifW=&N;)CPruy?3-qg#;s%RZnQJ%o^J2A~Ss-i+U`N;VFGIG#JfD#qs zTd5J`hq|aH$N}*GHYN`$Q~_3pDpk(x(Xcva@LufGYliR&fu1LF{S7pYW&Ts?(hC|z z^DFR?_&k}r`CY$%OsMU!Lx&x%;p7W^=U|6=7|``zn$(8$tDb(!Sj0pqYPi1nq1f|E zeb$ChZw(o1sN=?zDU~44`Y7&xLL_hge9VR*>Us#7W0x9YLJJ_F&3`UT-sfV7r*X}& z(45-m&vFOTo@!xN4MPh?nv4g%*u`9GukT9C^0TnA@(%w_-@hg%O55ZlKs#DN0@1(6 zm&8Ecf~o!a68L3bC#PmBhT@!WP`H)kLkAG%jXb`XkyiK@61i=*kOxQBkg_#zN+zb^ zDut??uj~X6fBOvg|1e4rErIxPLdXrQMfL_L?S8Dg^^Ka)*eltClc1^devfjcR&6;a15?QKJa6Ya4TO>7UhE0O!(V344Hd+8033=$eiUZKl#Nyf2@f^2RuA2@ z5|+$Y2JA*~m{;(@#GfN!t~XLR32)*uR9HcMg-y_*2Jfm;23W;J8}UJ$^FWS`i2YC@ zgMHGLIO7e%uh1Hj900;*KeiQhHMII@a^J>B*Hb^eL6s5=b^WA^bbaQG0?e8fr?yyi zeP`Y*3U0W@Jn{#6PtGlAa$J`cKBA$HM_}MYIlSot)PtmSxwor{&e})FsF=e{aomKh zjdNEbGmtT;!AEZ$&Sl}|r9dp|iXun&{tnfIgOHdMq{LAL_$tvGWog|D5o!J@(Zb$V;aPD{t>x1PjgrijN=5rDoIaW}wSq@(8J}@Yx)z%~ z`=D6`6Un-Rf%W9VaIhXX6B{^+(8g@<1k|)J_7VA17LyzMvBpnK;?K|H;->Wv>7~1+ z5Zu_Lx1q)bu);R@{qtk_!!{nEWW~9ktK|P|$h5E&P;vuwM6yu#^K_ESI=jPj3JB&2 zeoH!~&|Ki+!KA-?Tfu;RxS8F+5n>2K8R1}iet(tCugi4m6x7&wqW{Bp5R0HAI1&|8 zf*Sl$L@wcwCV+_u6u3L$qlOxRu=6&3f?toB7B<2EGu%P|RlDG&krgKaQM}H~RJ*-N z#`oYJ8DfK%i{~n9*SGp_1o?e1S9lv@PcoOoK}3kNiLO}t!7 zi!k0;C27DanqO3LTZJ1NghaJo=263%DhTgqCRiY{i73 z8&o|}TZD<7&imRb^eC!U6FMCfj5=5G*$%SFCGxwjbcj%p-#ya2vt7|7^nF>3i;qVS z#USW0%khD?YrAG0$>bvoiJdqV4VYJG+xFpojQ@A zbOwWsz5Iw~IzskCfleBf;wHR7mfg)UQ?R+sMYl7kATJFMv5Z8ng4q)&z%jwU+7(|f z!U8*saI1k<0ggivqK6!{12Y6txf<(A*xr@3MK!DLPbv2%P_p`=pb%kp&HgHltTAu)00DIq~MmE&GLwdm+t(Ce7!s*#qdYk%gO zTy()vpM#}hL(9YewimDFa8wXlpmSWRwtWSuwtcCv=X5bLq^n?CFfvDzE@7iJf%r~^ 
z{Znc~-6#(1uz;N`kK40`BZA9Vxlt~}D-rRZY1v(_g#JoEUetw$Q~>T|cK?O+ZojY* z)?#RiYeb5WDRWU3?d@!~?nr)1fjH1bDOoXcgp;;aM%*EOvE}rMz(=bd0fH=j@o=5~ zE`&gz&51B3{OD9al31G)e6m`*Q6op--e!80y1m(YImXF?Ba48wBC70oL;NDsyO_Q+ zgO4u4NDIq19?_`&(lMC(StVrVc)5HzCaFI(el>N*DJz;rzi%CEo*jpYg9O`B|5m$- zRS$vhefyM!-6>xi#fqbJg+-!yk>67Lp3eg8X}Lf#Qz#rkc*Qam>25xkKEv`cqYsFv z3r~n)K+k~AV?{R7Jg5Q>KAk}d&5?J&j{tVE1jnObFeA0Y{rkQfVRSPSZe#M$0??>CltpKl(&4cLE{3qhHkEtR(j!8^B+(-!Z(QhsPDf*4s*LK zlR>+5&2tA3qu(<)s;X|{_wiwp8DeVH1Mkw74xGLC^mmaolSidAeD!`T&DI=xLp&7H z%HKVoZ$IHT+|QUem$g0O&eEk6uJURp`IFcPeBMjPSpQUNNDRytH0+x@q!*t(*wJ>h zJuwK0K4W7|?Muo3`Rwr?7q}6kPx>w>5OslxHVUg2F__!&y(Ea-1)wZR>CC<4mnz$vP+g8FtyIu!!lt zbQ~>nM}k-X^V~2-LWhxwq*$>>Fllye3>8krx)t zM6V@`xW~2`++UOMR@tjB)nnU7aH_~NBZWsxgZnky)JIGASmHj}vnLeyCT(($M4*;! z&0_5+-yBX#iXq8@q6HbsNDE!txGxUTDi*A2ANO>FHk($iB*TR3l)OJ`5$O$+9&mJe zCY)!GI?y4%K5x)0w46d^I*t*&+wRf=J)_Nc7OT)uQBmo=X_Zg$K|k$igd_L)b^6YD zK=gXiO^RJWR#w(eo~AAin-(+s%wDPC8;_ zOhh~?KeD@0OS`cj@$!=H@{)yZuN1%LHx;dxBIG~3>1IZr+!dkD;kzr)vyv>5Ww&Ua zULy_@!voFxT3MM@k3cyZNA_BxvB<@}&hPIM{4;C>RCqk57yKeezWTvw)Y+9Uwi9vd zo@lj_Cw)bw>8NgpmZ9`EZkRpNzDirsR?B6H`gvKMwuqUST!D||OthgmaHojQ7x36e zatf9HwoI=LJ=@2FuAqRn6lU*Lr_uJCxX)|e!b&@;y}kX}Oy-`!*@iy|GV|WS-?Kf) z5zvk>_A(y@^rcw#AW=uuBrABxIcuK3QcVFTJ!4vNT1wx7-G-X$Jm*UsZ+tOc}Pk#ad0CF7kdyna%13(<+guKL*<*v2L2 z5bh z)tV9WRwSg|6@;C;Zr?=3B0 zUq#1xhrpN5;bzYR17&b-=dCBq%X`%-?ETB14<=v6bUz`BoLn+<3sm}YD2NnhjC;Va zU>-QC)EZ{AnH}O^-#p$N!^RV7VdjCOjPhJ2OOvC9p1e(vS`4|J*K-7)a~09Au>8$pX$6y z;5QWYN5&p<*C9+Mz(z~r}4ea1Qx^lrcql!R@CW5X^`Ao4akNmhcPn9REkmcmET8SK2*J#G9sGsZ>`K@qI7q4zoQDud0ZPThLjAyB@Xa5LlMa zFc@R=@LcO9a0)CS#0bY{ZA?PFZUUIJi72^asft(XwNX-ka&HQ|EnBqkIbk=~daMmm znfYl&brA)L=lzyd{KQ0Be9=m1!)hBa&;41#Mv?4F*g@HE>1rC;O?2|% z0Xan}9W_?u9P?^F{--W5HbFO!ZK7{pGRKedoRbbth!u;c6>iji#adQf8`LKtd89)L%eBV(x!ejeroIA%^0ih`f%#7u{4Qn z??@H&090%A#yMm2IEM{6P#((gqMA8gDaks#objc!|H;5|>{L+I3?1QRZ@`gW*y=6Nc?)K{BtabAgq` zo5`mq7!#&-nJ+I)<6>4U3`iN@&9Ks}0AhB!- z(WT-LV*^;hP*yYzB2HZLpgz@DQ{2_JZ>~s+n?7T2N-Qsz^C-Xpm5uSY9b&& zOgxnbDvM!vy|;(v=l36`8u=*rw|b2}6-E}9yR$L(lj);d11 z+x2Bd(7d|de+oI541PjQf2>eS8204ujQBZ9cDB-=5$Q+&uC1B2v)S`wSD9ZoBtYUP zNdRk$`|fxBY_msu%ML9sf6FtqHN@k_&J!fai>zvHYvBqIaQj*9chac255Pbkky_Xu zN*>LzJvYCtR)U1}sPcZqIAfsGnTk3)(9P!nNZPxS1f#mB$0GQvbDmnjlUB7?C)=EDa+l!1d8mayPe$A z=egRMt%rndGz98AAQ1%)j%Fb5P~e524Ej+`+V_LLLfh_7sOc5?TmG6?8&;!MA7p7k zAUpc@?X}o=T%0z!KPErpyZIk_|yuQ z{Sm?!i|BVAp)!%jv4`yHp-Dyz<5DB

    ;@wuYwZQZgq~@1-b)a+Kbg@dAi-%%Ari2 zb6E>!HALqh9FBoqtbS*I2D6=Q>fw(DJd)cOWl(0$y3?>ndaV=X^P zZVo#1h_858J0f1a1S*#!vbHux@2#0G7S^SuqzNrQI+}VF-dFYx5h#(vyR5k9u>ogD zYpWde`BrM(P~^q;?#wkk$9b=dvz#cgha9oqn|y(DU06t@qFa?fz5GwaO8G$eo_n9Y z@R+H}Woi@94A4o@hi>$pSOXTxOS~!IB==7alPwO92wmFeE&d#1csFeK&@HhSk{n7& zZd=25Unvlgl z8&rN--D5`Ln>LepxQnc>o5nJge;&QTcDuVghSu=iJhM)9{ml-ozm7#Plo~0Xym0~Z zmW3tId+5Z}`|&1(Rd=cM^uC5!PuCmn;^L-6=c131^~(w>Bs~7d0f%^~_1SRhj9-1# z7RVA&39-psAwW*`i}W_$q>ShSRJnC{mv zh|8wG-zvdRI%W!bY&n-6KZdFa_ zTlZK_g7)qd;WYE2ni!Y?wtjaXS|C|$oW+VH3GAr~B(9V+f0#J_4-@o6#!erCnqkSO z!m@EDMmibFa^B%9FApQWzv#{o3Zo@B28H^96~3R>8~O@tou7WuPm`&Am>AUEVB3q{ zY{5|`J(R>iXSU{7gZI^~i+|Gb3lgA;8kIxaUy5m#?E^|e;5euM1CE=e zz)jjNk!xnb zt$MHmY<~q$O-I)C&d1b3u%F=<>sVel&B+b^&5i6uP))PF|FXAyLw8N9stYpAb$#h) zN=b2NR`E;RqH_(T3d3|znPGmXxQv^!0c&>^#;aB}g`Q5{>rm&z?lEtX62aXsghzeY z*7{B9?7sj8k_4Os+5je~DQLMrhJ6_sGH!C51n5}l{+~Tk2&?>WMEB@o8-_=?G-pe8 zN{(4#Z`dPKh%7zsIn3vETC6fYnKB>~adT6EIlZ=ZJdhe0w&CFp${eB&IK)lxpQkZ;2frq;t`Tw&i|lJg z3rijy6CN6gI)!%`G;_y`_A|B4DNE#2vOmgj+^-q~DbL}d3vyy~-!uFT3KWINmL`6f zi415I2)@hOU$NTb-U~3HIg?G(>Z3$~9I0X;q=rO&Faf0pGmkNRvxAf}OCQa^zssG?6 zAODS;6u%Uo@hEa^Ozx+;X%6J(LQPpR=0Vb$h{KTho}EiK*?VL?1+GSw6bSMylAejb zEH@iY$wn?8H~dze&vp9<)<4hf>|V&^baQixN_UFbxNynTwAVI7&CEBC zj^D4@_*G$=$4_wXGRxRCdW25%kvCqfU}=5mD%arGu8m|YE`xHzN!wu$%UO6;Y1*91 zn<*YH^deMwKar>-SS>9rFZF?+CL>ai#VCmWf2_zqQlI{h)Pqr7PW(8{=KvLH?g7PZ zisbGATlhyd9ey%7U`Sv|4_T7MRPZak+&)ePgYxW;iYJZs5C*D+#) zL*r8!O|?q=*#d!p!H*iQcPQX)a*ZVcEQop=vUX#_CT-m8!_vfv=mknPAi}HIf<@o) ztQtx9YzvTV9CN{IzpCOYK3P0ZZ$|n525HAKi1JZ3K{Cn2W?04H8p&wKav&*Nf<-TL zg!m#yoO$<5E;7TR2N(hv;=ECTfh2R{^FeOW8^me!8++yu8;Y zFte2}0}qUj-82wKC|Gf#aJnBQvedrvjcm8KzvU3ll?j|`v$?5;WIC7WKh3nY=VIG> z`Xw0yHWm6i+(v$ENx7tRL~2*$_glhlBzS6^zzEmn&27Mb6!SpVdws>on??xi6WRe1 zwg@$$@40Qv{57;iY+tRapa%D}o8vKWY#th^ZQ4SG+Yfz8je6?)m@R}Ezba1y&Sz>8 z%^~ou!c&y5N!X@j>NV0}wS2uV&yr9e+$WfSbWnFF&(r^ z0Ia~JBLc&e>L=kJEg%GE@L0kTxaNsNRvn>*onuZa%8w4ANH$Zxv5dF5BEXrgxxX_5 zEXnp&@(#i4U6aWEJ3V-nmF%22AS2cxjU(58!ygot zl=R`BF{$$1dCk?-G7eeNd5RnzLEu!`2rO=^e{4$A_l`X#^*EMVcz(VG@wSgv`=lf) zg?2Tr!1L+i+}jrKB0b@^Tj!;aaCEv%WWb=9kMrp-)}*YHL;k_pdxO0fVZj#A+Z5)C z6>{6rcJ@xvNp2c&GIxk)M^Y-Z#_eSTXGkE?syEXN0Vu^)Gi7?RT~5uKO+T1<=uL>L zAT(#FVB1E)R-;jasi|2DAjvd;U^Pe@jgQM<zz1=RZMu$E*^9K)vxya2^qCSogXs04uH z``OPaB#!Zu5neL~dOytLwM5b0Cs_Yc2LHQ+nr#r0XoOiUn*IAY(WCn3-p^P1g0xIX zAb(1P$u`p0Cj%dbA*jbuM4moqeGv%c6A=dfe^ZB@5B6R2V!eX?5DcGOXeo_8tVKoY z&6}P9J8o^;o?=Sdht8JRZjKx!Pbf=yp_bq*kM-v`?`D}Ex@N>}9vzSs>ylS7G05ya ze)C)ItR7RJ;e><9a2rnvd(tzA$p|CEn}s^h9_2RnRPGcGLxG)?B1926VGTyQ6l{;` zl?upXfOWAu+MnY!xZaKju*QbS+f##Cp$}#1A)X$^>Uj%;qPdU6?j1ZWY(Ld?Y4-!H zXC6o?TN+dwih600y)oX0zQ+G4qftT^)HhV-{@PJ>ddN0oY;1)RMZ|*u!I1}R#OgYv zZVh-C>0k&~(jz4^J_74F-8Vh5GC1fPj?sGUJ6iw}L`S@;7?@<`=?oAL3HuQvB+t$} zq8Eq)NhYW&FCHa9HSx2}R`wj`1h@5N&Ww2mm}eW9*%PsmIpKv68{=Qh+xhA5 zwinYS5?ke(ISEBQdB6^H*LU>7*ba(12?Ueltf@Q|k3CqX%(2BYSP=P0ircVqOm&_X z+o_)t>eh#A`x24kdfj*a@mR7}+z7rboGT~#Pj2tb@op?bLiyh|RWkt<@|_PV<&dM3 zjO!EY2b}cvI6fcf!A^Tw7L9=wLH2-*3SRIjm-OkLIxXYJRQf9Ph|;qR>TjEgKN=W*rH442oGNf3_~zYyl`Mcd%nEPbK{ zlWmGY3+d%XbE1GBfu}QY$#9cW3;%O^*a=NXt&4HQYV3oahf8BPGXUe!2OQ$p%pF3k zZP{(#YXW3J#iso~jJ;(;RZ-WlOLupJbZ#1?yIZ=uTe=%b5owTCRJz%8qcli&H=FJ} zOYi4?KfTX6=O3&&=9nX|G1ipsC$ulJ)BZuq5kp8WvSS((mzkDc_%W-vU%1rF63aZp5iA|6* ziOxs<_Qwz!a^*g?ax@~m=fObhCnR-->-FCS|#i0j-^7?8VUGgQmoIR~6CWB04f*BX|H zkM|1cV-r2>GH`fyOOX4SzqsnZ;oqaC8C@@9xj8x(fIRe~H@Jt!Tz>urWA2GCL+A#?UB-PUocq78@2EB%aZmSv= zX#8SiHmZ5D_86;m#xLnCTe(+d00e?OAw#6*w!)`omRqsfh)iuD++l<1yomIz?qvX| zatm;k4c4?}wC0BDJutwi%B5kBKB z9RS6*=@SFsYMn-MFpW(4mVJYM3_?A!BK-Zd`2X)uD_dzsH%K)e;h^efOZ*s7-XTEJ 
zoo1fbC1}{@NJz`$+t}xvs1(YEt7f}!cwWMZ*;To1;_&4*WEiDcgfMm+nYWQ9{|@>l zW3Cny3bs=iuEs+C=H8XlRBd>~b7u#kE8YtuAEK@k(kJjF_MyMZ7g1Ac#l|+3l%@oC z7)6@rODZ7P5VeS4R2x%+ZT5wH^N=bd%n^EEaJSezVt%-=1$duB;{8a3zH%-5a(K8wmP-Q=93>t zdX@RUHQk$La~QD_tPnG~OIAn=%Z-+WHi5BNAr3dkX4UK;yZ+wHQ(bGo?EU4eoumh@ zv7=WCTKUGd6_dNUuD%Kzo?QPSQl(wq`Yf|=y{m{D80SG}@5rUbdrl{p0IQ+tM?Y_E zXFuvnUi?HS^Mk%Mv=7gtS??S0YjMRf9#NL6U6v?yArFQ$q50&{W(_zmh=;hZlLNO@ zhkfoO^PX!~6^wO0hgg~1D3WSrhETY)1+_#M?ik$>u@}28h-4G0JxHVs?LGR(nx{}d zd~imR7FKtIx7^4G+7ufbUd9WZq#J(}*P#RR75gm;M{fzcf{Zld4@_ssfMS3R4 zzS^Zn0RYQFNQ7`d3b}B!6~5gki~s~FU*P6}NZSsO-1Wo80Zq1?U*_dp57bEzWkEd_ zf4F(WNiX}_LznJq*Jp>Z%P5nM1uXW?;AvUtKGI2eVyE*zdx{fZyh*j{y3I*y1+Jcr z8Z5jC>IZ@#xN^#wzd?}auhSyHW}|0q5q?4=`!H=AB`-Sh@pIHrpA{o3%iLe70%Ip; zJGsKOYOlmz--gDlDHxvib-l*LUMjgYec%6{;cyws-x#O~TJ-9MG|NB1<^5~u)9F*I zLIfS%s8a{R?!xir+8LyMRA-P}wzk)fPSk;Cdc_$fawE3R-Gb{`j3O|wZG8wbgsYu#xdB1ucyL-FUBtCqYzaaW0$yIqLtoLGCV$M ze437cL73?*s#Vd5U!2Ks<_(r_^EnoPL%%gGzTR=O5d8AE!pt@*2c$9cy2sq8@HEF8G#J)@U zm!hyP{;pMa{wAe>usDi@O}qt=)le;@3h^%}=>OdX0MMd;*>NCV3lX`f;!&}C+1Qb_ z{~Sj8BYTd?VeRbly}NU}tQ$?oz20|Tuc>}bdli~>3{BQ;ozuDd-k@R*4B6Fdq4Ww0 z_a&3%4qY@LTR-RaO&U8t>u>B8yx`$uKu=dDrt==R71SCC%Zle{lSq}g+=QQ~wJP-P zbaBEc(jJpZs6S#|ii_u9C1vI^yS4UA<-`E+`ZuR%0>J11EQz=m7lJ0#N^Kyrid}l) z`wFVTE?&DOZ6|3}91lcEbA_L>Ze=!N1$6`L{{yyunF57w=>a2JkkycU96as879KY|f!hWS$n1hYa_B9Vaiq+^ zFDXgYkZW)4@Yb`niHjK=$c;4^X{!^?tXCr$TacT81=MraqMi(WGsF6u2vOQ?bi(Hh z;0_SG(W;q=Bbd4>^;;6?d6YPs4FJEy+}1L55Aoxq`5&w|c>XqDOx9-{8OOQ|i@ycvg*v?gCVy3F4nu-~%YSXJ4Te{-o2_|YMmyNWX0}AbH#{d$ZIV`b za|2D79Sb*PhYMZPY4KM9?$8RGP(87uCFsCIFx&uY5T3&>UyeRG4d1X2`a;Bj5I`;4T23)nhX}pB7@mbMU zblsY3$I<$xkUtN;5t{?YHRloa^=U{c(3e)`JJ}x@BUX8{hlcA*WWtxQK|Rt_*sF+@ z+CYFQ1$-xZU_{JcV=xEh`&|vY+lxfw^(Y0XIGmHf| z0V5HR59Bth@QJNKORbGn3$NTZ22G2(hK1j1uuY6>Oz(xD5CyG!q#X0F?LV+Mf^bHe z)iq;WepChqB_c>tp^gH#T_2$Eu5n+68y?MZHCSp-q8q{vh301TBW4^I6?}&YDdvm; z^+Yp)b~`#{I>V4!L0VQ_503x89+ekiV8c1};PD3O0?9nR1e~4*K z%+u7^llMf;$jBU>mv!)Is(3Vg9R163IX9lw#5)rdm41q)-@jAGuvXX%qu_r*xs2zW zEq3x`THtMdz8)b3`su$b{|NqYbeJiHEYV>I_RGQ??P;Gv!b$oungr>w-7>gWF$f=X zpD#ih9670#5O#(vYM&I78mm)ki}ly0-@dhcAz=J*fz|nHG><8JtNiDB(vVtfw+KO| zckF|nbuu+->&rb+U-11KCCMS{If*@mJQd1%v=1~wiidcweR;M&O%2HUUCQ(%MOEHCWb$!Y?>a@Hx1CRgt^R7$m3)UGsp45-Q z0+ahUr7r3(B!*C1Y4~aC8v4k&n(qo69c=frv5=WiCXv5kp>WxW1R@pe#vnLKJLra{ zf*eSGiT53iOZ^#0GYH;3*pr(6HfL}Don~oRm=^nJ41xn2=fe=%V!*gsM|tvEXyjZA z00|i1Zz6BSCUL**hm1tTRIW^O-Q}|quXDFH{n_GtBYWVfx@YIR{_DM7x@pz*-mYcJ zMh|pel&V3r0xj)sQ3<`>=&h3jbaFD*B#T^YaczibQ9yJNy$NcvEx4);JO3N>PfmmU zJVQl3H4gc!fme4~Bei$#hLZM<1b#l}feME&U+&Ic*V zeIm56fi2G%Y~g<$6$5Tm8-9!7P-23fY)wIEyh0HGz4?*i0Od@M#K26g`2!lY0@

    8eA(ZSC_!t)*FMU#uD_Sgu*Kc=<5`zuiO=fb)*t!00!N4()%>HT zo`?hm)1iI9Iq%+hN9Uua@(j2NPIeVkA5R|lH}%hPE7(INAOM=d43ySS4WPeg>^)|* zB>NN_PW*t82h=F2GNE|flzq-J#t*) zR+q~_J|3qSkz{pJ#J@h%+X9Jxp}k)zLMHwsL$A-N>8CfFRCVw+<**rI*FP=ZuTSEM z`rZ$^y*HwBYB#55`7=gYsZ1R6Y+H6Sb@HryL@BFioA*sAKq<1vXnPb9gm*jdI+J5L0IBkEhym9{;FAXkr z$~$czF<){@X>`hbLbP);N<^Ud-y3&p9Jj~!vZMIp`92^W-Nj`*KGE0k`s01jX&@$- zqY_7Hbdrk@KG(zThqU5{YR$vsIjo`7uq=cJJQ+jqWKpBK4~-%<-fq#~;+?{qTGl?V zQ{>6qrIEN&9k#jlQ1J8TzK3<-Kpim&zRQFKMnE&Q+|jCH_pa# zcUHFRaH!z+rVOBI4wv5IO1Zyisq!w3;fiu@k?znKKmaAHJjF>WIX!noUqJ3iwjFEzOD*Qm$AbwgW2V$-kGf& zw>w$2VEDoQD5A@`+^OVhf>6VW_y5VF@j0hTECY&wr2i#WN&=z7?O&Ao@Hrgz9vE8r zJ8BHXQKKOLb|4L?I-4KDgW%P0(ZUEf_j@QKjKFbPIE$X?rKOoNsq< z9Dpe-bUu_s_>tA~@jJFgaDMtbWFPZ`kypPih})^OEHreHegaB zRCd-S)Z37H{0D8whu^aVL}l|zqJbyk;vr>-rUBj47`i2BXFG{aM|~Yz>FoeJPJH&! z#)%B>Qq=~+hq94Z{M4*=U^NwV;XGkE1W_)pBySHw?`qy=j~5=lXqIZ}Ce@{IT+C4a z6lT8{GWzLnxcI*v6YdLBFkfF>!f&=b(}o~Q;d=`XS08F53dn()b?S3eRcD61M2-4JJMCm3)T?U2cfvw?6{%#gZ;G5Vq; zIPDLxan<)zm_Tp9sRE(e{xHawp9C925aU`yC%Gr4Ni!i#Oh=v3klF zOn|<{f!r_5aME9g5U@T4MT9Xzpy}x5|Lfxl#>WY{8#M}g)0_(+u3C&jQn7`)8E`UF ze=u)Yr1uDASfWvVP|6PbElPh%a(r4gqd+WLq3z^Ag6>Z{1;gaQ2X7Dv?QLrWihd%Z z#_kIpmmEu|n)U@sv{%CKkW629qkk^gw{p@BvNOq_Z^?UXGqVJirm*R0nGTm5uvJ^3M1_k-Pj>C4^M=3r2bsH0+z0 z7$yl{33!_}sdME*=1^UPRowAhu`){#8bWMn2^+Y{@x#5y3ijZx3c4h%l+kFnF)S1q z|7V3mx+;+e3R=L87eFvZ4klM+%z4@sO@u`Z1~4iN2{dg7PVmtRSFt^9pp*`szKDu$(u;51se$4Aq6d zZ4=fE+oRTt$-08l8h4$&`O(Oipd2s`W14?gZA@84FoMty->*O>ULTf5q|F}Nh?%VL z<6Rm}NHS3NZqA)r;~IMt`(^yAFo#46$twYE^LuAzLNzaPZ_qqG1pjD6o859&x!Xrb z2c#GyBhpx!lO19*lhvo^=9uKiTxP%&%D>XhTn-%B5KZ_I)+fHwWX?<{&+)x)K=mu~ z_dDoh@Vd8E)}sLsz(rIV@ck870xMquXg&CMEzzEs4OhQC&1Tt?u_bmv=@n8#S8b;hB>3N zECv*1JV#to+zFFI6exF#9s@<>bOd)qw*H4eW^u)K)*g-4zGLAA?C(au=@$Vv0CBk z>Cho{gl>esuHA*$f?_LD%^%kIy6|ETnHfyW#ZZTPKPBYWCapL%FUvoyu8pntmee+Czt@q_U(70e3 zx8fL=*{qAqi7ddhAm}5+*j)(UY4yT0Ex+GWsNZX}KqVjHs*VEyF<{VG+kT!=4sH&? 
z7sHs@l0#rxyVv2RQ`rujqpcLQq*L(&-h(T~0PiX@G!OTV3e+YM06tB_AJrQ(B|bAS za+<7*3>2U!bUl2Gl{^yiNf#Y+u@kz>WFlG543FCJu5E_ z?fwCDzA3@klyCpVso9>f<~&nZl#LhvYFP#05fEyV@3znOWmyfWJT{>x?{I8rc zmQA1{o9SwiemXn3bcyHk|MvCa_j3K!y$B5LgHXDY{OR~*!K!|rLmNH*PVW*EVonZG zlkf5k75GZFe-%EH!|n@R+WC=0l5rOQR3{NDSdVd8qpZ)mm@Lcq@yznxvma_#an z)%NXs)tpHk+9Q6(98}>FC$4q2J0Xm=nH|r(rvA7)>Zr!$z)u^nb56;W zM{-mri{bh}5E@AS55Eag=ne}6iix3ibtpi!)`qa%grXAHKvRZz-SiTB9ERLgJZ9{* zJ#FVw8p8G%wl}Pt*@WZPjflE`%27o{BbTYP#`^kqi%&U4s&HnL>`EyyG;+o~e8 zH`$B~QMQjm@l7~Ug1o{=8)gvbstz{i2gEAaQa^VShPzQBoLIv&=#ay%veF_$>Pl=^ zLVC`HDrG&pS%jObNsp_)m9$lSAk6y`BsXFeIvNM+nV8}G{^ecrwN-yoM>XGtkG=ar6?&}_pHSO7*l^pX`jX+tNQwXCmJu9riTMkpd6 zY-culS#i;gJLcYg_7${pnd#=9+E+ot)TL*hYjJYJ2 zfGZO%nlh+f_kFzT3uiqK9}2JUXS+(QoE2O*?AWTVr$4ptixB$l%(Ji$(6kV!%^!ko zk%jLLyl2U;f7ge$q=0BM(X1RW+gGB&-4S3$`DJugzaHryLLr~a;B=>NF4?74dGa>f zrI}l+=@?sUVk>`AtBFw-f!b9CyLnsLlg-|A%M>{hz>p)_TnqShtjs}K_oCYjuc>0h zvE8~)*$6DOnHz8FlCAE10h+u|4pw@F@3}Iq&1P-S+y2>tm=;u^AOi|Lv`qstf_&qP zGT(V+J2o~B4&DWIk9aZZeKDREkJ31FLpPmPN7B^5CyLNoI%Z1hA`7am&t+7Fu3|6s zAeXQ0{mzQ_ly0u5!*@e4X%hd~=RY}SPtRMv8PqCo!o{FBb9ZPWZ>}P7CiKeO(3imhqZM9T#M&i+^@hH2Z73BpXy1FLH zo}DKIzq5IWGgs*I*5Rwq;qe?*h!mk>K;iwMYYnkkj*VY9saU`C3# z+Ndq#)~9_cLJ*8Bqb+Ad1Ku1)!ZCbG9S@gBkg0+NuW-0}N3`0#iTxGzK2Ti9V!^;O z*^X1C=q}v$=Be&wi5RgQqv_)&D#EFYDi3~lf7~zCww{VgyX&}YkNPG#b^@Znb`JYA zq-3-7unFy{um3KP>1oUwSoDW6Eba)vzA%uSv?IwN$zIB5FVn!HdGh)$VEVh>kdKSE zD@rB`b+0VnyCGbZ+C8pW@|LOTFE?HPx#*e2=1dHf+3`QwjqJ=i1hRN_Xj}k%_;Y{ZH_>~oeqIfJu)G~ITWNq3PwjR053?76g$sgRi^viX z@!|J;aTRVgeRD5sXv8m7od~P36|H)oW!zxL9m~#+Jh!&P+>M`4{NhoY+}P>4t1svt zTXwg=y?Sy9uAOk&R?+mDYsux9QjPa6=y>7=?#^YkSl(@cT5PuDj#oULV8t%S>PtLF zj452^u)wTson`eVtk-XR_7$oxru(lf^{B5fajG`v+FX<){9wER<}8Yyn4X zaBdqgr40v1WbMCGYGaVCm9G7WI{NYPSDm*s@kE2{P5=G^EbmkxR;*CItq<9mmjE%L zDahBAWpgex#zX+C!fw(!Gd3nOX@7DT2J_UI7F@JPz#SN_jbQW4E6DyA@OF%B>)ita za5OupnZIpk&*2NYLczlGRxQqOA*(AQ@e~-*%w$8_MiH$kSj^`BBXyQQt7m4$mu=X$ zPu+%r+74}%wjujhcSl(FM=o}DN&>Xp$nzF&XvUE{et$D2j>+$=VP}5<%h^)9UuX<9 zR;9=_v{HUog`0YNwwp?MsyhuPuk$5Nm62{$bdzx=?Ie!56Iig*gbM_@7vfS*U`)rD zk-xq)1#&XEPZE3y0ueSHxukIlZm*i!>7!gj`LLJ$~i8bNvr4fXiCy$@H38;{K@>^(NZtmxj4NJD2-lXui z1Zh>6RN@N(OM5Eh(4#wUpQY%Uog~m%s6OMHE&^x~GI0zXGd(j*@a1^EUek23HrvM& z>iAeQ5=7;?@fHdI`3a^{0BC7d|m(V7P}~bWvPodVaM!8M(2OO?!zb*PFiT^BF9K#L@@FjQjoE#0kP8; zsG~WZZ#P@jj8iJ9Dqx~Ux%j-q(`T$IDy1Ljol`sGz#t!Ef0^a@wY{Mh+zXsi)O|9b z2Ue-6`_CwaCMv&iH2l`+YgGm}M@rlsydI4N)GNZX((A}4 z-w8(+ER3`uGf4Dy2YNR<$VTvOaYbY$*4*{%ISaq##8?K~x{hnECj1x)D`PVBN^%|QfhmiPtqLzGK zir|3T6=&p~5ciq4C$bGEdG3O1U;}f|W+Yl)l&D^iwK2`p##*Nv8&j()Q~aoU>#iqK z2@vaWY#9s2#@G6xK zen4Rm=8r5daJs~k`!=3bdLB}G_We%Qt>!SL zrCA-3?_&#Cxalo2=4_GfwSDK!VK~DM^5wexx?X|?(=QolZo!UUF6!P*-GbD+&ewf? 
zqa15uRNi4{*%_gMFwY(mp71xJ6T?BSX2o8H3iCMQ0^-x|lg2V;EqWe}fGB>Px%TfZ z&!GPUFtr5KOS#lgYeYoQKJE$BK@Ymv7{@N^9J-E&$8%LqEcE%&Ft=bZBY(FttIxET z*TF&a-6qW)=HMsD;Uoq>_kt`KO8l1WM=VLH&5VrB^MnCsfAwc6b0-sRdMNT$dh^4J zS=Jmnu%M-Q3$Z)V@(~G$h&T7%)7IUwW@Gz|r=9%bqbx-*S{a9-x%kdVvYZ%4p8;RD zH~teK`P?@d$ApBt@jD4N!uw}GWOP86USG&x6>EZjQY6PcsNcrw7sPw$?7NFRC+8QQd7>v;(g zgLNOxds+5c*3!!tRA<>rswhk0!r~RW6L{)YYdkQDzL)A~AicmwPIspgWdMI;6J;Ul z!#~Z+cp>&z%dOlMV1jMvF@41qTZupUB3(ydYV~zRvd;9BoroYicANs_-_x%~9m@~g zXgZp&-yJDoClW`iZMod^mn~GymBiap6|jn1e+K<33u1O;fjtsxM;?<7MiQX69U(gZ zF}pgn=5=WQ*&5mmV721>DrHDf$fI`V`W7eRF)^{)r;ZS{;evz0rGkdOEl#Kgrry;j zEn{mJ|4RbyxS`U&OE`=Vg6b$E53lp!jllQ4DwY)L!*@zlxv62mx)L}`4|86l`v;PW zS6`6p!-2tKtUN~otj5yE+kSQqTtc|>#UKXjt}Xkl&%i^=);4a~CbU|KGgbC-LuhyJfGpe4 zMSy9~;3R!EF{%<2L6JgLrKYUd+?2J@7{{qJASRMMEr6D25Tz8&|jkK z4Cld^Ar;)3)h%A%0U#&ZeZ8uk*5#~=55fH%g0Kzs!PYbVV_jFm3{%H(O^um!W2SfZ zUpb6;rOjwRy9{=X1?AX})QknGjfMVQaC+FP)`5PyY(~>O6=)O%=i4; z_cV_K!Ghb<@eej%<@rVJ+60*Z1Ic_&_If}(egLy?G}Ba1^4&Uxy|Ie zM22)evHu=(S-Ou#JnwvcCL&;g^L+^%H(8^j`^I5?QNiK-Ai@!N$I$^MG5u;6-ubj? zcsEyV5BbE@h1J@>zyLPUOvwK^f~n2!1__Fj!RVJD|j$jKX>_?tjeW zaQ!w*{(TfL+?*_Ck*^U6+jY??0udo*3UG4%hG?Fbt)!mpQ`fGi0mh+Fa@!h|QG#VfO;G04?xkKh(Id3v$lW|H9_yFy$(#B=96tmPYebdn~ zz=2DCUVpjFXP3+X8`~_0e(P)-8Asx^w-!IH)GFiX@skqZ8I7JJwa!S$1G_6V2%2iUmTPP3_o>m0#|DWIsk^;~5J5nle=yV>GOiJg z2FPI5?HN)^Pk>ye?|arA(v@YKhrp1_Lt4YCO7`l6RUj}#{#pDFcsI$l_SNE|iyS(q^gEa!;4U7n+|MZL@`r|pHoPTIB zbrb+rhyT>x)Bz9s5cWPF?MXJ78b_N)cYeHC`4lG4Gm$u9X?h$yBsJPnMdeVs6|z!$n=w2d zF$9&?MNgOG)1n-!{OEd2SU7(J8{?tkwlx8XBW!;$je72Q@$Q& z+5_dRKe z4ZpIlD8v4!>Ni%y!-mo zRXfk@$c=?2;GYNtvBh1t%v+VvmPsQP7fnQc>}1u+j3A=P|O<40Y+JMX2TK}<0t6!?3@ zje(rXYo-W8mu5s49fC@9vdp8p@)_W72^G{kCIWz?XstD54dj>Np9GB=rye0l5*gGO# zZ5o!{zTQ9-u7cp)esLN?%!Q;7euO&kcnUZ*G;+8M)8JTxEByXrsyA<-LG8zqrWrj~ z2izOSb!s1Q)yN?Km($ZPu3qFLV8;W41zNm}XoSm;A_fbvtAkPGMPmAm)MQK2iR$o4!!oh*XaXYx z6VR`!wKect%=6aG*m1n6bm0Cs!Mpr^SfeH*jWT?_)bmzmvudt;*1xIBkwDo*!q3yF zaR^NY4U^XEqP%+oM8dvQUxLPnk5doMf!`y2kbr&c71(aqs?C`v9ycjY(13mW`;>m)pny2bA+kMl5ad>$Bft)-oOZwiv zV!PcIulM@=b$#&hV z{lQ@|@ofn8nksV=EQo|Gpc)r;QIhgA((%GBPMCGB>lZ!cJfSCX z_&>q&&(`l-F+v@Ec&>-^CsN7Q;E);A{AvW(7d>ouHQlqvn>@PUQ(z%M{Q=zlNcNDA z7ePFS2ailO17d@5og1ogS^m|Jios*?kVe_9ThXI55{J|4B>T?%d^zidm-A_eAkyth zg0FI{fZfVJ3W$=jYpT+>8lB^(|a^CjcR@P*sFCW z0e@o1F>sI7nl{%I)6fIqj9uCS#boS;H`pry<5Hf$2Gfb=yn1R zenewUHP#k)PUEXl_~?%JGXd^x&V~he!~1zNWxATi#^j^4v&>Ebe|@-({QDX4v#9i4jKxe#SJNQFE>q8{D;FRPu@1i z3|KR$mqXQ$&IRWn&~c=B?a3{{z6OcesvN}D)L$8fg&KC+)lRXu#i(b{4ziG`EZyj;&!Q%2{z>WP0fov39Etmq|c0y7EbmuA=mCIzH4DV~rwZ$*b!ix&@-E z^LdkAc{0VJ?My3(Qb=IDvy41o1tYul@gNk=?HrP7^YBn|d)qjrw)*qWFz@%bQ53I3 zcF*%;WAbINLe@H`66C3=snXDuLqwt{hlfRvCjpU-POGX-YvPn5v~&?(4;pTBg!UsVX;Efk|L&gYfU8_zxCe?r>S@3a4PkE5-kP(<_aNt zT~8tG@T>SeET|5PP8El@tUKD_qc5J^b6v9TO%8_#`W@zgMoXGG(|(Qv>3|@Jz}F#} zy3B=@_9mVpwjo9xB~aS`I7Nf~k@$$I{`z^(e*K!zEnaq6_U7<`H*V*;j#roy;4d?2AUaJ?B!gA=XRfl z_Y|pCF?{GOeV3qRzi2qhv4)U|exNDv=W4{(;-Vni;zC)-Yy9NSr*Ro*m*3?ckZyS% z`Z*NGPZRP76A9Ej`ot)2rkS5qdoqVd1=S;HL#;VtVoBerL2*Q~(5l|W1h(gPd0rA`kq^n<7LJmc2B_q zTX*s3TI{g#{Yz|O*j;85Ady4Rurrm#wt~5bG4N-w6)ZD58-)G)_V)SGw_-FU+G2RZ&fJ*Pn*Y(_Ye@~$qFs*t7f3|bBmGq$kj7*X7eEv0Pu!or3l`ijC#NkCe+U0pXHl8s z%AczHeY3ZB=yUI)RF5;K->N}ZqRc~2L(k82tHYD}Z#bWT^%42%rd{je%t{b^IZ%Vw zy@fbZEQ%&dtNy4PHT;A^%foBrYjS)zn9$5X#%`omC{0Z^j{2F0x2y8|+Z^H8O#H4l z`9AW8YD=ClGfMDbDO8iI)s~l#+l8^fby)mG(pN!oKgl;$M^M&+w9qFaK4M>^*4wXY z*999!?skU7ihhOM@`&6|)1$qUd@q>oOab<{yrNvN3!)IE8Zc+lCSKOZTl7<%Chz zMkew}cD9GCX7P5`8yqOY`?6r}4Up5pjsL?ecUiOV;>CS7soHc>?q5NuUg%_eJ})Qh zR!09WXnWuqPjFRS@RLUJ^X0KBX{$T_y}M z(WC*_lyQKua@=y%bkn9jujm19^@|*%p;wRd4(wdnV53HJ1(XX|)Gc{Uz7)3jwh0)0 
z4w`&}==P>vWD9_sr1v?jt-da86bFxUhXjZ5IV~%TJuY0QrtNUYbjyS;k%qi4qi>e6 zGi_o*aa-B_ad|~OnfWDS=ml71E2W8utG_lXf2%{Gepcc#6DG(}SD(@x+f5w7gPVRm zo|&nT+uC%=+2xQ zdnIjuzIhji>+frAjcZ>F)pl@Tf0cOIDa()_DB0ESLJnAC!!Kwex$-NDKi)}H)FR0&0nNu_ORG)yk{c}P4oDpY#EC@}p2qjGmR+hDS8@kYQ23J`@-)CzhRsJxZ#U`|g^lmaL z40 zd+$dEc;NKQx0>I!^wMInAtd#OsaepgMppP(?Eh4xn5Bi+o#a0XqK{})^4bKIL5vma zjJGZuK0NCuUBs!MLDg2)yHfb`kD5Nr_vl`p&%nZ_l^?M3;@)$Q&v6KbI_VjgiC1ql z>6J^KlU9nA{E$OCi1fTwt|j0ppB(u#}Zfe-KWJWdhU0KyAFLbld4bl~duN zBa<~s@~{sj*v&F>ZC~Np7+P?>7rRz>27hO;2*1mx4nuO|`;+pcjcKH!sO`5oHqvvy z1q`iFmMS#xhCjxs;OE0ozf;d6YjQXJBB%_3jSW5&HxaAK(XTX|XJ7sT_3S&IGPu~=Apr_yDM&pb9;HHZ>^ibl>Ft4!>-n%&~ zns{dj5$MuhOJZA63%sLQq-vAmh_aYWvIPVNiddY$WgwE7$t6G-vs(g@3a zKzHHZi-mL$I(<{;JWj!ONJ;(n=YW^a>50QWu_ghvK$&NJ1vF(1Z*qZdte#F2zuDQoD=F zf;fqTHX4#G&ToZy@bLz0_prXIP__OsTt1Q|*Zaxvqo7azu%y(WI=g4FF6nz7EdKOo zeQ`EoYO*8#s)FUIafuqeXy@e|c^-;@bf$#0Lv5|iN?@>g+5c*2 z9_@t|dRPx~bsb*FSE|xeLqpG(o-$>!XINZ_pbHf;H zI8{7G5pl@@3=5y6W;`@S7=E`QH(_Xt0fGg0CqS^lUEkUJzV|%m%l!%6Rb9QRYOSgnKZD$44;h|dX1b`Rc!9K> zLkh!EiesLteRr;ea~-m91Dik%B&B431QzPm^SHA@INFXWb6o%Gd+gHqc_8BxO^tgN9P>2okdIZ@a*l2? zf;L!VQv~Y`O^Tnpt6~4J{XcwAkXufu21cl2SOT0P9WVnr=~obu@ECJ0E%r^w8pN`V z@|X$jB4nlQ>y5|dU=c1>LX#`}05 zgU5HDX8E>N37WJ-Yn$JBxBw6%b7Y+GBd&&r%tEQV0D9e>wl@0R3OLZY`sy##K+mEo zbSmKENT_jmaAo?_Qo{Km9@>@dq~br3#bg!8_!*mg>(V{?ePn>FZi7FeVbS-uQMUZ& zB94_$3UY1epCii@STM*-LHKpb}6N^z zRwKyH6qD@Ms6{$83f4T{GtA|OM!}*x9A}kri$cpLpaxzQRDEvg(HX5y8=HQqT`E^i}BFj}Kl|WP0)|^K#xQK3}z*GBJt!@p_lyh5vqn=qzBm)khbNU7n)+-&p|a zPdhnWb41!6iU0l&NRDk!Bq~DjyhGRUueJ?qkdFEkRa3q6oVp+?qHRtS?QJe zuxiQ@W5)tU&cVsStI0x$(J(`QYRAq@Ak>h*^bkj7i#P-r;{F;^L;tt&2Ujyh4}Cl~ z%P(pe{dsOijAk920|g=;{dDwGlnH3dM@XtB7w+W6&#bWPjw#5C;pb|H#hrwLecTHTjRw9T8bex6w0n=gd5*%T6 zWp7|Ev3P_5qM#J%8KL~xjvrwOOjo1nd!DOGo^xgx2}B(J z+HYAAs_UN$)7X7?cv4;;=NOnn0lFTRLyHiw)?{T@z1oTRL3VP0_N2^WvQ~N`q#CAu z2BR8~iz1ohuR8EBC|HaJipLUczd$y6eM!!W5e(cs3r{@9^s0ghBuc5w6EeFsO$eC4&Bai$Xo0f>4RXv05hZBecr|RUC0gZnDjxzo`{6^6g+{a>U!2Tq zotnSlAlsX+Z!P74hj5!;?21T*09p$_bCmh=E>XwX&=%dVO6LBx5w5mY{j4;%%VEX3JStVaYrs|5|5{Uh|9Yk@qZux z&E5RqS;R)DKvtm`c&ZLS(@H;qfa%$<7bx+c6*1>nZ;_55V?!~Aql)B3kLNFW7+WvB z_?>({1<6?s;EMKky=NCe6G!d8`AOAQp*IFRU^gmclu``I0I`*N6uaLDHeiCd)8t<1 z2| zp-dCe;(Ypxb13eT`Q`*2aP!&M>yNJ8Y;CIhA2IUjd0Z43xluV7Bc2~jC$eDQ5tSQh zsY=*fMhvfcXZf_o%B`ftlt~65zSWRSt94r$N-eQcS?e$Tn889=B#M6z$W z&!CTj)aK|3@}Fol{lBA8vK5D#L}Wv`bMSU6{8=im^GeZ%i1nCOs3qBy7@{qxgl zl%bFS%)1O&%2JBZ28c-tw&tu|<3QPUuSYMH;)10Hw{)ew#nONH)RT*)xByKskkuzL zoQzu|nUdX_e^v$dscC95be)bx!5DFg7!`YM^*m_705D_==Oya7Cu^$&|A2;u5p2{{ z``d=C;3OZiy3yOn^jJo!oN{cUutvvgMNR&QRtK1n0s9?CC+KfviWR=MaBEi<8wZAK zQy!Unv;@DxR0$7Q9Y;5fNTY2#Iik<^?Nbr7EUHPmiBKRs^lN)`{rhBGS0jGJ7efZU znM|5>$f#yJMEqa~1o4Af{TT{m|HtYIqcCXVyAg44VHpZ*t&R&S=4u0A$>ToT+~<=DGn8o;JHT53XRfK&lg6w zPR};LZFZYeDXfgOyOul}@31)@OyThzIcFNQQx!dE` z7tYdy`=7imI@zpBW}YuxMNGq*(KYzch@l1Fa>vKj@S%J4w@lQB>BU96NE;|tJ68B+ zi57nEB=@JN)jhv(@m=S3I%YrjNhr2*SsK-(BY8M4h+~M1m%0d`n_AsKUTdG|xU{-u zM|P+LYX3Q_@W{c-t{Dr?QA(4vLv$t5YQe+^`{6e@t|$wfBsEpA z)cA`g!nnL3-#Ruv%CpcEekw0vZ1BD}Ml{bl58J&cQ+GmM@I>DCZul6$X?V<_>CJD> z`P@1&U(s!gc>Fy^_@md_t5G{7_>niqknLDJApbu-kKF+m-0jZsc~*|5y7R-*Z>>&J zb}V5A0Ki-3eyaY0@A;RJE)-wIELed;M(!tWyi$n-=> zIzQ&CVdg38%=pxWsJjd^LqtItpff;K*BU72aD?1!_-!DIiQ{XrdgtLn%q zj0|>^P{e4ny=G{#^u^QoHO~r`OUNl_VivuW?6;l1j~Zxh%owTb@_O zk7dD|BMG3pUnnfErkj&P_>DLKcBd7#n43BPTJlqemMaQ$pz)N9uIX26rPw#$F5e2% z!=sGO#PSfC3U;C3*`pVS&wkFI1R7*Ocr#R2qCO=gMvenwwz+*BrttEEYcCbUe z_YTiTd2dxfji9wn@D1<$z9v0c7Bu~5>TxGN72g;5G2!Wjp^?wdt+{~nu$i1LCN)wQ z6eGKF#XN=R`3J|4t1@9`wt+0+yaZ#G*Q{ZkC#!#=kj0~9y6KEOx`0$Th*DWc{#h!f$QGZWi7xFLUoL-EFE@DW8k%Ko 
zu^P-Nl;hFQ5X9gxR(6G<*ay5MZV-Z#U7C8wVmu2}WcL?*8wk*w z`8H+cPOe?Eft!t`dMS@QKgarMD-v*)_1(tT!x4|Ajj<-;2}sjGuXIC%En!Un#yDXz z;EujnRp@&wRUZX9)d`ymo9pvyh%X)WI~|BMvNNW+eh5JDd~vFd92;3~lnT_Qf(a^} z?ul4qFW<9LYtT?dqUCn3Z@e-aJ4PlH*Ds3sf;wd8yOLj5xC>7(bIA1vy5!ExuG7O9 zo_sE54tYEuOkPXIgHG?UUGkz9Nz}MDPp(O?)EKhT3ElV3`j0<=9zk1W4ft=skjn&I z1>+QPxbNQ}Bi5t|$Fkh$k?rX_HqXdiTWI9qz+dRAQBNW+qc{-}WYeu)8ZXA*8r3VB zCk)>Xeq$+VQ-vHCG+4kN>+F;%A+%R3z~fhJ3QW+cnl!_5iP)qlriMnI1hTGs26LkG z`KTiv|L3b7!^Vs&2JpF;S*#$Af4>OFVn(G`pHMm376a^cesgsA1&3xnQP6_Ry7+8Kg#SZKBN=s^tON z9%1&zN)*j;1H(app^X_=2N^GbBX{i&|nhe|@mkOq+_p>6=Oo zdJt7+Rw_=)e=2GB-#n>d^7!QiUB`I&*$Emahio)kSR$CIR|+y+eD zmdQyL`EoxMxog~QUX+bB;=-_T_qkOan0N6u@;nvGx!nr)cN@s^7-H?p-spc^a7^En zJVMKuSQhA{6=9%@!wQ6T@ey2*g|^Dq1dG_0-P;8JzxT}<`9BT za$1~chw3)uElrGw)sbtNh;~a=7dT1!y<$PBHZqbO#IV@fx1|BszK^_|Trdi` z{)A+2>)F7212TR)M0>%+`9!U2n=J?Gks~Hc|%YKo!Ez}H1G>; zX7tSjkw6}dku;lB7hJ})v=b%Yw8*V;FGP41ybgjM_uZ3 z<%a%*0M3Q-0nP8;6gP>;L@u zT2q`hJ37yKc+V1Z-Pi6Qy1(Vmrp$lu-?V2WRtLkiB4|j)4+H^~CVo4SWr(ol|BD9x z83nWHAlNWPF?Vij5c|Da2g;vPwULN~EV#&au5 z4~LOg3KsroriV`zb49Ye6*BY%+FBSxQ zw>vE83)MYx7o&j8ZQ;!PLP00i(uJP3fR&6)DH#|zO!S~9D~eag8ZivPe~9vzNf9I7 zMd>G2W?OTcsvA5s#a?Mb4+?F^08~>pXGJ80M22deZB%(x!a`WR?*M0ALU10?`f(s zRt{wpK9^ioNBP+=5Gp-xzAg`})6!1(?cj1`(<;g&ZBu@4gz}G{*)`L5ayzr#W-<7K zHo-XbKFInDC!UtOzaGbC4Rc`oYtG`!r&ROhd~Tl^h3I2;n_T4_-4gFg3OZ3N8qiUFxvtgO%d5%K0q zW0}?6J?#q^{A^zrc*l;}z;MD^`5Ue#JI$~HxjXCkMHut<(F!2#IByUseEXsjd{lTM z0Syu-P^p(Hc<-9To@W6L`pAsj@ncl7tx&!`W`c-u#@f0HsR7>E@@^aqW+13i(IqqI zE5y_GYCHHQe|3Q|?ZF?6|FwXF=O=xt7Oi7@zHyji$CbZh|B!%M`>bJ!`G2c6e*$;s zpdC}sfIcc`=1`%c05Vu#P-JLl&uahUvV5t#pA>7&*NZybiRXv9=ItPg;^McS0VesS zy2|nVZht5-9O*!PP=ySRSr|al&!Soc>O%w7piMv>wfviw=UT{&P=)Y~#vt84HbrNb zz{fJ<8%aM5W0N!$`XL_F+y}x*ZNl1>RPkMtHhE*lgbkz04`*wtzss)Ui2ot?TelEU zAF=dZpXR_BFys0<5^kdPjK7T=J5P5lEkZ> z6z*U%Ox$jhemWmAW@#cj>;vbOz_{$%?O(SbnkJRMwkChB^-Kq&_k_z_aPzZK%U8&f zQ>1=jsyL?%)_r`#)P=NcnJGwUX@1}GAjbYR9vOe#Ff%b%klinBs_ISN#RQ?<2EoR@ z{fqZr#&up(bm)d>zjbOGI2;{rjP$zGo8 z`XuPCisCROn&%r{XGB+y0|;bIslk)e+%PhB_0Gcjl;Q&e@{t)*@*O|FD<2i%2KU8k z&=H=n8&>u~Gt_p{WdmglEz4<;arnnR&?+#G0(sAqjIc)&cbnK{Xx~je*pFSxe8y;I zzVt|YOm}NlgBh*!&TiHyc%`J7=GE0zmN+&`@SBB&<H?~1`^L~>s{fxYB35LsoyT!dK6g(pAnN6t~zrCx7~lVqKQ5bmwc_$b)Sij zkau@2tP{y1%BU19EA-d(gvQADHn5mEz_3|80Y!Y>fJl6`@dn@V!?9$w>Ua5p;$%dI zI!!sMNbxN2Ahw+=Xu_7txaVCwjCiIfWVt!3vSQ>ajp1ly{;n8yRZLLZ7mTJ1Vjg zQH=yDHF(#zzun*;!BxR(BdYP1X>6Rr&GEzH`D=Ov#HtTeu}+t@La zN@1SH&n*Zi9IU6-&3TD>OU}t-q(vGo#g;gH3UM!rB&o)z9vgqz^=theLcqkHLAcJr zB?ZkWR%1?G{~!Z)gy(B=Jqtv`EVXZC5GYk1zNECS22SVdY1)w6$a-CrMRr_jEPIZ~beyGi!6RY`V(Uos_^spHQxTH*VY< zcY=dCJK$Yw5IlX%#1H!LHK>9yxqR)N=u)*D`P|XV1Qjr4#%$7@>jl*|a5GtcQon+V zl}3|(6VTkk0oLdI#&~Yw_U5uQrQgp+3r1SKw$-Q)UpIW#*3v6 zlLhLjwfaXp2vtzp(RgntM~JVl7k!P>%?X{U2yHC>5Bah%w26`^Zxuk4zCiVA$O;#~ zGraixs3tqWA{Em#EDcLIGuU~XfW-I8V>7VND&Y^2aDhCi!FTstO7A1(DyNY+R>X9%$H=>_G`bdzNE@zVg`QN z3nwuPzl7&K&4x8^pOZw?_#b}(z{AJ1{`=d1_G4hrRm41v$ zgIGd2{Sj3Jha3uvHp>L-PXBqzTmrXXOd0ofI0eS+gUTEHO#OwB#Bf(I5;!3V>+|u4 ze7xW}(=q7sbMkhXG|4OKwzAXWzi$3{bb_;ul^?(0jxwROJD-iwx(9gTq<-p7qqz1* z#iTyXCd$lI22|gGYL5F4;Q_+LLwt)&gptl@1$TBITT%xQ^Mr!Fa=wBZGYZI;De5`< zO$0#k_ToHH>d*)_RB8&GZKLAa{`N(SKyRlVGm%Ei)L2_nQ@yDC9t;FD*Hnp?VE}E`c}nmA+p zaFQyatO0{LS39-CC75NWEKjPMtbk5W!<@t48bSsYJU{D-)E1ZI;wci92mz-F1hL4z z0Yb-u+^7buuN$iI` z8mNMut7O>YLO!J}o31Z;Oy3VpvG?r?lxFzx&w3?3-0`Nt--?-1L*d)4tS8;nZ1wzXKq zB7cVrTH}PX6@n{(U;~zZP?kbemW9ZZz-}qN`T?rvM-rPW~-{H1m z$UVl*rbb^Z%AW=3QAeJs0Kl9g!$jR!c^(r9BFfnAyM@?h2^W&TxEUW=(DICTw|Ze0 zwA3{}S9ZXsB&V0N{Dqtx7Hh`_0G|rIzab~Zm&Z>oqLs{-_LM6SbC2H>n*UDcv}3xR zC#4MEPaUm{*Lc(~4yY1Tqy73RSIhWx8F?}W_+ge@`tEeLb?{3S!1h@Pkvj}s3Fb#I 
z%b0&?2HHADZG0#5y+d6;fB^Pl*F$-pwtCAtD}O^(OpJ+BFL-+es3G%R^6MRtqP#G zwdxFOARaLVxu6BSryNN%r9DRUqT#p>NUqZL*09uoW|k-@8&Ywsj#%=4@;w%(vUJXT zITaUsb@k+(WHl`%8o3G>(5m{e3KSz5)8UEBHOWz_|ENZqj5lDs2MORb4SO@4qJOMXCgCHe19uu%|Y zxgy}sZWT3StU<)8g!T+gQM>ml-vJ1+_)0rhAyC|}_6p$pg@J}t>4;<9%!5VKj+Bhz z0iQ|UZWkAbB+pv()tdBH;5DcFueGH{Dfw?c{qSGL4}+ay;R|N?XIB9(|jG+DekRhwq$0a z=&a7$B*+fqBrKj0abSG`NB|CUJm@fre+}^JOUOcZ9J&+*Iz0wLY?6Z#JWW|hf2Y%; zj_xN!M!!A4396kYYt+=tue~%#j}G=Q|?=?bKlF8j0R-0YEt6Dvk%W zlPR?Q_8Ywc^N7f`gfCqO_@+S6MVmARxOAB+u zrHV&&LnASGQR3pLCe*Ri@N#!a7_&bo<%)8erO{LT_2C8qA*z{-$T!Pv|Lk$fI>{;W zL5|8TsywAm?2Lfwoh;))X-Zu}A{IQ>9~l5pU;2`~$U@-8nfGY_L^Keq+^!iNetQf@ zt$9FeN4JYcNdNET&{S>V)E$pQEn?ypHLA@lHtn0}xAB5d{qil6FvF7jlXs+eX0kWh z{bIqpxm@c8>fz*2w8dZL(#bLYXY+Zc0l|S!!}zhZpdD{%b*7Db=rUX9uBn35HJW=< zPx`b8%S{W{H|rU?zK?X<3EP|ly|8x9=Zb>A;dg6Noi*=%@?F;8euc4KBsfCmUrwZl z-`4f_CzU`Xcs-LkKQ9{*U8aV?dvt$d<98Cy_0_{3b^MS_U14wLO3pM;j3taprm$y2 zmeJ#dyPf4SJai3cH-?d($l&-nq(z-R4_4KkCf|VzT0tiGOf$Ea0TiSv3>(|+YBqI> z&sb^YoqHX1vo-D8x`$}@naWvriE@3?oNy^un+QGsK5Znbib_Z*p>lJFhJ#NZ!G`ql z^{RvIgnaexyW8PEc#~Th<7?xxmX^WP*+p{fYAT6`8|5-W;C3UIW(7BM;xXc1*fgfe zt0p&5zSejfl)T)OdC6s6yU(VaBC9&p`QPf6Vo*CP+Swx!-fxi5-~Nfb5+n0OEqA?* z1d$$)cS1}WnoW(3l|>}Mrh00WB~cvn)Pa;0ADYqV)UFHEOtD|8A@?D_VskA9=S9Vl zEi|z5_v*YL9|_*bl9y%6>G_DRVu(+goiSF#JrL$sKBlGJLkdsR-^Jru{C6o6V*L93 zN@DWWMYhR>!o>lU7f4&UwuYwPAI=bvB^>}-Z+G?xzoa$S`!o;DN6Jx_e0NE%LoWh$ zBE9@-xY9RI?qCPlqR!89Fo1!pw2WG5NZBls(@6k2*rfpG0=b6xz7L?U_XRaIu@{P} zs!M?{q}QmXqHU9`SC>F>OTh1+!GRY}n*A$6aZ@GS*q>m9H@4Clp0a5gC=7)5d65HC ze?8K}<}WnQHAVNPezMA2)05{Lb}HTD_$&PnTQST*v~XbL{4(8PYzNnGgY3@AW_mo9PyJS5B1)c zRn%s|q;sLmuz$91UGK#*d@&t+pK;yR3HHW;&nhFZXV=z&JEVcRupC+;F7p1C+c<5C z7qvQsKU$&#Cu80e0}G3B){b`P4IbyCwm$zyP&0kH(yI+%LxI-h%w0Vez}gl7V&V{` z*;S}?jfamf+en04#rbPgxwhNXWl6mqRg$W&vk;!g!c4}U!WaaD)SZ70^ZUW<*-eAL zTzR#rBt7dps&$(4($uKXYxHRIeF;~k&x_Wh`}Au_hTic~pbIyHV9h3BzEzzVuL%iY z;)AWcQu&{DD|=-UgKUXpdl8+b; zn=16DdI}8zJ-#koO;;6S9}{OWPfJIgg{$(12DI6`JM!op-&} z{GFG-Db&2W+?3mMWa0#~2vDcaHfh+ot58ME+03Men9gN+W!zaFQg5;f$UC4i2%RFU ztTd%G;vjMdmd@+WHS4|=^y#R|;yZS4M?N*=l2HUrvjzkZ+?)3d!WSzsD1Py8++;mT z`V}s%_H$aNcM0-JIzu-t>iX@8qbV@|dWeZ?Z0%F`@u@spsq%{ELsIXM)lO)7phVbQ zx%wIdhk3pHYFp{eBU#aFAseDgUxeE_9rc3OicbWQvfw+X2DS*XZf znsMTtAow|O%9cs&lz$%rp}_ZCj5d#EC@+_fDS{WzW<>}y6PNm9E;I0THA_HV%wume z{wM=H#ZcH2`(2l%c)nAdqS@ba@`q)`ZadqRc|SfGNw7{IAp-|P1*~}gN>~ozvpfyo(PgG;qvY(O+T>H}h@e1+qJ`r)so$Mq@7uB=*@l=T>ka)%7c#eriA?ps-kn6$j$h5s02l zuT(Sf+Yl;NS0m)iUlaH64EsFE0ILc^>VgJi%~|I3X*o2{M(YbCbCCg*U!zVbEU9JH=gGz z#NMwm92QpwDyA-sEjiDQWBCEidaUtlv;StiHMZ9BK-q<8`vh$+ADM1*fTO=4XinS9usM8d%eG^3WO;d(W;@m4_#e$APCB9)>CXD zdMjqjXOqnZXC2Q1x==wmW%<$+t-fZ%j>#L)XJ<~xnd;*4*bQAW>cd_C=f^LuRr^Oh z3m(k^yy=@t85J9d#Z!$>#_lrF0DrY3Ebfc(lc>A%nY&B2yUw-fvWe;V4w0-|bK7LK zCh@MlfLC)wq19uWRhlO@oLyez-U1QVwY{2A%Rj2Ry7rK&*8V=|>)Jfe&()u&)XI@X zDO8qRT8olJB`-xGhrVjR4CnmYvbm8B(ddSmCYygK7Jf$u6DUpN)OY?`GUmT@Vw*x= z`3N&FN!X0q6TGNaMDM`w4e$Ghu<>SQxY*Ra16i9l=PXGnt0cnPTmkv#Kvi z{-drGy`Og6jXZ^&9)sJXBnl0~sqjf+HGb)dsS~E+mC;Y1RNZN+reBb(uhclklcmaD zRAc>^CfY_Mvncod1?t_Ryg@SLygpLjk`6@1&VCJ#VI9X;x?y&V+Y1ncvW}zi}33r z4Ui#RQ6HwZUz9;V=MaBO#afeqrS7bV7SDKonN2bnRb<}`PD+^&ZSVr?xe;hl|0T@$ z-Rjl=j19QiJfMQ;0M?-$uA7+R7~A8U(NzIzRg}svIrRQG&jBN2kH4KZCX7->?+qcC zNUA6zWKhl#4;@b$XEM$q!;Gp#@a3Oxu+9F8EsJzr20~FiDte{=7I_E*VWxkw@!-55 z(SJ3N4|-S=x>>Z_`a6fcargbr|HFYnQQws6k3u^tapfc=HO(Yksq}a2mOsq6W1oJ9 zc`AEn@qc+77$h=kDjMRNJ!+=<2on7DyQcT|aZ9mayUx1HkpYyo^ixi6+T13w&MwjnBMHu5h{}8CP)ku%Fc?G|G z@e6r|xi#4GQqo?8i8gK6Ox`LA!0;nD_1I6})6XNJZkDTj=KkL!FvKuytS8do6Xz$L+ z^GkH5`x&z6#{Ks#F}YM#;Sz$6`_E#f;qd7vBg-N8sqd8SF&DHBOAsA#7%9Yy?}Y&l 
[... base85-encoded binary patch data for the preceding image omitted ...]

literal 0
HcmV?d00001

diff --git a/docs/stable/_images/cpu_threading_torchscript_inference.svg b/docs/stable/_images/cpu_threading_torchscript_inference.svg
new file mode 100644
index 000000000000..67f8ec884a30
--- /dev/null
+++ b/docs/stable/_images/cpu_threading_torchscript_inference.svg
@@ -0,0 +1,681 @@
[SVG diagram, 681 lines of markup omitted: "CPU threading and TorchScript inference". Text labels in the figure: Inputs; Application Thread Pool; Op; Inference thread; Fork; Join; Inter-op parallelism; Intra-op parallelism; ATen/Parallel (e.g. at::parallel_for); MKL; MKL-DNN; ...; OpenMP; TBB]
\ No newline at end of file
diff --git a/docs/stable/_images/hier_tags.png b/docs/stable/_images/hier_tags.png
new file mode 100644
index 0000000000000000000000000000000000000000..cbe895685cb8b2881f4df24f3b904a2a21c5d79e
GIT binary patch
literal 160926
[... base85-encoded binary patch data omitted ...]
zRZ&I>velOrxPhQ{_bnr~bX!gmhdyt9L|(SpB)&Ty`LL2PK)LSS7Sgs{E3Oqe4%|hh zaiMJ?l$E?nS)Orx%OlERS;VrwZ<@gWax2$VkSF7eC&)7QD}R<%Za3!-I&Ho(ZR2Qs7^)wAOA}!tM?5Rnd4*RIiQ}=* z6ELS?x;&k~>NjJ{G4F9ax=*rM-z;c&xZ?L7%0qi4p`~2U#@N76d-{ogqT8^(FQs>WcS=9>GwJE~|6H1T=uN#WUE{7B%Js|E zz+%oeSV^eaR89Ce(pxS+n;~G;EpB2M{t? za62w}^MK?f$T;IS-eOOe5jNPlxUY<1ox~3iD^&|J*tju~aCxkl4 zvw6{^KJy@~C*Xm?LX@^F95+ z3O`})uejUb{wGCg*2?yXv1Ao^S?#ls`1!WBdn``B~hq+n#tckdey)EQI z4>UTjw*JORh{NKu%F1C|jqjI&2(YDrF#EgRoIh|VQ*0CbfFOUH>uPOSDHuKF+R8g} z)e62RA;$GH^RenMKy#oGKg)Y~6A}3J-Lf+&Hvv}u3x4m=8Jt9 z{LI2g3JGA#V(i#CK+HQ2s$$_KqIq%VMd&G%7P`)ZQQOM(x|_?%VTJQq?hICF>9AJL zEx1jGyja-ZRQLB;(KOCdo*uBkwUyTKxDVQs1S`1?vw-lsp9mU&{JFjc;TFvDUnek) zFvYiSnV9x+8N%J$D6Cw2&vh24Rq?(YgIPPz&R@(kWwjLrrGfG?c_Z$f4`$3U?&wDG zX5Vx(v8zO2FI%qpV2k0QM~ju5G`-Tn(ykTatXvm<9%?HV)Ps2sdMMT?*aik51hw0B z!Mf^bL(2BUKa8QF)At>}9uHD`P14+9WUsW-ra7%MkIkQ3e6 z>adiyDkM7pZMkruCs=;ZPnDHrJ3wEO%*r$QXAr9baoKSxw%jodJpS?WZ2px;Tdo=G zIgG-7ZYE&V2YIk^&DIWI*MuNns^uExx_#3EmOAr}`$dx{Iow`Uq1!NqM|WuM#@lsA zTq`8{BHRtQ`f8xJEu?O%gEy?09@CgEbk1eK_MPCccHDAGH z>i}i-L17zboL`<+h~r0eVtAL^^-CDC*Ou$JH@z!ft)J#VBdZxs_c#7gG2S4%n3!Zx z-VL;mm1OT-CoI(Y@NM|T^u-rSLu>to6vz;%mNAg<)gY2NePRvf4wA6!K^v(MT}DD^?QzPgdCm;eMY z{-)&trG)Aqn&J%uR%;( zSGzoIZVU0UH|=O8VDI+Qe!kOuq1LpFva)B{Kpd3WM$U-8+hCrvtTM~xeo&1jQMpzh zX!iYj&#h@doAVRyKPbeuzEduEw%MN7%7|r?`P-9z_czO5_FOYTX2QYK6N?tPOM1;) zL2CcB@*6Gi5T}k^-xJjsv6MtzK5(g%cH0f2QCAk41C7Y8Tkj~?pZQ36a{G`W$^_cS zd=+I{vs$4xWd#tp|8?c*|GbJIzJxz85M{%Y+7*b(b>sokwoU+^!kR;>yt@u)b5FMH zM>y7F-o0WICzlw02=jIw@Ee(mvobLG-Lx%-=06+KjO%ngK`~~dTvs~gONUig9Neo7((5jm$!uw-@-a>3u(4L)TAuP5fBy< z_Uq+laTnAbaLfn(u{<=U1bRwt@J?7NGa$yOVtt0Igg?T&j>dgU7m z2Ff+O(kR#4i+>)_m2NG6x%)K`rQXedct_>h_q4hmJ#h=sv$LseA?+h$D}g2Y)%3H7qaIziR-_^0`aM}E{3wG38)Kh%qjWtY3h`O!Y+E7v2Ku5%8AORi1vby~~Bh7xgQhyt+gQg&N; zoAy_j>kT~07!_~v-JHlY;5tBlWrbZK(GbC;i96FH4M)achWw8E?qk}Gd{jO+ZFQ_( z8JrKxPFt>xc~fo6hK~66Is<7BPxX*@Aul{bHgH z_|S(woZk7aw|9hGVO}t+g>aW}=YDAmqnP!^PXydK8o!7b-RdwFwiXsK*mUg2yOb`L zq=withV{Ym`z+UWChmAvq#2Anaj>6pmh!al0++Hy!G>S`dQ~I%@LLcp8AQu<)h=^a z{6?5nL4g&$&d8f>Ayx`=y;O^pE&8@xTgY~$aUp&&&(2#{e3mE)pA-+Vw-8{dk9I*A zTQKAAec7f>{rF3qOc+qCZL!WcDA%F|?0u;`!OQ$C`hs}}8u5(EHR=0$KenB4?GwxS zKw~K>uf6};hUCv}XywGs+e4o3^6f0S{WLfMllVu@X_4Fe}NWAM|bQ5YINZ=vetN4M^g)67LBDWxyi1 z`vraUq*_SZC8_c#_jkf)+N2W6Vg9bC*Bs)XSr4w;O_)XT!3;- zJGlOtw7Bh-s9ZOeYZOo{*I3ry|C3TSr5F#IWK&;Wb&Lic*ZDJF%*8pi?ZGt9L_u}1 zx=H=>!-!!TZvPxR{E?o=O}um3u2;Rs?=DPNzN20SL2PgCWM@ox^_$C&I5FJg31tiW zvYUQoS?jWU(gz*t&+Rd&F2TFTLA87Jz+;bnF}?B8YBj6be(PHwO`rM9=Q<1VNB`}U z>5XrEBrW61S64i!(^$E-g(E7F90Z9#N8wNUcAzpmwh#QuYxDozGuyUQ5`OAVg) zhysT|D~M9cW<0Qa&_jXt;OKg>rNRQbjxF=jS*nNI8~o^w@hKPi_Ojh`UoN2x)WnI3 zHTl94Vg3WtWy}gQw5kH?C-iNlu@aEmLH@P;^OS4q+;n4JQS^N}-Ss%AO>288m5w&d zzCFraZR`3XFSD8?0qqa)6yIz1|5}l!&7N!bLE5`5*DSjRwI*>aqikKZY#@$}2?Hm_ zmFFDCJeTR#Np|d4!fOuIdCP?3kRJSLxoVnNIBne>WVyDo|4WxLR%sjDvf&|$0E&S!h)MyPkmIKDQVl}X~=c6T`d zefvEnw|?(-@hUwLxDUFs7^dCIPZk@@qpr(HwbIY@CM(C zOH)3Ton!|o*YK?AZrTck0&HLLsedaE9?Xp6r^`w)7V+uQTQPnvkbz z``(`GYA=k6soOrDA+cZIQw(d~v$AKt0?+(9XtHNoR#7 z#K!V|q5tIF6Qe%MxCk*RLO-xENbY7!2v%!=@d;yt4GmFIEEx4)>7cxl9(C=5-n2Pm z0oy_(7f)@uzb{sPA&$og#!ljUTo|ZaOFc91K!nLwY$wZn%ICcEL773IEMzdBAf!!$ zfJnQMkIHA1(TzN=#G>vM#{|bODPEWVSA$8nX&4mg^YP1WO-R3kLyrGZ+d+QtFZ}ub zfQ~nCKlQ0k^@SWcawMHNaYDRiZtM%bq?i5s%d-4aPd$}Zt*Xm~qBtsx!kG5M-YX?$ zRGsfjpZ-KzyZy;@XHD(o&KJ`Qk3XKKZS}1F%XCfCvQtOXc?lP;bvm+tUz$5JmsU+k zc^UwG6>Q@R&n48YV^inT^Pm5GALjV+Lt5FHNORx*mo&?tP3N9?A`NRcx_NpoX{T1& z`Qjpmoc0&m=9lrK4xhrg}K#B8|2hL>JUWdRWyRY1%2;@$&kQt7WexHDtUN)Vpi zD~rG2=+UFv-Eb})Ja|xLIiHqmaGH}4HQm#PwN$qVVDVUScu$&>5IM7NGUa+wdCtoU zG<@<*l4_XF@7nc=j0#oDxUZh6`| 
zyevsAPR~8|>2!Sm(M(|N&ZpD8a!pBCe&vggr{$An^1vOsXZMx5UA_NMdPPYt{~tel zEgnjp^=Tr>x+FL`vOJwqJu-bhCyNtYaN^u_ zdN%MBc)*wj>2p%R56>-A+fM5JXqwh!=G^MtxgWV+7%;l(c9{;vF4N5}=U48@pMR&0 z&PjQh4qqvI{`u#1mUBu5ZCU;eqY_^EWTgO7}`T-)xDi>O6&G9G+ExXUN^rz89Jq?IdIq?^v3$g=p1 zHobnOgU*OPs5QqkhY)*j4#`&C&^78!T={EW^Sole^n=fsZZ=Jg9?^@_v0wJ&!#ojG(eWqB$C z(}TbN%W1i;+kAT6*7TZs5byKPzo@*a57fiq$guNPfJJ{Xi1M@xmvT^^sXZ!(o+}O? z%1M2acpuHSC#Zp6Mdn={GDTclpu%S=OScH{*-iVdbG+ zInLDDLy0d0j{s*nab|RRZcFNnx-*$gtNBu4z_zx*YQnJ3dLH4#9@J-K67nmSf2&(IpKR%qne zh$eNfgn;0*J|^_luYNV{J9=C^xl&Q?o5Z(Mjrd-#P9yL#@*r}#w6fz zjV&`RYxT7*Piqx5kE=?#-m>+M^ui0<)6F;cX1K?W9Z##oXc0dCN1sY(&Yn%b^;;jx z-d}#=$+Z8#!Stp_zb?J0I|3FKB5=O7N=T2!GyB(TVpOfFKUtKQcZ+zvz%E(O%98i$p*t2Ry zT6xcdy2Bl@JpgiA&*s2&x863toGi(69bg?O@}UQH`18+wIwc8_Eo&qSN7v@~>o?t& z*59W({Uco{vV2)TtzBQtI1{(sktSaI+F}`1+7GioKF-!_HkU77u9b$n6VE=_^?d*R z);k|B!R*i7t*{DrJ@Be@*K5>fJL;r`15TUo%%LdNXT=j+wrCo_ET4UgC{0)`H5jF> z%F|-NApVldO}%g(9WrM*w&6xMIr zoYuozj*qYR4|e2Xrvw^xa`Vk)O3|PkIiOLt2*PZ+3D+t$IFlOi^UVTM!k_#^}6^lX~cO)w8UAQAqgct(TKy|;*$%=<${c^_Z1F1_yc60v(;EHtq8*c!fD zO8dE9i*ROb$CM$P6DA}cn;X28X-|toMmIq&xwwR}T z-aQw4v#})q&T?%TIGY}RNOTU}l=iF~Ppfsa#@ZuA7O$ANshb~(>;2tOWp#?Zs>tU9 zPs?)riL`0;%CyOO&>4gCPyInofAhVsNSpCT@#Ek+!2QDTFb8_8g>%EUd(sBJs`s&9 zPiv%D%^RPxTw5dAEPmA%2V}8}Z{Uw&Z5) zh5!2GX%{MEnfnyaqw1Hxr=1ZR$C`ZS{7Rnwm#IwQ{zt!Fd!BnM(+yr1{piQ4!rmRl zvMXc5!>_ovjFVWC_exFy?%V43llw}2tnk=}{30&~WIJ-B3tkevk15X@qhHM!y;(nG z;2=9>{W|FFH6+NUrXCG2L_fW_`5kH7Wg@qENr;jc@La>uh7e+77TH zXzR9KSJIvjJRr8ZGp$*pb_Nf84uR(zs_M9uBX|5sUgq%*8Hwy;n-P3l%YJmlM4CKx zI^A{GT_y5PrGZT6P-KN|kGwf;d&TQgI=DyL{t0`R=L3JQd4Da|?$#>LwJuMa!dJ)T zA2r%3T*miTzxu)SPyflkOpk_r*Idc>u6KV^PG!fAooU;)E$MCF@Rs~@(koXiPjC3T zN75~~^ptXkU2#tacLWoH)gO}~lbX@yrKEWomaqhn1t^JtX3z@);qJh3*~yXGbPzQ1 zo3rAmQHC}#$D)HEmIAOtc)3Mmkc6TZrQ@xKg*!rmn$veJq#%<$##)0oIDl7o_Dq*G`vZa>bw3 z;Ba!)EoCL3IdpWySTZ43LC=Z_JZ6YXfTYd9?CbQrba=}h*@`mEUTjt#yxaPCxm+J1ZPIX%$4C2{&2O47sqYT8Q zFImA_D<#mMqh>s4gXLtEQ=6oOk9_fCJ*?0E*W$K3PD;ZtFaHj>$fxPiH~Aqy=wHY} z%L0e7l>_{X-WF2Jf&5}64}$~kSS`T$u(XwS_4g6ir+LD8rKsdFzIPShtH&PikRdNV znK5U%*SpvaS}|2VuM9AL6362~91DfkfO)_k)K`Ss&drySQL-0=N}RL}mV>;v%bn>9 z7ry$3zf+7B9{1b0)SKmKrWJV0X()yOZ{{_(`a|Xb|k`??E_YX z%3Jw29sBXF-&{TfBTs~bmGgL@v2IatwB)PjtYo%@QiHgN!{zbvyq3o@+;LBT#ICaR zW0W_S&vgCXh1>SCFmnAN&?JPWnPf!pAnZIbp>UnqFKlUvQJ3uY9(y7TEJGEjZL4
    be$BYgD-`YJ5{dz2_zTC3fr7yXo4eR0yO=QyeY8!^L4dZ$CQ^*Ylb(M?^K{iFF3WXl{phMdVoE#M>0YzVlrZB+xu*3@ex2*Kq?q~7 zqRVuoI7;ZolM^QRfeqy?pGwWLAz@{|Se82&WKyEuEdL{Igr|8D zNrkga^ymKY?>hIk@2~mPQebiCx&)DwYwez2x-!sLJ&=x;4OQ1nLkhI;80%v6&DOg> zsqvTbDdgzy$wAc8XvD4$kx-O!E#W9E3VjXC4AVfa_vtm`G2}^g*XcOxyQh!EP;L%h-gt^ttYcYIrorvqa08|XBAM26*%a;+7RT>ekU zUB<`HeCBiR>8GA`Z+z41-Pto|-5>waCtId|{`{!>umANo+{u%10?`sXyqs>!HGQsY z8yeA{TLER9+y#g?2g2tffp}MgbWaL`mrrCo9toR~yu=)z0!ySDBh!RX)jg9w0zyX1SJP zN*u5ML0#}FviRIc$)kh=#bb2Pqjyy3SW=#$N(6)B_HmQv_LtlBTz!M^kNDrjt* zJHOr4t*~h#R-@8*r}pI=wlV%Z*zg!s=+bSxLd35wq#52zuCB_I<$C(uLL%okV_=4B zc1vsHft{fOxrw3(h%P(X!Z`ch8C@r$FrUydWv%E*=t%LCHJWj>Bx&Q@bcK6(k8?bH zA8_Io_xWqfAJNE=BzUrbaKp1_-}lqb{l`x@_wW7>=ic?BL2=ToJ}&8(v@|Fyy%b*7 zO@rkcX=zZFBI@xjgf!JANmlfjeYsG=u;^ew0vZscSTbD;Iu1}_nJ=6IlB*P3Tc5IA z=e1oD{gNJ)rplma5^Z*`1nql;wYw^g($kgv%P-ooPU&mSv#?uP>mr?+&vjHhNl#0~ z>QDX**>)MbJnzTG#@z6cqwfBt6(P4NM*iRJo|eXjEqX4aC@q&A~IoU4dOIi?A+WQ44fwk$CL zNdBE<8_Q2x1knyp#edUy>2q;PS3Tx6<$4O6aC$a0g5d@27@v$RYw9le6tlz88mdze zM5ozR9f9fG^}2xd^f$vbTCT?>IGWL5xH_v9KEL_taHzc9+$b70UB0IP;%pwX`YHyG zvzy=(-3~-Uc-eCtoU$o75a_Dc&P=5I!%Hp7BFUZb?R8-e|l5#CP zrYFTV8L&fS^Q*kddR|nymcKRYTQtNYA}?1Zor0hNMDH7}QMhK7H6Y06T9%-OI8zj$ zGFZM5X}$B$YE!+$wVMH7G*xH1ihsG3`|aNu5>c2*GQF)*2`5cX%t(uMEz9-7B>|nh zke*j^YW?u%!q5uGxH)hENv`ol|PkFh83<7=+qz{+sJ)7 z5Bx)of{aHDq0FYNMc*E~0V|{gewmn{RjcrX@eK;U#z89A^xYA)+c7>ESuf)au(xpMz99iNp`25}p!7wBVrlbVua4G_-U)GPxuW~BB@}RT6)Az#DRqvH2hRan( z%gMr>@@s4?@qxp)NdTLOj<|oV>kcflYQWO~6fo<5v;2UL+&+j(bCt z7x9&z+g<(SBCbk~3`cD8Xpgts%}S8c|&ShO_c>9H@h&zt0q3?y77 z(D?9YA9e1xKkVG!{sGzx{6>&*gZIaW`0J|JTQM;|t_=7P{OGHodZufLEZ00WNc)Op z$Pr%M6@^;9t}Y~FP872)FFcKeR(Baxl2{Fok?%?(-zcmxFn(1ct@3%~8?)mI{2twS z=-{KkVRUGxEG7h(fao|@gk~qx^UK+jNX;g7CMz!4M25!IC_2T0%7a#a1 z*ME4Ak9ca)a#uZnzGWSX4kR&-mK$o3qkU*W>B3t;#4D>%EG+pJ(|{ra2JO9H6}FV<6{p>Lkh) z>!HjIwbAcXVN5q`bA!w9^Jsr6iLLs237b}ZdTdAVsi%d(rGhg1``cavr2{gAe2cIu z`c^KW9meqf0{(ekc^QiJ=+VQz&IeD8gt{A;=gY$M=76un-?{WIQy>-T@$sDH?j(I~ zM*K&(gZ$|K_=Cxt#z*2l^w2|fFODBS?kJ1P)1a6Q4Gp>esCZ8F^tq>>c%m-th6m(t z?y>!DOMGz=t1{0%`7J+zFM-aS6Nj>Q=t}Oz7k9e;6Q|vR1=v8=0(!CGxgQg$acl%1 zpLfuw0eiX|9Rc6wKp#$#nS^pXG*-dK5zo24^QYYS-0KmyUU?ooaoCk7$H99&P<$rc zb!W~ytnGQI!1(dZj%Quwj+CUcXU`sY_Uu{L-w$OjLO(vJGUwPyw*k)=hYz_OsIP&O z!|qnNnN%VFo_t}aQ2n(ByghJg*i}%s%tr-h$zM2r4nFrMv8nZ7-!~SbK)AjKif0lo;y0?>sR>4E4^;N;mdw} zY-ZtwidzYU(>tGWCFmqfZ|~@&8!>o_&ucHnlK$T7+|cWP*ezUmz3YAUSvP@Im%))C zSLq!956tm-SDc=0Z+7Rt^%UM=6}Sxy!1umqV$2=ug^a?tEY3k@jUJ~Cz|p94n0s_z z7HJYIKVw5MGyrz(+U4a<4~;=n4CKSDTSNO!G3lSXcn5&9-<6?mJ({&G}*Y$gsQq%rQ5&Qg#&gXAbUl$J&&a87xDo z@5jJM!8&msb$A?g2w^%l&Qt>o;P^yZBgiwpj-l9%oSkq-j~;b{gLWDg@X14c_;AnU z;5j!AVLvc@)UAz6#s|-iy2%3hQrh;)^r)b;60ALFgFg5-vfbE5o`1%J8ai0rc$-13{?1@O+@u>s90KWQbip%hU(#k8c~HWN zRR#5Q;_P|127}35ARXJi%WuS8A6JkLoga4-sJmXQY-~V#%mn3n&Dz!OrI+@&B}<|u z6Zt-M>a?3TmnEpdSHJRgH;Q`v>%R_5;!i*Ij5~tzzWUx*x?8v2Ttkd^_uNzOYV!Ew zkGrKym%8oSw>RKkY5*0-vyXh)lj)tmpjaAvYwg$%hwW|Kw}sUNPUsH$skEop(^GPr zHg9$dR;+f57A4lmG~;yt;pQ}cB#YMM{YZmK#EGmIa{vH907*naRO-zsW_X6%+xf!_ z++6V8#`TccoX{_Vl~pbqigoxpXu(k6B9wYD@qFl0{;7;rlG|=a-kYq9aU#2M=++CeJ_S>6Ne_prMUuFycxd%R);nR|i(C_ws{%Yt0&}lv58h7B%Fh zXflVOszP4-jq0~;_4PI9*a5eF9(vuTZEpYm{k~3?ELl=dk-pV1j+DO{g4f@+-IeA* zMSA)x;TYF=blxr1bFNBW-ap5c{_1yK^}B!EmCgm(F=xeEw|*1a{A^r-?-~AsS>QY`!#OOhFkSmf91*otmFNt|HX?JySdBZvPR1~%SyRKcPO4T z#R)8Arw4A-FXDnmgA3+;NBt6@#BRF#Ueq;k=_axOW#y7G zEDSfV03NK+{%l&HZrFMo%A5#tPbB=z!)CI%#|GTO3Dp0SaL4N_yBph;mr?1iaSUam zI|(<+9fXyFmWovy17W|J2Dhr$y`j4=zHybCH*cQXvPJg0fbM_25Jwp)xak)N@_ulK z6=U-quP}(6-Ev~f^jZp5)SVkfy=j2pNe%Mx7wND*K$w#Vnqg@VY*L&E?X zu){Cgd>CnP0HFH^X|eCDMZL_4eTenRdL$o{pYs3%`T6p=n{(=nn>)M$gYNTg%_6kP 
z1`KXIbp|v8uYe{8@Se~Pd8969*tTt3-S7U+$K1Wf@0v&Q-S^fv`E2anyWg!_x5mBh z^{@5+%G=yIgYKSJ-VKF0>Bm6e$sp+jN@rY8hvk|y`{lBL)qz53P*LQErCxX(*E_K4 zglF=ct|}-eK?o1^)lsm3BVqN^kmJj4gYed+sq; zdhE+#e-nv84*NRfBq@%YC7dr!KFWY&I^9hecI+_5>YxmlF~0h+4AI=gcJTZ{o}pFQ z|APjK>BoE0pft1{p<7G zb?oVhh>~A}nOOtuF#s{Cy_W#@23f2ri1@T*wgu4;qMl-j|%FDZpe zM|F^p^N)Hb+%u;Fmi)o<;!IVT(v;<;&c3TkZjx9L9!aArJ(f8wx7rq~DjwLEnt`%L z8am31ACDQ@6X?WG@nm*V5^e0=FJWAF=RA#-wMPn^x zL`y5PgiIvFN0pO7$~$S$xIzO2(yijf9(y3j2y|K$h^Ep|IIjv5 z4z0{@{r=iX3mOn?ob+Ba6rNQ@b`#zxjRt(+#SMydY4PAUH`T(ug-4&QUT*6mrol~- zJcWcS06d{Z%5B?0AMu$VePBoFt1Qi~+E_!&(=Wi1LipeQo4@!UDTcJZ5?GIZ^OtFt z@TT@FVXyj;;MK1C8s;?I#d;KF4Br_&)MRfk*|q?-1P_ zUI4{BsGL4S@rQVHEdX$XhX)N_UdcuXeUIsXNU#$PrJ>D8RIc@fN3I+c0n*?HdIZs*Rm9Z zukf_)6h4!#S)Nh10#0ZNJbStsaNMIC7u&?IT0&lm7L{uPPvIc^_m9FI@O-ciTiLJ( z;}+v+qZD!Ndp`S>MqKUZ$Y}^m@rWykqFawX4dspb)tKE)`zT#c!_--pScf;t7o(;5 z?vT{gjDAhIo~f-ThW7)1%%tuu^V<9?{HOm<=YIaToqP9>g|`Z~xOw+knr2X8XMOEE zf)LTMWO;GZFbymkXj_Rdx`nTKd?;O==3vOZ|grdtR$2zpqtK3wgu zj`O3nLr3{_qFmD#j}syaa&9td2dZ0Ip9DA!AcIWcqHMw_C) z{^&o4ml})>>XhG#4@5Y9R=B-#`IG-$S=Pl{7*K4vNnd?|_Zkd&p9_YLEZ1B*rd5iT zu3PV^#VS&!8uqoj0yQuopX+w+SE6OxR$+NpuV<)j^AJp!(8xzwziTzTDgtPH@D^w`fbXoA7zf=#uA* z<=WyDxmTNtMn<s92_2J52)tj|k(*i;$tX#)>VY~5z2M2f8 z?Xvc?(jWO;pRmd`6k>tek%IXb-xor;S?{NQT^Nm5))DLeYoBuNpZ_!rHe6=B_3lR8 zsSZ&+W^~ndS-{lZC_R=rR2cJLq^H@FF6HZJh%7UV!{=}Y$VSn%@n8RL$actM zx;o^&x47xO@+9S&6LVRv<%ezKr1bNYJc~aQ?$bOoPudvGlb$3O)4WpdaZ@!xhLY6u zYBy6;F7Qn0o9}|l$=7_eD$cmBo;c&&tKJ&^8JgyY&uj;MNQOA6UpB*9fu|Xn2rT5? z{fOUAR^`!Dn`%5s^P3l^RhW%$?_0&0h3~E@*E6+I#grQKbyav?zV%MH5+VxEi(|ti zc+*d0Z{F_Q#;vUf3rmOhzLP^qm6EJHHOmXbiNk;1YZk3MrS$WjDQhTbEJXPeXh$tQ zHxEFR60P`pmRkrdk#zZ>vSf}I5_~v0Rf7-A$f(2!s1e^_xkvM9huqNNy~A z#r$P;sPdXY8)eTv6kteYw~TvWCq9<&u@JQ7PUn8`7w`dtwHeB^-K6c3K2LSRb-WQ7 zM``NHG}{eWqC;G};f&8)UzrT97MTdMA9PyCbd@yQ(5T69yM&}n5YM)=e@0(fe``>f zcnT1XqbvroUD#>g~I`b+|>N9~E?bGxr7H_!lIh&7mRl{J@#m_Q4NadgyaLz}mVn)}M;)!yeK z&)z^Y>dxLmUbQ6?W3uD00}&J!p6Y+~P8}_Kt6rD|xv2rg~9YHV@2~lxclSG7ehebm2>=HjH{!@qNR6l>;1 z^{9NADpt_cUTp%A?=)hGf0>}Y&&@!j!}^w62=VBqKt;{+TUy3kdb2B#mh0)XUiW2| zYt(lUsB2Hl1Bk5lk%1w3-IToI*)J~ErIW;r_@2D_qvaE!XLJ#ujcUFRM}D>M6{poz z_*S{ri9kHgvjLI{XjP_$xK}zreLChN?vv?_-zjLw9qUzcTdR<&FCx>Z!e>sH32*%F z5Qgs{xYxZaSfaJBC18Z#ymsZdMprw0^S;oM)NvqRshYF7$+)Nopco8Dz51AnM$HhyPzdUO0kq=2>_)b z+PW%Sc(J30WB6iOgxkqgV*}|bFG|bOVOVqv$%65*<pdZ-mX^|-sxn*!bf(j4jY@Yl%QaS7ih%9^X?y57Q?82;1WpzY zJfMEpif@E3Ts^1B*>EXfj3ZfQ-=}f&p2drlY0RJetp_kM5q%pkrLXuFXU5&fKAidp zE{Adrpj$|Ta;^53QACpuTYg!txv7x`2)>iTO~U@$2MDuq7@j^eOuV6ul9ng=K82Kq zo_Q57w4;&I)U)EX<3-`OyDD!UPi@gI((HSGO}U=QZ7L>Z=TQezSYE#Mt|)#=LxNL+ zM(@?>)Cm>d>MG%(xb5<9^G>>&MeBn!tM}VfeIKu2H|)QG9k{)2bfp+EY9bBdC31pDO;L`bdZEt$rtYnwOWD4$l_$ z&=l9?Ps*=P1L+XV>k!c9SLu)jzvicNmc3!12{GoC_yo!I@H8`jRSe~sxukVI=Din| z>jsbvZRK7is8j+eZCC_0={n zbs5WfPy^uACDY0wf0YC^F(Y0jxu5OXRHtRfFT(g@Wa1XVPwUUdx9{|&Mdio7Yj&>= zDS{oUqWGgLixi(!#vV?ghF3U;gseiNMX2s%x%5@Ke70mq}w0S`cq&1=8}Q#nXK= znl@ZC6{qMmHDFnqMQK<(8$YKhEb45&HyXlhF6LEQHqCB*2h{N0f?LJZURi+fBq%IA zwf~jy)J&c+bl*wm6|Uk+cs0#WESCl~#Z!LT=RCixO0P(r5Z4aSato0~nt7o0g6ETp zuFscg6P*Wr6g$!hmR<OGDHPkmvGb$;N*kRCsN zz#|#~r4zZ)?6|1lrz-u#XLj2}E|wR{Rj2n(vy3ou3ymwn^JGqir+L!F^qPlfMo~%m&nJ z%JoccMF~?X*KL*4q#*K}&g<9-YTctchSK9p37)hQ)rx*f+o0S8dlQgC=v|I0F5Da$o;qPk4)>vK8vPZ@8-!N9h; zCb=zBjqxJ;W|nYU9$%)GM6SK;v_HJ|H(8yY2>d|Jui-*tPL_RnNj6Io}|Z7W+% z{x}}k{Mq}W^`u3pP_Ry-fK^&4C5CI+l<$Nm5)9)Yq;Ni`Amv+bAq6ehRncVmGq;f4DfQa3 zCM(?SPvAMJv@~;bBf~ZzjCHslyV?nxwq+LWQY+<}ap<4O{Ai*OC|sUjrYjz>^*&vx zD?RZWp9z-liVRwcC$}c`pLEbnpF?W1=9ilnAAcO@{Tdh^9(L!>opS>NupCY?965Nv zZHTh&xjp;ccb{+rged7G=bf_ce@K0P}Zd0nNQ|--h~MV?_m$eFHX9lXP(KW 
zG%^cltDB#WVo(p60t~GJV+8-8*{5jSr8wiRjX&GB56*=h%|R zpMK08xGoqQNVjKf)b*blc5CoeS{$tBUOaTfkq!FJj-dWS8OKJ>`FiL-6=ZIux8IGO z8e{Yf#>U3np+kqU?Wx#vv|&UY7)-==_mHRtF)H#9Wl z&YnH%%A*xG3B%1G{Krf2D9Z79$KAxdXjo}VDE|fP=iHeyZgh0iovW1G#be#KE9<9+ zpN0pF&#$!s+mz6wiG_cfnxA9Gj=3#cwoDDNDWWo?l$p{yO)iGZoAV<{(6tfDZlicTGJlLyhTuT^}~RXJ2fi^qG$xAm*Imaad? zST76#;xG*<*Lu~e%);SS=~Bs)TL^svsYI*oExtCwP?09kaxEW9S+2=92aV+#gE^sO z&&Bp3=I7kWSPpD>wBeC_@rKgZmFYF(m%O})e|5EVh@U>^(YVUP zwr+(Bl7R{E>Y_!9+~&=jP3RQeLMf8vdc$qoi`6ea4pTaUi9TU)j;r#l;RvN;uzqu& zLWGI&G8TjuyM+tkHqi>u-X+c**uHFgT&r}|Y6keo)PGN zZhn*YDGj1blE=scfyxXO_tmRc2Le<4O9ir9ekoBV@)Q(U=Bx2M?-Hv3vbARj`QQ1t zTUNc`dTn{(N>@5I;QA(lvb=8VHdlo^V2SP{(S@W+xr}-!4aNF|;iQ^;OpJ9x<#Ejd z@E7GTz_+Vh41EZGFrg|PrWzjsmhx(wv#v`6JTD%0l*NnP;JS_A-Ffx=fcRBe+(ozG zm3(#f3d)N- zO<}#$M7LwIRG1(7=A-Vr-t`W5{P+p?2fzPu_v0V>QPQU253avhJ(qpJwp@9>_$2g}!n^Oxh`U50fogT2p(+f@+O zO!suKk~K^cvIx_BH#z{(+N4!^?HZz)95}n@r zKGbQXX~vJOKh3EWApgJv54s=v;Sa#Vu@E!Xjqdy2{~q`0Pkpv7COb0~?|bjP$IlKo z-?YhX!T*sXMp>wfag{%GrCi4*jh2Rv6%;80QVv*!Qan0QbcIVH&Myg)ZYviFrK@H2 zEdQObXpZ6{x>QkHM zH4mC?(*;0+qK&K$q zEFTR2{-1ICpWoqDV5zoOdD2yRY8Ia*Nd1&sO(<+V_;!MTS01TcKk=3PO^sR>rf)R! z#KD06?3r5#lxwz`&bPkx?vN7G;7KRgu-HfIwaUY{YGe3#dbm<2g;;*I?Mch}bb#7Y zb)o0FeAtXM%S6P9hU=_8MNiSvXNAw=>vVoZrX2mFD_9|lw$=7EX%z|eZN*C4Cw&%P z-j%rOzwP8!O4st6rA=!pjH=lL&*G&tY`j+YqOquExpr~szE!l=^1Efs%lk5|7pq)j z({~jX+A@s@H2hF*5-9&@lwkYai2W-98F}<Y)9Z`p}BMCX6f}F9d;!M4dRudL{;XFy2&vJj)KrLGK|dgla5nq3kvYfV{+jleyE8t8*s8z-fHpT9ND?=F@M;k>@k6 z@5qqXw@;7vIF@n2f(1?E>P09!tmJ_4-r$4P10hO5S{WU8B`Bq(tZX&E$}Ijd)E%ts z2N1VsvgFFh!zAMM!biD;<-*CaN#6z($m0%HU<1CasFM=@Ehis2b=FNBY+i1X;3kzh zm2p=oO}NS#q;p}?&7E_-gV(dWu;&GLJc(a|pK0&$VgIEu;*7(}JPe#cumv4MCUB1R z(Y?Ff@;%Rb)#31>)vki~C2XR)0iSfIjqk$#m)xNP`&}>UwuHfF0!x?`OavAV9dMlH zo+46yCuQi*al2!@?ynRPKS*#ohHmW<&rfwUA2O~VelyUqKbMa zZ;($W&y9IrVcihKOReZ!M87+YOiwJnw=(l#u+S0)h9 ztlp>$XUes~&uAn!btv({((Y-;a|u2Bs0kOtEKQ43^xnp|_eJB+56M|-mJq)}T2Q=tL8sc&hZ%oJ zi{&g*z8)x{+R4mwL(7QY40_;4#{8C0cNHv=`807Xp2}7vJ;o_T-$3S-z%+fmfiCNX zX@CiG^U?#{as_F|_G3Ns--nYn%1v6NE&3`?4=nX;qe?PJ^>8B#@LPc|BLp?5x(Wm& z)6_)U`+fl)A#ao>$h)Eck(LF9UxTTsfpS;Mb)A+{pOHiv zX!zP*J$CYB=zs}QT zwO?zwMn)$y<@!)<^Ej1UZ4O_uqE;Lln7F|;ckfIK?b_Cr>bpiY++m!_Hm5ejv2iSp zifC6}rpj_{^KR2GdS4em%V^Qqvj(qT>R+K^-EhMqSns0msL8|lc>OU$J9a#WBas$h zhw>_q#ew+kZ@wi5YHJu3T9cCTr`mWo9|74&%}u_U+r}2`pW@v|&?E z*OVt=x#RK`j}+=hXQXnZ^Nc~1ySABLwy~rN%PlOM+-dKlJA_~>vh3I0yG*@9qd@EqO17OERu)Q+x7tcV!>2q_Ccdj-R zN)J20od#RA@f?(;543rT2Q0Vl0_?WSu=l`&=&MWYAHAmx4d9LmdEm zj~{9*`zh2@8Jqrgf-e9AtJVjcehlcG(12eS?s~$%>%H|AwfH#x$*m0eTQGLUEm(}Y z{>$HVZr4+83HqSttGE$m!RqyHL0U#nvAZ9xc#u!qE9@v!+*IUP{~Y+c09vfMv2Mu{ zyN-}UsDi#i9w$GLH`rHF*PfhcY0$aRQ?3f$S;{(PU-i4?Q5N1WN#u>~7b&4x_YRV#aDAl`FU>Jt(UKp%W(N@t(2p3UqnGu^mn_l5el15CM3Z?Wzg|(t%({6=# zrQfM6DqEslPh1~mg1$w-{Cc;LVjO3wk83I>%Z!DEj;;cne}vwz&T?|{GZ zIHYfU;}N0w>+aoq-0C$!*{*vrTOQ=QtR3vOGv$?0@3ZBm4x3mVOM{M<#fl35ygWhS zYYWOZm+~ed^a&jci&i}ubhS*Vv_dN8H^x`lGjeHo_M$N^tr2yQhg*jbA0nRGjY>9! 
zI-k>XspWGmOAAj5A+TIh$3sX1iu&O_p?IXj^T+<0 zxrMB_^c>r|viFv<9V?pQSo}Ji*j_55#p~u?Ji)jHSfOPSdHHM^VRpWp5teIK?FT;a zUiXPl{JC4Zb~QdY0BbFn=DqLxZeQyk|HD6V|LK=M;{C1Xz#8?*PyU5lw|0#?c<8XV zguUf0Z}fPx@+VWSqZPl7adS$n_Zr;woP(MhBz!5Dy4o)n=0?m3q;g1QEuJ-+gHa30 z(uJ_^R-s?AGFbUlXQZCd#eG(mCekC~mNJ1A@kAr5(^eA}tvu<(&r2ENx&f z!K9J($DQr=3{(vkI#Q2kToag2D%bpg4e8MDfBmiDo!-Y)jDg{Ouv-6C?Hx}R*>YFy z>x^b~q=s}k?o8XL-lsEJrO}Qz+T_81JcKynUA3dy)y8LHYLtu7h%2#Yb4W z11Xm=w)mkNVm7W-R*KLnieHCk`bv>Bgd?6L&x;Rj-i4Q5i{PYjuEcYe&)Bw-mt1|t zs@Q~1LgELv1gr~gXe7iFMZ!auzgZp--`-ntnvgS0ZDQZ2_m+;DUgJ!P+v#&1U$q+F z3J_M_h-&j(AnI&FnK8;W4*~zshknF=eQ3@c?EE&fIx5zJcYf#n&|4~ShdAzD{hC*L zze>HEl~*Z>Stqibn^)razFTEMolK{NC|!l;CD|;DyRPld>t=X9Tpbz$F-tSG0=Fa( zjyF<$HK2lSHjpT-1WAG*!;5vPDp#H6P3^O|pvlcua&5@NO_KGGv~a?JoJ{~2xNMDc zyLUKu1j_X-cY8GdBg-{5HF~g?Ydeuyzcti}Vp4$9O@VF6I;J~>fM0TC`FH{eLDrq{ zboqW&k3L0J{4^%7^u%*|?lejD577~?omQq2!A>^H1bP_geuP*?grG?v$zS* zb<|cSu2q~e+U6#uTSU2HbQ{m!8`5oWwVFa+Rry=dT8c?N-R;s3p|nltN+HB{P}#Jh z5gUyumnS77Q_FSySd9N5*QI9N)~CI5D(xI5B@KwX}@ zqjt5%YNBOax#CdCwu*ILPU!Qy6sL=D!_usWR9S4tta>jCnx2bRlscJ|11Sqq9*WZG z6i@l2KvdqRBu&%NRVI}&&104a2`t5pSJj2^>f#F*`KJ&UC|J+UuF8`J1mbZ}BjA(A zbwK5sitz~cDNsP`3v;Al{e?H(9u#RsR(+}*+-Uc2em1;4ygO{Jp}#(9awE@`09&_3 z>uxIYnej%KiE5w}@2VZuu2Yf6X`*rP;35A_f#;up5xN%mw+Zmm$p7-$FSxIL?Hk^} z(FCm_m^53_ZHUl8fX=z*sVrSVT3*Y4of5RH)3)6$G7?hXY?r3R>o%{-8+l&kZ$*o; zfO}6mIgzDZIi~Y{QnmV0qTe@!bSD>Jb&qglU%2v@ZgP~1T(K+S##oD)e`o7c^_^lG z17hcv>sUz1t(11B&vkrtiE5!Od#CxBMfjIki?gUEW@=HAlLqQ67kPOWV@rY3a~^J9 znDEtUr7TE!$cr--VOhFVuA_206=_P)auAU)!aL*SoX$^zk46(I<0r@y%~M?7ohpPv7vy*SQrdmb>5l=hu4j6y zHdwCPG?|!Lf<=O~XgSr%)M<59cu_i?;wj%}PSyn8l%#0{k`z~^mFBS_Tyf)7ba_iV!{hjdzT?ND zl?M34<4GbrzZGbuyW?vqO#hwgfhpPh=}I|GBYiHKu6R|G(TvyH^w*;FrvYzlY|ITG zIqL3z`&)2U{v3DrJ$Jc{n>ILFuv6eZ7@o+)ZwhSOxZZ8sew#aR!1%1D5iZm7k^6qY zePYww+=YAY>n=sx-$XKsqh5kiD|wL{@*<+YWCm zZ`=D0$z0{=GgtCo6y;+c0a|G1BLD8B-^d(7jwr0`tj#3LIKu|Nr*R1Kg70 zy7P5ko);$P%tRP5WHFLJ1cOjUfFhDW*C@F! zP*B+->y_tR*SY+*fh0?_=ddh~&l$PJf;~7O{c>2O^Zq|k>Lju#U-BU%fX5-nX9XU? 
zS-I{!G!q%S`@qo#G1{H%P_9pVvr_-`W2OH0ovG_iD?Qck; zj+f)Zv=BIR8NQTHxc~j=yxeCX1wZ_4VT`th_*|zr4&!X`9kPSUGxKR42bELi;f?na zWnPj>&3@QvA}D6Y&RbZkMI}k8N01%JUkptBDoY}D+NcVVzJ}Snyesj zmVfFr{k9YCpCQvs&n=B?emorkiYGr@gq4*ojbi#+Y|+P28R6JS zm9Cd5-ufMuYqWQs9uk#uok}(L|7IKU!`}>Jl`)-!*)DRY>VDO1e`Y%Fq5b~YG!u^r?l+nT+c{{JG*6TQ!9pa(bN#FT$AN`|E`n_?5@VII^kpHf%D38QZZ)a zqJH_oY1x|8Ro;N7&7Vb*MMnC|Wq?rgIMPH6Va_z1=SI++hs>F|b2|ttFK1%WT!bR+ z+^EY-^1}~5rVbuFq&|Mlhh2lhO1)#p4%@Zv?rw#TAzNDfDdV=t9w$eRq^73q-@jjP zsO1qn^YY=NAtk^Su1-kTZK)6$r=_#l|v*zmNDuTQf5 zcj&M>e*8G~6bahnAzL83e+^CFYUu6Io|DbJYO;u>1&kGMa8a(~y;bL zT=-j;+#VykGiG3xCpzhwl5FYWvde>0W~@vfe)wdj7XNfTX2vMG&nnteVBfy|w2S4; zl#0H-e$~>9f!pq|GwXM0zkxH(*s89*_GdKZ>1BwYpj~XeWZB}=31ni^w>at) zX+c>ED?35TT>?A)lERs!zeHmVm2WPwTBK@d#vvXqn&zjbzO~ZMsw_0Gpg1vOnTxtvS=91iWu6Cd6nj`5%bG@4E>s1RDEKp4=X_?k29rag3 zgU4tovG#R|k}tVs1+pJdzaqeDaRo*crNTQM7g%}D9tl8 zPHce=m!S-#ysiJ8*K;a@R;-x=4$n$8D+^_-T&I;KE>EsmThd7MvNM>w3fl2s0`$@_a#p-RSb z&6$q#H{Cus3FgD+et!`UqQDf6f5Xa4Ns0A;O!fHYH*QqhjCt4h+f;RSYt_CUtBm+fM3pgfG9IT(G8+ZSo<6sY2g@s9Lh)!dEik$JN2+s zLmO_j)YEr9Sl^$@)4XckjGPchu*xFuXfrCAo|2NIjshhy+MI4LO{zI6nMMYth|44` z^QvXjLrG|tpgs`H;n2>~bKSX4<4}Y(?^KE<^S>z9B~ukSGLnAs(;=M7k~5lfFn69i z!#mF_ZdypXfR?i9P^QfCJ6Um1r#~y*W?aiJ59Nx}VZ{vcd972${riz~t`B}*GC*XDA5ru;%2a~X76TmdmA>`cOze!z5j7gIa{ zm4W3NityXtajC)%xT{Ht?(XVQ%U3M3EZOk?`~UUV>cojY_3SgxXY4C*;f3d`lTKRW z38^IKTxZL*S^ws7w(zDu_c5lQpW`6unD?`@?fOZJ!(dE-a*Z|f;ezBL6{B?jx5`3& zAa5X(R`B#1{*?11*6zj!^O$$v{(7n`k?u;H?ti1CWR)vQp)Rt_@yF$|rd~~@>32jyCpEo=;g+ggEBWuMt}oO zpYw{S*rgG!E4^aKt1qR_@gXBmMM(L~43Ij0Izudz!=xXc5u zFylA{{bi7ps2k^~^Wsdyc^)7=%+b{jN{iQ;>H*}&8Fb^u^$LGbpy$rDuMz{r8V>&7 zx$$eEvk}(@GNCh+E28s$$raXlE$qD?Bu!gb<6d#*#p@hn3YOpGr9Ja#_g+aA{5qEf z`YrF2%LXd$GjWe%^R4#Rl={k_rLvgEbdZ9VG0p_Twqwo{^T&A}O_arz>u-8ADA(*) zNxr#bKyl56>sEZ{b?3QT9H$@0fhC5mF|0T^!)IqEH+(STWlJwG6^Uu^l0MzB+{jz(%q3M|)UW4$)ppLy&D zhEgtheIF7{Ty%X#pQ-^0pF~c;~G8~uVRP! z=}C+S%qkKV&i*wLp=BXNEZ5~wO+l1vXD1nEa7TxO&WAfDvjZvzUb*s^Yd~^dv(6nI zgynA}#D3NQ8{)Mfb7sXD;<2F%=D)!LbJvZjDIxHG!#4bHcV-?qA3V*6L#D&v>u~L~ za!vKk7IJu+E0Cl@74Cm715$Kgk0ldkdOFSUiP|jJ%-<N7y#PZ7gVd2+v~*=Q(U1!lWH{94-q#IH3RJ@}@1bkvC6mqXZ}dkJob}`3JN32bvaZfcMLkO z9yge?F+0!wlvgX{6{1`_%b)9E!x@%YIsKjI9M1gZG1*U8j>lo0gXU7QDr3`_du~m2 zRu~Z0p2D$ae$Du-pgYUU=ltLVOAZDc%s2e+H>G5;oEgc5J2KNYkK@XLGY#i?kaU@C z8CtG`6f8LnU z!R2zZaiNh`QrUQV%D9kY<(0rCfO($TV z13U}&3eVxpUyj2H7=Nf`hAG!$whp`HpUchHoaedxIjl9{S=u$n^8U4C(6j>6nI9Nl zdA(9+oNr#8Ip(@&#Tr*wCWqoV*F3m@N7|ahK{Dc47YcA-%Wok%X-68i^n;aGEZ6S5 z%F1$WtADf&zK`9@X7gn$u+7uQi7ibZ;W>!a*RV%`Uvo@Bw6J*ykYvGvnzuCiw`ODY z8t&`NK9a{dCzFC@*k?j!xqL>EV8tUqEl*s@6TnU1OkX!+xil~5yaB^>nyi3wr^Dx7 zX_w-4XZfAyW}Y*yrC~b#S&22{TF2?Zn8##ieOTdK4p^>p&|I@QWof$pAC%g14jq%F z+08lg=~i#)G!m=`V6e#uSIlm~0A?Jz6*m$;C$oHnGDmH>PB#Eo`GqK0J_V`bxMqC? 
zttQOb#qw`KKaAu1J}`FfWCBObv|NV}QMyr#io-6qW}T+{PHS2m3c=%CsVmo|3tV#M zLZG58*W0&mw-tGkzKq7g`h9(UHvgzjIyzE2XYJg%Gh^ARsM1jFoiFd zxSH`%)kr&q!Rej92GUC{X&-<8HJ<1sIb&*aR+p+98BkLVv($JGEdfUki~Hj>FMbQ* zuqc0LXD5Bsp8fzYV4^jXKLvHmglF9y_9HsM8)jF-$vDrRruTwEd{7 zt4p_+<8?`!46K!mL7;8AK~2B!^J*kHOEtXq8tul_qQ<*U*oCCeW_2C3{PA(hXdkTz zU^vw0bk9-R8SVt_&Nrzh+IrPA4JwVi{V#=`Xr(fk)*d5%*h92aSTMbI=Q7*x$N<)M zHwMZ8iKERlHa6c3OhV>WqBv^#{m zQN{oOFvv+nK~x^kCUF>s6SEidL>qJ@r&um13*hg$oy|RjXFn{G&P<7#L7{_wH5e z*RR(L)kh^ammDaaNDqpHcGzL8Grc1hDObsr>vKhSy_V>v#rE+r1{5spGS$+ik_(pU zu-Iu0n+GhVUBwFFptPWDn>K9nDcVTg8cGu%chIhD zi5{9mmv+gev}|2NnsM4Mahx7#21wy0>lJnbOB|ti^g&H>`AME1h+qMHY_zciyNdN6 zr?t-(sU)U?4AMg&tf8lD(WxYG?F7{gc4nKmh_hq+O*B(@e&tE)xF(mawOz+L*`rOh zx_z#qUF4Drsa~m!iH`K5{}ehopDb|rASXgMHa12}zHRj0oy%yy$<-n2obrtg3$R(l zqDAhUtfCQ*1Z^atUC8?dadGeW&`W^TbEuu*EfaR?qrp14BJ=)Bv{1Ke4(_ScEB61g znif)wS}>KtAn7|lRt^%^X{8ODhahPdDlE!|w+CbtarV=ASl83H@OBVWIg)?1syfYB*7#4Mnn)2gH!&GN3x1B+pjb4StDg$V5Xk!bb=Uh*x^KYMP2Tb~sfX9hY*(aDSy`@aHIR>K*d{3VNl?W$iZ>oe6A$kEARaUi zxl2^st0)WlP5ikzR-*AXw1Mt<%W_@f>JY}0a&XWWqG+S#P}UrIEb-QqBj<9-oq*X% zn#b;;-F)g?+S^g#zM;M+r?HbmMXB>0humK6I)>ujk0{}Z# znqh6p=FIfC9?h`kb@)S;Q=T)789(l02gbOQv(3lMx7^H|@0)D$8tE+o{;I^4t>wBv zx;3voU-0GtC36~6uiy%OTAAAgw8C zG-WE!9MH|*N^*Dd?O8txoeOc@-mVt-Ag!aNR9VCKyaL{-HxJ1?On)2S4gidPUhr4ZK1zd=lIiq!o zc5SAR`UE+vH^) zn_>JOJGtd6&u_XV6b?%C?%lg>IXZs)xWcMGSTQDQhn05V4j(=o*(y+t9qn2sRth>a zn$&VphN#`p(1hxqs8f?i(jU}DoIra!tx7{}(E05VCsRkOO-m z&_D?}QLamz1;Qu~Y-P!%fSNL_{2S?+*Ufs>GSlvM~#`LshT?meFl3K$-k;;2((0nFo)J=S+z zwGAiK>{eQNiQXC{lH;_((>ygjC;#>dN;xnT!LZ5#PYYorxs*8$Q>wpC2F`Oc-_C1{l7Bg@ zsi0bZ=|HSh=VQ5sOl%lrebh)66OVX4tL zxG)psJ*yR!lxsDeUI!mm;sDAulo>o6#Fcj_k8#N!oI6joBqr4CX8Nd@EY|v?PEz{R z?EEd|c;JY%Et(A`-Of>`NF!>82PXW*#hvTabH%6-1sfR|2@xyaC^F|dUcQBwZVu?? zagdxim9k2*zSa7KrGj#Q4i7O!t5l?;~*(Gl>+B^5LrbJ>rj|NF)?Ihi_bwU%XNr? 
z^ZKCZ^r7s+l0=)T=mCH`e^YsLOTx^bdCXx=KU1N$$>qXuqze8ud6>sxiwvge=OAky zepp!*5-+YfoE3tnc*Wp81v!YK?H8{k0aK>_jr%*=I7~Z}ML=m1TnM-k5X-e2m5T8T zsU%g@v=B#SdOj$)Ff+Zf#^*rLuJh5bDLTKNv@>-L2^vxnK77yC=W2W zW`;GxaJ-8BHF!l)Zj))&wo_DxhdyMX;9Rn@F$o zZDZjTTdoTMZvtK;wYLNbQF#QMN`a|(m0f8agNBy^@ z@lD0d(ynbG0-x~QK+FrAPOs$QH3*pb^D3)(-KJbu(F-jbFS8B<*H$Wm$+6t;osbBe z(O|hQPsjIhdFI|%u9vLx6|B+%B~9fT?bEz(E3L3no}v&C<+>=?OFU6?T8OEDm3X#& zWzTb_Z+*RNeqn^W- zPB>Tb4o1vYN+X3uEIb-77Gb z?VM}tJ`UR$Bwoo9FfG@z^?AuwQ#og>qcyd_S1Vo10M1 z2d;TO7#SJ#4C|Iy1^Ah~%;T^{GdV^fFKj*}Uj795899GRBB8_x*rtV;igtFMaWE)&BhlU9O7P3;{!V zn(_2LG8F{#*gIwggm7VIDoN(Ct+X7^9Ff`27BJWef7CdhUL@b^6{eX#^C8JdHQ6$o z&M0r#K!KeZkNv9R*Qlc?7?=+~S@>54R-=%Z7E+#5L(F^GD3BbMKLJyr_Ucp1zqC?< zA`mDU<@(V_pHSC+;%eF+bFSL3VV(NWhu^R6x%<8hBs+KRQBOa$U47=W*JfN3k2nYz z%10dH3n??K@?;{&rZ90BjK{o$SJ?DSCOim)v0P`#YjXY)HM(R?dZmDLVm!aQ+6Ef4 zj?81T4uTw;52I1Y3zBxsVcXk}=stP(?pFgT%Vjq$q>*;+gX7=SnfzZt1jKS(L6upe zMJWm8T5lQJYFN<-*5btrRd4U{3~Z;IvPr$~ig&BJYEuF-NEFC}mBs=k7i%(plpWXd znx5vdnOpYP+O>HhzhkysvlOp(ZY|G;P=f=5>U%f;z<0LF&T_qUb?RQI(v>uPS8O>4H2cX2Zu@dZ zA$gC7KuN3?g89>n7B9*uVXc=UkrBi0;hd8_K0dCdrl!>J@UT5#L}!o>2l>;OCtCoA z2NR@+a{dAwHO9YnKU{8U?xRrbnrb)nxx3 zAu%#)g`cKlHx~s{H@Ug|xDM<1Bh6faBe({pQ0h=tFK5*);?#SGcaw+obQwp-6wA~6 zr?O8iXl`zH6IF74xF5PYyVWN?`B9y>vuWzyz~GRA1$*=6jh^`}-=+9GizWdofk5_K zv=T}uWhfGXl2ESe>+3x~U<|`&-@bjeXi$EKhK6EOd{3M>q0?(`Z&xT&AzVjChfb@Z zp}|V7sk1{LPt6)s6NBmPU9GGB4lEf>Eox$DX6}1;cehrqG2j-$L194|o0^(JC&y*M ztH80z+#hH*_H@u2lU@a$lXb0Dnk-|gu~|(FXDYWfsB^cSW6R)q=WSDW-0?GC%C#+`oCtwZA+T(1YUf+(v&9L4s0oyWay@_k zJW`-KGs-qGF{9LFT=d~_+G(fRLLEAENT07-70<%?0|NtU&z?PM#hP%P-CYC@oN|^HM~(EcNsNvizjn z*==u7)8k`m_3G8CeuJU>u#Dz5r4}vElrct%aR<%S#5_%!)2xnCS`Abu=)gT(Yip}k z1Q#z}oEdUtNU5&FDSy1l@tU^LPgALmccsE(num+S8y%#%Y11Z`tMPc@!FKcJ&8kqA zYbf$)H;Wc!z7fkPA=mBPg-Xp?ka5|M2k!m0wl=kK{W_%%r5~(U&~Xms+K=2)2m`D2 zmMzA;K$(K^!0qiE-WI(6`s=oA_x1IuojZ4`=bn4c<{!lguPSxtopDAGX;9nR@4=-+3}nx*E9v%eaaCnGD zUBvNV0Oc3?N4>`3@IV$vdqG~&UhoDjjyHEuP|;>!yokf$%{z3YINku_9><^a{*9^} z{r&yg01?N7L80EH2VqeX$~7{4_0=Cxx7>1vR-%W7Xx<||)qmiFSLm|b^xa$4-~IiU zgOp3c=1Tx`W3fU-C4DjPN4fS4lSN^MRUFnW%(b6Pzj5)D<)e~J<=W3T2%=m2fEsfU zZU<4+f6Q6+BYqVbHumk?Pj3OpvY&Q+ef_G%WzIMTPUl-KOq0V~#Iw&no1;heyKC1j zb??3Rs!K1uR4rH#?yW#QuP=jV+#*uLLt1xyM|FdcQvmMsBe`U{RXYKYmx=-niVem9;KAE{G z*7&GKT#N)(tdCK6u|#K4jHAj99z-yWV>H}Hajw}2s-Oi~A;dL7@ywgRD({c{@=l|G z0=915qQ3c!8`U;r-Zhp<`#<+0zyCtR1@u8P?u?#5lZBxMPKl_eGy-i!Nw?)TQ`A9%mK`)z0DI&><#ho7q63tAYD=KNN^`9(?rU>Skygf9T)1eC1Iv&)iLKU(`9QK zpq33)qk@b@(2Dg&LE_pm4RuNA@&uuu5?8~5 ziHNRwU00M!+7ZW4I^t!B*Lt$)wq8b*G3c3{FD#Fs;tQEFhf-UP#zazF7sU(VWu+hV zOw-G?2Hn5_4I)1Y*ssq7D^-nt>4=W#U64)Q2^vk5*T2isCX@RbzaKY-%(y|5A7IWH zKhkY4T24G%N&!yu#d7meOmM)szMa6M4Ps20b_g&_HVs?%+aNlyYB+xMA?3;3 zLY{K~?0LE|1tysquyP?Y>(P_~Xi?vkZScC003~3maY@tEr>z!Mi>a_(6l|@nWq|>t^hR0HyBknO`PfpKCwu$*({>LRW$F(PT?NF&jn#`4 zKDV@(Ou|SveQ8h5#>e9y&~OgR7nOj;C1r5rjrT4Lk(99SJ6Ty@w}$1{H7_lhhCgAC4Z0o3Yi%VS z*%jhXy@%~fLro1+1~3c8G11iA{iw(LFCkk^R1Y37jESLmF7lk#T>8(eOqwhyvD$Ao z>ajO;z6@s8T&>6b3|!^9lkr78TA(K1Dmyznwg}6!La|BZMo#D60NC$)$GDX}yz_6K_`w>2lOi>kWp$o!&(I^yZ z2K8Q;)ax_mnmVn4B!X7&aw0C&-}vrck?}m^Hh-1tUFB6jg*!$mIz`4UKaksj7YO9`To9{*yx*1N1Ywr`-6r zuFdtIRP6^LH%Hi?ThhK2{w?*d4-t&?$vYRPim(36FefJ3n3H*kd+iner9=7=1(FM&je#mV9pLzmO4+NyeP<6HZMjKl@J9>oT2Sz&bO%5uoC6lwZS$R{H zCN#zRRj%#G;tVI9obv@%q-W-G;7OppJ#U@ZI)ZEn*6<^H?}1PxYkkY<>d;g%XvSkr zV&6A{oETRNul4G3xwNXvX-?e+e=oiW#++hY2!({gCkkk`XKiz)5oc}Frv4^@E4RS; z-pX%7;Y)gETHLKixD}3-rgfTGfum`nXTquZS(km)KXvLn>uM>-QVn^)*^Rqj1MpW~W!Lj3urXAbt*8LfQVwd+B8T zYc~Q16V)UOaBy)ZjktXvleqMAD_wirMHJp!l3vi_JGAe3xkG!j)FdlJMMWn!6NSpO zRMQL!>7$$7qi22w1O&VmoJ^~3$uGr%tsJhg3gg%LAs@}LG1t8p=*_jw_EjCp5Yo#E 
zLY3tS(!+OmHRsFJMB#;X+ew^!RE6sqCxPe3kB_{IGbH@K%ry!C(cZ0|(RbeQR9S_V z$M7F#`)vFLG!9jER2Y1Nm`F?uQ9+$VdyOO@J=%S1{3}<2e&N8aXlXkKru?;NhJH!c6zFY$ZO|l>+c&iXW`~IMDgh)q<+2vD2f;=s>#Ow;?{jN6gp_vkc zb7<((Dc@Gy!eExR!x$>gC!o`gxI_twzDoD?F6&n&cs>WvoCO=J!HG4?Xt$Qr&Y?`@ zcchE!pU1>Y#@1d$%bG5J7CP&^#SK4NwM1p@2dEAGxN>Si#xm_CksFE0%`-BGCO((E zx_CYS%yN}-C|;MncX>}2a6T`@Y8K#%JK1B7pzubQOzN$NZuBP<41g{davT~sa6QpT z@VOVFJ>;pPpMpw$3ou@!a-XC2e*RQkLZInb?7U%2?E3@NK^x2O3Wc}pkJ{6k_gf=8 zmQRkL^NGt>OM9)>=^?Gi5wP03Pbo|rEN4KJqU2ct-C`NHe1bP0O|A3#dlOR6;Pa>? z?HxiS{#S=-+7ZlV(?De1dRyS$8>ZeVdZ+Va3V!c?sjP>bUT2$GZ|~`5CxC0^HnW|x zEGy5a!Dw=%g^Axn*UYa(x+`N_iD=QD`!}+%t=JHk@2dk;mgN=bUoGWPN!_I^yZ`ZW zgqeF;w1!#Rl9iW}>2+V$^2H`Ga_OYFGLC3^W+qqnTcS`Uo<^givtL2u@mz&n#;fz> z(I&6`j~u}1D7BcDmKI-D34gDQpmH1OA-fFESMs!azA4;hFFqezLG<)}OA~c$Gh1~g zVZ;2>-Yw_6$OC6^pTW=$1v#ERodtVDXt>>N@_|}UoFEEg7g_`BoGuH~*q`8$Bpjox zySE?^xa6k?InT)HlJM=SjlSw;DJ8$JPs=xETcf+IfpDlAS=r3Zee1lB+^;nFtnKJ% z@@kKi+yPqd+e>LQN$0i8fv068U2(eJaJ{@7m3*4zHAhv-We7X3^7-0nL{R&y#_7kX z#2o58ZOqi+-XX+n;e#YP&0!$FQIpfKRy{DeYT17u>kE%5t5|7Zz(qP?1+W}*3~zaZ zI_wKN@elt>FR|DMvq?cG00I4{y`N#c+sh~WpzqAz8ZSv&>O_7GLkD=9-pmDl5WTt( zD$DD57$^bsZW%&}qZ+@VjIj+ZGC7)fw-R!0gTm$Vl@Y8<^&A&M&_yb$n-x^<>o`-R zYJ#31{5IchW&}SMIYsa1=w72toK_oyWZRrSS`>|R3;M-+5`Rq4PnN zPAwaEJ?9-k`4Mfs`f0paZ1nqua19q%UX!+<52&%Li#mp#SQN6FPW%4Rfgy;iIPqSh zu`C)AL9QGa^*L*fBjyCw6Lx8;vw4lMe4wrCSD2 zm#3ti4MlMs!KN|t$>K?$AY7tmL+;PH2N5zFzL#edo)L#laysLwJ{NgI z_I=SBTPRDlTEwPD5dBMqP-$jcpaNC`3XI84ykZ5T4C?%+D^M1lDqf_@bR{DGs2E%W zXLIb=n3%V;GnG4T|I&3ek{SqyK-jGb`%1G50HZ-@ z9&#ZqRka|pK(Y85_6l>%TgKkr-s?X0_IwcoQ(wp>uI8LAHv?vgH3JT5x@V81mQ{Y4 zTCnlZ&<6Ff_Nr88_NmhY_?Ab|%k3ysMcV}xGLO)tXiYZ3d%=iM;xrOf@SLl5gE3mP z-CK$0ow2ktt>i(+e*yp2L+Lg@XQ%_3%zy>t2i=wF^l4FWpGU&-gj3yC4i4dTCTh+p*rx@Ll)o zX=-^D7wX}Nz=xgrS*P^}$Lyw4n%odriD=Ua7z3(^U(Kwoks> z$&CeT6aB+lr}ZL`?}Vac=aHB>jrrgkc&<6L`DK=At8sBjv4D{>JhaP0k%RwWt3a*Z z9Y583^h6Cl^1Vb7=_(mhsA>jA2L~A)c!RHOI4K;@fhnQZw8;tkNi%LGv_e7Ss(0EM z=KSig*e4epEE%?VUvY_($b+ahu+Nh)!}k%)^+-Y^&&^Z@-d-MEoeo^RWwk*NdLB+u z%+Zo;tBmWUd$(Da?}uc#;scw;l{fp_XZk*SU0kt<)#Apr*qkzlsxv&+0<&d{pFp75 z&xM8|!Rbs%E(VBNPz9FxHvVQta3`i_=W5{DHyM@xGI|Z5gj)Vf5 zoo#9)*3fQ$TLSBm0N~`b44b6_PZ{@Z6HwBx7?HF3S=}+x7(rgzYLXvF{037IWx{SU^Bn+#7^zDiyv+u|KM8uaGD+f7LwN zqlTX}dBMtiOT8&w$lK|avPjlMf`%ll?Sg>EzSsfaeCUeH$gtKAm(!$RMoic_Wa|s`VU9an?L}e1LXCF0(ax-2f;RDRxPc>f=40zyfG0Hbb4S!XAcyIapTi}*(j@V$) zpaTq){`;l;XTa2RtYKYF2ls_vkw<2`W&p6ZeN-g=-o}(CuI_Vz5xD+S3P)b*TDVBC zYyCd>$_ohI4jN{8P+evr9c*(1L@tGMFa7eBBY^azv>ko*YpqHf@1ZcuhFBWnkap};U)6HqR*I$C2Xn8A#d>}7tJ#4WEqxjpHeBjs(Nh-6W&Y@4a2OVI zm#&)QmnWg*X-!p*fP-Ecx|$9~Ksj|k91_Zo!`l2}Q8x73>1n;FtozPoswt=CK{SLN zr-!Q5Gx5vY)aF%7f(`P}Ekn&(i&@H&qbHt>T*W>XiI%+ zI`xLI+>Kw!PrlL-;7YOtPB$%)N4eb@VnL}^%67&kC%~o@17Np@_E;qkHCi5#nw_A~ zofMZ!YIM9^S#EKx}q2%OCh##^Fw89V%r9A0MGvQ-DsycH3GCWZKRTM!2_MF~cJ z75XtTc;Dr0b8#3qX@Ew|bfO}o9keXk9y&;9w`=$uExI7B$p#7__%)~iP!=WOd3 zYPyG&R5q-O*2G;M=pynWXf0_~Rx+{ngF>7(Ton*Cf7>{k>hEa)+exZ#w zlIm^vEpj%tsnjVqS5)5PWl=e?^9}BO-3q-A-yTEizST_Ut*iNwCv;PTagVSHng zV;%Git9wnApR>t@?OQ})fj}veR@01d&le=^Cz0v-=CN5^<{F7)LY*^xqfyi}0rqxZ z=lW%>cw{L*1|+&~s~Rgm5$^CaE>r%veJgd8xk4N)J3vNGp|!B{9ezw?wBK z{q|H0BS)a?arN03%5_5C%*Tnj9Il(d0;T<3!0FA|hHY{LtsLELE8^C3m!%4gbsFE{ zO~eN+WutB^yy80|ebH>&wef-ag5<8#(q9^*=FO6J>hU1~zhfdHE9flIEk0AS`qm|W{06eh1v3D}n#5q+T6|LS`b9H zFyLxd9gk!HHrd=U<@o#?JqK08C)I$t!&r3P=eMI5bX2P4io8zTb}GW$v!flGTT=YG zE*a(`09u^8i+2itj1FT@dHo!0$R)shMv6J9nEP_j({pvy#GY+3r_n**g5hGU%1OhM zmr+uPr{S+A_R9e5a+r4;0SN94KB-B=sLg;tb%TM4Wr^1Jo0o`IVr1t8d)D@{IDh%& znWGB!Xb2PVT#A&)J)U!ATajUhZ#kmy{)THI(u-os0ry-#js_`i-dN*)wjzUG_8!$d zy$6&BnuLq_EhqUYry}ij`}i@^eAHjAzRUhY`aL9U=^LV@J&Z3;UkQ*;?u-W@_@wzj 
zm#7Y6BzTE-SSiMi@yZ9zp<|Zu!DE#hU_*ceZ;lT8PEJhOQORt7WE((kQW-$Tv8&Kk zd4;I&ac^L8rrCIzysPwF){%ejM2G=$u4utlI12~P-T><6-;r};{$1VbZ*E2lau(`W z=KIk+5WIpIk!+XH*-b!YUJsu3Px+;hlaQu|0X9Z22sEkpjqXhV+f(no8C`7r%UGf< z%YFQy$e1;ikH(`_VDKJ`D!Y4pGXRtm-3iG>Ta6z~tn<>c1K)M&6cPZb*-{fpI|l}% z+$@BT&J;BU_n%6)!WI7NM^a@Z@t3vQ4$lw(ds0vEwX@tzlxIxsz=xL_$gY%L&ziXG zsocvA-dWjRNE)yy0Q8Z1;`mEgO!O&Gp*r5RK2(MxpbuV|LI~dQ#gwi;d-#Kn%Tcqb zQA><}RX_7Dwc%lW9`iW%+^nPLbr1I}PI&w(uUZjo_#f>k=WY!O*vbaIFE6Mwa%#>u zV74ogp#FPP2QS3sW+CWKpNb_=T|6kH8#sxW2n<()W}0aVk%NBsV3xm%_ivXoVnGu$ z4VQEavYV>}G}9QAukHPNO7KxhkfP!>zFHn7HOW5RQ@(J1qFN3P^3ZOXqKbLl5)>4J z`i8Bt{pk(@k*DwZ2o4BLof;52c|=p;wfSCytLJM>dH`116VdcL_9mq>1 zIe;^nlK!dd?-~x6=m+aq8SsCv4#>Hu(PfH-eG`)EV YV)+2S5qX-6bp!Y*%Bjj$$(X + + + + + + + + Overview: module code — PyTorch master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    All modules for which code is available

diff --git a/docs/stable/_modules/torch.html b/docs/stable/_modules/torch.html
new file mode 100644
index 000000000000..bd757d6d5d89
--- /dev/null
+++ b/docs/stable/_modules/torch.html
@@ -0,0 +1,853 @@
+torch — PyTorch master documentation

    Source code for torch

    +# @lint-ignore-every PYTHON3COMPATIMPORTS
    +
    +r"""
    +The torch package contains data structures for multi-dimensional
    +tensors and mathematical operations over these are defined.
    +Additionally, it provides many utilities for efficient serializing of
    +Tensors and arbitrary types, and other useful utilities.
    +
    +It has a CUDA counterpart, that enables you to run your tensor computations
    +on an NVIDIA GPU with compute capability >= 3.0.
    +"""
    +
    +import os
    +import sys
    +import platform
    +from ._utils import _import_dotted_name
    +from ._utils_internal import get_file_path, prepare_multiprocessing_environment
    +from .version import __version__  # noqa: F401
    +from ._six import string_classes as _string_classes
    +
    +__all__ = [
    +    'typename', 'is_tensor', 'is_storage', 'set_default_tensor_type',
    +    'set_rng_state', 'get_rng_state', 'manual_seed', 'initial_seed', 'seed',
    +    'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack', 'matmul',
    +    'no_grad', 'enable_grad', 'rand', 'randn',
    +    'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage',
    +    'ShortStorage', 'CharStorage', 'ByteStorage',
    +    'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor',
    +    'ShortTensor', 'CharTensor', 'ByteTensor', 'Tensor',
    +]
    +
    +################################################################################
    +# Load the extension module
    +################################################################################
    +
    +# Loading the extension with RTLD_GLOBAL option allows to not link extension
    +# modules against the _C shared object. Their missing THP symbols will be
    +# automatically filled by the dynamic loader.
    +import os as _dl_flags
    +
    +# if we have numpy, it *must* be imported before the call to setdlopenflags()
    +# or there is risk that later c modules will segfault when importing numpy
    +try:
    +    import numpy as _np  # noqa: F401
    +except ImportError:
    +    pass
    +
    +if platform.system() == 'Windows':
    +    # first get nvToolsExt PATH
    +    def get_nvToolsExt_path():
    +        NVTOOLEXT_HOME = _dl_flags.getenv('NVTOOLSEXT_PATH', 'C:\\Program Files\\NVIDIA Corporation\\NvToolsExt')
    +
    +        if _dl_flags.path.exists(NVTOOLEXT_HOME):
    +            return _dl_flags.path.join(NVTOOLEXT_HOME, 'bin', 'x64')
    +        else:
    +            return ''
    +
    +    py_dll_path = _dl_flags.path.join(sys.exec_prefix, 'Library', 'bin')
    +    th_dll_path = _dl_flags.path.join(_dl_flags.path.dirname(__file__), 'lib')
    +
    +    dll_paths = [th_dll_path, py_dll_path, get_nvToolsExt_path(), _dl_flags.environ['PATH']]
    +
    +    # then add the path to env
    +    _dl_flags.environ['PATH'] = ';'.join(dll_paths)
    +
    +else:
    +    # first check if the os package has the required flags
    +    if not hasattr(_dl_flags, 'RTLD_GLOBAL') or not hasattr(_dl_flags, 'RTLD_LAZY'):
    +        try:
    +            # next try if DLFCN exists
    +            import DLFCN as _dl_flags
    +        except ImportError:
    +            # as a last attempt, use compile-time constants
    +            import torch._dl as _dl_flags
    +
    +    old_flags = sys.getdlopenflags()
    +    sys.setdlopenflags(_dl_flags.RTLD_GLOBAL | _dl_flags.RTLD_LAZY)
    +
    +del _dl_flags
    +
    +from torch._C import *
    +
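+# Extend __all__ with every public name the C extension defines; names ending
+# in 'Base' are excluded because those raw storage bases are wrapped into the
+# Python Storage classes defined further below.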
    +__all__ += [name for name in dir(_C)
    +            if name[0] != '_' and
    +            not name.endswith('Base')]
    +
    +if platform.system() != 'Windows':
    +    sys.setdlopenflags(old_flags)
    +    del old_flags
    +
    +################################################################################
    +# Define basic utilities
    +################################################################################
    +
    +
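+# typename(o) renders a human-readable type name: tensors report their full
+# type string (e.g. 'torch.FloatTensor'); other objects fall back to a
+# module-qualified class name (e.g. 'torch.nn.modules.module.Module').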
    +def typename(o):
    +    if isinstance(o, torch.Tensor):
    +        return o.type()
    +
    +    module = ''
    +    class_name = ''
    +    if hasattr(o, '__module__') and o.__module__ != 'builtins' \
    +            and o.__module__ != '__builtin__' and o.__module__ is not None:
    +        module = o.__module__ + '.'
    +
    +    if hasattr(o, '__qualname__'):
    +        class_name = o.__qualname__
    +    elif hasattr(o, '__name__'):
    +        class_name = o.__name__
    +    else:
    +        class_name = o.__class__.__name__
    +
    +    return module + class_name
    +
    +
    +
+def is_tensor(obj):
+    r"""Returns True if `obj` is a PyTorch tensor.
+
+    Args:
+        obj (Object): Object to test
+    """
+    return isinstance(obj, torch.Tensor)
+
+
+def is_storage(obj):
+    r"""Returns True if `obj` is a PyTorch storage object.
+
+    Args:
+        obj (Object): Object to test
+    """
+    return type(obj) in _storage_classes
+
+
+def set_default_tensor_type(t):
+    r"""Sets the default ``torch.Tensor`` type to floating point tensor type
+    :attr:`t`. This type will also be used as default floating point type for
+    type inference in :func:`torch.tensor`.
+
+    The default floating point tensor type is initially ``torch.FloatTensor``.
+
+    Args:
+        t (type or string): the floating point tensor type or its name
+
+    Example::
+
+        >>> torch.tensor([1.2, 3]).dtype    # initial default for floating point is torch.float32
+        torch.float32
+        >>> torch.set_default_tensor_type(torch.DoubleTensor)
+        >>> torch.tensor([1.2, 3]).dtype    # a new floating point tensor
+        torch.float64
+
+    """
+    if isinstance(t, _string_classes):
+        t = _import_dotted_name(t)
+    _C._set_default_tensor_type(t)
+
+
+def set_default_dtype(d):
+    r"""Sets the default floating point dtype to :attr:`d`. This type will be
+    used as default floating point type for type inference in
+    :func:`torch.tensor`.
+
+    The default floating point dtype is initially ``torch.float32``.
+
+    Args:
+        d (:class:`torch.dtype`): the floating point dtype to make the default
+
+    Example::
+
+        >>> torch.tensor([1.2, 3]).dtype    # initial default for floating point is torch.float32
+        torch.float32
+        >>> torch.set_default_dtype(torch.float64)
+        >>> torch.tensor([1.2, 3]).dtype    # a new floating point tensor
+        torch.float64
+
+    """
+    _C._set_default_dtype(d)
+
+
+# If you edit these imports, please update torch/__init__.py.in as well
+from .random import set_rng_state, get_rng_state, manual_seed, initial_seed, seed
+from .serialization import save, load
+from ._tensor_str import set_printoptions
+
+################################################################################
+# Define Storage and Tensor classes
+################################################################################
+
+from .tensor import Tensor
+from .storage import _StorageBase
+
+
+class DoubleStorage(_C.DoubleStorageBase, _StorageBase):
+    pass
+
+
+class FloatStorage(_C.FloatStorageBase, _StorageBase):
+    pass
+
+
+class HalfStorage(_C.HalfStorageBase, _StorageBase):
+    pass
+
+
+class LongStorage(_C.LongStorageBase, _StorageBase):
+    pass
+
+
+class IntStorage(_C.IntStorageBase, _StorageBase):
+    pass
+
+
+class ShortStorage(_C.ShortStorageBase, _StorageBase):
+    pass
+
+
+class CharStorage(_C.CharStorageBase, _StorageBase):
+    pass
+
+
+class ByteStorage(_C.ByteStorageBase, _StorageBase):
+    pass
+
+
+class BoolStorage(_C.BoolStorageBase, _StorageBase):
+    pass
+
+
+class BFloat16Storage(_C.BFloat16StorageBase, _StorageBase):
+    pass
+
+
+class QUInt8Storage(_C.QUInt8StorageBase, _StorageBase):
+    pass
+
+class QInt8Storage(_C.QInt8StorageBase, _StorageBase):
+    pass
+
+class QInt32Storage(_C.QInt32StorageBase, _StorageBase):
+    pass
+
+
+_storage_classes = {
+    DoubleStorage, FloatStorage, LongStorage, IntStorage, ShortStorage,
+    CharStorage, ByteStorage, HalfStorage, BoolStorage, QUInt8Storage, QInt8Storage,
+    QInt32Storage, BFloat16Storage
+}
+
+# The _tensor_classes set is initialized by the call to _C._initialize_tensor_type_bindings()
+_tensor_classes = set()
+
+
+################################################################################
+# Initialize extension
+################################################################################
+
+def manager_path():
+    if platform.system() == 'Windows':
+        return b""
+    path = get_file_path('torch', 'bin', 'torch_shm_manager')
+    prepare_multiprocessing_environment(get_file_path('torch'))
+    if not os.path.exists(path):
+        raise RuntimeError("Unable to find torch_shm_manager at " + path)
+    return path.encode('utf-8')
+
+
+# Shared memory manager needs to know the exact location of manager executable
+_C._initExtension(manager_path())
+del manager_path
+
+for name in dir(_C._VariableFunctions):
+    if name.startswith('__'):
+        continue
+    globals()[name] = getattr(_C._VariableFunctions, name)
+
+################################################################################
+# Import interface functions defined in Python
+################################################################################
+
+# needs to be after the above ATen bindings so we can overwrite from Python side
+from .functional import *
+
+
+################################################################################
+# Remove unnecessary members
+################################################################################
+
+del DoubleStorageBase
+del FloatStorageBase
+del LongStorageBase
+del IntStorageBase
+del ShortStorageBase
+del CharStorageBase
+del ByteStorageBase
+del BoolStorageBase
+del QUInt8StorageBase
+del BFloat16StorageBase
+
+################################################################################
+# Import most common subpackages
+################################################################################
+
+import torch.cuda
+import torch.autograd
+from torch.autograd import no_grad, enable_grad, set_grad_enabled  # noqa: F401
+import torch.nn
+import torch.nn._intrinsic
+import torch.nn.quantized
+import torch.optim
+import torch.multiprocessing
+import torch.sparse
+import torch.utils.backcompat
+import torch.onnx
+import torch.jit
+import torch.hub
+import torch.random
+import torch.distributions
+import torch.testing
+import torch.backends.cuda
+import torch.backends.mkl
+import torch.backends.openmp
+import torch.utils.data
+import torch.__config__
+import torch.__future__
+
+_C._init_names(list(torch._storage_classes))
+
+# attach docstrings to torch and tensor functions
+from . import _torch_docs, _tensor_docs, _storage_docs
+del _torch_docs, _tensor_docs, _storage_docs
+
+
+def compiled_with_cxx11_abi():
+    r"""Returns whether PyTorch was built with _GLIBCXX_USE_CXX11_ABI=1"""
+    return _C._GLIBCXX_USE_CXX11_ABI
+
+
+# Import the ops "namespace"
+from torch._ops import ops  # noqa: F401
+
+# Import the quasi random sampler
+import torch.quasirandom
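+
+# Doctest-style sketch of the helpers defined above (for illustration only,
+# not part of the module source; exact dtypes assume a stock CPU build with
+# default settings):
+#
+#     >>> torch.is_tensor(torch.zeros(1))
+#     True
+#     >>> torch.is_storage(torch.zeros(1).storage())
+#     True
+#     >>> torch.set_default_dtype(torch.float64)
+#     >>> torch.tensor([1.5]).dtype
+#     torch.float64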
diff --git a/docs/stable/_modules/torch/__config__.html b/docs/stable/_modules/torch/__config__.html
new file mode 100644
index 000000000000..97d75211ffdd
--- /dev/null
+++ b/docs/stable/_modules/torch/__config__.html
@@ -0,0 +1,531 @@
+torch.__config__ — PyTorch master documentation

    Source code for torch.__config__

    +import torch
    +
    +
    +
+def show():
+    """
+    Return a human-readable string with descriptions of the
+    configuration of PyTorch.
+    """
+    return torch._C._show_config()
+
+# TODO: In principle, we could provide more structured version/config
+# information here. We're not for now; considering doing so if someone
+# asks for it.
+
+def parallel_info():
+    r"""Returns detailed string with parallelization settings"""
+    return torch._C._parallel_info()
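+
+# Usage sketch (for illustration, not part of the module): both helpers
+# return plain strings whose contents vary by build:
+#
+#     >>> import torch
+#     >>> print(torch.__config__.show())           # compiler, CUDA, BLAS, ...
+#     >>> print(torch.__config__.parallel_info())  # ATen/OpenMP/MKL threading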
diff --git a/docs/stable/_modules/torch/_tensor_str.html b/docs/stable/_modules/torch/_tensor_str.html
new file mode 100644
index 000000000000..e40b3ac72e40
--- /dev/null
+++ b/docs/stable/_modules/torch/_tensor_str.html
@@ -0,0 +1,827 @@
+torch._tensor_str — PyTorch master documentation

    Source code for torch._tensor_str

    +import math
    +import torch
    +from torch._six import inf
    +
    +
    +class __PrinterOptions(object):
    +    precision = 4
    +    threshold = 1000
    +    edgeitems = 3
    +    linewidth = 80
    +    sci_mode = None
    +
    +
    +PRINT_OPTS = __PrinterOptions()
    +
    +
    +# We could use **kwargs, but this will give better docs
    +
+def set_printoptions(
+    precision=None,
+    threshold=None,
+    edgeitems=None,
+    linewidth=None,
+    profile=None,
+    sci_mode=None
+):
+    r"""Set options for printing. Items shamelessly taken from NumPy
+
+    Args:
+        precision: Number of digits of precision for floating point output
+            (default = 4).
+        threshold: Total number of array elements which trigger summarization
+            rather than full `repr` (default = 1000).
+        edgeitems: Number of array items in summary at beginning and end of
+            each dimension (default = 3).
+        linewidth: The number of characters per line for the purpose of
+            inserting line breaks (default = 80). Thresholded matrices will
+            ignore this parameter.
+        profile: Sane defaults for pretty printing. Can override with any of
+            the above options. (any one of `default`, `short`, `full`)
+        sci_mode: Enable (True) or disable (False) scientific notation. If
+            None (default) is specified, the value is defined by `_Formatter`
+    """
+    if profile is not None:
+        if profile == "default":
+            PRINT_OPTS.precision = 4
+            PRINT_OPTS.threshold = 1000
+            PRINT_OPTS.edgeitems = 3
+            PRINT_OPTS.linewidth = 80
+        elif profile == "short":
+            PRINT_OPTS.precision = 2
+            PRINT_OPTS.threshold = 1000
+            PRINT_OPTS.edgeitems = 2
+            PRINT_OPTS.linewidth = 80
+        elif profile == "full":
+            PRINT_OPTS.precision = 4
+            PRINT_OPTS.threshold = inf
+            PRINT_OPTS.edgeitems = 3
+            PRINT_OPTS.linewidth = 80
+
+    if precision is not None:
+        PRINT_OPTS.precision = precision
+    if threshold is not None:
+        PRINT_OPTS.threshold = threshold
+    if edgeitems is not None:
+        PRINT_OPTS.edgeitems = edgeitems
+    if linewidth is not None:
+        PRINT_OPTS.linewidth = linewidth
+    PRINT_OPTS.sci_mode = sci_mode
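+
+# Usage sketch (for illustration, not part of the module): a profile is a
+# preset over the same fields the keyword arguments set individually:
+#
+#     >>> torch.set_printoptions(profile="short")   # precision=2, edgeitems=2
+#     >>> torch.set_printoptions(precision=8, sci_mode=True)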
+
+
+class _Formatter(object):
+    def __init__(self, tensor):
+        self.floating_dtype = tensor.dtype.is_floating_point
+        self.int_mode = True
+        self.sci_mode = False
+        self.max_width = 1
+
+        with torch.no_grad():
+            tensor_view = tensor.reshape(-1)
+
+        if not self.floating_dtype:
+            for value in tensor_view:
+                value_str = '{}'.format(value)
+                self.max_width = max(self.max_width, len(value_str))
+
+        else:
+            nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))
+
+            if nonzero_finite_vals.numel() == 0:
+                # no valid number, do nothing
+                return
+
+            # Convert to double for easy calculation. HalfTensor overflows with 1e8, and there's no div() on CPU.
+            nonzero_finite_abs = nonzero_finite_vals.abs().double()
+            nonzero_finite_min = nonzero_finite_abs.min().double()
+            nonzero_finite_max = nonzero_finite_abs.max().double()
+
+            for value in nonzero_finite_vals:
+                if value != torch.ceil(value):
+                    self.int_mode = False
+                    break
+
+            if self.int_mode:
+                # in int_mode for floats, all numbers are integers, and we append a decimal to nonfinites
+                # to indicate that the tensor is of floating type. add 1 to the len to account for this.
+                if nonzero_finite_max / nonzero_finite_min > 1000. or nonzero_finite_max > 1.e8:
+                    self.sci_mode = True
+                    for value in nonzero_finite_vals:
+                        value_str = ('{{:.{}e}}').format(PRINT_OPTS.precision).format(value)
+                        self.max_width = max(self.max_width, len(value_str))
+                else:
+                    for value in nonzero_finite_vals:
+                        value_str = ('{:.0f}').format(value)
+                        self.max_width = max(self.max_width, len(value_str) + 1)
+            else:
+                # Check if scientific representation should be used.
+                if nonzero_finite_max / nonzero_finite_min > 1000.\
+                        or nonzero_finite_max > 1.e8\
+                        or nonzero_finite_min < 1.e-4:
+                    self.sci_mode = True
+                    for value in nonzero_finite_vals:
+                        value_str = ('{{:.{}e}}').format(PRINT_OPTS.precision).format(value)
+                        self.max_width = max(self.max_width, len(value_str))
+                else:
+                    for value in nonzero_finite_vals:
+                        value_str = ('{{:.{}f}}').format(PRINT_OPTS.precision).format(value)
+                        self.max_width = max(self.max_width, len(value_str))
+
+        if PRINT_OPTS.sci_mode is not None:
+            self.sci_mode = PRINT_OPTS.sci_mode
+
+    def width(self):
+        return self.max_width
+
+    def format(self, value):
+        if self.floating_dtype:
+            if self.sci_mode:
+                ret = ('{{:{}.{}e}}').format(self.max_width, PRINT_OPTS.precision).format(value)
+            elif self.int_mode:
+                ret = '{:.0f}'.format(value)
+                if not (math.isinf(value) or math.isnan(value)):
+                    ret += '.'
+            else:
+                ret = ('{{:.{}f}}').format(PRINT_OPTS.precision).format(value)
+        else:
+            ret = '{}'.format(value)
+        return (self.max_width - len(ret)) * ' ' + ret
+
+
+def _scalar_str(self, formatter):
+    return formatter.format(self.item())
+
+
+def _vector_str(self, indent, formatter, summarize):
+    # length includes spaces and comma between elements
+    element_length = formatter.width() + 2
+    elements_per_line = max(1, int(math.floor((PRINT_OPTS.linewidth - indent) / (element_length))))
+    char_per_line = element_length * elements_per_line
+
+    if summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
+        data = ([formatter.format(val) for val in self[:PRINT_OPTS.edgeitems].tolist()] +
+                [' ...'] +
+                [formatter.format(val) for val in self[-PRINT_OPTS.edgeitems:].tolist()])
+    else:
+        data = [formatter.format(val) for val in self.tolist()]
+
+    data_lines = [data[i:i + elements_per_line] for i in range(0, len(data), elements_per_line)]
+    lines = [', '.join(line) for line in data_lines]
+    return '[' + (',' + '\n' + ' ' * (indent + 1)).join(lines) + ']'
+
+
+def _tensor_str_with_formatter(self, indent, formatter, summarize):
+    dim = self.dim()
+
+    if dim == 0:
+        return _scalar_str(self, formatter)
+    if dim == 1:
+        return _vector_str(self, indent, formatter, summarize)
+
+    if summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
+        slices = ([_tensor_str_with_formatter(self[i], indent + 1, formatter, summarize)
+                   for i in range(0, PRINT_OPTS.edgeitems)] +
+                  ['...'] +
+                  [_tensor_str_with_formatter(self[i], indent + 1, formatter, summarize)
+                   for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))])
+    else:
+        slices = [_tensor_str_with_formatter(self[i], indent + 1, formatter, summarize)
+                  for i in range(0, self.size(0))]
+
+    tensor_str = (',' + '\n' * (dim - 1) + ' ' * (indent + 1)).join(slices)
+    return '[' + tensor_str + ']'
+
+
+def _tensor_str(self, indent):
+    if self.numel() == 0:
+        return '[]'
+
+    summarize = self.numel() > PRINT_OPTS.threshold
+    if self.dtype is torch.float16 or self.dtype is torch.bfloat16:
+        self = self.float()
+    formatter = _Formatter(get_summarized_data(self) if summarize else self)
+    return _tensor_str_with_formatter(self, indent, formatter, summarize)
+
+
+def _add_suffixes(tensor_str, suffixes, indent, force_newline):
+    tensor_strs = [tensor_str]
+    last_line_len = len(tensor_str) - tensor_str.rfind('\n') + 1
+    for suffix in suffixes:
+        suffix_len = len(suffix)
+        if force_newline or last_line_len + suffix_len + 2 > PRINT_OPTS.linewidth:
+            tensor_strs.append(',\n' + ' ' * indent + suffix)
+            last_line_len = indent + suffix_len
+            force_newline = False
+        else:
+            tensor_strs.append(', ' + suffix)
+            last_line_len += suffix_len + 2
+    tensor_strs.append(')')
+    return ''.join(tensor_strs)
+
+
+def get_summarized_data(self):
+    dim = self.dim()
+    if dim == 0:
+        return self
+    if dim == 1:
+        if self.size(0) > 2 * PRINT_OPTS.edgeitems:
+            return torch.cat((self[:PRINT_OPTS.edgeitems], self[-PRINT_OPTS.edgeitems:]))
+        else:
+            return self
+    if self.size(0) > 2 * PRINT_OPTS.edgeitems:
+        start = [self[i] for i in range(0, PRINT_OPTS.edgeitems)]
+        end = ([self[i]
+                for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))])
+        return torch.stack([get_summarized_data(x) for x in (start + end)])
+    else:
+        return torch.stack([get_summarized_data(x) for x in self])
+
+
+def _str(self):
+    prefix = 'tensor('
+    indent = len(prefix)
+    suffixes = []
+
+    # Note [Print tensor device]:
+    # A general logic here is we only print device when it doesn't match
+    # the device specified in default tensor type.
+ # Currently torch.set_default_tensor_type() only supports CPU/CUDA, thus + # torch._C._get_default_device() only returns either cpu or cuda. + # In other cases, we don't have a way to set them as default yet, + # and we should always print out device for them. + if self.device.type != torch._C._get_default_device()\ + or (self.device.type == 'cuda' and torch.cuda.current_device() != self.device.index): + suffixes.append('device=\'' + str(self.device) + '\'') + + has_default_dtype = self.dtype in (torch.get_default_dtype(), torch.int64, torch.bool) + if self.is_sparse: + suffixes.append('size=' + str(tuple(self.shape))) + suffixes.append('nnz=' + str(self._nnz())) + if not has_default_dtype: + suffixes.append('dtype=' + str(self.dtype)) + indices_prefix = 'indices=tensor(' + indices = self._indices().detach() + indices_str = _tensor_str(indices, indent + len(indices_prefix)) + if indices.numel() == 0: + indices_str += ', size=' + str(tuple(indices.shape)) + values_prefix = 'values=tensor(' + values = self._values().detach() + values_str = _tensor_str(values, indent + len(values_prefix)) + if values.numel() == 0: + values_str += ', size=' + str(tuple(values.shape)) + tensor_str = indices_prefix + indices_str + '),\n' + ' ' * indent + values_prefix + values_str + ')' + elif self.is_quantized: + suffixes.append('size=' + str(tuple(self.shape))) + if not has_default_dtype: + suffixes.append('dtype=' + str(self.dtype)) + # TODO: change to a call to self.q_scheme() when we add q_scheme method + # and uncomment this + # suffixes.append('quantization_scheme=' + 'per_tensor_affine') + suffixes.append('scale=' + str(self.q_scale())) + suffixes.append('zero_point=' + str(self.q_zero_point())) + tensor_str = _tensor_str(self.dequantize(), indent) + else: + if self.numel() == 0 and not self.is_sparse: + # Explicitly print the shape if it is not (0,), to match NumPy behavior + if self.dim() != 1: + suffixes.append('size=' + str(tuple(self.shape))) + + # In an empty tensor, there are no elements to infer if the dtype + # should be int64, so it must be shown explicitly. + if self.dtype != torch.get_default_dtype(): + suffixes.append('dtype=' + str(self.dtype)) + tensor_str = '[]' + else: + if not has_default_dtype: + suffixes.append('dtype=' + str(self.dtype)) + + if self.layout != torch.strided: + tensor_str = _tensor_str(self.to_dense(), indent) + else: + tensor_str = _tensor_str(self, indent) + + if self.layout != torch.strided: + suffixes.append('layout=' + str(self.layout)) + + if self.grad_fn is not None: + name = type(self.grad_fn).__name__ + if name == 'CppFunction': + name = self.grad_fn.name().rsplit('::', 1)[-1] + suffixes.append('grad_fn=<{}>'.format(name)) + elif self.requires_grad: + suffixes.append('requires_grad=True') + + return _add_suffixes(prefix + tensor_str, suffixes, indent, force_newline=self.is_sparse) +
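To see the summarization path above in action: with the default ``threshold`` of 1000 and ``edgeitems`` of 3, any tensor with more than 1000 elements is abbreviated (output illustrative):

>>> torch.arange(2000)
tensor([   0,    1,    2,  ..., 1997, 1998, 1999])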
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/_utils.html b/docs/stable/_modules/torch/_utils.html
new file mode 100644
index 000000000000..509074f5bc03
--- /dev/null
+++ b/docs/stable/_modules/torch/_utils.html
@@ -0,0 +1,883 @@
    Source code for torch._utils

    +import torch
    +import warnings
    +from collections import defaultdict
    +import sys
    +import traceback
    +
    +
    +def _type(self, dtype=None, non_blocking=False, **kwargs):
    +    """Returns the type if `dtype` is not provided, else casts this object to
    +    the specified type.
    +
    +    If this is already of the correct type, no copy is performed and the
    +    original object is returned.
    +
    +    Args:
    +        dtype (type or string): The desired type
    +        non_blocking (bool): If ``True``, and the source is in pinned memory
    +            and destination is on the GPU or vice versa, the copy is performed
    +            asynchronously with respect to the host. Otherwise, the argument
    +            has no effect.
    +        **kwargs: For compatibility, may contain the key ``async`` in place of
    +            the ``non_blocking`` argument. The ``async`` arg is deprecated.
    +    """
    +    non_blocking = _get_async_or_non_blocking('type', non_blocking, kwargs)
    +    if dtype is None:
    +        return self.__module__ + '.' + self.__class__.__name__
    +
    +    if isinstance(dtype, str):
    +        dtype = _import_dotted_name(dtype)
    +    if dtype == type(self):
    +        return self
    +    if self.is_sparse:
    +        if not dtype.is_sparse:
    +            raise RuntimeError("Cannot cast sparse tensor to dense tensor")
    +        new_module_name = dtype.__module__.replace('.sparse', '')
    +        new_values_type_name = new_module_name + '.' + dtype.__name__
    +        new_values = torch._values(self).type(new_values_type_name, non_blocking)
    +        new_indices_type_name = new_module_name + '.LongTensor'
    +        new_indices = torch._indices(self).type(new_indices_type_name, non_blocking)
    +        return dtype(new_indices, new_values, self.size())
    +    if dtype.is_sparse:
    +        raise RuntimeError("Cannot cast dense tensor to sparse tensor")
    +    return dtype(self.size()).copy_(self, non_blocking)
    +
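``_type`` backs the public ``Tensor.type`` API; a brief sketch (values illustrative):

>>> t = torch.ones(3)
>>> t.type()                       # no dtype given: return the type string
'torch.FloatTensor'
>>> t.type('torch.DoubleTensor').dtype
torch.float64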
    +
    +def _cuda(self, device=None, non_blocking=False, **kwargs):
    +    """Returns a copy of this object in CUDA memory.
    +
    +    If this object is already in CUDA memory and on the correct device, then
    +    no copy is performed and the original object is returned.
    +
    +    Args:
    +        device (int): The destination GPU id. Defaults to the current device.
    +        non_blocking (bool): If ``True`` and the source is in pinned memory,
    +            the copy will be asynchronous with respect to the host. Otherwise,
    +            the argument has no effect.
    +        **kwargs: For compatibility, may contain the key ``async`` in place of
    +            the ``non_blocking`` argument.
    +    """
    +    non_blocking = _get_async_or_non_blocking('cuda', non_blocking, kwargs)
    +    if self.is_cuda:
    +        if device is None:
    +            device = torch.cuda.current_device()
    +        if self.get_device() == device:
    +            return self
    +    else:
    +        if device is None:
    +            device = -1
    +    with torch.cuda.device(device):
    +        if self.is_sparse:
    +            new_type = getattr(torch.cuda.sparse, self.__class__.__name__)
    +            indices = torch._indices(self).cuda(device, non_blocking)
    +            values = torch._values(self).cuda(device, non_blocking)
    +            return new_type(indices, values, self.size())
    +        else:
    +            new_type = getattr(torch.cuda, self.__class__.__name__)
    +            return new_type(self.size()).copy_(self, non_blocking)
    +
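Similarly, ``_cuda`` backs ``Tensor.cuda``; a sketch, assuming a CUDA device is available:

>>> cpu_t = torch.ones(2)
>>> gpu_t = cpu_t.cuda()           # copy to the current CUDA device
>>> gpu_t.cuda() is gpu_t          # already resident: no copy, same object
True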
    +
    +def _get_async_or_non_blocking(function_name, non_blocking, kwargs):
    +    if not kwargs:
    +        return non_blocking
    +    if len(kwargs) != 1 or 'async' not in kwargs:
    +        message = "{}() got an unexpected keyword argument '{}'"
    +        argument = list(kwargs.keys()).pop()
    +        raise TypeError(message.format(function_name, argument))
    +    warnings.warn("'async' is deprecated; use 'non_blocking'")
    +    return kwargs['async']
    +
    +
    +# Note [Don't serialize hooks]
    +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    +# Since time immemorial, we have serialized the backward hooks associated with
    +# variables.  This kind of half-worked--Python can pickle global functions
    +# (but not closures!)--but there were problems.
    +#
    +#   - It's fragile.  If you serialize a backward hook into a saved
    +#     model, and then you rename the function associated with the hook,
    +#     now your saved model is broken and you can't load it anymore.
    +#
    +#   - It's not actually used.  The standard recommendation is to
    +#     serialize the *state_dict* of a model, not the model itself
    +#     (since this is more stable to code changes affecting the model
    +#     serialization), and the state dict saves "data" only, thus
+#     stripping the backward hooks.  In some cases, hooks are
    +#     essential to the well-functioning of a model (e.g., DDP),
    +#     but DDP already manages readding the hooks!
    +#
    +#   - We didn't serialize them in many cases.  Prior to #10220, we
    +#     were dropping backward hooks in ForkingPickler.  We "fixed" this
    +#     to be convenient with other serialization sites, but lack of
    +#     serializing backward hooks wasn't actually the root cause of
    +#     the bug.
    +#
    +# With these cases in mind, we have decided that a better strategy
    +# is to just NOT serialize hooks at all.
    +#
    +# Since this is a BC-breaking change, we should warn when we previously
    +# serialized a hook, but no longer do so. This will be done by adding a special
+# sentinel property on hooks, which will be used to suppress this warning. If a hook
    +# has the property _torch_serialize_ignore, we will not emit a warning if we
    +# attempt to serialize a Tensor with this hook attached to it.
    +#
    +# By the way, when _backward_hooks is skipped, we must give an EMPTY
+# OrderedDict(); if you pass None, you'll run afoul of #12219.
    +
    +
    +def _rebuild_tensor(storage, storage_offset, size, stride):
    +    # first construct a tensor with the correct dtype/device
    +    t = torch.tensor([], dtype=storage.dtype, device=storage.device)
    +    return t.set_(storage, storage_offset, size, stride)
    +
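A sketch of ``_rebuild_tensor`` reattaching a view onto an existing storage (an internal API used by ``torch.load``; values illustrative):

>>> s = torch.tensor([1., 2., 3., 4.]).storage()
>>> torch._utils._rebuild_tensor(s, storage_offset=1, size=(2,), stride=(1,))
tensor([2., 3.])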
    +
    +def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
    +    tensor = _rebuild_tensor(storage, storage_offset, size, stride)
    +    tensor.requires_grad = requires_grad
    +    # NB: This line exists only for backwards compatibility; the
    +    # general expectation is that backward_hooks is an empty
    +    # OrderedDict.  See Note [Don't serialize hooks]
    +    tensor._backward_hooks = backward_hooks
    +    return tensor
    +
    +def _rebuild_qtensor(storage, storage_offset, size, stride, scale, zero_point, requires_grad, backward_hooks):
    +    tensor = torch._empty_affine_quantized(size, scale=scale, zero_point=zero_point, dtype=storage.dtype)
    +    tensor.set_(storage, storage_offset, size, stride)
    +    tensor.requires_grad = requires_grad
    +    # NB: This line exists only for backwards compatibility; the
    +    # general expectation is that backward_hooks is an empty
    +    # OrderedDict.  See Note [Don't serialize hooks]
    +    tensor._backward_hooks = backward_hooks
    +    return tensor
    +
    +def _rebuild_parameter(data, requires_grad, backward_hooks):
    +    param = torch.nn.Parameter(data, requires_grad)
    +    # NB: This line exists only for backwards compatibility; the
    +    # general expectation is that backward_hooks is an empty
    +    # OrderedDict.  See Note [Don't serialize hooks]
    +    param._backward_hooks = backward_hooks
    +
    +    return param
    +
    +
    +def _import_dotted_name(name):
    +    components = name.split('.')
    +    obj = __import__(components[0])
    +    for component in components[1:]:
    +        obj = getattr(obj, component)
    +    return obj
    +
    +
    +# Taken from python 3.5 docs
    +def _accumulate(iterable, fn=lambda x, y: x + y):
    +    'Return running totals'
    +    # _accumulate([1,2,3,4,5]) --> 1 3 6 10 15
    +    # _accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120
    +    it = iter(iterable)
    +    try:
    +        total = next(it)
    +    except StopIteration:
    +        return
    +    yield total
    +    for element in it:
    +        total = fn(total, element)
    +        yield total
    +
    +
    +def _flatten_dense_tensors(tensors):
    +    """Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of
    +    same dense type.
    +
    +    Since inputs are dense, the resulting tensor will be a concatenated 1D
    +    buffer. Element-wise operation on this buffer will be equivalent to
    +    operating individually.
    +
    +    Arguments:
    +        tensors (Iterable[Tensor]): dense tensors to flatten.
    +
    +    Returns:
    +        A contiguous 1D buffer containing input tensors.
    +    """
    +    if len(tensors) == 1:
    +        return tensors[0].contiguous().view(-1)
    +    flat = torch.cat([t.contiguous().view(-1) for t in tensors], dim=0)
    +    return flat
    +
    +
    +def _flatten_sparse_tensors(tensors):
    +    """Flatten sparse tensors into two contiguous 1D buffers, one of indices and
    +    one of values. Assume tensors are of same sparse type.
    +
    +    Arguments:
    +        tensors (Iterable[Tensor]): sparse tensors to flatten.
    +
    +    Returns:
    +        A tuple of two contiguous 1D buffers, one containing input tensors'
    +        indices and the other containing the values.
    +    """
    +    flat_indices = _flatten_dense_tensors([torch._indices(t) for t in tensors])
    +    flat_values = _flatten_dense_tensors([torch._values(t) for t in tensors])
    +    return flat_indices, flat_values
    +
    +
    +def _unflatten_dense_tensors(flat, tensors):
    +    """View a flat buffer using the sizes of tensors. Assume that tensors are of
    +    same dense type, and that flat is given by _flatten_dense_tensors.
    +
    +    Arguments:
    +        flat (Tensor): flattened dense tensors to unflatten.
    +        tensors (Iterable[Tensor]): dense tensors whose sizes will be used to
    +          unflatten flat.
    +
    +    Returns:
    +        Unflattened dense tensors with sizes same as tensors and values from
    +        flat.
    +    """
    +    outputs = []
    +    offset = 0
    +    for tensor in tensors:
    +        numel = tensor.numel()
    +        outputs.append(flat.narrow(0, offset, numel).view_as(tensor))
    +        offset += numel
    +    return tuple(outputs)
    +
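A round-trip sketch of the two dense helpers above (shapes illustrative):

>>> a, b = torch.ones(2, 2), torch.zeros(3)
>>> flat = torch._utils._flatten_dense_tensors([a, b])
>>> flat.shape
torch.Size([7])
>>> a2, b2 = torch._utils._unflatten_dense_tensors(flat, [a, b])
>>> torch.equal(a, a2) and torch.equal(b, b2)
True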
    +
    +def _unflatten_sparse_tensors(flat, tensors):
    +    """View flat buffer (containing indices and values) using the sizes of
    +    tensors. Assume that tensors are of same sparse type, and that flat is given
    +    by _flatten_sparse_tensors.
    +
    +    Arguments:
    +        flat (tuple(Tensor, Tensor)): flattened indices and values of sparse
    +          tensors to unflatten.
    +        tensors (Iterable[Tensor]): sparse tensors whose sizes will be used to
    +          unflatten flat.
    +
    +    Returns:
    +        Unflattened sparse tensors with sizes same as tensors and values from
    +        flat.
    +    """
    +    flat_indices, flat_values = flat
    +    indices = _unflatten_dense_tensors(flat_indices, [torch._indices(t) for t in tensors])
    +    values = _unflatten_dense_tensors(flat_values, [torch._values(t) for t in tensors])
    +    outputs = []
    +    for t, i, v in zip(tensors, indices, values):
    +        outputs.append(t.new(i, v, t.size()))
    +    return tuple(outputs)
    +
    +
    +def _reorder_tensors_as(tensors, ordered_tensors):
    +    """Assume that tensors are of same order as ordered_tensors within their
    +    types, e.g., from _take_tensors. Reorder them to be of same order as
    +    ordered_tensors.
    +
    +    Arguments:
    +        tensors (Iterable[Tensor]): tensors to be reordered. They should be of
    +          the same order as ordered_tensors within their own types.
    +        ordered_tensors (Iterable[Tensor]): tensors whose order will be the
    +          reference.
    +
    +    Returns:
    +        Ordered tuple of tensors with contents from tensors and order of
    +        ordered_tensors.
    +    """
    +    type_dict = defaultdict(list)
    +    for tensor in tensors:
    +        type_dict[tensor.type()].append(tensor)
    +    type_dict = {t: iter(coll) for t, coll in type_dict.items()}
    +    return tuple(next(type_dict[tensor.type()]) for tensor in ordered_tensors)
    +
    +
+def _take_tensors(tensors, size_limit):
+    """Group tensors into chunks. This generator yields one chunk at a time;
+    each chunk contains tensors of the same type with a total size within the byte limit.
    +
    +    Args:
    +        tensors (Sequence): A sequence of tensors to be separated into chunks.
    +        size_limit (int): The limit of each chunk in bytes.
    +
    +    Yields:
    +        Blocks of tensors of same type and within size_limit. The yielded
    +        tensors are only ordered as the original sequence within its types.
    +    """
    +    buf_dict = defaultdict(lambda: [[], 0])
    +    for tensor in tensors:
    +        t = tensor.type()
    +        if tensor.is_sparse:
    +            indices = torch._indices(tensor)
    +            values = torch._values(tensor)
    +            size = indices.numel() * indices.element_size() + values.numel() * values.element_size()
    +        else:
    +            size = tensor.numel() * tensor.element_size()
    +        buf_and_size = buf_dict[t]
    +        if buf_and_size[1] + size > size_limit and buf_and_size[1] > 0:
    +            yield buf_and_size[0]
    +            buf_and_size = buf_dict[t] = [[], 0]
    +        buf_and_size[0].append(tensor)
    +        buf_and_size[1] += size
    +    for buf, _ in buf_dict.values():
    +        if len(buf) > 0:
    +            yield buf
    +
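A worked sketch of the chunking logic: three 16-byte float32 tensors under a 32-byte limit yield a chunk of two tensors followed by a chunk of one:

>>> ts = [torch.ones(4) for _ in range(3)]   # 4 elements * 4 bytes = 16 bytes each
>>> [len(chunk) for chunk in torch._utils._take_tensors(ts, 32)]
[2, 1]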
    +
    +# annotation decorator to get annotations in a way that is compatible
    +# with both Python 2 and 3
    +def annotate(ret, **kwargs):
    +    def dec(fun):
    +        fun.__annotations__ = dict(kwargs)
    +        fun.__annotations__['return'] = ret
    +        return fun
    +    return dec
    +
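For instance, ``annotate`` attaches an annotations dict without Python 3 syntax:

>>> @torch._utils.annotate(int, x=int, y=int)
... def add(x, y):
...     return x + y
>>> add.__annotations__
{'x': int, 'y': int, 'return': int}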
    +
    +# NOTE [ Python Traceback Reference Cycle Problem ]
    +#
    +# When using sys.exc_info(), it is important to **not** store the exc_info[2],
    +# which is the traceback, because otherwise you will run into the traceback
    +# reference cycle problem, i.e., the traceback holding reference to the frame,
+# and the frame (which holds references to all the objects in its temporary scope)
+# holding a reference to the traceback.
    +
    +class KeyErrorMessage(str):
    +    r"""str subclass that returns itself in repr"""
    +    def __repr__(self):
    +        return self
    +
    +
    +class ExceptionWrapper(object):
    +    r"""Wraps an exception plus traceback to communicate across threads"""
    +    def __init__(self, exc_info=None, where="in background"):
    +        # It is important that we don't store exc_info, see
    +        # NOTE [ Python Traceback Reference Cycle Problem ]
    +        if exc_info is None:
    +            exc_info = sys.exc_info()
    +        self.exc_type = exc_info[0]
    +        self.exc_msg = "".join(traceback.format_exception(*exc_info))
    +        self.where = where
    +
    +    def reraise(self):
    +        r"""Reraises the wrapped exception in the current thread"""
    +        # Format a message such as: "Caught ValueError in DataLoader worker
    +        # process 2. Original Traceback:", followed by the traceback.
    +        msg = "Caught {} {}.\nOriginal {}".format(
    +            self.exc_type.__name__, self.where, self.exc_msg)
    +        if self.exc_type == KeyError:
    +            # KeyError calls repr() on its argument (usually a dict key). This
    +            # makes stack traces unreadable. It will not be changed in Python
    +            # (https://bugs.python.org/issue2651), so we work around it.
    +            msg = KeyErrorMessage(msg)
    +        raise self.exc_type(msg)
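A sketch of the intended flow (traceback output abbreviated): capture an exception in a worker, then re-raise it on the main thread with context:

>>> try:
...     raise ValueError("bad value")
... except ValueError:
...     w = torch._utils.ExceptionWrapper(where="in DataLoader worker process 0")
>>> w.reraise()
Traceback (most recent call last):
  ...
ValueError: Caught ValueError in DataLoader worker process 0.
Original Traceback (most recent call last):
  ...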
    +
    + +
    + +
    +
    + + + + +
    + + + +
    +

    + © Copyright 2019, Torch Contributors. + +

    +
    + +
    + Built with Sphinx using a theme provided by Read the Docs. +
    + + +
    + +
    +
    + +
    +
    +
    + +
    +
    +
    +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    +
    +
    +

    Docs

    +

    Access comprehensive developer documentation for PyTorch

    + View Docs +
    + +
    +

    Tutorials

    +

    Get in-depth tutorials for beginners and advanced developers

    + View Tutorials +
    + +
    +

    Resources

    +

    Find development resources and get your questions answered

    + View Resources +
    +
    +
    +
    + + + + + + + + + +
    +
    +
    +
    + + +
    +
    +
    + + +
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/autograd.html b/docs/stable/_modules/torch/autograd.html
new file mode 100644
index 000000000000..01d4cf76ee74
--- /dev/null
+++ b/docs/stable/_modules/torch/autograd.html
@@ -0,0 +1,690 @@

    Source code for torch.autograd

    +"""
    +``torch.autograd`` provides classes and functions implementing automatic
+differentiation of arbitrary scalar-valued functions. It requires minimal
    +changes to the existing code - you only need to declare :class:`Tensor` s
    +for which gradients should be computed with the ``requires_grad=True`` keyword.
    +"""
    +import torch
    +import warnings
    +
    +from .variable import Variable
    +from .function import Function, NestedIOFunction  # noqa: F401
    +from .gradcheck import gradcheck, gradgradcheck  # noqa: F401
    +from .grad_mode import no_grad, enable_grad, set_grad_enabled  # noqa: F401
    +from .anomaly_mode import detect_anomaly, set_detect_anomaly  # noqa: F401
    +from . import profiler  # noqa: F401
    +
    +__all__ = ['Variable', 'Function', 'backward', 'grad_mode']
    +
    +
    +def _make_grads(outputs, grads):
    +    new_grads = []
    +    for out, grad in zip(outputs, grads):
    +        if isinstance(grad, torch.Tensor):
    +            new_grads.append(grad)
    +        elif grad is None:
    +            if out.requires_grad:
    +                if out.numel() != 1:
    +                    raise RuntimeError("grad can be implicitly created only for scalar outputs")
    +                new_grads.append(torch.ones_like(out))
    +            else:
    +                new_grads.append(None)
    +        else:
    +            raise TypeError("gradients can be either Tensors or None, but got " +
    +                            type(grad).__name__)
    +    return tuple(new_grads)
    +
    +
    +
    [docs]def backward(tensors, grad_tensors=None, retain_graph=None, create_graph=False, grad_variables=None): + r"""Computes the sum of gradients of given tensors w.r.t. graph leaves. + + The graph is differentiated using the chain rule. If any of ``tensors`` + are non-scalar (i.e. their data has more than one element) and require + gradient, then the Jacobian-vector product will be computed; in that + case the function additionally requires specifying ``grad_tensors``. + It should be a sequence of matching length that contains the "vector" + in the Jacobian-vector product, usually the gradient of the differentiated + function w.r.t. corresponding tensors (``None`` is an acceptable value for + all tensors that don't need gradient tensors). + + This function accumulates gradients in the leaves - you might need to zero + them before calling it. + + Arguments: + tensors (sequence of Tensor): Tensors of which the derivative will be + computed. + grad_tensors (sequence of (Tensor or None)): The "vector" in the Jacobian-vector + product, usually gradients w.r.t. each element of corresponding tensors. + None values can be specified for scalar Tensors or ones that don't require + grad. If a None value would be acceptable for all grad_tensors, then this + argument is optional. + retain_graph (bool, optional): If ``False``, the graph used to compute the grad + will be freed. Note that in nearly all cases setting this option to ``True`` + is not needed and often can be worked around in a much more efficient + way. Defaults to the value of ``create_graph``. + create_graph (bool, optional): If ``True``, graph of the derivative will + be constructed, allowing the computation of higher order derivative products. + Defaults to ``False``. + """ + if grad_variables is not None: + warnings.warn("'grad_variables' is deprecated. Use 'grad_tensors' instead.") + if grad_tensors is None: + grad_tensors = grad_variables + else: + raise RuntimeError("'grad_tensors' and 'grad_variables' (deprecated) " + "arguments both passed to backward(). Please only " + "use 'grad_tensors'.") + + tensors = (tensors,) if isinstance(tensors, torch.Tensor) else tuple(tensors) + + if grad_tensors is None: + grad_tensors = [None] * len(tensors) + elif isinstance(grad_tensors, torch.Tensor): + grad_tensors = [grad_tensors] + else: + grad_tensors = list(grad_tensors) + + grad_tensors = _make_grads(tensors, grad_tensors) + if retain_graph is None: + retain_graph = create_graph + + Variable._execution_engine.run_backward( + tensors, grad_tensors, retain_graph, create_graph, + allow_unreachable=True) # allow_unreachable flag
    + + +
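A minimal end-to-end sketch of ``backward`` accumulating into ``.grad``:

>>> x = torch.tensor([2., 3.], requires_grad=True)
>>> y = (x * x).sum()
>>> torch.autograd.backward(y)     # equivalent to y.backward()
>>> x.grad
tensor([4., 6.])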
    [docs]def grad(outputs, inputs, grad_outputs=None, retain_graph=None, create_graph=False, + only_inputs=True, allow_unused=False): + r"""Computes and returns the sum of gradients of outputs w.r.t. the inputs. + + ``grad_outputs`` should be a sequence of length matching ``outputs`` + containing the "vector" in the Jacobian-vector product, usually the pre-computed + gradients w.r.t. each of the outputs. If an output doesn't require_grad, + then the gradient can be ``None``. + + If ``only_inputs`` is ``True``, the function will only return a list of gradients + w.r.t. the specified inputs. If it's ``False``, then gradients w.r.t. all remaining + leaves will still be computed, and will be accumulated into their ``.grad`` + attribute. + + Arguments: + outputs (sequence of Tensor): outputs of the differentiated function. + inputs (sequence of Tensor): Inputs w.r.t. which the gradient will be + returned (and not accumulated into ``.grad``). + grad_outputs (sequence of Tensor): The "vector" in the Jacobian-vector product. + Usually gradients w.r.t. each output. None values can be specified for scalar + Tensors or ones that don't require grad. If a None value would be acceptable + for all grad_outputs, then this argument is optional. Default: None. + retain_graph (bool, optional): If ``False``, the graph used to compute the grad + will be freed. Note that in nearly all cases setting this option to ``True`` + is not needed and often can be worked around in a much more efficient + way. Defaults to the value of ``create_graph``. + create_graph (bool, optional): If ``True``, graph of the derivative will + be constructed, allowing the computation of higher order derivative products. + Default: ``False``. + allow_unused (bool, optional): If ``False``, specifying inputs that were not + used when computing outputs (and therefore their grad is always zero) + is an error. Defaults to ``False``. + """ + if not only_inputs: + warnings.warn("only_inputs argument is deprecated and is ignored now " + "(defaults to True). To accumulate gradient for other " + "parts of the graph, please use torch.autograd.backward.") + + outputs = (outputs,) if isinstance(outputs, torch.Tensor) else tuple(outputs) + inputs = (inputs,) if isinstance(inputs, torch.Tensor) else tuple(inputs) + if grad_outputs is None: + grad_outputs = [None] * len(outputs) + elif isinstance(grad_outputs, torch.Tensor): + grad_outputs = [grad_outputs] + else: + grad_outputs = list(grad_outputs) + + grad_outputs = _make_grads(outputs, grad_outputs) + if retain_graph is None: + retain_graph = create_graph + + return Variable._execution_engine.run_backward( + outputs, grad_outputs, retain_graph, create_graph, + inputs, allow_unused)
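And a companion sketch of ``grad``, which returns gradients instead of accumulating them:

>>> x = torch.tensor([2., 3.], requires_grad=True)
>>> y = (x * x).sum()
>>> (gx,) = torch.autograd.grad(y, x)
>>> gx
tensor([4., 6.])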
    + + +# This function applies in case of gradient checkpointing for memory +# optimization. Currently, for gradient checkpointing, we only support imperative +# backwards call i.e. torch.autograd.backward() and the torch.autograd.grad() won't +# work. The reason being that: torch.autograd.grad() only calculates the grads +# for the inputs that are passed by user but it doesn't calculate grad for +# anything else e.g. model parameters like weights, bias etc. However, for +# torch.autograd.backward(), we would actually compute the grad for the weights as well. +# +# This function returns whether the checkpointing is valid i.e. torch.autograd.backward +# or not i.e. torch.autograd.grad. The implementation works by maintaining a thread +# local variable in torch/csrc/autograd/engine.cpp which looks at the NodeTask +# in the stack and before a NodeTask is executed in evaluate_function, it +# checks for whether reentrant backwards is imperative or not. +# See https://github.com/pytorch/pytorch/pull/4594 for more discussion/context +def _is_checkpoint_valid(): + return Variable._execution_engine.is_checkpoint_valid() + + +def variable(*args, **kwargs): + warnings.warn("torch.autograd.variable(...) is deprecated, use torch.tensor(...) instead") + return torch.tensor(*args, **kwargs) + + +if not torch._C._autograd_init(): + raise RuntimeError("autograd initialization failed") +
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/autograd/anomaly_mode.html b/docs/stable/_modules/torch/autograd/anomaly_mode.html
new file mode 100644
index 000000000000..ad7308b0e2b6
--- /dev/null
+++ b/docs/stable/_modules/torch/autograd/anomaly_mode.html
@@ -0,0 +1,615 @@

    Source code for torch.autograd.anomaly_mode

    +import torch
    +
    +
    +
    [docs]class detect_anomaly(object): + r"""Context-manager that enables anomaly detection for the autograd engine. + + This does two things: + - Running the forward pass with detection enabled will allow the backward + pass to print the traceback of the forward operation that created the failing + backward function. + - Any backward computation that generates a "nan" value will raise an error. + + Example: + + >>> import torch + >>> from torch import autograd + >>> class MyFunc(autograd.Function): + ... @staticmethod + ... def forward(ctx, inp): + ... return inp.clone() + ... @staticmethod + ... def backward(ctx, gO): + ... # Error during the backward pass + ... raise RuntimeError("Some error in backward") + ... return gO.clone() + >>> def run_fn(a): + ... out = MyFunc.apply(a) + ... return out.sum() + >>> inp = torch.rand(10, 10, requires_grad=True) + >>> out = run_fn(inp) + >>> out.backward() + Traceback (most recent call last): + File "<stdin>", line 1, in <module> + File "/your/pytorch/install/torch/tensor.py", line 93, in backward + torch.autograd.backward(self, gradient, retain_graph, create_graph) + File "/your/pytorch/install/torch/autograd/__init__.py", line 90, in backward + allow_unreachable=True) # allow_unreachable flag + File "/your/pytorch/install/torch/autograd/function.py", line 76, in apply + return self._forward_cls.backward(self, *args) + File "<stdin>", line 8, in backward + RuntimeError: Some error in backward + >>> with autograd.detect_anomaly(): + ... inp = torch.rand(10, 10, requires_grad=True) + ... out = run_fn(inp) + ... out.backward() + Traceback of forward call that caused the error: + File "tmp.py", line 53, in <module> + out = run_fn(inp) + File "tmp.py", line 44, in run_fn + out = MyFunc.apply(a) + Traceback (most recent call last): + File "<stdin>", line 4, in <module> + File "/your/pytorch/install/torch/tensor.py", line 93, in backward + torch.autograd.backward(self, gradient, retain_graph, create_graph) + File "/your/pytorch/install/torch/autograd/__init__.py", line 90, in backward + allow_unreachable=True) # allow_unreachable flag + File "/your/pytorch/install/torch/autograd/function.py", line 76, in apply + return self._forward_cls.backward(self, *args) + File "<stdin>", line 8, in backward + RuntimeError: Some error in backward + + """ + + def __init__(self): + self.prev = torch.is_anomaly_enabled() + + def __enter__(self): + torch.set_anomaly_enabled(True) + + def __exit__(self, *args): + torch.set_anomaly_enabled(self.prev) + return False
    + + +
    [docs]class set_detect_anomaly(object): + r"""Context-manager that sets the anomaly detection for the autograd engine on or off. + + ``set_detect_anomaly`` will enable or disable the autograd anomaly detection + based on its argument :attr:`mode`. + It can be used as a context-manager or as a function. + + See ``detect_anomaly`` above for details of the anomaly detection behaviour. + + Arguments: + mode (bool): Flag whether to enable anomaly detection (``True``), + or disable (``False``). + + """ + + def __init__(self, mode): + self.prev = torch.is_anomaly_enabled() + torch.set_anomaly_enabled(mode) + + def __enter__(self): + pass + + def __exit__(self, *args): + torch.set_anomaly_enabled(self.prev) + return False
    +
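A short sketch of the two entry points above; ``set_detect_anomaly`` works both globally and as a scoped context manager:

>>> _ = torch.autograd.set_detect_anomaly(True)    # enable globally
>>> with torch.autograd.set_detect_anomaly(False):
...     pass                                       # temporarily disabled here
>>> torch.is_anomaly_enabled()
True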
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/autograd/function.html b/docs/stable/_modules/torch/autograd/function.html
new file mode 100644
index 000000000000..3a85ef3d4bf7
--- /dev/null
+++ b/docs/stable/_modules/torch/autograd/function.html
@@ -0,0 +1,906 @@

    Source code for torch.autograd.function

    +import torch
    +import torch._C as _C
    +import torch.utils.hooks as hooks
    +from torch._six import with_metaclass
    +import functools
    +import warnings
    +from collections import OrderedDict
    +
    +
    +class _ContextMethodMixin(object):
    +
    +    def save_for_backward(self, *tensors):
    +        r"""Saves given tensors for a future call to :func:`~Function.backward`.
    +
    +        **This should be called at most once, and only from inside the**
    +        :func:`forward` **method.**
    +
    +        Later, saved tensors can be accessed through the :attr:`saved_tensors`
    +        attribute. Before returning them to the user, a check is made to ensure
    +        they weren't used in any in-place operation that modified their content.
    +
    +        Arguments can also be ``None``.
    +        """
    +        self.to_save = tensors
    +
    +    def mark_dirty(self, *args):
    +        r"""Marks given tensors as modified in an in-place operation.
    +
    +        **This should be called at most once, only from inside the**
    +        :func:`forward` **method, and all arguments should be inputs.**
    +
    +        Every tensor that's been modified in-place in a call to :func:`forward`
    +        should be given to this function, to ensure correctness of our checks.
    +        It doesn't matter whether the function is called before or after
    +        modification.
    +        """
    +        self.dirty_tensors = args
    +
    +    def mark_shared_storage(self, *pairs):
    +        warnings.warn(
    +            'mark_shared_storage is deprecated. '
    +            'Tensors with shared storages are automatically tracked. Note '
    +            'that calls to `set_()` are not tracked')
    +
    +    def mark_non_differentiable(self, *args):
    +        r"""Marks outputs as non-differentiable.
    +
    +        **This should be called at most once, only from inside the**
    +        :func:`forward` **method, and all arguments should be outputs.**
    +
    +        This will mark outputs as not requiring gradients, increasing the
    +        efficiency of backward computation. You still need to accept a gradient
    +        for each output in :meth:`~Function.backward`, but it's always going to
    +        be a zero tensor with the same shape as the shape of a corresponding
    +        output.
    +
    +        This is used e.g. for indices returned from a max :class:`Function`.
    +        """
    +        self.non_differentiable = args
    +
    +
    +class _HookMixin(object):
    +
    +    @staticmethod
    +    def _register_hook(backward_hooks, hook):
    +        if backward_hooks is None:
    +            backward_hooks = OrderedDict()
    +        handle = hooks.RemovableHandle(backward_hooks)
    +        backward_hooks[handle.id] = hook
    +        return backward_hooks, handle
    +
    +
    +class BackwardCFunction(_C._FunctionBase, _ContextMethodMixin, _HookMixin):
    +    _is_legacy = False
    +
    +    def apply(self, *args):
    +        return self._forward_cls.backward(self, *args)
    +
    +
    +class FunctionMeta(type):
    +    """Function metaclass.
    +
    +    This metaclass sets up the following properties:
    +        _is_legacy: True if forward is not defined as a static method.
    +        _backward_cls: The Function class corresponding to the differentiated
    +            version of this function (which is generated on the fly by this
    +            metaclass).
    +    """
    +
    +    def __init__(cls, name, bases, attrs):
    +        for super_cls in cls.mro():
    +            forward = super_cls.__dict__.get('forward')
    +            if forward is not None:
    +                has_static_forward = isinstance(forward, staticmethod) or isinstance(forward, classmethod)
    +                break
    +
    +        cls._is_legacy = not has_static_forward
    +
    +        # old-style functions
    +        if not has_static_forward:
    +            return super(FunctionMeta, cls).__init__(name, bases, attrs)
    +
    +        backward_fn = type(name + 'Backward', (BackwardCFunction,), {'_forward_cls': cls})
    +        cls._backward_cls = backward_fn
    +
    +        return super(FunctionMeta, cls).__init__(name, bases, attrs)
    +
    +
    +
    [docs]class Function(with_metaclass(FunctionMeta, _C._FunctionBase, _ContextMethodMixin, _HookMixin)): + r"""Records operation history and defines formulas for differentiating ops. + + Every operation performed on :class:`Tensor` s creates a new function + object that performs the computation and records that it happened. + The history is retained in the form of a DAG of functions, with edges + denoting data dependencies (``input <- output``). Then, when backward is + called, the graph is processed in topological ordering by calling + :func:`backward` methods of each :class:`Function` object, and passing + returned gradients on to the next :class:`Function` s. + + Normally, the only way users interact with functions is by creating + subclasses and defining new operations. This is a recommended way of + extending torch.autograd. + + Each function object is meant to be used only once (in the forward pass). + + Examples:: + + >>> class Exp(Function): + >>> + >>> @staticmethod + >>> def forward(ctx, i): + >>> result = i.exp() + >>> ctx.save_for_backward(result) + >>> return result + >>> + >>> @staticmethod + >>> def backward(ctx, grad_output): + >>> result, = ctx.saved_tensors + >>> return grad_output * result + """ + + # only for backward compatibility + __call__ = _C._FunctionBase._do_forward + + # for the tracer + is_traceable = False +
    [docs] @staticmethod + def forward(ctx, *args, **kwargs): + r"""Performs the operation. + + This function is to be overridden by all subclasses. + + It must accept a context ctx as the first argument, followed by any + number of arguments (tensors or other types). + + The context can be used to store tensors that can be then retrieved + during the backward pass. + """ + raise NotImplementedError
    + +
    [docs] @staticmethod + def backward(ctx, *grad_outputs): + r"""Defines a formula for differentiating the operation. + + This function is to be overridden by all subclasses. + + It must accept a context :attr:`ctx` as the first argument, followed by + as many outputs as :func:`forward` returned, and it should return as many + tensors as there were inputs to :func:`forward`. Each argument is the + gradient w.r.t. the given output, and each returned value should be the + gradient w.r.t. the corresponding input. + + The context can be used to retrieve tensors saved during the forward + pass. It also has an attribute :attr:`ctx.needs_input_grad` as a tuple + of booleans representing whether each input needs gradient. E.g., + :func:`backward` will have ``ctx.needs_input_grad[0] = True`` if the + first input to :func:`forward` needs the gradient computed w.r.t. the + output. + """ + raise NotImplementedError
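Tying the pieces together, a sketch applying the ``Exp`` subclass from the class docstring above; since the derivative of ``exp`` is ``exp`` itself, ``x.grad`` equals the forward output:

>>> x = torch.tensor([0., 1.], requires_grad=True)
>>> y = Exp.apply(x)               # Exp as defined in the example above
>>> y.sum().backward()
>>> torch.allclose(x.grad, y)
True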
    + + +def once_differentiable(fn): + + @functools.wraps(fn) + def wrapper(ctx, *args): + with torch.no_grad(): + outputs = fn(ctx, *args) + + if not torch.is_grad_enabled(): + return outputs + + # If any of the inputs have requires_grad=True, we force the outputs + # to have requires_grad=True but point to a grad_fn which throws an + # error message during (double) back-propagation. + # XXX: this is only an approximation of requires_grad - there's no way + # to figure out if fn didn't use ctx.saved_tensors and as a result + # some Tensors might require grad, even if no args do. + # Unfortunately, this leads to unexpected error messages ("no nodes + # require computing gradients"), but I don't have a better idea. + # These functions would raise an error in backward anyway. + requires_grad = any(isinstance(arg, torch.Tensor) and arg.requires_grad + for arg in args) + if not requires_grad: + return outputs + + if not isinstance(outputs, tuple): + outputs = (outputs,) + + err_fn = torch._C._functions.DelayedError( + b"trying to differentiate twice a function that was marked " + b"with @once_differentiable", len(outputs)) + + # Create aliases of each output that has requires_grad=True. We need + # at least one of the inputs to err_fn to require grad so that the + # output will have a grad_fn. + def fake_requires_grad(var): + if var is not None: + var = var.detach() + var.requires_grad = True + return var + + return err_fn(*[fake_requires_grad(v) for v in outputs]) + return wrapper + + +def traceable(fn_cls): + r"""Marks Function as traceable for the JIT. + + Traceable functions have additional restrictions - they can't pass any + data-dependent values to backward (e.g. Prod passes the output, which makes + it non-traceable), and their backward should be implemented entirely in terms + of operations on autograd Tensors in all cases. + + DON'T USE THIS DECORATOR. IT IS FOR INTERNAL USE ONLY AND SHOULD BE HANDLED WITH + CARE (or can give incorrect results otherwise). + """ + fn_cls.is_traceable = True + return fn_cls + + +class InplaceFunction(Function): + + def __init__(self, inplace=False): + super(InplaceFunction, self).__init__() + self.inplace = inplace + + +def _nested_map(condition, fn, condition_msg=None): + def _map(obj): + if condition(obj): + return fn(obj) + elif obj is None: + return None + elif isinstance(obj, (list, tuple)): + return type(obj)(_map(x) for x in obj) + elif isinstance(obj, dict): + return {x : _map(obj[x]) for x in obj} + else: + raise ValueError("Auto nesting doesn't know how to process " + "an input object of type " + torch.typename(obj) + + (". Accepted types: " + condition_msg + + ", or lists/tuples of them" + if condition_msg else "")) + + return _map + + +def _jit_unwrap_structured(obj): + if hasattr(obj, "_jit_unwrap"): + return obj._jit_unwrap() + return obj + + +def _iter_filter(condition, allow_unknown=False, condition_msg=None, + conversion=None): + def _iter(obj): + if conversion is not None: + obj = conversion(obj) + if condition(obj): + yield obj + elif obj is None: + return + elif isinstance(obj, (list, tuple)): + for o in obj: + for var in _iter(o): + yield var + elif isinstance(obj, dict): + # We only accept primitive key types, so we needn't inspect them + for o in obj.values(): + for var in _iter(o): + yield var + elif allow_unknown: + yield obj + else: + raise ValueError("Auto nesting doesn't know how to process " + "an input object of type " + torch.typename(obj) + + (". Accepted types: " + condition_msg + + ", or lists/tuples of them" + if condition_msg else "")) + + return _iter + + +def _unflatten(input, proto): + # unflatten a list or tuple input into a nested list/tuple structure + # specified by proto + def unflatten_helper(input, proto): + res = [] + if hasattr(proto, "_jit_wrap"): + return proto._jit_wrap(input) + if not isinstance(proto, (list, tuple)): + return input[0], input[1:] + for e in proto: + if e is None: + res.append(e) + else: + res_e, input = unflatten_helper(input, e) + res.append(res_e) + return type(proto)(res), input + + return unflatten_helper(input, proto)[0] + + +_iter_jit_values = _iter_filter(lambda o: o is None or isinstance(o, torch._C.Value), + condition_msg="jit's Values or None") +_iter_tensors = _iter_filter(lambda x: isinstance(x, torch.Tensor), condition_msg="Tensors", + conversion=_jit_unwrap_structured) +_iter_tensors_permissive = _iter_filter(lambda x: isinstance(x, torch.Tensor), + allow_unknown=True, + condition_msg="Tensors (permissive)") +_iter_None_tensors = _iter_filter(lambda o: o is None or isinstance(o, torch.Tensor), + condition_msg="Tensors or None") +_map_tensor_data = _nested_map(lambda x: isinstance(x, torch.Tensor), lambda o: o.data, + condition_msg="Tensors") + + +class NestedIOFunction(Function): + + def _do_forward(self, *input): + self._nested_input = input + flat_input = tuple(_iter_tensors(input)) + flat_output = super(NestedIOFunction, self)._do_forward(*flat_input) + nested_output = self._nested_output + nested_tensors = _unflatten(flat_output, self._nested_output) + return nested_tensors + + def _do_backward(self, gradients, retain_variables): + self.retain_variables = retain_variables + result = super(NestedIOFunction, self)._do_backward(gradients, retain_variables) + if not retain_variables: + del self._nested_output + del self._to_save_nested + return result + + def backward(self, *gradients): + nested_gradients = _unflatten(gradients, self._nested_output) + result = self.backward_extended(*nested_gradients) + return tuple(_iter_None_tensors(result)) + + __call__ = _do_forward + + def forward(self, *args): + nested_tensors = _map_tensor_data(self._nested_input) + result = self.forward_extended(*nested_tensors) + del self._nested_input + self._nested_output = result + return tuple(_iter_tensors(result)) + + def save_for_backward(self, *args): + self.to_save = tuple(_iter_tensors(args)) + self._to_save_nested = args + + @property + def saved_tensors(self): + flat_tensors = super(NestedIOFunction, self).saved_tensors + return _unflatten(flat_tensors, self._to_save_nested) + + def mark_dirty(self, *args, **kwargs): + self.dirty_tensors = tuple(_iter_tensors((args, kwargs))) + + def mark_non_differentiable(self, *args, **kwargs): + self.non_differentiable = tuple(_iter_tensors((args, kwargs))) + + def forward_extended(self, *input): + raise NotImplementedError + + def backward_extended(self, *grad_output): + raise NotImplementedError +
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/autograd/grad_mode.html b/docs/stable/_modules/torch/autograd/grad_mode.html
new file mode 100644
index 000000000000..c294a20dc65f
--- /dev/null
+++ b/docs/stable/_modules/torch/autograd/grad_mode.html
@@ -0,0 +1,662 @@

    Source code for torch.autograd.grad_mode

    +import torch
    +import functools
    +
    +
    +
    [docs]class no_grad(object): + r"""Context-manager that disables gradient calculation. + + Disabling gradient calculation is useful for inference, when you are sure + that you will not call :meth:`Tensor.backward()`. It will reduce memory + consumption for computations that would otherwise have `requires_grad=True`. + + In this mode, the result of every computation will have + `requires_grad=False`, even when the inputs have `requires_grad=True`. + + This mode has no effect when using the :class:`~enable_grad` context manager. + + This context manager is thread local; it will not affect computation + in other threads. + + Also functions as a decorator. + + + Example:: + + >>> x = torch.tensor([1], requires_grad=True) + >>> with torch.no_grad(): + ... y = x * 2 + >>> y.requires_grad + False + >>> @torch.no_grad() + ... def doubler(x): + ... return x * 2 + >>> z = doubler(x) + >>> z.requires_grad + False + """ + def __enter__(self): + self.prev = torch.is_grad_enabled() + torch._C.set_grad_enabled(False) + + def __exit__(self, *args): + torch.set_grad_enabled(self.prev) + return False + + def __call__(self, func): + @functools.wraps(func) + def decorate_no_grad(*args, **kwargs): + with self: + return func(*args, **kwargs) + return decorate_no_grad
    + + +
    [docs]class enable_grad(object): + r"""Context-manager that enables gradient calculation. + + Enables gradient calculation, if it has been disabled via :class:`~no_grad` + or :class:`~set_grad_enabled`. + + This context manager is thread local; it will not affect computation + in other threads. + + Also functions as a decorator. + + + Example:: + + >>> x = torch.tensor([1], requires_grad=True) + >>> with torch.no_grad(): + ... with torch.enable_grad(): + ... y = x * 2 + >>> y.requires_grad + True + >>> y.backward() + >>> x.grad + >>> @torch.enable_grad() + ... def doubler(x): + ... return x * 2 + >>> with torch.no_grad(): + ... z = doubler(x) + >>> z.requires_grad + True + + """ + def __enter__(self): + self.prev = torch.is_grad_enabled() + torch._C.set_grad_enabled(True) + + def __exit__(self, *args): + torch.set_grad_enabled(self.prev) + return False + + def __call__(self, func): + @functools.wraps(func) + def decorate_enable_grad(*args, **kwargs): + with self: + return func(*args, **kwargs) + return decorate_enable_grad
+
+
+class set_grad_enabled(object):
+    r"""Context-manager that sets gradient calculation to on or off.
+
+    ``set_grad_enabled`` will enable or disable grads based on its argument :attr:`mode`.
+    It can be used as a context-manager or as a function.
+
+    When using the :class:`~enable_grad` context manager, :class:`~set_grad_enabled(False)`
+    has no effect.
+
+    This context manager is thread local; it will not affect computation
+    in other threads.
+
+    Arguments:
+        mode (bool): Flag whether to enable grad (``True``), or disable
+                     (``False``). This can be used to conditionally enable
+                     gradients.
+
+    Example::
+
+        >>> x = torch.tensor([1], requires_grad=True)
+        >>> is_train = False
+        >>> with torch.set_grad_enabled(is_train):
+        ...   y = x * 2
+        >>> y.requires_grad
+        False
+        >>> torch.set_grad_enabled(True)
+        >>> y = x * 2
+        >>> y.requires_grad
+        True
+        >>> torch.set_grad_enabled(False)
+        >>> y = x * 2
+        >>> y.requires_grad
+        False
+    """
+
+    def __init__(self, mode):
+        self.prev = torch.is_grad_enabled()
+        torch._C.set_grad_enabled(mode)
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, *args):
+        torch.set_grad_enabled(self.prev)
+        return False
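
Taken together, these three context managers cover the common grad-mode patterns. A minimal sketch of how they compose (an editorial illustration, not part of the generated page; the tensor names are arbitrary):

    import torch

    x = torch.randn(3, requires_grad=True)

    with torch.no_grad():                    # disable gradient tracking
        frozen = x * 2                       # frozen.requires_grad == False
        with torch.enable_grad():            # re-enable inside a no_grad block
            tracked = x * 2                  # tracked.requires_grad == True

    is_train = False
    with torch.set_grad_enabled(is_train):   # conditional on a flag
        y = x * 2                            # y.requires_grad == False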
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/autograd/gradcheck.html b/docs/stable/_modules/torch/autograd/gradcheck.html
new file mode 100644
index 000000000000..7d1a9148306f
--- /dev/null
+++ b/docs/stable/_modules/torch/autograd/gradcheck.html
@@ -0,0 +1,920 @@
+torch.autograd.gradcheck — PyTorch master documentation

    Source code for torch.autograd.gradcheck

    +import torch
    +from torch._six import container_abcs, istuple
    +import torch.testing
    +from itertools import product
    +import warnings
    +
    +
    +def zero_gradients(x):
    +    if isinstance(x, torch.Tensor):
    +        if x.grad is not None:
    +            x.grad.detach_()
    +            x.grad.data.zero_()
    +    elif isinstance(x, container_abcs.Iterable):
    +        for elem in x:
    +            zero_gradients(elem)
    +
    +
    +def make_jacobian(input, num_out):
    +    if isinstance(input, torch.Tensor):
    +        if not input.is_floating_point():
    +            return None
    +        if not input.requires_grad:
    +            return None
    +        return torch.zeros(input.nelement(), num_out, dtype=input.dtype)
    +    elif isinstance(input, container_abcs.Iterable) and not isinstance(input, str):
    +        jacobians = list(filter(
    +            lambda x: x is not None, (make_jacobian(elem, num_out) for elem in input)))
    +        if not jacobians:
    +            return None
    +        return type(input)(jacobians)
    +    else:
    +        return None
    +
    +
    +def iter_tensors(x, only_requiring_grad=False):
    +    if isinstance(x, torch.Tensor):
    +        if x.requires_grad or not only_requiring_grad:
    +            yield x
    +    elif isinstance(x, container_abcs.Iterable) and not isinstance(x, str):
    +        for elem in x:
    +            for result in iter_tensors(elem, only_requiring_grad):
    +                yield result
    +
    +
    +def get_numerical_jacobian(fn, input, target=None, eps=1e-3):
    +    """
    +    input: input to `fn`
    +    target: the Tensors wrt whom Jacobians are calculated (default=`input`)
    +
+    Note that `target` may not even be part of `input` to `fn`, so please be
+    **very careful** in this function not to clone `target`.
    +    """
    +    if target is None:
    +        target = input
    +    output_size = fn(input).numel()
    +    jacobian = make_jacobian(target, output_size)
    +
    +    # It's much easier to iterate over flattened lists of tensors.
+    # These are references to the same objects in jacobian, so any changes
    +    # will be reflected in it as well.
    +    x_tensors = [t for t in iter_tensors(target, True)]
    +    j_tensors = [t for t in iter_tensors(jacobian)]
    +
    +    # TODO: compare structure
    +    for x_tensor, d_tensor in zip(x_tensors, j_tensors):
+        # We need .data here to get around the version check: the code below
+        # perturbs and restores the tensor in place, which would otherwise bump
+        # the version counter without actually changing the content.
    +        if x_tensor.is_sparse:
    +            def get_stride(size):
    +                dim = len(size)
    +                tmp = 1
    +                stride = [0] * dim
    +                for i in reversed(range(dim)):
    +                    stride[i] = tmp
    +                    tmp *= size[i]
    +                return stride
    +
    +            x_nnz = x_tensor._nnz()
    +            x_size = list(x_tensor.size())
    +            x_indices = x_tensor._indices().t()
    +            x_values = x_tensor._values().data
    +            x_stride = get_stride(x_size)
    +
    +            for i in range(x_nnz):
    +                x_value = x_values[i]
    +                for x_idx in product(*[range(m) for m in x_values.size()[1:]]):
    +                    indices = x_indices[i].tolist() + list(x_idx)
    +                    d_idx = sum(indices[k] * x_stride[k] for k in range(len(x_size)))
    +                    orig = x_value[x_idx].item()
    +                    x_value[x_idx] = orig - eps
    +                    outa = fn(input).clone()
    +                    x_value[x_idx] = orig + eps
    +                    outb = fn(input).clone()
    +                    x_value[x_idx] = orig
    +                    r = (outb - outa) / (2 * eps)
    +                    d_tensor[d_idx] = r.detach().reshape(-1)
    +        elif x_tensor.layout == torch._mkldnn:
    +            if len(input) != 1:
    +                raise ValueError('gradcheck currently only supports functions with 1 input, but got: ',
    +                                 len(input))
    +            x_tensor = x_tensor.data
    +            for d_idx, x_idx in enumerate(product(*[range(m) for m in x_tensor.size()])):
    +                # this is really inefficient, but without indexing implemented, there's
    +                # not really a better way than converting back and forth
    +                x_tensor_dense = x_tensor.to_dense()
    +                orig = x_tensor_dense[x_idx].item()
    +
    +                x_tensor_dense[x_idx] = orig - eps
    +                x_tensor_mkl = x_tensor_dense.to_mkldnn()
    +                outa = fn([x_tensor_mkl])
    +
    +                x_tensor_dense[x_idx] = orig + eps
    +                x_tensor_mkl = x_tensor_dense.to_mkldnn()
    +                outb = fn([x_tensor_mkl])
    +
    +                r = (outb - outa) / (2 * eps)
    +                d_tensor[d_idx] = r.detach().reshape(-1)
    +        else:
    +            x_tensor = x_tensor.data
    +            for d_idx, x_idx in enumerate(product(*[range(m) for m in x_tensor.size()])):
    +                orig = x_tensor[x_idx].item()
    +                x_tensor[x_idx] = orig - eps
    +                outa = fn(input).clone()
    +                x_tensor[x_idx] = orig + eps
    +                outb = fn(input).clone()
    +                x_tensor[x_idx] = orig
    +                r = (outb - outa) / (2 * eps)
    +                d_tensor[d_idx] = r.detach().reshape(-1)
    +
    +    return jacobian
    +
    +
    +def get_analytical_jacobian(input, output, nondet_tol=0.0):
    +    # it is easier to call to_dense() on the sparse output than
    +    # to modify analytical jacobian
    +    if output.is_sparse:
+        raise ValueError('Sparse output is not supported by gradcheck yet. '
+                         'Please call to_dense() on the output of fn for gradcheck.')
+    if output.layout == torch._mkldnn:
+        raise ValueError('MKLDNN output is not supported by gradcheck yet. '
+                         'Please call to_dense() on the output of fn for gradcheck.')
    +    diff_input_list = list(iter_tensors(input, True))
    +    jacobian = make_jacobian(input, output.numel())
    +    jacobian_reentrant = make_jacobian(input, output.numel())
    +    grad_output = torch.zeros_like(output)
    +    flat_grad_output = grad_output.view(-1)
    +    reentrant = True
    +    correct_grad_sizes = True
    +
    +    for i in range(flat_grad_output.numel()):
    +        flat_grad_output.zero_()
    +        flat_grad_output[i] = 1
    +        for jacobian_c in (jacobian, jacobian_reentrant):
    +            grads_input = torch.autograd.grad(output, diff_input_list, grad_output,
    +                                              retain_graph=True, allow_unused=True)
    +            for jacobian_x, d_x, x in zip(jacobian_c, grads_input, diff_input_list):
    +                if d_x is not None and d_x.size() != x.size():
    +                    correct_grad_sizes = False
    +                elif jacobian_x.numel() != 0:
    +                    if d_x is None:
    +                        jacobian_x[:, i].zero_()
    +                    else:
    +                        d_x_dense = d_x.to_dense() if not d_x.layout == torch.strided else d_x
    +                        assert jacobian_x[:, i].numel() == d_x_dense.numel()
    +                        jacobian_x[:, i] = d_x_dense.contiguous().view(-1)
    +
    +    for jacobian_x, jacobian_reentrant_x in zip(jacobian, jacobian_reentrant):
    +        if jacobian_x.numel() != 0 and (jacobian_x - jacobian_reentrant_x).abs().max() > nondet_tol:
    +            reentrant = False
    +
    +    return jacobian, reentrant, correct_grad_sizes
    +
    +
    +def _as_tuple(x):
    +    if istuple(x):
    +        return x
    +    elif isinstance(x, list):
    +        return tuple(x)
    +    else:
    +        return x,
    +
    +
    +def _differentiable_outputs(x):
    +    return tuple(o for o in _as_tuple(x) if o.requires_grad)
    +
    +
    +
+def gradcheck(func, inputs, eps=1e-6, atol=1e-5, rtol=1e-3, raise_exception=True, check_sparse_nnz=False, nondet_tol=0.0):
+    r"""Check gradients computed via small finite differences against analytical
+    gradients w.r.t. tensors in :attr:`inputs` that are of floating point type
+    and with ``requires_grad=True``.
+
+    The check between numerical and analytical gradients uses :func:`~torch.allclose`.
+
+    .. note::
+        The default values are designed for :attr:`input` of double precision.
+        This check will likely fail if :attr:`input` is of less precision, e.g.,
+        ``FloatTensor``.
+
+    .. warning::
+        If any checked tensor in :attr:`input` has overlapping memory, i.e.,
+        different indices pointing to the same memory address (e.g., from
+        :func:`torch.expand`), this check will likely fail because the numerical
+        gradients computed by point perturbation at such indices will change
+        values at all other indices that share the same memory address.
+
+    Args:
+        func (function): a Python function that takes Tensor inputs and returns
+            a Tensor or a tuple of Tensors
+        inputs (tuple of Tensor or Tensor): inputs to the function
+        eps (float, optional): perturbation for finite differences
+        atol (float, optional): absolute tolerance
+        rtol (float, optional): relative tolerance
+        raise_exception (bool, optional): indicating whether to raise an exception if
+            the check fails. The exception gives more information about the
+            exact nature of the failure. This is helpful when debugging gradchecks.
+        check_sparse_nnz (bool, optional): if True, gradcheck allows for SparseTensor input,
+            and for any SparseTensor at input, gradcheck will perform check at nnz positions only.
+        nondet_tol (float, optional): tolerance for non-determinism. When running
+            identical inputs through the differentiation, the results must either match
+            exactly (default, 0.0) or be within this tolerance.
+
+    Returns:
+        True if all differences satisfy allclose condition
+    """
+    def fail_test(msg):
+        if raise_exception:
+            raise RuntimeError(msg)
+        return False
+
+    tupled_inputs = _as_tuple(inputs)
+    if any(t.is_sparse for t in tupled_inputs if isinstance(t, torch.Tensor)) and not check_sparse_nnz:
+        return fail_test('gradcheck expects all tensor inputs are dense when check_sparse_nnz is set to False.')
+
+    # Make sure that gradients are saved for all inputs
+    any_input_requiring_grad = False
+    some_input_not_requiring_grad = False
+    for inp in tupled_inputs:
+        if isinstance(inp, torch.Tensor):
+            if inp.requires_grad:
+                if inp.dtype != torch.float64:
+                    warnings.warn(
+                        'At least one of the inputs that requires gradient '
+                        'is not of double precision floating point. '
+                        'This check will likely fail if all the inputs are '
+                        'not of double precision floating point. ')
+                any_input_requiring_grad = True
+            else:
+                some_input_not_requiring_grad = True
+            inp.retain_grad()
+    if not any_input_requiring_grad:
+        raise ValueError(
+            'gradcheck expects at least one input tensor to require gradient, '
+            'but none of them have requires_grad=True.')
+    if some_input_not_requiring_grad:
+        raise ValueError(
+            'gradcheck expects that if at least one input tensor requires gradient, '
+            'then all other inputs should also have requires_grad=True.')
+
+    func_out = func(*tupled_inputs)
+    output = _differentiable_outputs(func_out)
+
+    if not output:
+        for i, o in enumerate(func_out):
+            def fn(input):
+                return _as_tuple(func(*input))[i]
+            numerical = get_numerical_jacobian(fn, tupled_inputs, eps=eps)
+            for n in numerical:
+                if len(torch.nonzero(n)) > 0:
+                    return fail_test('Numerical gradient for function expected to be zero')
+        return True
+
+    for i, o in enumerate(output):
+        if not o.requires_grad:
+            continue
+
+        def fn(input):
+            return _as_tuple(func(*input))[i]
+
+        analytical, reentrant, correct_grad_sizes = get_analytical_jacobian(tupled_inputs, o, nondet_tol=nondet_tol)
+        numerical = get_numerical_jacobian(fn, tupled_inputs, eps=eps)
+
+        if not correct_grad_sizes:
+            return fail_test('Analytical gradient has incorrect size')
+
+        for j, (a, n) in enumerate(zip(analytical, numerical)):
+            if a.numel() != 0 or n.numel() != 0:
+                if not torch.allclose(a, n, rtol, atol):
+                    return fail_test('Jacobian mismatch for output %d with respect to input %d,\n'
+                                     'numerical:%s\nanalytical:%s\n' % (i, j, n, a))
+
+        if not reentrant:
+            return fail_test('Backward is not reentrant, i.e., running backward with same '
+                             'input and grad_output multiple times gives different values, '
+                             'although analytical gradient matches numerical gradient. '
+                             'The tolerance for nondeterminism was {}.'.format(nondet_tol))
+
+    # check if the backward multiplies by grad_output
+    output = _differentiable_outputs(func(*tupled_inputs))
+    if any([o.requires_grad for o in output]):
+        diff_input_list = list(iter_tensors(tupled_inputs, True))
+        if not diff_input_list:
+            raise RuntimeError("no Tensors requiring grad found in input")
+        grads_input = torch.autograd.grad(output, diff_input_list, [torch.zeros_like(o) for o in output],
+                                          allow_unused=True)
+        for gi, i in zip(grads_input, diff_input_list):
+            if gi is None:
+                continue
+            if isinstance(gi, torch.Tensor) and gi.layout != torch.strided:
+                if gi.layout != i.layout:
+                    return fail_test('grad is incorrect layout (' + str(gi.layout) + ' is not ' + str(i.layout) + ')')
+                if gi.layout == torch.sparse_coo:
+                    if gi.sparse_dim() != i.sparse_dim():
+                        return fail_test('grad is sparse tensor, but has incorrect sparse_dim')
+                    if gi.dense_dim() != i.dense_dim():
+                        return fail_test('grad is sparse tensor, but has incorrect dense_dim')
+                gi = gi.to_dense()
+                i = i.to_dense()
+            if not gi.eq(0).all():
+                return fail_test('backward not multiplied by grad_output')
+            if gi.type() != i.type():
+                return fail_test("grad is incorrect type")
+            if gi.size() != i.size():
+                return fail_test('grad is incorrect size')
+
+    return True
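
A minimal usage sketch for ``gradcheck`` (an editorial illustration, not part of the generated page); note the double-precision inputs that the note above recommends:

    import torch
    from torch.autograd import gradcheck

    x = torch.randn(4, 3, dtype=torch.float64, requires_grad=True)
    w = torch.randn(3, 2, dtype=torch.float64, requires_grad=True)

    # func may be any Python function taking Tensors and returning a Tensor.
    ok = gradcheck(lambda x, w: (x @ w).sin(), (x, w), eps=1e-6, atol=1e-4)
    print(ok)  # True when numerical and analytical Jacobians agree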
+
+
+def gradgradcheck(func, inputs, grad_outputs=None, eps=1e-6, atol=1e-5, rtol=1e-3,
+                  gen_non_contig_grad_outputs=False, raise_exception=True,
+                  nondet_tol=0.0):
+    r"""Check gradients of gradients computed via small finite differences
+    against analytical gradients w.r.t. tensors in :attr:`inputs` and
+    :attr:`grad_outputs` that are of floating point type and with
+    ``requires_grad=True``.
+
+    This function checks that backpropagating through the gradients computed
+    to the given :attr:`grad_outputs` is correct.
+
+    The check between numerical and analytical gradients uses :func:`~torch.allclose`.
+
+    .. note::
+        The default values are designed for :attr:`input` and
+        :attr:`grad_outputs` of double precision. This check will likely fail if
+        they are of less precision, e.g., ``FloatTensor``.
+
+    .. warning::
+        If any checked tensor in :attr:`input` and :attr:`grad_outputs` has
+        overlapping memory, i.e., different indices pointing to the same memory
+        address (e.g., from :func:`torch.expand`), this check will likely fail
+        because the numerical gradients computed by point perturbation at such
+        indices will change values at all other indices that share the same
+        memory address.
+
+    Args:
+        func (function): a Python function that takes Tensor inputs and returns
+            a Tensor or a tuple of Tensors
+        inputs (tuple of Tensor or Tensor): inputs to the function
+        grad_outputs (tuple of Tensor or Tensor, optional): The gradients with
+            respect to the function's outputs.
+        eps (float, optional): perturbation for finite differences
+        atol (float, optional): absolute tolerance
+        rtol (float, optional): relative tolerance
+        gen_non_contig_grad_outputs (bool, optional): if :attr:`grad_outputs` is
+            ``None`` and :attr:`gen_non_contig_grad_outputs` is ``True``, the
+            randomly generated gradient outputs are made to be noncontiguous
+        raise_exception (bool, optional): indicating whether to raise an exception if
+            the check fails. The exception gives more information about the
+            exact nature of the failure. This is helpful when debugging gradchecks.
+        nondet_tol (float, optional): tolerance for non-determinism. When running
+            identical inputs through the differentiation, the results must either match
+            exactly (default, 0.0) or be within this tolerance. Note that a small amount
+            of nondeterminism in the gradient will lead to larger inaccuracies in
+            the second derivative.
+
+    Returns:
+        True if all differences satisfy allclose condition
+    """
+    tupled_inputs = _as_tuple(inputs)
+
+    if grad_outputs is None:
+        # If grad_outputs is not specified, create random Tensors of the same
+        # shape, type, and device as the outputs
+        def randn_like(x):
+            y = torch.testing.randn_like(x if x.is_floating_point() else x.double())
+            if gen_non_contig_grad_outputs:
+                y = torch.testing.make_non_contiguous(y)
+            return y.requires_grad_()
+        outputs = _as_tuple(func(*tupled_inputs))
+        tupled_grad_outputs = tuple(randn_like(x) for x in outputs)
+    else:
+        tupled_grad_outputs = _as_tuple(grad_outputs)
+
+    num_outputs = len(tupled_grad_outputs)
+
+    def new_func(*args):
+        input_args = args[:-num_outputs]
+        grad_outputs = args[-num_outputs:]
+        outputs = _differentiable_outputs(func(*input_args))
+        input_args = tuple(x for x in input_args if isinstance(x, torch.Tensor) and x.requires_grad)
+        grad_inputs = torch.autograd.grad(outputs, input_args, grad_outputs, create_graph=True)
+        return grad_inputs
+
+    return gradcheck(new_func, tupled_inputs + tupled_grad_outputs, eps, atol, rtol, raise_exception,
+                     nondet_tol=nondet_tol)
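
A corresponding sketch for ``gradgradcheck``, which exercises second derivatives by differentiating the gradient itself (again illustrative only):

    import torch
    from torch.autograd import gradgradcheck

    x = torch.randn(5, dtype=torch.float64, requires_grad=True)

    # Checks the second derivative of (x ** 3).sum() via finite differences
    # over the analytic gradient.
    ok = gradgradcheck(lambda x: (x ** 3).sum(), (x,))
    print(ok)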
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/autograd/profiler.html b/docs/stable/_modules/torch/autograd/profiler.html
new file mode 100644
index 000000000000..667cd17a6dfa
--- /dev/null
+++ b/docs/stable/_modules/torch/autograd/profiler.html
@@ -0,0 +1,1345 @@
+torch.autograd.profiler — PyTorch master documentation

    Source code for torch.autograd.profiler

    +import itertools
    +import torch
    +
    +from collections import defaultdict, namedtuple
    +from operator import attrgetter
    +
    +
    +class EventList(list):
    +    """A list of Events (for pretty printing)"""
    +    def __init__(self, *args, **kwargs):
    +        super(EventList, self).__init__(*args, **kwargs)
    +        self._cpu_children_populated = False
    +
    +    def __str__(self):
    +        return self.table()
    +
    +    def populate_cpu_children(self):
    +        """Populates child events into each underlying FunctionEvent object.
+        One event is a child of another if [s1, e1) is inside [s2, e2), where
+        s1 and e1 are the start and end of the child event's interval, and
+        s2 and e2 are the start and end of the parent event's interval.
+
+        Example: in the event list [[0, 10], [1, 3], [3, 4]], the interval
+        [0, 10] would be the parent of the two other intervals.
+
+        If for any reason two intervals intersect only partially, this function
+        will not record a parent-child relationship between them.
    +        """
    +        if self.cpu_children_populated:
    +            return
    +        events = sorted(
    +            self,
    +            key=attrgetter("thread"),
    +        )
    +        threads = itertools.groupby(events, key=attrgetter("thread"))
    +
    +        # For each thread we keep a stack of current nested parents.
    +        # We maintain the invariant that each interval is a subset of all other
    +        # intervals lower in the stack.
    +        #
    +        # First we sort the intervals by their start time. Then we iterate over them.
+        # Every time we see a new interval we remove several parents from
+        # the top until we restore the invariant. Then a parent-child
+        # relationship is recorded if the stack is not empty.
+        # Finally we add the new interval to the list.
+        #
+        # The algorithm has O(N * log(N)) complexity, where N is the number of
+        # intervals.
    +        for thread_id, thread_events in threads:
    +            thread_events = sorted(
    +                thread_events,
    +                key=lambda event: [event.cpu_interval.start, -event.cpu_interval.end],
    +            )
    +            current_events = []
    +            cur_end = 0
    +            for event in thread_events:
    +                while len(current_events) > 0:
    +                    parent = current_events[-1]
    +                    if event.cpu_interval.start >= parent.cpu_interval.end or \
    +                            event.cpu_interval.end > parent.cpu_interval.end:
    +                        # this can't be a parent
    +                        current_events.pop()
    +                    else:
    +                        parent.append_cpu_child(event)
    +                        break
    +
    +                current_events.append(event)
    +
    +        self._cpu_children_populated = True
    +
    +    @property
    +    def self_cpu_time_total(self):
    +        return sum([event.self_cpu_time_total for event in self])
    +
    +    @property
    +    def cpu_children_populated(self):
    +        return self._cpu_children_populated
    +
    +    def table(self, sort_by=None, row_limit=100, header=None):
    +        """Prints an EventList as a nicely formatted table.
    +
    +        Arguments:
    +            sort_by (str, optional): Attribute used to sort entries. By default
    +                they are printed in the same order as they were registered.
    +                Valid keys include: ``cpu_time``, ``cuda_time``, ``cpu_time_total``,
    +                ``cuda_time_total``, ``count``.
    +
    +        Returns:
    +            A string containing the table.
    +        """
    +        return build_table(
    +            self, sort_by=sort_by, row_limit=row_limit, header=header)
    +
    +    def export_chrome_trace(self, path):
    +        """Exports an EventList as a Chrome tracing tools file.
    +
+        The checkpoint can later be loaded and inspected under the ``chrome://tracing`` URL.
    +
    +        Arguments:
    +            path (str): Path where the trace will be written.
    +        """
    +        import json
    +        with open(path, 'w') as f:
    +            chrome_events = []
    +            next_id = 0
    +            for evt in self:
    +                chrome_events.append(dict(
    +                    name=evt.name,
    +                    ph='X',
    +                    ts=evt.cpu_interval.start,
    +                    dur=evt.cpu_interval.elapsed_us(),
    +                    tid=evt.thread,
    +                    pid='CPU functions',
    +                    args={},
    +                ))
    +                for k in evt.kernels:
    +                    # 's' and 'f' draw Flow arrows from
    +                    # the CPU launch to the GPU kernel
    +                    chrome_events.append(dict(
    +                        name=evt.name,
    +                        ph='s',
    +                        ts=evt.cpu_interval.start,
    +                        tid=evt.thread,
    +                        pid='CPU functions',
    +                        id=next_id,
    +                        cat='cpu_to_cuda',
    +                        args={},
    +                    ))
    +                    chrome_events.append(dict(
    +                        name=k.name,
    +                        ph='f',
    +                        ts=k.interval.start,
    +                        tid=k.device,
    +                        pid='CUDA functions',
    +                        id=next_id,
    +                        cat='cpu_to_cuda',
    +                        args={},
    +                    ))
    +                    chrome_events.append(dict(
    +                        name=k.name,
    +                        ph='X',
    +                        ts=k.interval.start,
    +                        dur=k.interval.elapsed_us(),
    +                        tid=k.device,
    +                        pid='CUDA functions',
    +                        args={},
    +                    ))
    +                    next_id += 1
    +
    +            json.dump(chrome_events, f)
    +
    +    def key_averages(self, group_by_input_shapes=False):
    +        """Averages all function events over their keys.
    +
+        @param group_by_input_shapes The key becomes
+        (event name, input shapes) rather than just the event name.
+        This is useful for seeing which input dimensionalities contribute
+        most to the runtime, and may help with dimension-specific optimizations
+        or with choosing the best candidates for quantization (i.e. fitting a roofline).
    +
    +        Returns:
    +            An EventList containing FunctionEventAvg objects.
    +        """
    +        self.populate_cpu_children()
    +        stats = defaultdict(FunctionEventAvg)
    +
    +        def get_key(event, group_by_input_shapes):
    +            if not group_by_input_shapes:
    +                return event.key
    +            return (event.key, str(event.input_shapes))
    +        for evt in self:
    +            stats[get_key(evt, group_by_input_shapes)].add(
    +                evt, group_by_input_shapes)
    +        return EventList(stats.values())
    +
    +    def total_average(self):
    +        """Averages all events.
    +
    +        Returns:
    +            A FunctionEventAvg object.
    +        """
    +        total_stat = FunctionEventAvg()
    +        for evt in self:
    +            total_stat += evt
    +            total_stat.key = None
    +        total_stat.key = 'Total'
    +        return total_stat
    +
    +
    +
+class profile(object):
+    """Context manager that manages autograd profiler state and holds a summary of results.
+
+    Under the hood it just records events of functions being executed in C++ and
+    exposes those events to Python. You can wrap any code into it and it will
+    only report runtime of PyTorch functions.
+
+    Arguments:
+        enabled (bool, optional): Setting this to False makes this context manager a no-op.
+            Default: ``True``.
+
+        use_cuda (bool, optional): Enables timing of CUDA events as well using the cudaEvent API.
+            Adds approximately 4us of overhead to each tensor operation.
+            Default: ``False``
+
+        record_shapes (bool, optional): If shapes recording is set, information
+            about input dimensions will be collected. This allows one to see which
+            dimensions have been used under the hood and further group by them
+            using prof.key_averages(group_by_input_shape=True). Please note that
+            shape recording might skew your profiling data. It is recommended to
+            use separate runs with and without shape recording to validate the timing.
+            Most likely the skew will be negligible for bottom-most events (in a case
+            of nested function calls). But for higher-level functions the total
+            self cpu time might be artificially increased because of the shape
+            collection.
+
+    .. warning::
+        This context manager should not be called recursively, i.e. at most one
+        instance should be enabled at any given time.
+
+    Example:
+        >>> x = torch.randn((1, 1), requires_grad=True)
+        >>> with torch.autograd.profiler.profile() as prof:
+        >>>     for _ in range(100):  # any normal python code, really!
+        >>>         y = x ** 2
+        >>>         y.backward()
+        >>> # NOTE: some columns were removed for brevity
+        >>> print(prof.key_averages().table(sort_by="self_cpu_time_total"))
+        -----------------------------------  ---------------  ---------------  ---------------
+        Name                                 Self CPU total   CPU time avg     Number of Calls
+        -----------------------------------  ---------------  ---------------  ---------------
+        mul                                  32.048ms         32.048ms         200
+        pow                                  27.041ms         27.041ms         200
+        PowBackward0                         9.727ms          55.483ms         100
+        torch::autograd::AccumulateGrad      9.148ms          9.148ms          100
+        torch::autograd::GraphRoot           691.816us        691.816us        100
+        -----------------------------------  ---------------  ---------------  ---------------
+    """
+    def __init__(self, enabled=True, use_cuda=False, record_shapes=False):
+        self.enabled = enabled
+        self.use_cuda = use_cuda
+        self.function_events = None
+        if not self.enabled:
+            return
+        self.entered = False
+        self.record_shapes = record_shapes
+
+    def __enter__(self):
+        if not self.enabled:
+            return
+        if self.entered:
+            raise RuntimeError("autograd profiler traces are not reentrant")
+        self.entered = True
+        profiler_kind = torch.autograd.ProfilerState.CUDA if self.use_cuda \
+            else torch.autograd.ProfilerState.CPU
+        torch.autograd._enable_profiler(
+            torch.autograd.ProfilerConfig(profiler_kind, self.record_shapes))
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if not self.enabled:
+            return
+        records = torch.autograd._disable_profiler()
+        self.function_events = EventList(parse_cpu_trace(records))
+        return False
+
+    def __repr__(self):
+        if self.function_events is None:
+            return '<unfinished torch.autograd.profile>'
+        return repr(self.function_events)
+
+    def __str__(self):
+        if self.function_events is None:
+            return '<unfinished torch.autograd.profile>'
+        return str(self.function_events)
+
+    def _check_finish(self):
+        if self.function_events is None:
+            raise RuntimeError("can't export a trace that didn't finish running")
+        self.function_events.populate_cpu_children()
+    def table(self, sort_by=None, row_limit=100, header=None):
+        self._check_finish()
+        return self.function_events.table(
+            sort_by=sort_by, row_limit=row_limit, header=header)
+    table.__doc__ = EventList.table.__doc__
+
+    def export_chrome_trace(self, path):
+        self._check_finish()
+        return self.function_events.export_chrome_trace(path)
+    export_chrome_trace.__doc__ = EventList.export_chrome_trace.__doc__
+
+    def key_averages(self, group_by_input_shape=False):
+        self._check_finish()
+        return self.function_events.key_averages(group_by_input_shape)
+    key_averages.__doc__ = EventList.key_averages.__doc__
+
+    def total_average(self):
+        self._check_finish()
+        return self.function_events.total_average()
+    total_average.__doc__ = EventList.total_average.__doc__
+
+    @property
+    def self_cpu_time_total(self):
+        """Returns total time spent on CPU, obtained as a sum of
+        all self times across all the events.
+        """
+        self._check_finish()
+        return self.function_events.self_cpu_time_total
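
A minimal sketch tying these methods together (an editorial illustration, not part of the generated page; ``trace.json`` is an arbitrary output path):

    import torch
    import torch.autograd.profiler as profiler

    x = torch.randn(100, 100, requires_grad=True)
    with profiler.profile(record_shapes=True) as prof:
        y = (x @ x).sum()
        y.backward()

    # Group by (name, input shapes) and sort, as described in key_averages above.
    print(prof.key_averages(group_by_input_shape=True).table(sort_by="cpu_time_total"))
    prof.export_chrome_trace("trace.json")  # inspect via chrome://tracing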
+
+
+class emit_nvtx(object):
+    """Context manager that makes every autograd operation emit an NVTX range.
+
+    It is useful when running the program under nvprof::
+
+        nvprof --profile-from-start off -o trace_name.prof -- <regular command here>
+
+    Unfortunately, there's no way to force nvprof to flush the data it collected
+    to disk, so for CUDA profiling one has to use this context manager to annotate
+    nvprof traces and wait for the process to exit before inspecting them.
+    Then, either NVIDIA Visual Profiler (nvvp) can be used to visualize the timeline, or
+    :func:`torch.autograd.profiler.load_nvprof` can load the results for inspection,
+    e.g. in a Python REPL.
+
+    .. warning::
+        This context manager should not be called recursively, i.e. at most one
+        instance should be enabled at any given time.
+
+    Arguments:
+        enabled (bool, optional, default=True): Setting ``enabled=False`` makes this context manager a no-op.
+            Default: ``True``.
+        record_shapes (bool, optional, default=False): If ``record_shapes=True``, the nvtx range wrapping
+            each autograd op will append information about the sizes of Tensor arguments received
+            by that op, in the following format:
+            ``[[arg0.size(0), arg0.size(1), ...], [arg1.size(0), arg1.size(1), ...], ...]``
+            Non-tensor arguments will be represented by ``[]``.
+            Arguments will be listed in the order they are received by the backend op.
+            Please note that this order may not match the order in which those arguments were passed
+            on the Python side. Also note that shape recording may increase the overhead of nvtx range creation.
+
+    Example:
+        >>> with torch.cuda.profiler.profile():
+        ...     model(x)  # Warmup CUDA memory allocator and profiler
+        ...     with torch.autograd.profiler.emit_nvtx():
+        ...         model(x)
+
+    **Forward-backward correlation**
+
+    When viewing a profile created using :class:`emit_nvtx` in the Nvidia Visual Profiler,
+    correlating each backward-pass op with the corresponding forward-pass op can be difficult.
+    To ease this task, :class:`emit_nvtx` appends sequence number information to the ranges it
+    generates.
+
+    During the forward pass, each function range is decorated with ``seq=<N>``. ``seq`` is a running
+    counter, incremented each time a new backward Function object is created and stashed for backward.
+    Thus, the ``seq=<N>`` annotation associated with each forward function range tells you that
+    if a backward Function object is created by this forward function,
+    the backward object will receive sequence number N.
+    During the backward pass, the top-level range wrapping each C++ backward Function's
+    ``apply()`` call is decorated with ``stashed seq=<M>``. ``M`` is the sequence number that
+    the backward object was created with. By comparing ``stashed seq`` numbers in backward with ``seq``
+    numbers in forward, you can track down which forward op created each backward Function.
+
+    Any functions executed during the backward pass are also decorated with ``seq=<N>``. During
+    default backward (with ``create_graph=False``) this information is irrelevant, and in fact,
+    ``N`` may simply be 0 for all such functions. Only the top-level ranges associated with
+    backward Function objects' ``apply()`` methods are useful, as a way to correlate these Function
+    objects with the earlier forward pass.
+
+    **Double-backward**
+
+    If, on the other hand, a backward pass with ``create_graph=True`` is underway (in other words,
+    if you are setting up for a double-backward), each function's execution during backward
+    is given a nonzero, useful ``seq=<N>``. Those functions may themselves create Function objects
+    to be executed later during double-backward, just as the original functions in the forward pass did.
+    The relationship between backward and double-backward is conceptually the same as the relationship
+    between forward and backward: the functions still emit current-sequence-number-tagged ranges,
+    the Function objects they create still stash those sequence numbers, and during the eventual
+    double-backward, the Function objects' ``apply()`` ranges are still tagged with ``stashed seq``
+    numbers, which can be compared to ``seq`` numbers from the backward pass.
+
+    .. warning::
+        The sequence number is thread-local, and some forward functions don't create an associated
+        backward Function object (instead delegating that to sub-functions further down the call chain).
+        For these reasons, the correspondence of stashed sequence numbers in
+        backward Function ``apply()`` ranges with ``seq`` numbers in forward-pass ranges is
+        not guaranteed to be 1 to 1. The sequence numbers alone may not be enough to fully
+        disambiguate which forward function created which
+        backward Function object. You may need to make a judgment based on analytic knowledge of what
+        the expected correspondence should be.
+    """
+    def __init__(self, enabled=True, record_shapes=False):
+        self.enabled = enabled
+        self.entered = False
+        self.record_shapes = record_shapes
+
+    def __enter__(self):
+        if not self.enabled:
+            return
+        if self.entered:
+            raise RuntimeError("NVTX annotation context manager is not reentrant")
+        self.entered = True
+        torch.cuda.synchronize()
+        torch.autograd._enable_profiler(
+            torch.autograd.ProfilerConfig(
+                torch.autograd.ProfilerState.NVTX,
+                self.record_shapes
+            )
+        )
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if not self.enabled:
+            return
+        torch.cuda.synchronize()
+        torch.autograd._disable_profiler()
+        return False
+
+
+def load_nvprof(path):
+    """Opens an nvprof trace file and parses autograd annotations.
+
+    Arguments:
+        path (str): path to nvprof trace
+    """
+    return EventList(parse_nvprof_trace(path))
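
A sketch of the nvprof workflow described under :class:`emit_nvtx` above (illustrative; ``trace_name.prof`` is a placeholder path):

    # Run under: nvprof --profile-from-start off -o trace_name.prof -- python script.py
    import torch
    import torch.autograd.profiler as profiler

    x = torch.randn(64, 64, device='cuda')
    with torch.cuda.profiler.profile():
        torch.mm(x, x)                     # warm up the allocator and profiler
        with profiler.emit_nvtx():
            torch.mm(x, x)

    # Later, in a Python REPL:
    #   events = profiler.load_nvprof('trace_name.prof')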
+
+
+################################################################################
+# FunctionEvent
+
+def format_time(time_us):
+    """Defines how to format time in FunctionEvent"""
+    US_IN_SECOND = 1000.0 * 1000.0
+    US_IN_MS = 1000.0
+    if time_us >= US_IN_SECOND:
+        return '{:.3f}s'.format(time_us / US_IN_SECOND)
+    if time_us >= US_IN_MS:
+        return '{:.3f}ms'.format(time_us / US_IN_MS)
+    return '{:.3f}us'.format(time_us)
+
+
+def format_time_share(time_us, total_time_us):
+    """Defines how to format time in FunctionEvent"""
+    if total_time_us == 0:
+        assert(time_us == 0)
+        return "NaN"
+    return '{:.2f}%'.format(time_us * 100.0 / total_time_us)
+
+
+def attr_formatter(name):
+    return property(lambda self: format_time(getattr(self, name)))
+
+
+class FormattedTimesMixin(object):
+    """Helpers for FunctionEvent and FunctionEventAvg.
+
+    The subclass should define `*_time_total` and `count` attributes.
+    """
+    cpu_time_str = attr_formatter('cpu_time')
+    cuda_time_str = attr_formatter('cuda_time')
+    cpu_time_total_str = attr_formatter('cpu_time_total')
+    cuda_time_total_str = attr_formatter('cuda_time_total')
+    self_cpu_time_total_str = attr_formatter('self_cpu_time_total')
+
+    @property
+    def cpu_time(self):
+        return 0.0 if self.count == 0 else 1.0 * self.cpu_time_total / self.count
+
+    @property
+    def cuda_time(self):
+        return 0.0 if self.count == 0 else 1.0 * self.cuda_time_total / self.count
+
+
+class Interval(object):
+    def __init__(self, start, end):
+        self.start = start
+        self.end = end
+
+    def elapsed_us(self):
+        return self.end - self.start
+
+
+Kernel = namedtuple('Kernel', ['name', 'device', 'interval'])
+
+
+# TODO: record TID too
+class FunctionEvent(FormattedTimesMixin):
+    """Profiling information about a single function."""
+    def __init__(self, id, name, thread, cpu_start, cpu_end, input_shapes=None):
+        self.id = id
+        self.name = name
+        self.cpu_interval = Interval(cpu_start, cpu_end)
+        self.thread = thread
+        self.kernels = []
+        self.count = 1
+        self.cpu_children = []
+        self.input_shapes = input_shapes
+
+    def append_kernel(self, name, device, start, end):
+        self.kernels.append(Kernel(name, device, Interval(start, end)))
+
+    def append_cpu_child(self, child):
+        """Append a CPU child of type FunctionEvent.
+
+        One is supposed to append only direct children to the event to have
+        correct self cpu time being reported.
+        """
+        assert(isinstance(child, FunctionEvent))
+        self.cpu_children.append(child)
+
+    @property
+    def self_cpu_time_total(self):
+        return self.cpu_time_total - sum(
+            [child.cpu_time_total for child in self.cpu_children]
+        )
+
+    @property
+    def cuda_time_total(self):
+        return sum(kinfo.interval.elapsed_us() for kinfo in self.kernels)
+
+    @property
+    def cpu_time_total(self):
+        return self.cpu_interval.elapsed_us()
+
+    @property
+    def key(self):
+        return self.name
+
+    def __repr__(self):
+        return (
+            '<FunctionEvent id={} cpu_time={} cpu_start={} cpu_end={} '
+            'cpu_children={} cuda_time={} name={} thread={} input_shapes={}>'.format(
+                self.id,
+                self.cpu_time_str,
+                self.cpu_interval.start,
+                self.cpu_interval.end,
+                str([child.id for child in self.cpu_children]),
+                self.cuda_time_str,
+                self.name,
+                self.thread,
+                str(self.input_shapes),
+            )
+        )
+
+
+class FunctionEventAvg(FormattedTimesMixin):
+    """Used to average stats over multiple FunctionEvent objects."""
+    def __init__(self):
+        self.key = None
+        self.count = 0
+        self.cpu_time_total = 0
+        self.cuda_time_total = 0
+        self.self_cpu_time_total = 0
+        self.input_shapes = None
+
+    def add(self, other, group_by_input_shapes=False):
+        if self.key is None:
+            self.key = other.key
+            if group_by_input_shapes:
+                self.input_shapes = other.input_shapes
+        assert (
+            not group_by_input_shapes or
+            other.input_shapes == self.input_shapes
+        )
+        assert isinstance(other, FunctionEvent)
+        assert other.key == self.key
+        self.cpu_time_total += other.cpu_time
+        self.cuda_time_total += other.cuda_time
+        self.self_cpu_time_total += other.self_cpu_time_total
+        self.count += 1
+        return self
+
+    def __repr__(self):
+        return (
+            '<FunctionEventAvg key={} self_cpu_time={} cpu_time={} '
+            'cuda_time={} input_shapes={}>'.format(
+                self.key,
+                self.self_cpu_time_total_str,
+                self.cpu_time_str,
+                self.cuda_time_str,
+                str(self.input_shapes),
+            )
+        )
+
+
+################################################################################
+# Utilities
+
+class StringTable(defaultdict):
+    def __missing__(self, key):
+        self[key] = torch._C._demangle(key)
+        return self[key]
+
+
+################################################################################
+# CPU checkpoints
+
+def parse_cpu_trace(thread_records):
+    next_id = 0
+    start_record = None
+    cuda_records = {}
+    functions = []
+    record_stack = []
+    string_table = StringTable()
+
+    # cuda start events and the overall profiler start event don't happen
+    # at exactly the same time because we need to record an event on each device
+    # and each record takes ~4us. So we adjust here by the difference,
+    # adding the difference in CPU time between the profiler start event
+    # and the CPU time of the cuda start event for the device
+    def adjusted_time(cuda_record):
+        assert cuda_record.device() != -1
+        cuda_time_0 = cuda_records[cuda_record.device()]
+        return cuda_time_0.cuda_elapsed_us(cuda_record) + start_record.cpu_elapsed_us(cuda_time_0)
+
+    # '__start_profile' is not guaranteed to be first, so we must find it here
+    for record in itertools.chain(*thread_records):
+        if record.name() == '__start_profile':
+            start_record = record
+        elif record.name() == '__cuda_start_event':
+            assert record.device() != -1
+            cuda_records[record.device()] = record
+    assert start_record is not None
+
+    for record in itertools.chain(*thread_records):
+        if record.kind() == 'mark':
+            continue
+        elif record.kind() == 'push':
+            record_stack.append((next_id, record))
+            next_id += 1
+        elif record.kind() == 'pop':
+            function_id, start = record_stack.pop()
+            fe = FunctionEvent(
+                id=function_id,
+                name=string_table[start.name()],
+                thread=start.thread_id(),
+                cpu_start=start_record.cpu_elapsed_us(start),
+                cpu_end=start_record.cpu_elapsed_us(record),
+                input_shapes=start.shapes())
+            if start.has_cuda():
+                cuda_start = adjusted_time(start)
+                cuda_end = adjusted_time(record)
+                fe.append_kernel(start.name(),
+                                 start.device(),
+                                 cuda_start,
+                                 cuda_end)
+            functions.append(fe)
+
+    functions.sort(key=lambda evt: evt.cpu_interval.start)
+    return functions
+
+
+################################################################################
+# CUDA checkpoints
+
+class EnforceUnique(object):
+    """Raises an error if a key is seen more than once."""
+    def __init__(self):
+        self.seen = set()
+
+    def see(self, *key):
+        if key in self.seen:
+            raise RuntimeError('duplicate key: ' + str(key))
+        self.seen.add(key)
+
+
+def parse_nvprof_trace(path):
+    import sqlite3
+    conn = sqlite3.connect(path)
+    conn.row_factory = sqlite3.Row
+
+    # Parse strings table
+    strings = {}
+    for r in conn.execute("SELECT _id_ as id, value FROM StringTable"):
+        strings[r["id"]] = torch._C._demangle(r["value"])
+
+    # First, find all functions and create FunctionEvents for them
+    marker_query = """
+    SELECT
+        start.id AS marker_id, start.name, start.timestamp AS start_time, end.timestamp AS end_time
+    FROM
+        CUPTI_ACTIVITY_KIND_MARKER AS start INNER JOIN CUPTI_ACTIVITY_KIND_MARKER AS end
+        ON start.id = end.id
+    WHERE
+        start.name != 0 AND end.name = 0
+    """
+    functions = []
+    functions_map = {}
+    unique = EnforceUnique()
+    for row in conn.execute(marker_query):
+        unique.see(row['marker_id'])
+        evt = FunctionEvent(id=row['marker_id'],
+                            name=strings[row['name']],
+                            cpu_start=row['start_time'],
+                            cpu_end=row['end_time'],
+                            thread=0)  # TODO: find in sqlite database
+        functions.append(evt)
+        functions_map[evt.id] = evt
+
+    # Now, correlate all kernels with FunctionEvents
+    kernel_query = """
+    SELECT
+        start.id AS marker_id, start.name, start.timestamp, end.timestamp,
+        runtime._id_ AS runtime_id, runtime.cbid, runtime.start AS runtime_start, runtime.end AS runtime_end,
+        kernel.start AS kernel_start, kernel.end AS kernel_end, kernel.name AS kernel_name
+    FROM
+        CUPTI_ACTIVITY_KIND_MARKER AS start
+        INNER JOIN CUPTI_ACTIVITY_KIND_MARKER AS end
+            ON start.id = end.id
+        INNER JOIN CUPTI_ACTIVITY_KIND_RUNTIME as runtime
+            ON (start.timestamp < runtime.start AND runtime.end < end.timestamp)
+        INNER JOIN CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL AS kernel
+            ON kernel.correlationId = runtime.correlationId
+    """
+    unique = EnforceUnique()
+    for row in conn.execute(kernel_query):
+        unique.see(row['marker_id'], row['runtime_id'])
+        assert row['cbid'] == 13  # 13 == Launch
+        evt = functions_map[row['marker_id']]
+        evt.append_kernel(row['kernel_name'],
+                          0,
+                          row['kernel_start'],
+                          row['kernel_end'])
+
+    functions.sort(key=lambda evt: evt.cpu_interval.start)
+    return functions
+
+
+################################################################################
+# Pretty printer
+
+
+def build_table(events, sort_by=None, header=None, row_limit=100):
+    """Prints a summary of events (which can be a list of FunctionEvent or FunctionEventAvg)."""
+    if len(events) == 0:
+        return ""
+
+    if sort_by is not None:
+        events = EventList(sorted(
+            events, key=lambda evt: getattr(evt, sort_by), reverse=True
+        ))
+
+    has_input_shapes = any(
+        [event.input_shapes is not None for event in events])
+    name_column_width = max([len(evt.key) for evt in events]) + 4
+    DEFAULT_COLUMN_WIDTH = 15
+    SHAPES_COLUMN_WIDTH = 35
+
+    headers = [
+        'Name',
+        'Self CPU total %',
+        'Self CPU total',
+        'CPU total %',
+        'CPU total',
+        'CPU time avg',
+        'CUDA total %',
+        'CUDA total',
+        'CUDA time avg',
+        'Number of Calls',
+    ]
+
+    # Have to use a list because nonlocal is Py3 only...
+    SPACING_SIZE = 2
+    row_format = [""]
+    header_sep = [""]
+    line_length = [-SPACING_SIZE]
+
+    def add_column(padding):
+        row_format[0] += '{: <' + str(padding) + '}  '
+        header_sep[0] += '-' * padding + '  '
+        line_length[0] += padding + SPACING_SIZE
+
+    add_column(name_column_width)
+    for _ in headers[1:]:
+        add_column(DEFAULT_COLUMN_WIDTH)
+
+    if has_input_shapes:
+        headers.append('Input Shapes')
+        add_column(SHAPES_COLUMN_WIDTH)
+
+    row_format = row_format[0]
+    header_sep = header_sep[0]
+    line_length = line_length[0]
+    add_column = None
+
+    # Have to use a list because nonlocal is Py3 only...
+    result = []
+
+    def append(s):
+        result.append(s)
+        result.append('\n')  # Yes, newline after the end as well
+
+    self_cpu_time_total = sum([event.self_cpu_time_total for event in events])
+    cuda_time_total = sum([evt.cuda_time_total for evt in events])
+    # Actual printing
+    if header is not None:
+        append('=' * line_length)
+        append(header)
+    append(header_sep)
+    append(row_format.format(*headers))
+
+    append(header_sep)
+    for evt in events[:row_limit]:
+        row_values = [
+            evt.key,  # Name
+            # Self CPU total %
+            format_time_share(evt.self_cpu_time_total,
+                              self_cpu_time_total),
+            evt.self_cpu_time_total_str,  # Self CPU total
+            # CPU total %
+            format_time_share(evt.cpu_time_total, self_cpu_time_total),
+            evt.cpu_time_total_str,  # CPU total
+            evt.cpu_time_str,  # CPU time avg
+            # CUDA time total %
+            format_time_share(evt.cuda_time_total, cuda_time_total),
+            evt.cuda_time_total_str,
+            evt.cuda_time_str,  # Cuda time avg
+            evt.count,  # Number of calls
+        ]
+        if has_input_shapes:
+            row_values.append(str(evt.input_shapes)[:SHAPES_COLUMN_WIDTH])
+        append(row_format.format(*row_values))
+
+    append(header_sep)
+    append("Self CPU time total: {}".format(format_time(self_cpu_time_total)))
+    append("CUDA time total: {}".format(format_time(cuda_time_total)))
+    return ''.join(result)
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/cuda.html b/docs/stable/_modules/torch/cuda.html
new file mode 100644
index 000000000000..d556ef54d0bd
--- /dev/null
+++ b/docs/stable/_modules/torch/cuda.html
@@ -0,0 +1,1196 @@
+torch.cuda — PyTorch master documentation

    Source code for torch.cuda

    +r"""
+This package adds support for CUDA tensor types, which implement the same
+functions as CPU tensors but utilize GPUs for computation.
    +
    +It is lazily initialized, so you can always import it, and use
    +:func:`is_available()` to determine if your system supports CUDA.
    +
    +:ref:`cuda-semantics` has more details about working with CUDA.
    +"""
    +
    +import contextlib
    +import platform
    +import ctypes
    +import os
    +import sys
    +import torch
    +import traceback
    +import warnings
    +from torch._six import raise_from
    +from subprocess import Popen, PIPE
    +from multiprocessing.util import register_after_fork as _register_after_fork
    +from ._utils import _get_device_index
    +
    +_initialized = False
    +_queued_calls = []  # don't invoke these until initialization occurs
    +_in_bad_fork = False  # this global is also used in torch.manual_seed
    +_original_pid = False
    +_cudart = None
    +
    +
    +def find_cuda_windows_lib():
    +    # Override the default search process
    +    # Fixes https://github.com/pytorch/pytorch/issues/20202
+    # The library selection will be done in these directories, one by one:
+    # 1. [Package Root]\Lib
+    #    That's where our libraries are, so they should be loaded first.
+    # 2. [Python Root]\Library\bin
+    #    That's where `cudatoolkit` stores the CUDA libraries.
+    # 3. Default directories
+    #    That is, the directories stored in the `PATH` environment variable.
    +    test_env = os.environ.copy()
    +    old_path = test_env['PATH']
    +    py_dll_path = os.path.join(sys.exec_prefix, 'Library', 'bin')
    +    th_dll_path = os.path.join(os.path.dirname(
    +        os.path.dirname(__file__)), 'lib')
    +    test_env['PATH'] = ';'.join([th_dll_path, py_dll_path, old_path])
    +    proc = Popen(['where', 'cudart64*.dll'], stdout=PIPE,
    +                 stderr=PIPE, stdin=PIPE, env=test_env)
    +    out, err = proc.communicate()
    +    out = out.decode().strip()
    +    if len(out) > 0:
    +        if out.find('\r\n') != -1:
    +            out = out.split('\r\n')[0]
    +        cuda_lib_name = os.path.basename(out)
    +        cuda_lib = os.path.splitext(cuda_lib_name)[0]
    +        cuda_lib = str(cuda_lib)
    +        return ctypes.cdll.LoadLibrary(cuda_lib)
    +    else:
    +        return None
    +
    +
    +
+def is_available():
+    r"""Returns a bool indicating if CUDA is currently available."""
+    if (not hasattr(torch._C, '_cuda_isDriverSufficient') or
+            not torch._C._cuda_isDriverSufficient()):
+        return False
+    return torch._C._cuda_getDeviceCount() > 0
+
+
+def _sleep(cycles):
+    torch._C._cuda_sleep(cycles)
+
+
+def _load_cudart():
+    # First check the main program for CUDA symbols
+    if platform.system() == 'Windows':
+        lib = find_cuda_windows_lib()
+    else:
+        lib = ctypes.cdll.LoadLibrary(None)
+    if hasattr(lib, 'cudaGetErrorName'):
+        return lib
+
+    raise RuntimeError(
+        "couldn't find libcudart. Make sure CUDA libraries are installed in a "
+        "default location, or that they're in {}."
+        .format('DYLD_LIBRARY_PATH' if platform.system() == 'Darwin' else
+                'LD_LIBRARY_PATH'))
+
+
+def _check_driver():
+    if not hasattr(torch._C, '_cuda_isDriverSufficient'):
+        raise AssertionError("Torch not compiled with CUDA enabled")
+    if not torch._C._cuda_isDriverSufficient():
+        if torch._C._cuda_getDriverVersion() == 0:
+            # found no NVIDIA driver on the system
+            raise AssertionError("""
+Found no NVIDIA driver on your system. Please check that you
+have an NVIDIA GPU and installed a driver from
+http://www.nvidia.com/Download/index.aspx""")
+        else:
+            # TODO: directly link to the alternative bin that needs install
+            raise AssertionError("""
+The NVIDIA driver on your system is too old (found version {}).
+Please update your GPU driver by downloading and installing a new
+version from the URL: http://www.nvidia.com/Download/index.aspx
+Alternatively, go to: https://pytorch.org to install
+a PyTorch version that has been compiled with your version
+of the CUDA driver.""".format(str(torch._C._cuda_getDriverVersion())))
+
+
+def _check_capability():
+    incorrect_binary_warn = """
+    Found GPU%d %s which requires CUDA_VERSION >= %d to
+     work properly, but your PyTorch was compiled
+    with CUDA_VERSION %d. Please install the correct PyTorch binary
+    using instructions from https://pytorch.org
+    """
+
+    old_gpu_warn = """
+    Found GPU%d %s which is of cuda capability %d.%d.
+    PyTorch no longer supports this GPU because it is too old.
+    The minimum cuda capability that we support is 3.5.
+    """
+
+    CUDA_VERSION = torch._C._cuda_getCompiledVersion()
+    for d in range(device_count()):
+        capability = get_device_capability(d)
+        major = capability[0]
+        minor = capability[1]
+        name = get_device_name(d)
+        if capability == (3, 0) or major < 3:
+            warnings.warn(old_gpu_warn % (d, name, major, capability[1]))
+        elif CUDA_VERSION <= 9000 and major >= 7 and minor >= 5:
+            warnings.warn(incorrect_binary_warn % (d, name, 10000, CUDA_VERSION))
+
+
+def _lazy_call(callable):
+    if _initialized:
+        callable()
+    else:
+        # Don't store the actual traceback to avoid memory cycle
+        _queued_calls.append((callable, traceback.format_stack()))
+
+_lazy_call(_check_capability)
+
+
+class DeferredCudaCallError(Exception):
+    pass
    [docs]def init(): + r"""Initialize PyTorch's CUDA state. You may need to call + this explicitly if you are interacting with PyTorch via + its C API, as Python bindings for CUDA functionality will not + be until this initialization takes place. Ordinary users + should not need this, as all of PyTorch's CUDA methods + automatically initialize CUDA state on-demand. + + Does nothing if the CUDA state is already initialized. + """ + _lazy_init()
    + + +def _lazy_init(): + global _initialized, _cudart, _original_pid, _queued_calls + if _initialized: + return + if _in_bad_fork: + from sys import version_info + if version_info < (3, 4): + msg = ("To use CUDA with multiprocessing, you must use Python " + "3.4+ and the 'spawn' start method") + else: + msg = ("To use CUDA with multiprocessing, you must use the " + "'spawn' start method") + raise RuntimeError( + "Cannot re-initialize CUDA in forked subprocess. " + msg) + _check_driver() + torch._C._cuda_init() + _cudart = _load_cudart() + _cudart.cudaGetErrorName.restype = ctypes.c_char_p + _cudart.cudaGetErrorString.restype = ctypes.c_char_p + _original_pid = os.getpid() + _initialized = True + # Important to do this after _initialized, since some queued calls + # may themselves call _lazy_init() + for queued_call, orig_traceback in _queued_calls: + try: + queued_call() + except Exception as e: + msg = ("CUDA call failed lazily at initialization with error: {}\n\n" + "CUDA call was originally invoked at:\n\n{}").format(str(e), orig_traceback) + raise_from(DeferredCudaCallError(msg), e) + + +def _after_fork(arg): + global _initialized, _in_bad_fork + if _initialized and _original_pid != os.getpid(): + _initialized = False + _in_bad_fork = True + _CudaBase.__new__ = _lazy_new + torch._C._cuda_set_run_yet_variable_to_false() + +_register_after_fork(_after_fork, _after_fork) + + +def cudart(): + _lazy_init() + return _cudart + + +class cudaStatus(object): + SUCCESS = 0 + ERROR_NOT_READY = 34 + + +class CudaError(RuntimeError): + def __init__(self, code): + msg = cudart().cudaGetErrorString(code).decode('utf-8') + super(CudaError, self).__init__('{0} ({1})'.format(msg, code)) + + +def check_error(res): + if res != cudaStatus.SUCCESS: + raise CudaError(res) + + +
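The lazy-initialization machinery above means that merely importing ``torch.cuda`` never touches the driver. A minimal sketch of the usual guard-then-use pattern (shapes are illustrative):

import torch

# is_available() does not initialize CUDA, so it is a cheap guard.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# The first CUDA op triggers _lazy_init(); an explicit torch.cuda.init()
# is only needed when driving CUDA through the C API.
x = torch.ones(4, device=device)
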
[docs]class device(object):
+    r"""Context-manager that changes the selected device.
+
+    Arguments:
+        device (torch.device or int): device index to select. It's a no-op if
+            this argument is a negative integer or ``None``.
+    """
+
+    def __init__(self, device):
+        self.idx = _get_device_index(device, optional=True)
+        self.prev_idx = -1
+
+    def __enter__(self):
+        if self.idx == -1:
+            return
+        self.prev_idx = torch._C._cuda_getDevice()
+        if self.prev_idx != self.idx:
+            torch._C._cuda_setDevice(self.idx)
+        _lazy_init()
+
+    def __exit__(self, *args):
+        if self.prev_idx != self.idx:
+            torch._C._cuda_setDevice(self.prev_idx)
+        return False
+
+
[docs]class device_of(device):
+    r"""Context-manager that changes the current device to that of given object.
+
+    You can use both tensors and storages as arguments. If a given object is
+    not allocated on a GPU, this is a no-op.
+
+    Arguments:
+        obj (Tensor or Storage): object allocated on the selected device.
+    """
+
+    def __init__(self, obj):
+        idx = obj.get_device() if obj.is_cuda else -1
+        super(device_of, self).__init__(idx)
+
+
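A short usage sketch for the two context managers above, assuming a machine with at least two visible GPUs:

import torch

with torch.cuda.device(1):
    a = torch.zeros(10)          # CPU tensor: unaffected by the context
    b = torch.zeros(10).cuda()   # allocated on cuda:1
    assert b.get_device() == 1

# device_of takes the device from an existing tensor instead of an index.
with torch.cuda.device_of(b):
    c = torch.empty(10).cuda()   # also lands on cuda:1
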
[docs]def set_device(device):
+    r"""Sets the current device.
+
+    Usage of this function is discouraged in favor of :any:`device`. In most
+    cases it's better to use ``CUDA_VISIBLE_DEVICES`` environmental variable.
+
+    Arguments:
+        device (torch.device or int): selected device. This function is a no-op
+            if this argument is negative.
+    """
+    device = _get_device_index(device)
+    if device >= 0:
+        torch._C._cuda_setDevice(device)
+
+
[docs]def get_device_name(device=None):
+    r"""Gets the name of a device.
+
+    Arguments:
+        device (torch.device or int, optional): device for which to return the
+            name. This function is a no-op if this argument is a negative
+            integer. It uses the current device, given by :func:`~torch.cuda.current_device`,
+            if :attr:`device` is ``None`` (default).
+    """
+    return get_device_properties(device).name
+
+
[docs]def get_device_capability(device=None):
+    r"""Gets the cuda capability of a device.
+
+    Arguments:
+        device (torch.device or int, optional): device for which to return the
+            device capability. This function is a no-op if this argument is
+            a negative integer. It uses the current device, given by
+            :func:`~torch.cuda.current_device`, if :attr:`device` is ``None``
+            (default).
+
+    Returns:
+        tuple(int, int): the major and minor cuda capability of the device
+    """
+    prop = get_device_properties(device)
+    return prop.major, prop.minor
+
+
+def get_device_properties(device):
+    if not _initialized:
+        init()  # will define _get_device_properties and _CudaDeviceProperties
+    device = _get_device_index(device, optional=True)
+    if device < 0 or device >= device_count():
+        raise AssertionError("Invalid device id")
+    return _get_device_properties(device)
+
+
[docs]@contextlib.contextmanager
+def stream(stream):
+    r"""Context-manager that selects a given stream.
+
+    All CUDA kernels queued within its context will be enqueued on a selected
+    stream.
+
+    Arguments:
+        stream (Stream): selected stream. This manager is a no-op if it's
+            ``None``.
+
+    .. note:: Streams are per-device. If the selected stream is not on the
+        current device, this function will also change the current device to
+        match the stream.
+    """
+    if stream is None:
+        yield
+        return
+    src_prev_stream = current_stream()
+
+    if src_prev_stream.device != stream.device:
+        # The given stream is on a different device; have to restore the
+        # current_stream on that device on exit as well
+        with device(stream.device):
+            dst_prev_stream = current_stream()
+
+    torch._C._cuda_setStream(stream._cdata)
+    try:
+        yield
+    finally:
+        if src_prev_stream.device != stream.device:
+            torch._C._cuda_setStream(dst_prev_stream._cdata)
+        torch._C._cuda_setStream(src_prev_stream._cdata)
+
+
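A hedged sketch of the stream context manager in use; note that work on side streams is not ordered against the default stream automatically:

import torch

s = torch.cuda.Stream()  # new stream on the current device

with torch.cuda.stream(s):
    # enqueued on `s` rather than the default stream
    y = torch.rand(100, 100, device='cuda') @ torch.rand(100, 100, device='cuda')

# Order the default stream after `s` before consuming `y` there.
torch.cuda.current_stream().wait_stream(s)
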
[docs]def device_count():
+    r"""Returns the number of GPUs available."""
+    if is_available():
+        return torch._C._cuda_getDeviceCount()
+    else:
+        return 0
+
+
[docs]def current_device():
+    r"""Returns the index of a currently selected device."""
+    _lazy_init()
+    return torch._C._cuda_getDevice()
+
+
[docs]def synchronize(device=None):
+    r"""Waits for all kernels in all streams on a CUDA device to complete.
+
+    Arguments:
+        device (torch.device or int, optional): device for which to synchronize.
+            It uses the current device, given by :func:`~torch.cuda.current_device`,
+            if :attr:`device` is ``None`` (default).
+    """
+    _lazy_init()
+    with torch.cuda.device(device):
+        return torch._C._cuda_synchronize()
+
+
[docs]def ipc_collect():
+    r"""Force collects GPU memory after it has been released by CUDA IPC.
+
+    .. note::
+        Checks if any sent CUDA tensors could be cleaned from the memory. Force
+        closes the shared memory file used for reference counting if there are
+        no active counters. Useful when the producer process has stopped
+        actively sending tensors and wants to release unused memory.
+    """
+    _lazy_init()
+    return torch._C._cuda_ipc_collect()
+
+
[docs]def current_stream(device=None):
+    r"""Returns the currently selected :class:`Stream` for a given device.
+
+    Arguments:
+        device (torch.device or int, optional): selected device. Returns
+            the currently selected :class:`Stream` for the current device, given
+            by :func:`~torch.cuda.current_device`, if :attr:`device` is ``None``
+            (default).
+    """
+    _lazy_init()
+    return torch.cuda.Stream(_cdata=torch._C._cuda_getCurrentStream(
+        _get_device_index(device, optional=True)))
+
+
[docs]def default_stream(device=None):
+    r"""Returns the default :class:`Stream` for a given device.
+
+    Arguments:
+        device (torch.device or int, optional): selected device. Returns
+            the default :class:`Stream` for the current device, given by
+            :func:`~torch.cuda.current_device`, if :attr:`device` is ``None``
+            (default).
+    """
+    _lazy_init()
+    return torch.cuda.Stream(_cdata=torch._C._cuda_getDefaultStream(
+        _get_device_index(device, optional=True)))
+
+
[docs]def current_blas_handle():
+    r"""Returns cublasHandle_t pointer to current cuBLAS handle"""
+    _lazy_init()
+    return torch._C._cuda_getCurrentBlasHandle()
+
+
[docs]def empty_cache():
+    r"""Releases all unoccupied cached memory currently held by the caching
+    allocator so that it can be used by other GPU applications and is visible
+    in `nvidia-smi`.
+
+    .. note::
+        :func:`~torch.cuda.empty_cache` doesn't increase the amount of GPU
+        memory available for PyTorch. See :ref:`cuda-memory-management` for
+        more details about GPU memory management.
+    """
+    if _initialized:
+        torch._C._cuda_emptyCache()
+
+
[docs]def memory_allocated(device=None):
+    r"""Returns the current GPU memory occupied by tensors in bytes for a given
+    device.
+
+    Arguments:
+        device (torch.device or int, optional): selected device. Returns
+            statistic for the current device, given by :func:`~torch.cuda.current_device`,
+            if :attr:`device` is ``None`` (default).
+
+    .. note::
+        This is likely less than the amount shown in `nvidia-smi` since some
+        unused memory can be held by the caching allocator and some context
+        needs to be created on GPU. See :ref:`cuda-memory-management` for more
+        details about GPU memory management.
+    """
+    device = _get_device_index(device, optional=True)
+    return torch._C._cuda_memoryAllocated(device)
+
+
[docs]def max_memory_allocated(device=None):
+    r"""Returns the maximum GPU memory occupied by tensors in bytes for a given
+    device.
+
+    By default, this returns the peak allocated memory since the beginning of
+    this program. :func:`~torch.cuda.reset_max_memory_allocated` can be used to
+    reset the starting point in tracking this metric. For example, these two
+    functions can measure the peak allocated memory usage of each iteration in a
+    training loop.
+
+    Arguments:
+        device (torch.device or int, optional): selected device. Returns
+            statistic for the current device, given by :func:`~torch.cuda.current_device`,
+            if :attr:`device` is ``None`` (default).
+
+    .. note::
+        See :ref:`cuda-memory-management` for more details about GPU memory
+        management.
+    """
+    device = _get_device_index(device, optional=True)
+    return torch._C._cuda_maxMemoryAllocated(device)
+
+
[docs]def reset_max_memory_allocated(device=None):
+    r"""Resets the starting point in tracking maximum GPU memory occupied by
+    tensors for a given device.
+
+    See :func:`~torch.cuda.max_memory_allocated` for details.
+
+    Arguments:
+        device (torch.device or int, optional): selected device. Returns
+            statistic for the current device, given by :func:`~torch.cuda.current_device`,
+            if :attr:`device` is ``None`` (default).
+
+    .. note::
+        See :ref:`cuda-memory-management` for more details about GPU memory
+        management.
+    """
+    device = _get_device_index(device, optional=True)
+    return torch._C._cuda_resetMaxMemoryAllocated(device)
+
+
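The docstring above suggests pairing reset_max_memory_allocated with max_memory_allocated to get per-iteration peaks; a minimal sketch (the model and batch are illustrative):

import torch

model = torch.nn.Linear(1024, 1024).cuda()
data = torch.randn(64, 1024, device='cuda')

for step in range(3):
    torch.cuda.reset_max_memory_allocated()    # restart peak tracking
    model(data).sum().backward()
    peak = torch.cuda.max_memory_allocated()   # high-water mark, in bytes
    print('step {}: peak {} bytes'.format(step, peak))
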
[docs]def memory_cached(device=None):
+    r"""Returns the current GPU memory managed by the caching allocator in bytes
+    for a given device.
+
+    Arguments:
+        device (torch.device or int, optional): selected device. Returns
+            statistic for the current device, given by :func:`~torch.cuda.current_device`,
+            if :attr:`device` is ``None`` (default).
+
+    .. note::
+        See :ref:`cuda-memory-management` for more details about GPU memory
+        management.
+    """
+    device = _get_device_index(device, optional=True)
+    return torch._C._cuda_memoryCached(device)
+
+
[docs]def max_memory_cached(device=None):
+    r"""Returns the maximum GPU memory managed by the caching allocator in bytes
+    for a given device.
+
+    By default, this returns the peak cached memory since the beginning of this
+    program. :func:`~torch.cuda.reset_max_memory_cached` can be used to reset
+    the starting point in tracking this metric. For example, these two functions
+    can measure the peak cached memory amount of each iteration in a training
+    loop.
+
+    Arguments:
+        device (torch.device or int, optional): selected device. Returns
+            statistic for the current device, given by :func:`~torch.cuda.current_device`,
+            if :attr:`device` is ``None`` (default).
+
+    .. note::
+        See :ref:`cuda-memory-management` for more details about GPU memory
+        management.
+    """
+    device = _get_device_index(device, optional=True)
+    return torch._C._cuda_maxMemoryCached(device)
+
+
[docs]def reset_max_memory_cached(device=None):
+    r"""Resets the starting point in tracking maximum GPU memory managed by the
+    caching allocator for a given device.
+
+    See :func:`~torch.cuda.max_memory_cached` for details.
+
+    Arguments:
+        device (torch.device or int, optional): selected device. Returns
+            statistic for the current device, given by :func:`~torch.cuda.current_device`,
+            if :attr:`device` is ``None`` (default).
+
+    .. note::
+        See :ref:`cuda-memory-management` for more details about GPU memory
+        management.
+    """
+    device = _get_device_index(device, optional=True)
+    return torch._C._cuda_resetMaxMemoryCached(device)
+
+
+def _host_allocator():
+    _lazy_init()
+    return torch._C._cuda_cudaHostAllocator()
+
+
+@contextlib.contextmanager
+def _free_mutex():
+    torch._C._cuda_lock_mutex()
+    try:
+        yield
+    finally:
+        torch._C._cuda_unlock_mutex()
+
+
+from .random import *
+
+################################################################################
+# Define Storage and Tensor classes
+################################################################################
+
+
+from ..storage import _StorageBase
+
+
+def _dummy_type(name):
+    def init_err(self):
+        class_name = self.__class__.__name__
+        raise RuntimeError(
+            "Tried to instantiate dummy base class {}".format(class_name))
+    # Use the `name` parameter; the original referenced the module-level
+    # `storage_name`, which mis-named every non-storage dummy type.
+    return type(name, (object,), {"__init__": init_err})
+
+
+if not hasattr(torch._C, 'CudaDoubleStorageBase'):
+    # Define dummy base classes
+    for t in ['Double', 'Float', 'Long', 'Int', 'Short', 'Char', 'Byte', 'Half', 'Bool', 'BFloat16']:
+        storage_name = 'Cuda{0}StorageBase'.format(t)
+        tensor_name = 'Cuda{0}TensorBase'.format(t)
+
+        torch._C.__dict__[storage_name] = _dummy_type(storage_name)
+        torch._C.__dict__[tensor_name] = _dummy_type(tensor_name)
+
+    torch._C.__dict__['_CudaStreamBase'] = _dummy_type('CudaStreamBase')
+    torch._C.__dict__['_CudaEventBase'] = _dummy_type('CudaEventBase')
+
+
+@staticmethod
+def _lazy_new(cls, *args, **kwargs):
+    _lazy_init()
+    # We need this method only for lazy init, so we can remove it
+    del _CudaBase.__new__
+    return super(_CudaBase, cls).__new__(cls, *args, **kwargs)
+
+
+class _CudaBase(object):
+    is_cuda = True
+    is_sparse = False
+
+    def type(self, *args, **kwargs):
+        with device(self.get_device()):
+            return super(_CudaBase, self).type(*args, **kwargs)
+
+    __new__ = _lazy_new
+
+
+class DoubleStorage(_CudaBase, torch._C.CudaDoubleStorageBase, _StorageBase):
+    pass
+
+
+class FloatStorage(_CudaBase, torch._C.CudaFloatStorageBase, _StorageBase):
+    pass
+
+
+class LongStorage(_CudaBase, torch._C.CudaLongStorageBase, _StorageBase):
+    pass
+
+
+class IntStorage(_CudaBase, torch._C.CudaIntStorageBase, _StorageBase):
+    pass
+
+
+class ShortStorage(_CudaBase, torch._C.CudaShortStorageBase, _StorageBase):
+    pass
+
+
+class CharStorage(_CudaBase, torch._C.CudaCharStorageBase, _StorageBase):
+    pass
+
+
+class ByteStorage(_CudaBase, torch._C.CudaByteStorageBase, _StorageBase):
+    pass
+
+
+class HalfStorage(_CudaBase, torch._C.CudaHalfStorageBase, _StorageBase):
+    pass
+
+
+class BoolStorage(_CudaBase, torch._C.CudaBoolStorageBase, _StorageBase):
+    pass
+
+
+class BFloat16Storage(_CudaBase, torch._C.CudaBFloat16StorageBase, _StorageBase):
+    pass
+
+torch._storage_classes.add(DoubleStorage)
+torch._storage_classes.add(FloatStorage)
+torch._storage_classes.add(LongStorage)
+torch._storage_classes.add(IntStorage)
+torch._storage_classes.add(ShortStorage)
+torch._storage_classes.add(CharStorage)
+torch._storage_classes.add(ByteStorage)
+torch._storage_classes.add(HalfStorage)
+torch._storage_classes.add(BoolStorage)
+torch._storage_classes.add(BFloat16Storage)
+
+from . import sparse  # noqa: F401
+from . import profiler  # noqa: F401
+from . import nvtx  # noqa: F401
+from .streams import Stream, Event  # noqa: F401
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/cuda/comm.html b/docs/stable/_modules/torch/cuda/comm.html
new file mode 100644
index 000000000000..354bcb4518e6
--- /dev/null
+++ b/docs/stable/_modules/torch/cuda/comm.html
@@ -0,0 +1,681 @@
+torch.cuda.comm — PyTorch master documentation

    Source code for torch.cuda.comm

    +import torch
    +from . import nccl
    +from torch._utils import _take_tensors, _flatten_dense_tensors, \
    +    _unflatten_dense_tensors, _reorder_tensors_as
    +
    +
    +
[docs]def broadcast(tensor, devices):
+    """Broadcasts a tensor to a number of GPUs.
+
+    Arguments:
+        tensor (Tensor): tensor to broadcast.
+        devices (Iterable): an iterable of devices among which to broadcast.
+            Note that it should be like (src, dst1, dst2, ...), the first element
+            of which is the source device to broadcast from.
+
+    Returns:
+        A tuple containing copies of the ``tensor``, placed on devices
+        corresponding to indices from ``devices``.
+    """
+    return torch._C._broadcast(tensor, devices)
+
+
[docs]def broadcast_coalesced(tensors, devices, buffer_size=10485760):
+    """Broadcasts a sequence of tensors to the specified GPUs.
+
+    Small tensors are first coalesced into a buffer to reduce the number
+    of synchronizations.
+
+    Arguments:
+        tensors (sequence): tensors to broadcast.
+        devices (Iterable): an iterable of devices among which to broadcast.
+            Note that it should be like (src, dst1, dst2, ...), the first element
+            of which is the source device to broadcast from.
+        buffer_size (int): maximum size of the buffer used for coalescing
+
+    Returns:
+        A tuple containing copies of each tensor in ``tensors``, placed on
+        devices corresponding to indices from ``devices``.
+    """
+    return torch._C._broadcast_coalesced(tensors, devices, buffer_size)
+
+
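A small sketch of broadcast in use, assuming two visible GPUs; the first entry of ``devices`` is the source:

import torch
import torch.cuda.comm as comm

t = torch.arange(4.).cuda(0)
copies = comm.broadcast(t, devices=[0, 1])   # one copy per listed device
assert copies[1].get_device() == 1
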
[docs]def reduce_add(inputs, destination=None):
+    """Sums tensors from multiple GPUs.
+
+    All inputs should have matching shapes.
+
+    Arguments:
+        inputs (Iterable[Tensor]): an iterable of tensors to add.
+        destination (int, optional): a device on which the output will be
+            placed (default: current device).
+
+    Returns:
+        A tensor containing an elementwise sum of all inputs, placed on the
+        ``destination`` device.
+    """
+    # TODO: try to find an input on another gpu, copy it,
+    # and accumulate into the copy
+    if destination is None:
+        destination = torch.cuda.current_device()
+    input_size = inputs[0].size()
+    nccl_root = None
+    for i, inp in enumerate(inputs):
+        assert inp.is_cuda, "reduce_add expects all inputs to be on GPUs"
+        if inp.get_device() == destination:
+            nccl_root = i
+        if inp.size() != input_size:
+            got = 'x'.join(str(x) for x in inp.size())
+            expected = 'x'.join(str(x) for x in input_size)
+            raise ValueError("input {} has invalid size: got {}, but expected "
+                             "{}".format(i, got, expected))
+    if nccl_root is None:
+        raise RuntimeError("reduce_add expects destination to be on the same GPU with one of the tensors")
+    result = inp.new(device=destination).resize_as_(inp).zero_()
+
+    if nccl.is_available(inputs) and inputs[0].get_device() == destination:
+        outputs = [result] + [t.new(t.size()) for t in inputs[1:]]
+        nccl.reduce(inputs, outputs, root=nccl_root)
+        return result
+    for inp in inputs:
+        input_correct_gpu = inp.cuda(result.get_device())
+        result.add_(input_correct_gpu)
+    return result
+
+
+def reduce_add_coalesced(inputs, destination=None, buffer_size=10485760):
+    """Sums tensors from multiple GPUs.
+
+    Small tensors are first coalesced into a buffer to reduce the number
+    of synchronizations.
+
+    Arguments:
+        inputs (Iterable[Iterable[Tensor]]): iterable of iterables that
+            contain tensors from a single device.
+        destination (int, optional): a device on which the output will be
+            placed (default: current device).
+        buffer_size (int): maximum size of the buffer used for coalescing
+
+    Returns:
+        A tuple of tensors containing an elementwise sum of each group of
+        inputs, placed on the ``destination`` device.
+    """
+    # TODO: When `len(inputs) == 1` and all inputs are on `destination`, just
+    #       return `inputs`.
+    dense_tensors = [[] for _ in inputs]  # shape (num_gpus, num_tensors)
+    output = []
+    ref_order = []
+    # process sparse ones first since they may have different sizes on different gpus
+    for tensor_at_gpus in zip(*inputs):
+        if all(t.is_sparse for t in tensor_at_gpus):
+            result = reduce_add(tensor_at_gpus, destination)
+            output.append(result)
+            ref_order.append(tensor_at_gpus[0])
+        else:
+            for coll, t in zip(dense_tensors, tensor_at_gpus):
+                coll.append(t.to_dense() if t.is_sparse else t)
+            ref_order.append(dense_tensors[0][-1])
+    itrs = [_take_tensors(tensors, buffer_size) for tensors in dense_tensors]
+    # now the dense ones, which have consistent sizes
+    for chunks in zip(*itrs):
+        flat_tensors = [_flatten_dense_tensors(chunk) for chunk in chunks]
+        flat_result = reduce_add(flat_tensors, destination)
+        for t in _unflatten_dense_tensors(flat_result, chunks[0]):
+            # The unflattened tensors do not share storage, and we don't expose
+            # base flat tensor anyways, so give them different version counters.
+            # See NOTE [ Version Counter in comm.*_coalesced ]
+            output.append(t.data)
+    return tuple(_reorder_tensors_as(output, ref_order))
+
+
[docs]def scatter(tensor, devices, chunk_sizes=None, dim=0, streams=None):
+    """Scatters tensor across multiple GPUs.
+
+    Arguments:
+        tensor (Tensor): tensor to scatter.
+        devices (Iterable[int]): iterable of ints, specifying among which
+            devices the tensor should be scattered.
+        chunk_sizes (Iterable[int], optional): sizes of chunks to be placed on
+            each device. It should match ``devices`` in length and sum to
+            ``tensor.size(dim)``. If not specified, the tensor will be divided
+            into equal chunks.
+        dim (int, optional): A dimension along which to chunk the tensor.
+
+    Returns:
+        A tuple containing chunks of the ``tensor``, spread across given
+        ``devices``.
+    """
+    return tuple(torch._C._scatter(tensor, devices, chunk_sizes, dim, streams))
+
+
[docs]def gather(tensors, dim=0, destination=None):
+    """Gathers tensors from multiple GPUs.
+
+    Tensor sizes in all dimensions other than ``dim`` have to match.
+
+    Arguments:
+        tensors (Iterable[Tensor]): iterable of tensors to gather.
+        dim (int): a dimension along which the tensors will be concatenated.
+        destination (int, optional): output device (-1 means CPU, default:
+            current device)
+
+    Returns:
+        A tensor located on ``destination`` device, that is a result of
+        concatenating ``tensors`` along ``dim``.
+    """
+    return torch._C._gather(tensors, dim, destination)
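scatter and gather are inverses along a dimension; a minimal round-trip sketch, again assuming two GPUs:

import torch
import torch.cuda.comm as comm

big = torch.randn(8, 32).cuda(0)
chunks = comm.scatter(big, devices=[0, 1])            # split along dim 0
restored = comm.gather(chunks, dim=0, destination=0)  # reassemble on GPU 0
assert restored.shape == big.shape
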
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/cuda/nvtx.html b/docs/stable/_modules/torch/cuda/nvtx.html
new file mode 100644
index 000000000000..9617d548dc4d
--- /dev/null
+++ b/docs/stable/_modules/torch/cuda/nvtx.html
@@ -0,0 +1,591 @@
+torch.cuda.nvtx — PyTorch master documentation

    Source code for torch.cuda.nvtx

    +import os
    +import glob
    +import ctypes
    +import platform
    +
    +lib = None
    +
    +__all__ = ['range_push', 'range_pop', 'mark']
    +
    +
    +def windows_nvToolsExt_lib():
    +    lib_path = windows_nvToolsExt_path()
    +    if len(lib_path) > 0:
    +        lib_name = os.path.basename(lib_path)
    +        lib = os.path.splitext(lib_name)[0]
    +        return ctypes.cdll.LoadLibrary(lib)
    +    else:
    +        return None
    +
    +
    +def windows_nvToolsExt_path():
    +    WINDOWS_HOME = 'C:/Program Files/NVIDIA Corporation/NvToolsExt'
    +    NVTOOLEXT_HOME = os.getenv('NVTOOLSEXT_PATH', WINDOWS_HOME)
    +    if os.path.exists(NVTOOLEXT_HOME):
    +        lib_paths = glob.glob(NVTOOLEXT_HOME + '/bin/x64/nvToolsExt*.dll')
    +        if len(lib_paths) > 0:
    +            lib_path = lib_paths[0]
    +            return lib_path
    +    return ''
    +
    +
    +def _libnvToolsExt():
    +    global lib
    +    if lib is None:
    +        if platform.system() != 'Windows':
    +            lib = ctypes.cdll.LoadLibrary(None)
    +        else:
    +            lib = windows_nvToolsExt_lib()
    +        lib.nvtxMarkA.restype = None
    +    return lib
    +
    +
    +
[docs]def range_push(msg):
+    """
+    Pushes a range onto a stack of nested range spans. Returns the zero-based
+    depth of the range that is started.
+
+    Arguments:
+        msg (string): ASCII message to associate with range
+    """
+    if _libnvToolsExt() is None:
+        raise RuntimeError('Unable to load nvToolsExt library')
+    return lib.nvtxRangePushA(ctypes.c_char_p(msg.encode("ascii")))
+
+
[docs]def range_pop():
+    """
+    Pops a range off of a stack of nested range spans. Returns the
+    zero-based depth of the range that is ended.
+    """
+    if _libnvToolsExt() is None:
+        raise RuntimeError('Unable to load nvToolsExt library')
+    return lib.nvtxRangePop()
+
+
[docs]def mark(msg):
+    """
+    Describes an instantaneous event that occurred at some point.
+
+    Arguments:
+        msg (string): ASCII message to associate with the event.
+    """
+    if _libnvToolsExt() is None:
+        raise RuntimeError('Unable to load nvToolsExt library')
+    return lib.nvtxMarkA(ctypes.c_char_p(msg.encode("ascii")))
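A sketch of the three calls together; the annotations only become visible when the program runs under an NVIDIA profiler such as nvprof or Nsight:

import torch.cuda.nvtx as nvtx

nvtx.range_push('forward')     # depth 0
nvtx.range_push('conv1')       # depth 1, nested
nvtx.range_pop()               # closes 'conv1'
nvtx.range_pop()               # closes 'forward'
nvtx.mark('checkpoint saved')  # instantaneous event
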
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/cuda/random.html b/docs/stable/_modules/torch/cuda/random.html
new file mode 100644
index 000000000000..f5834808908f
--- /dev/null
+++ b/docs/stable/_modules/torch/cuda/random.html
@@ -0,0 +1,676 @@
+torch.cuda.random — PyTorch master documentation

    Source code for torch.cuda.random

    +import torch
    +from . import _lazy_init, _lazy_call, device_count, current_device
    +
    +__all__ = ['get_rng_state', 'get_rng_state_all',
    +           'set_rng_state', 'set_rng_state_all',
    +           'manual_seed', 'manual_seed_all',
    +           'seed', 'seed_all', 'initial_seed']
    +
    +
    +
[docs]def get_rng_state(device='cuda'):
+    r"""Returns the random number generator state of the specified GPU as a ByteTensor.
+
+    Args:
+        device (torch.device or int, optional): The device to return the RNG state of.
+            Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device).
+
+    .. warning::
+        This function eagerly initializes CUDA.
+    """
+    _lazy_init()
+    if isinstance(device, str):
+        device = torch.device(device)
+    elif isinstance(device, int):
+        device = torch.device('cuda', device)
+    idx = device.index
+    if idx is None:
+        idx = current_device()
+    default_generator = torch.cuda.default_generators[idx]
+    return default_generator.get_state()
+
+
[docs]def get_rng_state_all():
+    r"""Returns a list of ByteTensors representing the random number states of all devices."""
+
+    results = []
+    for i in range(device_count()):
+        results.append(get_rng_state(i))
+    return results
+
+
[docs]def set_rng_state(new_state, device='cuda'):
+    r"""Sets the random number generator state of the specified GPU.
+
+    Args:
+        new_state (torch.ByteTensor): The desired state
+        device (torch.device or int, optional): The device to set the RNG state.
+            Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device).
+    """
+    new_state_copy = new_state.clone()
+    if isinstance(device, str):
+        device = torch.device(device)
+    elif isinstance(device, int):
+        device = torch.device('cuda', device)
+
+    def cb():
+        idx = device.index
+        if idx is None:
+            idx = current_device()
+        default_generator = torch.cuda.default_generators[idx]
+        default_generator.set_state(new_state_copy)
+
+    _lazy_call(cb)
+
+
[docs]def set_rng_state_all(new_states):
+    r"""Sets the random number generator state of all devices.
+
+    Args:
+        new_states (tuple of torch.ByteTensor): The desired state for each device"""
+    for i, state in enumerate(new_states):
+        set_rng_state(state, i)
+
+
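get_rng_state and set_rng_state make a GPU generator replayable; a minimal sketch assuming CUDA is available:

import torch

state = torch.cuda.get_rng_state()    # snapshot (eagerly initializes CUDA)
a = torch.randn(3, device='cuda')

torch.cuda.set_rng_state(state)       # rewind the generator
b = torch.randn(3, device='cuda')
assert torch.equal(a, b)              # identical draws from identical state
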
[docs]def manual_seed(seed):
+    r"""Sets the seed for generating random numbers for the current GPU.
+    It's safe to call this function if CUDA is not available; in that
+    case, it is silently ignored.
+
+    Args:
+        seed (int): The desired seed.
+
+    .. warning::
+        If you are working with a multi-GPU model, this function is insufficient
+        to get determinism. To seed all GPUs, use :func:`manual_seed_all`.
+    """
+    seed = int(seed)
+
+    def cb():
+        idx = current_device()
+        default_generator = torch.cuda.default_generators[idx]
+        default_generator.manual_seed(seed)
+
+    _lazy_call(cb)
+
+
[docs]def manual_seed_all(seed):
+    r"""Sets the seed for generating random numbers on all GPUs.
+    It's safe to call this function if CUDA is not available; in that
+    case, it is silently ignored.
+
+    Args:
+        seed (int): The desired seed.
+    """
+    seed = int(seed)
+
+    def cb():
+        for i in range(device_count()):
+            default_generator = torch.cuda.default_generators[i]
+            default_generator.manual_seed(seed)
+
+    _lazy_call(cb)
+
+
[docs]def seed():
+    r"""Sets the seed for generating random numbers to a random number for the current GPU.
+    It's safe to call this function if CUDA is not available; in that
+    case, it is silently ignored.
+
+    .. warning::
+        If you are working with a multi-GPU model, this function will only initialize
+        the seed on one GPU. To initialize all GPUs, use :func:`seed_all`.
+    """
+    def cb():
+        idx = current_device()
+        default_generator = torch.cuda.default_generators[idx]
+        default_generator.seed()
+
+    _lazy_call(cb)
+
+
[docs]def seed_all():
+    r"""Sets the seed for generating random numbers to a random number on all GPUs.
+    It's safe to call this function if CUDA is not available; in that
+    case, it is silently ignored.
+    """
+    def cb():
+        random_seed = 0
+        seeded = False
+        for i in range(device_count()):
+            default_generator = torch.cuda.default_generators[i]
+            if not seeded:
+                default_generator.seed()
+                random_seed = default_generator.initial_seed()
+                seeded = True
+            else:
+                default_generator.manual_seed(random_seed)
+
+    _lazy_call(cb)
+
+
[docs]def initial_seed():
+    r"""Returns the current random seed of the current GPU.
+
+    .. warning::
+        This function eagerly initializes CUDA.
+    """
+    _lazy_init()
+    idx = current_device()
+    default_generator = torch.cuda.default_generators[idx]
+    return default_generator.initial_seed()
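Because the seeding functions are queued through _lazy_call, they are safe to call before (or without) CUDA initialization; a typical reproducibility preamble:

import torch

torch.manual_seed(42)            # CPU generator
torch.cuda.manual_seed_all(42)   # queued for every GPU; no-op without CUDA
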
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/cuda/streams.html b/docs/stable/_modules/torch/cuda/streams.html
new file mode 100644
index 000000000000..47243555a726
--- /dev/null
+++ b/docs/stable/_modules/torch/cuda/streams.html
@@ -0,0 +1,714 @@
+torch.cuda.streams — PyTorch master documentation

    Source code for torch.cuda.streams

    +import ctypes
    +import torch
    +
    +
    +
[docs]class Stream(torch._C._CudaStreamBase):
+    r"""Wrapper around a CUDA stream.
+
+    A CUDA stream is a linear sequence of execution that belongs to a specific
+    device, independent from other streams. See :ref:`cuda-semantics` for
+    details.
+
+    Arguments:
+        device(torch.device or int, optional): a device on which to allocate
+            the stream. If :attr:`device` is ``None`` (default) or a negative
+            integer, this will use the current device.
+        priority(int, optional): priority of the stream. Lower numbers
+            represent higher priorities.
+    """
+
+    def __new__(cls, device=None, priority=0, **kwargs):
+        with torch.cuda.device(device):
+            return super(Stream, cls).__new__(cls, priority=priority, **kwargs)
+
[docs]    def wait_event(self, event):
+        r"""Makes all future work submitted to the stream wait for an event.
+
+        Arguments:
+            event (Event): an event to wait for.
+
+        .. note:: This is a wrapper around ``cudaStreamWaitEvent()``: see `CUDA
+           documentation`_ for more info.
+
+           This function returns without waiting for :attr:`event`: only future
+           operations are affected.
+
+        .. _CUDA documentation:
+           http://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html
+        """
+        event.wait(self)
+
[docs]    def wait_stream(self, stream):
+        r"""Synchronizes with another stream.
+
+        All future work submitted to this stream will wait until all kernels
+        submitted to a given stream at the time of call complete.
+
+        Arguments:
+            stream (Stream): a stream to synchronize.
+
+        .. note:: This function returns without waiting for currently enqueued
+           kernels in :attr:`stream`: only future operations are affected.
+        """
+        self.wait_event(stream.record_event())
+
[docs]    def record_event(self, event=None):
+        r"""Records an event.
+
+        Arguments:
+            event (Event, optional): event to record. If not given, a new one
+                will be allocated.
+
+        Returns:
+            Recorded event.
+        """
+        if event is None:
+            event = Event()
+        event.record(self)
+        return event
+
[docs]    def query(self):
+        r"""Checks if all the work submitted has been completed.
+
+        Returns:
+            A boolean indicating if all kernels in this stream are completed."""
+        return super(Stream, self).query()
+
[docs]    def synchronize(self):
+        r"""Waits for all the kernels in this stream to complete.
+
+        .. note:: This is a wrapper around ``cudaStreamSynchronize()``: see
+           `CUDA documentation`_ for more info.
+
+        .. _CUDA documentation:
+           http://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html
+        """
+        super(Stream, self).synchronize()
+
+    @property
+    def _as_parameter_(self):
+        return ctypes.c_void_p(self.cuda_stream)
+
+    def __eq__(self, o):
+        if isinstance(o, Stream):
+            return super(Stream, self).__eq__(o)
+        return False
+
+    def __hash__(self):
+        return hash((self.cuda_stream, self.device))
+
+    def __repr__(self):
+        return ('<torch.cuda.Stream device={0} cuda_stream={1:#x}>'
+                .format(self.device, self.cuda_stream))
+
+
[docs]class Event(torch._C._CudaEventBase):
+    r"""Wrapper around a CUDA event.
+
+    CUDA events are synchronization markers that can be used to monitor the
+    device's progress, to accurately measure timing, and to synchronize CUDA
+    streams.
+
+    The underlying CUDA events are lazily initialized when the event is first
+    recorded or exported to another process. After creation, only streams on the
+    same device may record the event. However, streams on any device can wait on
+    the event.
+
+    Arguments:
+        enable_timing (bool, optional): indicates if the event should measure time
+            (default: ``False``)
+        blocking (bool, optional): if ``True``, :meth:`wait` will be blocking (default: ``False``)
+        interprocess (bool): if ``True``, the event can be shared between processes
+            (default: ``False``)
+
+    .. _CUDA documentation:
+       https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__EVENT.html
+    """
+
+    def __new__(cls, enable_timing=False, blocking=False, interprocess=False):
+        return super(Event, cls).__new__(
+            cls,
+            enable_timing=enable_timing, blocking=blocking, interprocess=interprocess)
+
[docs]    @classmethod
+    def from_ipc_handle(cls, device, handle):
+        r"""Reconstructs an event from an IPC handle on the given device."""
+        return super(Event, cls).from_ipc_handle(device, handle)
+
[docs]    def record(self, stream=None):
+        r"""Records the event in a given stream.
+
+        Uses ``torch.cuda.current_stream()`` if no stream is specified. The
+        stream's device must match the event's device."""
+        if stream is None:
+            stream = torch.cuda.current_stream()
+        super(Event, self).record(stream)
+
[docs]    def wait(self, stream=None):
+        r"""Makes all future work submitted to the given stream wait for this
+        event.
+
+        Uses ``torch.cuda.current_stream()`` if no stream is specified."""
+        if stream is None:
+            stream = torch.cuda.current_stream()
+        super(Event, self).wait(stream)
+
[docs]    def query(self):
+        r"""Checks if all work currently captured by event has completed.
+
+        Returns:
+            A boolean indicating if all work currently captured by event has
+            completed.
+        """
+        return super(Event, self).query()
+
[docs]    def elapsed_time(self, end_event):
+        r"""Returns the time elapsed in milliseconds after the event was
+        recorded and before the end_event was recorded.
+        """
+        return super(Event, self).elapsed_time(end_event)
+
[docs]    def synchronize(self):
+        r"""Waits for the event to complete.
+
+        Waits until the completion of all work currently captured in this event.
+        This prevents the CPU thread from proceeding until the event completes.
+
+        .. note:: This is a wrapper around ``cudaEventSynchronize()``: see `CUDA
+           documentation`_ for more info.
+
+        .. _CUDA documentation:
+           https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__EVENT.html
+        """
+        super(Event, self).synchronize()
+
[docs]    def ipc_handle(self):
+        r"""Returns an IPC handle of this event. If not recorded yet, the event
+        will use the current device."""
+        return super(Event, self).ipc_handle()
+
+    @property
+    def _as_parameter_(self):
+        return ctypes.c_void_p(self.cuda_event)
+
+    def __repr__(self):
+        if self.cuda_event:
+            return '<torch.cuda.Event {0:#x}>'.format(self._as_parameter_.value)
+        else:
+            return '<torch.cuda.Event uninitialized>'
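Events created with ``enable_timing=True`` are the standard way to time GPU work without stalling on every kernel; a hedged sketch:

import torch

start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)

start.record()
y = torch.randn(1024, 1024, device='cuda') @ torch.randn(1024, 1024, device='cuda')
end.record()

end.synchronize()  # elapsed_time is only valid once both events completed
print('matmul took {:.2f} ms'.format(start.elapsed_time(end)))
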
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributed.html b/docs/stable/_modules/torch/distributed.html
new file mode 100644
index 000000000000..c4a2d3704064
--- /dev/null
+++ b/docs/stable/_modules/torch/distributed.html
@@ -0,0 +1,531 @@
+torch.distributed — PyTorch master documentation

    Source code for torch.distributed

    +import torch
    +
    +
    +def is_available():
    +    return hasattr(torch._C, "_c10d_init")
    +
    +
    +if is_available() and not torch._C._c10d_init():
    +    raise RuntimeError("Failed to initialize PyTorch distributed support")
    +
    +
    +if is_available():
    +    from .distributed_c10d import *  # noqa: F401
    +    # Variables prefixed with underscore are not auto imported
    +    # See the comment in `distributed_c10d.py` above `_backend` on why we expose
    +    # this.
    +    from .distributed_c10d import _backend  # noqa: F401
    +
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributed/distributed_c10d.html b/docs/stable/_modules/torch/distributed/distributed_c10d.html
new file mode 100644
index 000000000000..520f89115ac0
--- /dev/null
+++ b/docs/stable/_modules/torch/distributed/distributed_c10d.html
@@ -0,0 +1,1958 @@
+torch.distributed.distributed_c10d — PyTorch master documentation

    Source code for torch.distributed.distributed_c10d

    +import torch
    +import warnings
    +from torch._six import string_classes
    +from datetime import timedelta
    +
    +# This module is wildcard imported from torch.distributed.
    +# TODO: specify __all__
    +
    +from .rendezvous import rendezvous, register_rendezvous_handler  # noqa: F401
    +from . import (
    +    AllreduceOptions,
    +    BroadcastOptions,
    +    GatherOptions,
    +    ReduceOptions,
    +    ReduceScatterOptions,
    +    ScatterOptions,
    +)
    +from . import ReduceOp
    +from . import PrefixStore
    +
    +
    +_MPI_AVAILABLE = True
    +_NCCL_AVAILABLE = True
    +_GLOO_AVAILABLE = True
    +
    +
    +try:
+    from . import ProcessGroupMPI
    +except ImportError:
    +    _MPI_AVAILABLE = False
    +
    +try:
+    from . import ProcessGroupNCCL
    +except ImportError:
    +    _NCCL_AVAILABLE = False
    +
    +try:
+    from . import ProcessGroupGloo
    +except ImportError:
    +    _GLOO_AVAILABLE = False
    +
    +
    +
[docs]class Backend(object):
+    """
+    An enum-like class of available backends: GLOO, NCCL, and MPI.
+
+    The values of this class are lowercase strings, e.g., ``"gloo"``. They can
+    be accessed as attributes, e.g., ``Backend.NCCL``.
+
+    This class can be directly called to parse the string, e.g.,
+    ``Backend(backend_str)`` will check if ``backend_str`` is valid, and
+    return the parsed lowercase string if so. It also accepts uppercase strings,
+    e.g., ``Backend("GLOO")`` returns ``"gloo"``.
+
+    .. note:: The entry ``Backend.UNDEFINED`` is present but only used as
+              initial value of some fields. Users should neither use it directly
+              nor assume its existence.
+    """
+    UNDEFINED = "undefined"
+    GLOO = "gloo"
+    NCCL = "nccl"
+    MPI = "mpi"
+    TCP = "tcp"
+
+    def __new__(cls, name):
+        if not isinstance(name, string_classes):
+            raise ValueError("Backend name must be a string, but got: {}".format(name))
+        value = getattr(Backend, name.upper(), Backend.UNDEFINED)
+
+        if value == Backend.TCP:
+            raise ValueError("TCP backend has been deprecated. Please use "
+                             "Gloo or MPI backend for collective operations "
+                             "on CPU tensors.")
+        elif value == Backend.UNDEFINED:
+            raise ValueError("Invalid backend: '{}'".format(name))
+        return value
+
+# `_backend`, `dist_backend`, and `reduce_op` are here to maintain backward
+# compatibility with pre-c10d distributed package.
+# TODO: remove them when users are ready to take a hard dependency on PyTorch 1.
+_backend = Backend.UNDEFINED
+dist_backend = Backend
+
+
[docs]class reduce_op(object):
+    r"""
+    Deprecated enum-like class for reduction operations: ``SUM``, ``PRODUCT``,
+    ``MIN``, and ``MAX``.
+
+    :class:`~torch.distributed.ReduceOp` is recommended to use instead.
+    """
+
+    def __init__(self):
+        # __members__ is a dict storing key-value pairs for enum classes
+        for k, v in ReduceOp.__members__.items():
+            setattr(self, k, v)
+        self.__members__ = ReduceOp.__members__
+
+    def __getattribute__(self, key):
+        warnings.warn("torch.distributed.reduce_op is deprecated, please use "
+                      "torch.distributed.ReduceOp instead")
+        return object.__getattribute__(self, key)
+
+reduce_op = reduce_op()
+
+
+class group(object):
+    WORLD = object()
+
+
+class GroupMember(object):
+    # Alias to group.WORLD for backward compatibility
+    WORLD = group.WORLD
+    NON_GROUP_MEMBER = object()
+
+
+# Cached process groups
+# For NCCL and GLOO pg, it is a map from ProcessGroup to (Backend, Store)
+# For MPI pg, it is a map from ProcessGroup to (Backend, None)
+_pg_map = {}
+# Process group's names, map from ProcessGroup to str
+_pg_names = {}
+# Process group's global rank to local rank mapping
+_pg_group_ranks = {}
+
+# Default process group state
+_default_pg = None
+_default_pg_init_method = None
+
+# Default process group wide timeout, if applicable.
+# This currently only applies to the gloo backend. To make an attempt at
+# backwards compatibility with THD, we use an extraordinarily high default
+# timeout, given that THD did not have timeouts.
+_default_pg_timeout = timedelta(minutes=30)
+
+# Process group count for default naming
+_group_count = 0
+
+
+def _rank_not_in_group(group):
+    """
+    Helper that checks if the current process's rank is not in a given group.
+
+    """
+    if group == GroupMember.WORLD:
+        return False
+    return group == GroupMember.NON_GROUP_MEMBER
+
+
+def _get_group_rank(group, rank):
+    """
+    Helper that gets a given group's local rank in the group from a given
+    global rank.
+
+    """
+    if group is GroupMember.WORLD:
+        raise RuntimeError("group.WORLD does not have local rank to global "
+                           "rank mapping")
+    if group not in _pg_group_ranks:
+        raise RuntimeError("The given group does not exist")
+    try:
+        group_rank = _pg_group_ranks[group][rank]
+    except KeyError:
+        raise RuntimeError("The global rank is not part of the group")
+    return group_rank
+
+
+def _get_global_rank(group, group_rank):
+    """
+    Helper that gets a given group's global rank from a given local rank in
+    the group.
+
+    """
+    if group is GroupMember.WORLD:
+        raise RuntimeError("group.WORLD does not have local rank to global "
+                           "rank mapping")
+    group_rank_map = _pg_group_ranks[group]
+    for rank, grp_rank in group_rank_map.items():
+        if grp_rank == group_rank:
+            return rank
+    raise RuntimeError("The group rank is not part of the group")
+
+
+def _check_default_pg():
+    """
+    Helper that asserts that the default ProcessGroup has been initialized.
+
+    """
+    assert _default_pg is not None, \
+        "Default process group is not initialized"
+
+
+def _get_group_size(group):
+    """
+    Helper that gets a given group's world size.
+
+    """
+    if group is GroupMember.WORLD:
+        _check_default_pg()
+        return _default_pg.size()
+    if group not in _pg_group_ranks:
+        raise RuntimeError("The given group does not exist")
+    return len(_pg_group_ranks[group])
+
+
+def _check_single_tensor(param, param_name):
+    """
+    Helper that checks that the parameter ``param_name`` is a single Tensor.
+
+    """
+    if not isinstance(param, torch.Tensor):
+        raise RuntimeError("Invalid function argument. Expecting parameter: {} "
+                           "to be a torch.Tensor type".format(param_name))
+
+
+def _check_tensor_list(param, param_name):
+    """
+    Helper that checks that the parameter ``param_name`` is a Tensor list.
+
+    """
+    wrong_type = False
+    if isinstance(param, list):
+        for p in param:
+            if not isinstance(p, torch.Tensor):
+                wrong_type = True
+                break
+    else:
+        wrong_type = True
+    if wrong_type:
+        raise RuntimeError("Invalid function argument. Expecting parameter: {} "
+                           "to be a List[torch.Tensor] type".format(param_name))
+
+
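Since Backend.__new__ normalizes and validates names, calling the class doubles as a parser; a small sketch (Backend is re-exported from torch.distributed via the wildcard import):

import torch.distributed as dist

assert dist.Backend('GLOO') == 'gloo'   # case-insensitive parse

try:
    dist.Backend('tcp')                 # removed backend
except ValueError as e:
    print(e)
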
[docs]def is_mpi_available():
+    """
+    Checks if the MPI backend is available.
+
+    """
+    return _MPI_AVAILABLE
+
+
[docs]def is_nccl_available():
+    """
+    Checks if the NCCL backend is available.
+
+    """
+    return _NCCL_AVAILABLE
+
+
+def is_gloo_available():
+    """
+    Checks if the Gloo backend is available.
+
+    """
+    return _GLOO_AVAILABLE
+
+
[docs]def is_initialized():
+    """
+    Checks if the default process group has been initialized.
+
+    """
+    return _default_pg is not None
+
+
+def _get_default_group():
+    """
+    Gets the default process group created by init_process_group.
+
+    """
+    if not is_initialized():
+        raise RuntimeError("Default process group has not been initialized, "
+                           "please make sure to call init_process_group.")
+    return _default_pg
+
+
+def _get_default_store():
+    """
+    Gets the default store created by init_process_group.
+
+    """
+    if not is_initialized():
+        raise RuntimeError("Default process group has not been initialized, "
+                           "please make sure to call init_process_group.")
+    _, default_store = _pg_map[_default_pg]
+    return default_store
+
+
[docs]def get_backend(group=group.WORLD):
+    """
+    Returns the backend of the given process group.
+
+    Arguments:
+        group (ProcessGroup, optional): The process group to work on. The
+            default is the general main process group. If another specific group
+            is specified, the calling process must be part of :attr:`group`.
+
+    Returns:
+        The backend of the given process group as a lower case string.
+
+    """
+    _check_default_pg()
+
+    if group == GroupMember.WORLD:
+        pg = _default_pg
+    else:
+        pg = group
+    if _rank_not_in_group(pg):
+        raise RuntimeError("Invalid process group specified")
+    return _pg_map.get(pg, None)[0]
+
+
    [docs]def init_process_group(backend, + init_method=None, + timeout=_default_pg_timeout, + world_size=-1, + rank=-1, + store=None, + group_name=''): + """ + Initializes the default distributed process group, and this will also + initialize the distributed package. + + There are 2 main ways to initialize a process group: + 1. Specify ``store``, ``rank``, and ``world_size`` explicitly. + 2. Specify ``init_method`` (a URL string) which indicates where/how + to discover peers. Optionally specify ``rank`` and ``world_size``, + or encode all required parameters in the URL and omit them. + If neither is specified, ``init_method`` is assumed to be "env://". + + + Arguments: + backend (str or Backend): The backend to use. Depending on + build-time configurations, valid values include ``mpi``, ``gloo``, + and ``nccl``. This field should be given as a lowercase string + (e.g., ``"gloo"``), which can also be accessed via + :class:`Backend` attributes (e.g., ``Backend.GLOO``). If using + multiple processes per machine with ``nccl`` backend, each process + must have exclusive access to every GPU it uses, as sharing GPUs + between processes can result in deadlocks. + init_method (str, optional): URL specifying how to initialize the + process group. Default is "env://" if no + ``init_method`` or ``store`` is specified. + Mutually exclusive with ``store``. + world_size (int, optional): Number of processes participating in + the job. Required if ``store`` is specified. + rank (int, optional): Rank of the current process. + Required if ``store`` is specified. + store(Store, optional): Key/value store accessible to all workers, used + to exchange connection/address information. + Mutually exclusive with ``init_method``. + timeout (timedelta, optional): Timeout for operations executed against + the process group. Default value equals 30 minutes. + This is only applicable for the ``gloo`` backend. + group_name (str, optional, deprecated): Group name. + + To enable ``backend == Backend.MPI``, PyTorch needs to built from source + on a system that supports MPI. The same applies to NCCL as well. + + """ + global _pg_group_ranks + global _backend + global _default_pg + global _default_pg_init_method + + if not isinstance(timeout, timedelta): + raise RuntimeError("Expected timeout argument to be of type" + "datetime.timedelta") + + if _default_pg is not None: + raise RuntimeError("trying to initialize the default process group " + "twice!") + + assert (store is None) or (init_method is None), \ + "Cannot specify both init_method and store." 
+ + if store is not None: + assert world_size > 0, 'world_size must be positive if using store' + assert rank >= 0, 'rank must be non-negative if using store' + elif init_method is None: + init_method = "env://" + + backend = Backend(backend) + + if backend == Backend.MPI: + _default_pg = _new_process_group_helper( + -1, + -1, + [], + Backend.MPI, + None, + group_name=group_name, + timeout=timeout) + else: + # backward compatible API + if store is None: + url = init_method + if world_size != -1 and rank != -1: + url += "?rank={}&world_size={}".format(rank, world_size) + elif rank != -1: + url += "?rank={}".format(rank) + elif world_size != -1: + url += "?world_size={}".format(world_size) + + store, rank, world_size = next(rendezvous(url)) + store.set_timeout(timeout) + + _default_pg = _new_process_group_helper( + world_size, + rank, + [], + backend, + store, + group_name=group_name, + timeout=timeout) + + _pg_group_ranks[_default_pg] = {i: i for i in range(_default_pg.size())} + _backend = _pg_map[_default_pg][0] + _default_pg_init_method = init_method
+
+
+def _new_process_group_helper(world_size,
+                              rank,
+                              group_ranks,
+                              backend,
+                              store,
+                              group_name=None,
+                              timeout=_default_pg_timeout):
+    """
+    Create a new distributed process group.
+
+    This function must be called by ALL processes in the global group, even if
+    the calling process is not part of the newly created group. In that case,
+    this function returns GroupMember.NON_GROUP_MEMBER.
+
+    This function is called with ``group_ranks == []`` for the default group.
+    """
+    global _pg_map
+    global _group_count
+    global _pg_names
+
+    if not group_name:
+        group_name = str(_group_count)
+        _group_count += 1
+
+    if group_name in _pg_names.values():
+        raise RuntimeError("The specified group name has already been "
+                           "created, please use a different group name")
+
+    if not isinstance(timeout, timedelta):
+        raise RuntimeError("Expected timeout argument to be of type "
+                           "datetime.timedelta")
+
+    # The list of group ranks is empty if we're creating the default group.
+    is_default_group = (len(group_ranks) == 0)
+
+    backend = Backend(backend)
+    if backend == Backend.MPI:
+        if not is_mpi_available():
+            raise RuntimeError("Distributed package doesn't have MPI built in")
+        pg = ProcessGroupMPI.create(group_ranks)
+        if not pg:
+            return GroupMember.NON_GROUP_MEMBER
+        _pg_map[pg] = (Backend.MPI, None)
+        _pg_names[pg] = group_name
+    else:
+        # If this is a subgroup (which means group_ranks is specified),
+        # we check if the current process is a member of the new group.
+        if not is_default_group:
+            global_rank = _default_pg.rank()
+            if global_rank not in group_ranks:
+                return GroupMember.NON_GROUP_MEMBER
+
+        # Use the group name as prefix in the default store, such that
+        # a single store can be reused by multiple groups.
+        prefix_store = PrefixStore(group_name, store)
+
+        if backend == Backend.GLOO:
+            pg = ProcessGroupGloo(
+                prefix_store,
+                rank,
+                world_size,
+                timeout=timeout)
+            _pg_map[pg] = (Backend.GLOO, store)
+            _pg_names[pg] = group_name
+        elif backend == Backend.NCCL:
+            if not is_nccl_available():
+                raise RuntimeError("Distributed package doesn't have NCCL "
+                                   "built in")
+            pg = ProcessGroupNCCL(
+                prefix_store,
+                rank,
+                world_size,
+                group_name)
+            _pg_map[pg] = (Backend.NCCL, store)
+            _pg_names[pg] = group_name
+        else:
+            raise RuntimeError("Unsupported distributed backend by group")
+
+    return pg
+
+
+def destroy_process_group(group=group.WORLD):
+    """
+    Destroy a given process group, and deinitialize the distributed package.
+
+    Arguments:
+        group (ProcessGroup, optional): The process group to be destroyed.
+                                        If group.WORLD is given, all process
+                                        groups, including the default one,
+                                        will be destroyed.
+    """
+    global _pg_map
+    global _pg_names
+    global _pg_group_ranks
+    global _default_pg
+    global _default_pg_init_method
+
+    if group == GroupMember.NON_GROUP_MEMBER:
+        return
+
+    if group == GroupMember.WORLD:
+        pg = _default_pg
+    else:
+        pg = group
+
+    if _pg_map.get(pg, None) is None:
+        raise RuntimeError("Invalid process group specified")
+
+    if group == GroupMember.WORLD:
+        _default_pg = None
+        _default_pg_init_method = None
+        _pg_map.clear()
+        _pg_names.clear()
+        _pg_group_ranks.clear()
+    else:
+        del _pg_map[pg]
+        del _pg_names[pg]
+        del _pg_group_ranks[pg]
+
+
[docs]def get_rank(group=group.WORLD):
+    """
+    Returns the rank of the current process in the given process group.
+
+    Rank is a unique identifier assigned to each process within a distributed
+    process group. They are always consecutive integers ranging from 0 to
+    ``world_size - 1``.
+
+    Arguments:
+        group (ProcessGroup, optional): The process group to work on
+
+    Returns:
+        The rank of the current process in the process group
+        -1, if not part of the group
+
+    """
+    if _rank_not_in_group(group):
+        return -1
+
+    _check_default_pg()
+    if group == GroupMember.WORLD:
+        return _default_pg.rank()
+
+    return _get_group_rank(group, _default_pg.rank())
    + + +
    [docs]def get_world_size(group=group.WORLD): + """ + Returns the number of processes in the current process group + + Arguments: + group (ProcessGroup, optional): The process group to work on + + Returns: + The world size of the process group + -1, if not part of the group + + """ + if _rank_not_in_group(group): + return -1 + + return _get_group_size(group)
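A quick sketch (assuming the two-process job from the earlier example): each process can query its own coordinates::

    >>> dist.get_rank()
    0
    >>> dist.get_world_size()
    2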
    + + +
    [docs]def isend(tensor, + dst, + group=group.WORLD, + tag=0): + """ + Sends a tensor asynchronously. + + Arguments: + tensor (Tensor): Tensor to send. + dst (int): Destination rank. + group (ProcessGroup, optional): The process group to work on + tag (int, optional): Tag to match send with remote recv + + Returns: + A distributed request object. + None, if not part of the group + + """ + _check_single_tensor(tensor, "tensor") + if _rank_not_in_group(group): + return + + if group == GroupMember.WORLD: + _check_default_pg() + return _default_pg.send([tensor], dst, tag) + else: + group_dst_rank = _get_group_rank(group, dst) + return group.send([tensor], group_dst_rank, tag)
    + + +
    [docs]def irecv(tensor, + src, + group=group.WORLD, + tag=0): + """ + Receives a tensor asynchronously. + + Arguments: + tensor (Tensor): Tensor to fill with received data. + src (int): Source rank. + group (ProcessGroup, optional): The process group to work on + tag (int, optional): Tag to match recv with remote send + + Returns: + A distributed request object. + None, if not part of the group + + """ + _check_single_tensor(tensor, "tensor") + if _rank_not_in_group(group): + return + + if group == GroupMember.WORLD: + _check_default_pg() + return _default_pg.recv([tensor], src, tag) + else: + group_src_rank = _get_group_rank(group, src) + return group.recv([tensor], group_src_rank, tag)
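A minimal point-to-point sketch pairing ``isend`` and ``irecv`` (assuming the two-process job from the earlier example)::

    >>> tensor = torch.zeros(4)
    >>> if dist.get_rank() == 0:
    ...     tensor += 1
    ...     req = dist.isend(tensor, dst=1)
    ... else:
    ...     req = dist.irecv(tensor, src=0)
    >>> req.wait()   # after this, rank 1's tensor holds ones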
    + + +
    [docs]def send(tensor, + dst, + group=group.WORLD, + tag=0): + """ + Sends a tensor synchronously. + + Arguments: + tensor (Tensor): Tensor to send. + dst (int): Destination rank. + group (ProcessGroup, optional): The process group to work on + tag (int, optional): Tag to match send with remote recv + + """ + _check_single_tensor(tensor, "tensor") + if _rank_not_in_group(group): + return + + if group == GroupMember.WORLD: + _check_default_pg() + _default_pg.send([tensor], dst, tag).wait() + else: + group_dst_rank = _get_group_rank(group, dst) + group.send([tensor], group_dst_rank, tag).wait()
    + + +
    [docs]def recv(tensor, + src=None, + group=group.WORLD, + tag=0): + """ + Receives a tensor synchronously. + + Arguments: + tensor (Tensor): Tensor to fill with received data. + src (int, optional): Source rank. Will receive from any + process if unspecified. + group (ProcessGroup, optional): The process group to work on + tag (int, optional): Tag to match recv with remote send + + Returns: + Sender rank + -1, if not part of the group + + """ + _check_single_tensor(tensor, "tensor") + if _rank_not_in_group(group): + return -1 + + if group == GroupMember.WORLD: + _check_default_pg() + pg = _default_pg + else: + pg = group + + if src is None: + work = pg.recv_anysource([tensor], tag) + work.wait() + src_rank = work.source_rank() + if group == GroupMember.WORLD: + return src_rank + else: + return _get_global_rank(pg, src_rank) + else: + if group == GroupMember.WORLD: + pg.recv([tensor], src, tag).wait() + else: + group_src_rank = _get_group_rank(pg, src) + pg.recv([tensor], group_src_rank, tag).wait() + return src
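The blocking counterparts pair up the same way (a sketch, assuming the two-process job from the earlier example)::

    >>> tensor = torch.zeros(4)
    >>> if dist.get_rank() == 0:
    ...     dist.send(tensor + 1, dst=1)
    ... else:
    ...     sender = dist.recv(tensor)   # src omitted: receive from any rank
    ...     # sender == 0 and tensor now holds ones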
    + + +
[docs]def broadcast_multigpu(tensor_list,
+                       src,
+                       group=group.WORLD,
+                       async_op=False,
+                       src_tensor=0):
+    """
+    Broadcasts the tensor to the whole group with multiple GPU tensors
+    per node.
+
+    Each tensor in ``tensor_list`` must have the same number of elements in
+    all the GPUs from all processes participating in the collective, and
+    each tensor in the list must be on a different GPU.
+
+    Only the ``nccl`` and ``gloo`` backends are currently supported;
+    tensors must be GPU tensors.
+
+    Arguments:
+        tensor_list (List[Tensor]): Tensors that participate in the collective
+            operation. If ``src`` is the rank, then the specified ``src_tensor``
+            element of ``tensor_list`` (``tensor_list[src_tensor]``) will be
+            broadcast to all other tensors (on different GPUs) in the src process
+            and all tensors in ``tensor_list`` of other non-src processes.
+            You also need to make sure that ``len(tensor_list)`` is the same
+            for all the distributed processes calling this function.
+
+        src (int): Source rank.
+        group (ProcessGroup, optional): The process group to work on
+        async_op (bool, optional): Whether this op should be an async op
+        src_tensor (int, optional): Source tensor rank within ``tensor_list``
+
+    Returns:
+        Async work handle, if async_op is set to True.
+        None, if not async_op or if not part of the group
+
+    """
+    if _rank_not_in_group(group):
+        return
+
+    opts = BroadcastOptions()
+    opts.rootRank = src
+    opts.rootTensor = src_tensor
+
+    if group == GroupMember.WORLD:
+        _check_default_pg()
+        work = _default_pg.broadcast(tensor_list, opts)
+    else:
+        group_src_rank = _get_group_rank(group, src)
+        opts.rootRank = group_src_rank
+        work = group.broadcast(tensor_list, opts)
+    if async_op:
+        return work
+    else:
+        work.wait()
    + + +
    [docs]def broadcast(tensor, + src, + group=group.WORLD, + async_op=False): + """ + Broadcasts the tensor to the whole group. + + ``tensor`` must have the same number of elements in all processes + participating in the collective. + + Arguments: + tensor (Tensor): Data to be sent if ``src`` is the rank of current + process, and tensor to be used to save received data otherwise. + src (int): Source rank. + group (ProcessGroup, optional): The process group to work on + async_op (bool, optional): Whether this op should be an async op + + Returns: + Async work handle, if async_op is set to True. + None, if not async_op or if not part of the group + + """ + _check_single_tensor(tensor, "tensor") + if _rank_not_in_group(group): + return + + opts = BroadcastOptions() + opts.rootRank = src + opts.rootTensor = 0 + + if group == GroupMember.WORLD: + _check_default_pg() + work = _default_pg.broadcast([tensor], opts) + else: + group_src_rank = _get_group_rank(group, src) + opts.rootRank = group_src_rank + work = group.broadcast([tensor], opts) + if async_op: + return work + else: + work.wait()
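A short sketch (assuming the two-process job from the earlier example): only the ``src`` rank's data survives the call::

    >>> tensor = torch.arange(4.) if dist.get_rank() == 0 else torch.zeros(4)
    >>> dist.broadcast(tensor, src=0)
    >>> # every rank now holds tensor([0., 1., 2., 3.])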
    + + +
[docs]def all_reduce_multigpu(tensor_list,
+                        op=ReduceOp.SUM,
+                        group=group.WORLD,
+                        async_op=False):
+    r"""
+    Reduces the tensor data across all machines in such a way that all get
+    the final result. This function reduces a number of tensors on every node,
+    while each tensor resides on a different GPU.
+    Therefore, the input tensors in the tensor list need to be GPU tensors,
+    and each tensor in the tensor list needs to reside on a different GPU.
+
+    After the call, every tensor in ``tensor_list`` is going to be bitwise
+    identical in all processes.
+
+    Only the ``nccl`` and ``gloo`` backends are currently supported;
+    tensors must be GPU tensors.
+
+    Arguments:
+        tensor_list (List[Tensor]): List of input and output tensors of
+            the collective. The function operates in-place and requires
+            each tensor to be a GPU tensor on a different GPU.
+            You also need to make sure that ``len(tensor_list)`` is the same for
+            all the distributed processes calling this function.
+        op (optional): One of the values from
+            ``torch.distributed.ReduceOp``
+            enum. Specifies an operation used for element-wise reductions.
+        group (ProcessGroup, optional): The process group to work on
+        async_op (bool, optional): Whether this op should be an async op
+
+    Returns:
+        Async work handle, if async_op is set to True.
+        None, if not async_op or if not part of the group
+
+    """
+    if _rank_not_in_group(group):
+        return
+
+    opts = AllreduceOptions()
+    opts.reduceOp = op
+    if group == GroupMember.WORLD:
+        _check_default_pg()
+        work = _default_pg.allreduce(tensor_list, opts)
+    else:
+        work = group.allreduce(tensor_list, opts)
+
+    if async_op:
+        return work
+    else:
+        work.wait()
    + + +
    [docs]def all_reduce(tensor, + op=ReduceOp.SUM, + group=group.WORLD, + async_op=False): + """ + Reduces the tensor data across all machines in such a way that all get + the final result. + + After the call ``tensor`` is going to be bitwise identical in all processes. + + Arguments: + tensor (Tensor): Input and output of the collective. The function + operates in-place. + op (optional): One of the values from + ``torch.distributed.ReduceOp`` + enum. Specifies an operation used for element-wise reductions. + group (ProcessGroup, optional): The process group to work on + async_op (bool, optional): Whether this op should be an async op + + Returns: + Async work handle, if async_op is set to True. + None, if not async_op or if not part of the group + + """ + _check_single_tensor(tensor, "tensor") + if _rank_not_in_group(group): + return + + opts = AllreduceOptions() + opts.reduceOp = op + if group == GroupMember.WORLD: + _check_default_pg() + work = _default_pg.allreduce([tensor], opts) + else: + work = group.allreduce([tensor], opts) + + if async_op: + return work + else: + work.wait()
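A short sketch (assuming the two-process job from the earlier example)::

    >>> tensor = torch.ones(2) * (dist.get_rank() + 1)
    >>> dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
    >>> # with world_size == 2, every rank now holds tensor([3., 3.])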
    + + +
[docs]def reduce_multigpu(tensor_list,
+                    dst,
+                    op=ReduceOp.SUM,
+                    group=group.WORLD,
+                    async_op=False,
+                    dst_tensor=0):
+    """
+    Reduces the tensor data on multiple GPUs across all machines. Each tensor
+    in ``tensor_list`` should reside on a separate GPU.
+
+    Only the GPU of ``tensor_list[dst_tensor]`` on the process with rank ``dst``
+    is going to receive the final result.
+
+    Only the ``nccl`` backend is currently supported;
+    tensors must be GPU tensors.
+
+    Arguments:
+        tensor_list (List[Tensor]): Input and output GPU tensors of the
+            collective. The function operates in-place.
+            You also need to make sure that ``len(tensor_list)`` is the same for
+            all the distributed processes calling this function.
+        dst (int): Destination rank
+        op (optional): One of the values from
+            ``torch.distributed.ReduceOp``
+            enum. Specifies an operation used for element-wise reductions.
+        group (ProcessGroup, optional): The process group to work on
+        async_op (bool, optional): Whether this op should be an async op
+        dst_tensor (int, optional): Destination tensor rank within
+                                    ``tensor_list``
+
+    Returns:
+        Async work handle, if async_op is set to True.
+        None, otherwise
+
+    """
+    if _rank_not_in_group(group):
+        return
+
+    opts = ReduceOptions()
+    opts.reduceOp = op
+    opts.rootRank = dst
+    opts.rootTensor = dst_tensor
+
+    if group == GroupMember.WORLD:
+        _check_default_pg()
+        work = _default_pg.reduce(tensor_list, opts)
+    else:
+        group_dst_rank = _get_group_rank(group, dst)
+        opts.rootRank = group_dst_rank
+        work = group.reduce(tensor_list, opts)
+
+    if async_op:
+        return work
+    else:
+        work.wait()
    + + +
    [docs]def reduce(tensor, + dst, + op=ReduceOp.SUM, + group=group.WORLD, + async_op=False): + """ + Reduces the tensor data across all machines. + + Only the process with rank ``dst`` is going to receive the final result. + + Arguments: + tensor (Tensor): Input and output of the collective. The function + operates in-place. + dst (int): Destination rank + op (optional): One of the values from + ``torch.distributed.ReduceOp`` + enum. Specifies an operation used for element-wise reductions. + group (ProcessGroup, optional): The process group to work on + async_op (bool, optional): Whether this op should be an async op + + Returns: + Async work handle, if async_op is set to True. + None, if not async_op or if not part of the group + + """ + _check_single_tensor(tensor, "tensor") + if _rank_not_in_group(group): + return + + opts = ReduceOptions() + opts.reduceOp = op + opts.rootRank = dst + + if group == GroupMember.WORLD: + _check_default_pg() + work = _default_pg.reduce([tensor], opts) + else: + group_dst_rank = _get_group_rank(group, dst) + opts.rootRank = group_dst_rank + work = group.reduce([tensor], opts) + + if async_op: + return work + else: + work.wait()
    + + +
[docs]def all_gather_multigpu(output_tensor_lists,
+                        input_tensor_list,
+                        group=group.WORLD,
+                        async_op=False):
+    """
+    Gathers tensors from the whole group in a list.
+    Each tensor in ``input_tensor_list`` should reside on a separate GPU.
+
+    Only the ``nccl`` backend is currently supported;
+    tensors must be GPU tensors.
+
+    Arguments:
+        output_tensor_lists (List[List[Tensor]]): Output lists. Each should
+            contain correctly-sized tensors on each GPU to be used for output
+            of the collective, e.g. ``output_tensor_lists[i]`` contains the
+            all_gather result that resides on the GPU of
+            ``input_tensor_list[i]``.
+
+            Note that each element of ``output_tensor_lists`` has the size of
+            ``world_size * len(input_tensor_list)``, since the function all
+            gathers the result from every single GPU in the group. To interpret
+            each element of ``output_tensor_lists[i]``, note that
+            ``input_tensor_list[j]`` of rank k will appear in
+            ``output_tensor_lists[i][k * world_size + j]``.
+
+            Also note that ``len(output_tensor_lists)``, and the size of each
+            element in ``output_tensor_lists`` (each element is a list,
+            therefore ``len(output_tensor_lists[i])``) need to be the same
+            for all the distributed processes calling this function.
+
+        input_tensor_list (List[Tensor]): List of tensors (on different GPUs) to
+            be broadcast from current process.
+            Note that ``len(input_tensor_list)`` needs to be the same for
+            all the distributed processes calling this function.
+
+        group (ProcessGroup, optional): The process group to work on
+        async_op (bool, optional): Whether this op should be an async op
+
+    Returns:
+        Async work handle, if async_op is set to True.
+        None, if not async_op or if not part of the group
+
+    """
+    if _rank_not_in_group(group):
+        return
+
+    if group == GroupMember.WORLD:
+        _check_default_pg()
+        work = _default_pg.allgather(output_tensor_lists, input_tensor_list)
+    else:
+        work = group.allgather(output_tensor_lists, input_tensor_list)
+
+    if async_op:
+        return work
+    else:
+        work.wait()
    + + +
    [docs]def all_gather(tensor_list, + tensor, + group=group.WORLD, + async_op=False): + """ + Gathers tensors from the whole group in a list. + + Arguments: + tensor_list (list[Tensor]): Output list. It should contain + correctly-sized tensors to be used for output of the collective. + tensor (Tensor): Tensor to be broadcast from current process. + group (ProcessGroup, optional): The process group to work on + async_op (bool, optional): Whether this op should be an async op + + Returns: + Async work handle, if async_op is set to True. + None, if not async_op or if not part of the group + + """ + _check_tensor_list(tensor_list, "tensor_list") + _check_single_tensor(tensor, "tensor") + if _rank_not_in_group(group): + return + + if group == GroupMember.WORLD: + _check_default_pg() + work = _default_pg.allgather([tensor_list], [tensor]) + else: + work = group.allgather([tensor_list], [tensor]) + + if async_op: + return work + else: + work.wait()
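A short sketch (assuming the two-process job from the earlier example)::

    >>> tensor = torch.tensor([dist.get_rank()])
    >>> gathered = [torch.zeros(1, dtype=torch.long)
    ...             for _ in range(dist.get_world_size())]
    >>> dist.all_gather(gathered, tensor)
    >>> # gathered == [tensor([0]), tensor([1])] on every rank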
    + + +
[docs]def gather(tensor,
+           gather_list,
+           dst,
+           group=group.WORLD,
+           async_op=False):
+    """
+    Gathers a list of tensors in a single process.
+
+    Arguments:
+        tensor (Tensor): Input tensor.
+        gather_list (list[Tensor]): List of appropriately-sized tensors to
+            use for received data. Required only in the receiving process.
+        dst (int): Destination rank. Required in all processes except the one
+            that is receiving the data.
+        group (ProcessGroup, optional): The process group to work on
+        async_op (bool, optional): Whether this op should be an async op
+
+    Returns:
+        Async work handle, if async_op is set to True.
+        None, if not async_op or if not part of the group
+
+    """
+    _check_single_tensor(tensor, "tensor")
+    _check_tensor_list(gather_list, "gather_list")
+    if _rank_not_in_group(group):
+        return
+
+    my_rank = get_rank()
+    if dst == my_rank:
+        if gather_list is None:
+            raise RuntimeError("gather_list is a required argument in gather "
+                               "destination")
+        input_tensors = [tensor]
+        output_tensors = [gather_list]
+    else:
+        if gather_list:
+            raise RuntimeError("non-empty gather_list can be given only "
+                               "to gather destination")
+        input_tensors = [tensor]
+        output_tensors = []
+
+    opts = GatherOptions()
+    opts.rootRank = dst
+
+    if group == GroupMember.WORLD:
+        _check_default_pg()
+        work = _default_pg.gather(output_tensors, input_tensors, opts)
+    else:
+        group_dst_rank = _get_group_rank(group, dst)
+        opts.rootRank = group_dst_rank
+        work = group.gather(output_tensors, input_tensors, opts)
+
+    if async_op:
+        return work
+    else:
+        work.wait()
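A short sketch gathering onto rank 0 (assuming the two-process job from the earlier example; note the argument validation above expects a list, so non-destination ranks pass an empty list)::

    >>> tensor = torch.tensor([float(dist.get_rank())])
    >>> if dist.get_rank() == 0:
    ...     gather_list = [torch.zeros(1) for _ in range(dist.get_world_size())]
    ... else:
    ...     gather_list = []
    >>> dist.gather(tensor, gather_list, dst=0)
    >>> # on rank 0: gather_list == [tensor([0.]), tensor([1.])]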
    + + +
[docs]def scatter(tensor,
+            scatter_list,
+            src,
+            group=group.WORLD,
+            async_op=False):
+    """
+    Scatters a list of tensors to all processes in a group.
+
+    Each process will receive exactly one tensor and store its data in the
+    ``tensor`` argument.
+
+    Arguments:
+        tensor (Tensor): Output tensor.
+        scatter_list (list[Tensor]): List of tensors to scatter. Required only
+            in the process that is sending the data.
+        src (int): Source rank. Required in all processes except the one that
+            is sending the data.
+        group (ProcessGroup, optional): The process group to work on
+        async_op (bool, optional): Whether this op should be an async op
+
+    Returns:
+        Async work handle, if async_op is set to True.
+        None, if not async_op or if not part of the group
+
+    """
+    _check_single_tensor(tensor, "tensor")
+    _check_tensor_list(scatter_list, "scatter_list")
+    if _rank_not_in_group(group):
+        return
+
+    my_rank = get_rank()
+    if src == my_rank:
+        if scatter_list is None:
+            raise RuntimeError("scatter_list is a required argument in "
+                               "scatter source")
+        input_tensors = [scatter_list]
+        output_tensors = [tensor]
+    else:
+        if scatter_list:
+            raise RuntimeError("non-empty scatter_list can be given only "
+                               "to scatter source")
+        input_tensors = []
+        output_tensors = [tensor]
+
+    opts = ScatterOptions()
+    opts.rootRank = src
+
+    if group == GroupMember.WORLD:
+        _check_default_pg()
+        work = _default_pg.scatter(output_tensors, input_tensors, opts)
+    else:
+        group_src_rank = _get_group_rank(group, src)
+        opts.rootRank = group_src_rank
+        work = group.scatter(output_tensors, input_tensors, opts)
+
+    if async_op:
+        return work
+    else:
+        work.wait()
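A short sketch scattering from rank 0 (assuming the two-process job from the earlier example; as with ``gather``, non-source ranks pass an empty list)::

    >>> out = torch.zeros(1)
    >>> if dist.get_rank() == 0:
    ...     scatter_list = [torch.tensor([float(i)])
    ...                     for i in range(dist.get_world_size())]
    ... else:
    ...     scatter_list = []
    >>> dist.scatter(out, scatter_list, src=0)
    >>> # rank r now holds tensor([float(r)])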
    + + +def reduce_scatter_multigpu(output_tensor_list, + input_tensor_lists, + op=ReduceOp.SUM, + group=group.WORLD, + async_op=False): + """ + Reduce and scatter a list of tensors to the whole group. Only nccl backend + is currently supported. + + Each tensor in ``output_tensor_list`` should reside on a separate GPU, as + should each list of tensors in ``input_tensor_lists``. + + Arguments: + output_tensor_list (List[Tensor]): Output tensors (on different GPUs) + to receive the result of the operation. + + Note that ``len(output_tensor_list)`` needs to be the same for all + the distributed processes calling this function. + + input_tensor_lists (List[List[Tensor]]): Input lists. It should + contain correctly-sized tensors on each GPU to be used for input of + the collective, e.g. ``input_tensor_lists[i]`` contains the + reduce_scatter input that resides on the GPU of + ``output_tensor_list[i]``. + + Note that each element of ``input_tensor_lists`` has the size of + ``world_size * len(output_tensor_list)``, since the function + scatters the result from every single GPU in the group. To + interpret each element of ``input_tensor_lists[i]``, note that + ``output_tensor_list[j]`` of rank k receives the reduce-scattered + result from ``input_tensor_lists[i][k * world_size + j]`` + + Also note that ``len(input_tensor_lists)``, and the size of each + element in ``input_tensor_lists`` (each element is a list, + therefore ``len(input_tensor_lists[i])``) need to be the same for + all the distributed processes calling this function. + + group (ProcessGroup, optional): The process group to work on. + async_op (bool, optional): Whether this op should be an async op. + + Returns: + Async work handle, if async_op is set to True. + None, if not async_op or if not part of the group. + + """ + if _rank_not_in_group(group): + return + + opts = ReduceScatterOptions() + opts.reduceOp = op + + if group == GroupMember.WORLD: + _check_default_pg() + work = _default_pg.reduce_scatter( + output_tensor_list, + input_tensor_lists, + opts + ) + else: + work = group.reduce_scatter( + output_tensor_list, + input_tensor_lists, + opts + ) + + if async_op: + return work + else: + work.wait() + + +def reduce_scatter(output, + input_list, + op=ReduceOp.SUM, + group=group.WORLD, + async_op=False): + """ + Reduces, then scatters a list of tensors to all processes in a group. + + Arguments: + output (Tensor): Output tensor. + input_list (list[Tensor]): List of tensors to reduce and scatter. + group (ProcessGroup, optional): The process group to work on. + async_op (bool, optional): Whether this op should be an async op. + + Returns: + Async work handle, if async_op is set to True. + None, if not async_op or if not part of the group. + + """ + _check_single_tensor(output, "output") + _check_tensor_list(input_list, "input_list") + if _rank_not_in_group(group): + return + + opts = ReduceScatterOptions() + opts.reduceOp = op + + if group == GroupMember.WORLD: + _check_default_pg() + work = _default_pg.reduce_scatter([output], [input_list], opts) + else: + work = group.reduce_scatter([output], [input_list], opts) + + if async_op: + return work + else: + work.wait() + + +
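A short sketch of ``reduce_scatter`` (illustrative; assumes a backend that implements reduce_scatter, e.g. ``nccl``, one GPU per process, and the two-process job from the earlier example)::

    >>> world = dist.get_world_size()
    >>> inputs = [torch.full((1,), i + 1.0).cuda() for i in range(world)]
    >>> output = torch.zeros(1).cuda()
    >>> dist.reduce_scatter(output, inputs)   # default op: ReduceOp.SUM
    >>> # rank r now holds world * (r + 1): the sum of every rank's inputs[r]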
[docs]def barrier(group=group.WORLD,
+            async_op=False):
+    """
+    Synchronizes all processes.
+
+    If ``async_op`` is False, this collective blocks processes until the
+    whole group enters this function; if ``async_op`` is True, the caller
+    blocks once ``wait()`` is called on the returned async work handle.
+
+    Arguments:
+        group (ProcessGroup, optional): The process group to work on
+        async_op (bool, optional): Whether this op should be an async op
+
+    Returns:
+        Async work handle, if async_op is set to True.
+        None, if not async_op or if not part of the group
+    """
+    if _rank_not_in_group(group):
+        return
+
+    if group == GroupMember.WORLD:
+        _check_default_pg()
+        work = _default_pg.barrier()
+    else:
+        work = group.barrier()
+
+    if async_op:
+        return work
+    else:
+        work.wait()
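A short sketch (assuming an initialized job)::

    >>> # ... rank-dependent work ...
    >>> dist.barrier()   # no rank proceeds past this line until all arrive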
    + + +
[docs]def new_group(ranks=None, timeout=_default_pg_timeout, backend=None):
+    """
+    Creates a new distributed group.
+
+    This function requires that all processes in the main group (i.e. all
+    processes that are part of the distributed job) enter this function, even
+    if they are not going to be members of the group. Additionally, groups
+    should be created in the same order in all processes.
+
+    Arguments:
+        ranks (list[int]): List of ranks of group members.
+        timeout (timedelta, optional): Timeout for operations executed against
+            the process group. Default value equals 30 minutes.
+            This is only applicable for the ``gloo`` backend.
+        backend (str or Backend, optional): The backend to use. Depending on
+            build-time configurations, valid values are ``gloo`` and ``nccl``.
+            By default uses the same backend as the global group. This field
+            should be given as a lowercase string (e.g., ``"gloo"``), which can
+            also be accessed via :class:`Backend` attributes (e.g.,
+            ``Backend.GLOO``).
+
+    Returns:
+        A handle of distributed group that can be given to collective calls.
+    """
+
+    _check_default_pg()
+
+    global _pg_group_ranks
+
+    default_backend, default_store = _pg_map[_default_pg]
+    global_rank = _default_pg.rank()
+    global_world_size = _default_pg.size()
+
+    # Default to the same backend as the global process group
+    # if the backend is not specified.
+    if not backend:
+        backend = default_backend
+
+    # checks the input ranks
+    if ranks is not None:
+        ranks = sorted(ranks)
+        group_world_size = len(ranks)
+        if group_world_size > global_world_size:
+            raise RuntimeError("the new group's world size should be less than "
+                               "or equal to the world size set by "
+                               "init_process_group")
+        # check ranks' sanity
+        for rank in ranks:
+            if rank < 0 or rank >= global_world_size:
+                raise RuntimeError("The new group's rank should be within "
+                                   "the world_size set by init_process_group")
+        if global_rank in ranks:
+            group_rank = ranks.index(global_rank)
+        else:
+            group_rank = None
+    else:
+        ranks = list(range(global_world_size))
+        group_world_size = global_world_size
+        group_rank = global_rank
+
+    backend = Backend(backend)
+    pg = _new_process_group_helper(group_world_size,
+                                   group_rank,
+                                   ranks,
+                                   backend,
+                                   default_store,
+                                   timeout=timeout)
+
+    # Create the global rank to group rank mapping
+    _pg_group_ranks[pg] = {
+        global_rank: group_rank
+        for group_rank, global_rank in enumerate(ranks)
+    }
+
+    return pg
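A short sketch (assuming a four-process job; note that all ranks must call ``new_group``, even those not listed in ``ranks``)::

    >>> group = dist.new_group(ranks=[0, 1])
    >>> if dist.get_rank() in (0, 1):
    ...     t = torch.ones(1)
    ...     dist.all_reduce(t, group=group)   # sums over ranks 0 and 1 only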
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/bernoulli.html b/docs/stable/_modules/torch/distributions/bernoulli.html
new file mode 100644
index 000000000000..36a7a164469e
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/bernoulli.html
@@ -0,0 +1,626 @@
+ torch.distributions.bernoulli — PyTorch master documentation

    Source code for torch.distributions.bernoulli

    +from numbers import Number
    +
    +import torch
    +from torch.distributions import constraints
    +from torch.distributions.exp_family import ExponentialFamily
    +from torch.distributions.utils import broadcast_all, probs_to_logits, logits_to_probs, lazy_property
    +from torch.nn.functional import binary_cross_entropy_with_logits
    +
    +
    +
    [docs]class Bernoulli(ExponentialFamily): + r""" + Creates a Bernoulli distribution parameterized by :attr:`probs` + or :attr:`logits` (but not both). + + Samples are binary (0 or 1). They take the value `1` with probability `p` + and `0` with probability `1 - p`. + + Example:: + + >>> m = Bernoulli(torch.tensor([0.3])) + >>> m.sample() # 30% chance 1; 70% chance 0 + tensor([ 0.]) + + Args: + probs (Number, Tensor): the probability of sampling `1` + logits (Number, Tensor): the log-odds of sampling `1` + """ + arg_constraints = {'probs': constraints.unit_interval, + 'logits': constraints.real} + support = constraints.boolean + has_enumerate_support = True + _mean_carrier_measure = 0 + + def __init__(self, probs=None, logits=None, validate_args=None): + if (probs is None) == (logits is None): + raise ValueError("Either `probs` or `logits` must be specified, but not both.") + if probs is not None: + is_scalar = isinstance(probs, Number) + self.probs, = broadcast_all(probs) + else: + is_scalar = isinstance(logits, Number) + self.logits, = broadcast_all(logits) + self._param = self.probs if probs is not None else self.logits + if is_scalar: + batch_shape = torch.Size() + else: + batch_shape = self._param.size() + super(Bernoulli, self).__init__(batch_shape, validate_args=validate_args) + +
    [docs] def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Bernoulli, _instance) + batch_shape = torch.Size(batch_shape) + if 'probs' in self.__dict__: + new.probs = self.probs.expand(batch_shape) + new._param = new.probs + if 'logits' in self.__dict__: + new.logits = self.logits.expand(batch_shape) + new._param = new.logits + super(Bernoulli, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new
    + + def _new(self, *args, **kwargs): + return self._param.new(*args, **kwargs) + + @property + def mean(self): + return self.probs + + @property + def variance(self): + return self.probs * (1 - self.probs) + +
    [docs] @lazy_property + def logits(self): + return probs_to_logits(self.probs, is_binary=True)
    + +
    [docs] @lazy_property + def probs(self): + return logits_to_probs(self.logits, is_binary=True)
    + + @property + def param_shape(self): + return self._param.size() + +
    [docs] def sample(self, sample_shape=torch.Size()): + shape = self._extended_shape(sample_shape) + with torch.no_grad(): + return torch.bernoulli(self.probs.expand(shape))
    + +
    [docs] def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + logits, value = broadcast_all(self.logits, value) + return -binary_cross_entropy_with_logits(logits, value, reduction='none')
    + +
    [docs] def entropy(self): + return binary_cross_entropy_with_logits(self.logits, self.probs, reduction='none')
    + +
    [docs] def enumerate_support(self, expand=True): + values = torch.arange(2, dtype=self._param.dtype, device=self._param.device) + values = values.view((-1,) + (1,) * len(self._batch_shape)) + if expand: + values = values.expand((-1,) + self._batch_shape) + return values
    + + @property + def _natural_params(self): + return (torch.log(self.probs / (1 - self.probs)), ) + + def _log_normalizer(self, x): + return torch.log(1 + torch.exp(x))
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/beta.html b/docs/stable/_modules/torch/distributions/beta.html
new file mode 100644
index 000000000000..7cfd442156a9
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/beta.html
@@ -0,0 +1,604 @@
+ torch.distributions.beta — PyTorch master documentation

    Source code for torch.distributions.beta

    +from numbers import Number
    +
    +import torch
    +from torch.distributions import constraints
    +from torch.distributions.dirichlet import Dirichlet
    +from torch.distributions.exp_family import ExponentialFamily
    +from torch.distributions.utils import broadcast_all
    +
    +
    +
    [docs]class Beta(ExponentialFamily): + r""" + Beta distribution parameterized by :attr:`concentration1` and :attr:`concentration0`. + + Example:: + + >>> m = Beta(torch.tensor([0.5]), torch.tensor([0.5])) + >>> m.sample() # Beta distributed with concentration concentration1 and concentration0 + tensor([ 0.1046]) + + Args: + concentration1 (float or Tensor): 1st concentration parameter of the distribution + (often referred to as alpha) + concentration0 (float or Tensor): 2nd concentration parameter of the distribution + (often referred to as beta) + """ + arg_constraints = {'concentration1': constraints.positive, 'concentration0': constraints.positive} + support = constraints.unit_interval + has_rsample = True + + def __init__(self, concentration1, concentration0, validate_args=None): + if isinstance(concentration1, Number) and isinstance(concentration0, Number): + concentration1_concentration0 = torch.tensor([float(concentration1), float(concentration0)]) + else: + concentration1, concentration0 = broadcast_all(concentration1, concentration0) + concentration1_concentration0 = torch.stack([concentration1, concentration0], -1) + self._dirichlet = Dirichlet(concentration1_concentration0) + super(Beta, self).__init__(self._dirichlet._batch_shape, validate_args=validate_args) + +
    [docs] def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Beta, _instance) + batch_shape = torch.Size(batch_shape) + new._dirichlet = self._dirichlet.expand(batch_shape) + super(Beta, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new
    + + @property + def mean(self): + return self.concentration1 / (self.concentration1 + self.concentration0) + + @property + def variance(self): + total = self.concentration1 + self.concentration0 + return (self.concentration1 * self.concentration0 / + (total.pow(2) * (total + 1))) + +
    [docs] def rsample(self, sample_shape=()): + return self._dirichlet.rsample(sample_shape).select(-1, 0)
    + +
    [docs] def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + heads_tails = torch.stack([value, 1.0 - value], -1) + return self._dirichlet.log_prob(heads_tails)
    + +
    [docs] def entropy(self): + return self._dirichlet.entropy()
    + + @property + def concentration1(self): + result = self._dirichlet.concentration[..., 0] + if isinstance(result, Number): + return torch.tensor([result]) + else: + return result + + @property + def concentration0(self): + result = self._dirichlet.concentration[..., 1] + if isinstance(result, Number): + return torch.tensor([result]) + else: + return result + + @property + def _natural_params(self): + return (self.concentration1, self.concentration0) + + def _log_normalizer(self, x, y): + return torch.lgamma(x) + torch.lgamma(y) - torch.lgamma(x + y)
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/binomial.html b/docs/stable/_modules/torch/distributions/binomial.html
new file mode 100644
index 000000000000..ea67a5c3a9e9
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/binomial.html
@@ -0,0 +1,653 @@
+ torch.distributions.binomial — PyTorch master documentation

    Source code for torch.distributions.binomial

    +from numbers import Number
    +import torch
    +from torch.distributions import constraints
    +from torch.distributions.distribution import Distribution
    +from torch.distributions.utils import broadcast_all, probs_to_logits, lazy_property, logits_to_probs
    +
    +
    +def _clamp_by_zero(x):
+    # works like clamp(x, min=0) but the gradient at 0 is 0.5
    +    return (x.clamp(min=0) + x - x.clamp(max=0)) / 2
    +
    +
    +
[docs]class Binomial(Distribution):
+    r"""
+    Creates a Binomial distribution parameterized by :attr:`total_count` and
+    either :attr:`probs` or :attr:`logits` (but not both). :attr:`total_count` must be
+    broadcastable with :attr:`probs`/:attr:`logits`.
+
+    Example::
+
+        >>> m = Binomial(100, torch.tensor([0 , .2, .8, 1]))
+        >>> x = m.sample()
+        tensor([ 0., 22., 71., 100.])
+
+        >>> m = Binomial(torch.tensor([[5.], [10.]]), torch.tensor([0.5, 0.8]))
+        >>> x = m.sample()
+        tensor([[ 4., 5.],
+                [ 7., 6.]])
+
+    Args:
+        total_count (int or Tensor): number of Bernoulli trials
+        probs (Tensor): Event probabilities
+        logits (Tensor): Event log-odds
+    """
+    arg_constraints = {'total_count': constraints.nonnegative_integer,
+                       'probs': constraints.unit_interval,
+                       'logits': constraints.real}
+    has_enumerate_support = True
+
+    def __init__(self, total_count=1, probs=None, logits=None, validate_args=None):
+        if (probs is None) == (logits is None):
+            raise ValueError("Either `probs` or `logits` must be specified, but not both.")
+        if probs is not None:
+            self.total_count, self.probs, = broadcast_all(total_count, probs)
+            self.total_count = self.total_count.type_as(self.probs)
+            is_scalar = isinstance(self.probs, Number)
+        else:
+            self.total_count, self.logits, = broadcast_all(total_count, logits)
+            self.total_count = self.total_count.type_as(self.logits)
+            is_scalar = isinstance(self.logits, Number)
+
+        self._param = self.probs if probs is not None else self.logits
+        if is_scalar:
+            batch_shape = torch.Size()
+        else:
+            batch_shape = self._param.size()
+        super(Binomial, self).__init__(batch_shape, validate_args=validate_args)
    [docs] def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Binomial, _instance) + batch_shape = torch.Size(batch_shape) + new.total_count = self.total_count.expand(batch_shape) + if 'probs' in self.__dict__: + new.probs = self.probs.expand(batch_shape) + new._param = new.probs + if 'logits' in self.__dict__: + new.logits = self.logits.expand(batch_shape) + new._param = new.logits + super(Binomial, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new
    + + def _new(self, *args, **kwargs): + return self._param.new(*args, **kwargs) + + @constraints.dependent_property + def support(self): + return constraints.integer_interval(0, self.total_count) + + @property + def mean(self): + return self.total_count * self.probs + + @property + def variance(self): + return self.total_count * self.probs * (1 - self.probs) + +
    [docs] @lazy_property + def logits(self): + return probs_to_logits(self.probs, is_binary=True)
    + +
    [docs] @lazy_property + def probs(self): + return logits_to_probs(self.logits, is_binary=True)
    + + @property + def param_shape(self): + return self._param.size() + +
    [docs] def sample(self, sample_shape=torch.Size()): + with torch.no_grad(): + max_count = max(int(self.total_count.max()), 1) + shape = self._extended_shape(sample_shape) + (max_count,) + bernoullis = torch.bernoulli(self.probs.unsqueeze(-1).expand(shape)) + if self.total_count.min() != max_count: + arange = torch.arange(max_count, dtype=self._param.dtype, device=self._param.device) + mask = arange >= self.total_count.unsqueeze(-1) + if torch._C._get_tracing_state(): + # [JIT WORKAROUND] lack of support for .masked_fill_() + bernoullis[mask.expand(shape)] = 0. + else: + bernoullis.masked_fill_(mask, 0.) + return bernoullis.sum(dim=-1)
    + +
    [docs] def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + log_factorial_n = torch.lgamma(self.total_count + 1) + log_factorial_k = torch.lgamma(value + 1) + log_factorial_nmk = torch.lgamma(self.total_count - value + 1) + # k * log(p) + (n - k) * log(1 - p) = k * (log(p) - log(1 - p)) + n * log(1 - p) + # (case logit < 0) = k * logit - n * log1p(e^logit) + # (case logit > 0) = k * logit - n * (log(p) - log(1 - p)) + n * log(p) + # = k * logit - n * logit - n * log1p(e^-logit) + # (merge two cases) = k * logit - n * max(logit, 0) - n * log1p(e^-|logit|) + normalize_term = (self.total_count * _clamp_by_zero(self.logits) + + self.total_count * torch.log1p(torch.exp(-torch.abs(self.logits))) + - log_factorial_n) + return value * self.logits - log_factorial_k - log_factorial_nmk - normalize_term
    + +
    [docs] def enumerate_support(self, expand=True): + total_count = int(self.total_count.max()) + if not self.total_count.min() == total_count: + raise NotImplementedError("Inhomogeneous total count not supported by `enumerate_support`.") + values = torch.arange(1 + total_count, dtype=self._param.dtype, device=self._param.device) + values = values.view((-1,) + (1,) * len(self._batch_shape)) + if expand: + values = values.expand((-1,) + self._batch_shape) + return values
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/categorical.html b/docs/stable/_modules/torch/distributions/categorical.html
new file mode 100644
index 000000000000..cd92bef6806e
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/categorical.html
@@ -0,0 +1,642 @@
+ torch.distributions.categorical — PyTorch master documentation

    Source code for torch.distributions.categorical

    +import torch
    +from torch._six import nan
    +from torch.distributions import constraints
    +from torch.distributions.distribution import Distribution
    +from torch.distributions.utils import probs_to_logits, logits_to_probs, lazy_property
    +
    +
    +
    [docs]class Categorical(Distribution): + r""" + Creates a categorical distribution parameterized by either :attr:`probs` or + :attr:`logits` (but not both). + + .. note:: + It is equivalent to the distribution that :func:`torch.multinomial` + samples from. + + Samples are integers from :math:`\{0, \ldots, K-1\}` where `K` is ``probs.size(-1)``. + + If :attr:`probs` is 1D with length-`K`, each element is the relative + probability of sampling the class at that index. + + If :attr:`probs` is 2D, it is treated as a batch of relative probability + vectors. + + .. note:: :attr:`probs` must be non-negative, finite and have a non-zero sum, + and it will be normalized to sum to 1. + + See also: :func:`torch.multinomial` + + Example:: + + >>> m = Categorical(torch.tensor([ 0.25, 0.25, 0.25, 0.25 ])) + >>> m.sample() # equal probability of 0, 1, 2, 3 + tensor(3) + + Args: + probs (Tensor): event probabilities + logits (Tensor): event log-odds + """ + arg_constraints = {'probs': constraints.simplex, + 'logits': constraints.real} + has_enumerate_support = True + + def __init__(self, probs=None, logits=None, validate_args=None): + if (probs is None) == (logits is None): + raise ValueError("Either `probs` or `logits` must be specified, but not both.") + if probs is not None: + if probs.dim() < 1: + raise ValueError("`probs` parameter must be at least one-dimensional.") + self.probs = probs / probs.sum(-1, keepdim=True) + else: + if logits.dim() < 1: + raise ValueError("`logits` parameter must be at least one-dimensional.") + self.logits = logits - logits.logsumexp(dim=-1, keepdim=True) + self._param = self.probs if probs is not None else self.logits + self._num_events = self._param.size()[-1] + batch_shape = self._param.size()[:-1] if self._param.ndimension() > 1 else torch.Size() + super(Categorical, self).__init__(batch_shape, validate_args=validate_args) + +
    [docs] def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Categorical, _instance) + batch_shape = torch.Size(batch_shape) + param_shape = batch_shape + torch.Size((self._num_events,)) + if 'probs' in self.__dict__: + new.probs = self.probs.expand(param_shape) + new._param = new.probs + if 'logits' in self.__dict__: + new.logits = self.logits.expand(param_shape) + new._param = new.logits + new._num_events = self._num_events + super(Categorical, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new
    + + def _new(self, *args, **kwargs): + return self._param.new(*args, **kwargs) + + @constraints.dependent_property + def support(self): + return constraints.integer_interval(0, self._num_events - 1) + +
    [docs] @lazy_property + def logits(self): + return probs_to_logits(self.probs)
    + +
    [docs] @lazy_property + def probs(self): + return logits_to_probs(self.logits)
    + + @property + def param_shape(self): + return self._param.size() + + @property + def mean(self): + return torch.full(self._extended_shape(), nan, dtype=self.probs.dtype, device=self.probs.device) + + @property + def variance(self): + return torch.full(self._extended_shape(), nan, dtype=self.probs.dtype, device=self.probs.device) + +
    [docs] def sample(self, sample_shape=torch.Size()): + sample_shape = self._extended_shape(sample_shape) + param_shape = sample_shape + torch.Size((self._num_events,)) + probs = self.probs.expand(param_shape) + probs_2d = probs.reshape(-1, self._num_events) + sample_2d = torch.multinomial(probs_2d, 1, True) + return sample_2d.reshape(sample_shape)
    + +
    [docs] def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + value = value.long().unsqueeze(-1) + value, log_pmf = torch.broadcast_tensors(value, self.logits) + value = value[..., :1] + return log_pmf.gather(-1, value).squeeze(-1)
    + +
    [docs] def entropy(self): + p_log_p = self.logits * self.probs + return -p_log_p.sum(-1)
    + +
    [docs] def enumerate_support(self, expand=True): + num_events = self._num_events + values = torch.arange(num_events, dtype=torch.long, device=self._param.device) + values = values.view((-1,) + (1,) * len(self._batch_shape)) + if expand: + values = values.expand((-1,) + self._batch_shape) + return values
    +
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/cauchy.html b/docs/stable/_modules/torch/distributions/cauchy.html
new file mode 100644
index 000000000000..3d2302861619
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/cauchy.html
@@ -0,0 +1,591 @@
+ torch.distributions.cauchy — PyTorch master documentation

    Source code for torch.distributions.cauchy

    +import math
    +from torch._six import inf, nan
    +from numbers import Number
    +
    +import torch
    +from torch.distributions import constraints
    +from torch.distributions.distribution import Distribution
    +from torch.distributions.utils import broadcast_all
    +
    +
    +
    [docs]class Cauchy(Distribution): + r""" + Samples from a Cauchy (Lorentz) distribution. The distribution of the ratio of + independent normally distributed random variables with means `0` follows a + Cauchy distribution. + + Example:: + + >>> m = Cauchy(torch.tensor([0.0]), torch.tensor([1.0])) + >>> m.sample() # sample from a Cauchy distribution with loc=0 and scale=1 + tensor([ 2.3214]) + + Args: + loc (float or Tensor): mode or median of the distribution. + scale (float or Tensor): half width at half maximum. + """ + arg_constraints = {'loc': constraints.real, 'scale': constraints.positive} + support = constraints.real + has_rsample = True + + def __init__(self, loc, scale, validate_args=None): + self.loc, self.scale = broadcast_all(loc, scale) + if isinstance(loc, Number) and isinstance(scale, Number): + batch_shape = torch.Size() + else: + batch_shape = self.loc.size() + super(Cauchy, self).__init__(batch_shape, validate_args=validate_args) + +
    [docs] def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Cauchy, _instance) + batch_shape = torch.Size(batch_shape) + new.loc = self.loc.expand(batch_shape) + new.scale = self.scale.expand(batch_shape) + super(Cauchy, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new
    + + @property + def mean(self): + return torch.full(self._extended_shape(), nan, dtype=self.loc.dtype, device=self.loc.device) + + @property + def variance(self): + return torch.full(self._extended_shape(), inf, dtype=self.loc.dtype, device=self.loc.device) + +
    [docs] def rsample(self, sample_shape=torch.Size()): + shape = self._extended_shape(sample_shape) + eps = self.loc.new(shape).cauchy_() + return self.loc + eps * self.scale
    + +
    [docs] def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + return -math.log(math.pi) - self.scale.log() - (1 + ((value - self.loc) / self.scale)**2).log()
    + +
    [docs] def cdf(self, value): + if self._validate_args: + self._validate_sample(value) + return torch.atan((value - self.loc) / self.scale) / math.pi + 0.5
    + +
    [docs] def icdf(self, value): + if self._validate_args: + self._validate_sample(value) + return torch.tan(math.pi * (value - 0.5)) * self.scale + self.loc
    + +
    [docs] def entropy(self): + return math.log(4 * math.pi) + self.scale.log()
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/chi2.html b/docs/stable/_modules/torch/distributions/chi2.html
new file mode 100644
index 000000000000..fad292ab419c
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/chi2.html
@@ -0,0 +1,544 @@
+ torch.distributions.chi2 — PyTorch master documentation

    Source code for torch.distributions.chi2

    +from torch.distributions import constraints
    +from torch.distributions.gamma import Gamma
    +
    +
    +
+class Chi2(Gamma):
+    r"""
+    Creates a Chi2 distribution parameterized by shape parameter :attr:`df`.
+    This is exactly equivalent to ``Gamma(alpha=0.5*df, beta=0.5)``
+
+    Example::
+
+        >>> m = Chi2(torch.tensor([1.0]))
+        >>> m.sample()  # Chi2 distributed with shape df=1
+        tensor([ 0.1046])
+
+    Args:
+        df (float or Tensor): shape parameter of the distribution
+    """
+    arg_constraints = {'df': constraints.positive}
+
+    def __init__(self, df, validate_args=None):
+        super(Chi2, self).__init__(0.5 * df, 0.5, validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(Chi2, _instance)
+        return super(Chi2, self).expand(batch_shape, new)
+
+    @property
+    def df(self):
+        return self.concentration * 2
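A short check of the Gamma equivalence stated in the docstring (df=4 is an arbitrary choice)::

    import torch
    from torch.distributions import Chi2, Gamma

    df = torch.tensor([4.0])
    chi2, gamma = Chi2(df), Gamma(0.5 * df, torch.tensor([0.5]))
    x = torch.tensor([1.5])
    assert torch.allclose(chi2.log_prob(x), gamma.log_prob(x))  # identical densities
    assert chi2.df.item() == 4.0                                # df recovered as 2 * concentration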
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/constraint_registry.html b/docs/stable/_modules/torch/distributions/constraint_registry.html
new file mode 100644
index 000000000000..2bfe0795d854
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/constraint_registry.html
@@ -0,0 +1,759 @@

    Source code for torch.distributions.constraint_registry

    +r"""
    +PyTorch provides two global :class:`ConstraintRegistry` objects that link
    +:class:`~torch.distributions.constraints.Constraint` objects to
    +:class:`~torch.distributions.transforms.Transform` objects. These objects both
    +input constraints and return transforms, but they have different guarantees on
    +bijectivity.
    +
    +1. ``biject_to(constraint)`` looks up a bijective
    +   :class:`~torch.distributions.transforms.Transform` from ``constraints.real``
    +   to the given ``constraint``. The returned transform is guaranteed to have
    +   ``.bijective = True`` and should implement ``.log_abs_det_jacobian()``.
    +2. ``transform_to(constraint)`` looks up a not-necessarily bijective
    +   :class:`~torch.distributions.transforms.Transform` from ``constraints.real``
    +   to the given ``constraint``. The returned transform is not guaranteed to
    +   implement ``.log_abs_det_jacobian()``.
    +
    +The ``transform_to()`` registry is useful for performing unconstrained
    +optimization on constrained parameters of probability distributions, which are
    +indicated by each distribution's ``.arg_constraints`` dict. These transforms often
    +overparameterize a space in order to avoid rotation; they are thus more
    +suitable for coordinate-wise optimization algorithms like Adam::
    +
    +    loc = torch.zeros(100, requires_grad=True)
    +    unconstrained = torch.zeros(100, requires_grad=True)
    +    scale = transform_to(Normal.arg_constraints['scale'])(unconstrained)
    +    loss = -Normal(loc, scale).log_prob(data).sum()
    +
    +The ``biject_to()`` registry is useful for Hamiltonian Monte Carlo, where
    +samples from a probability distribution with constrained ``.support`` are
    +propagated in an unconstrained space, and algorithms are typically rotation
    +invariant.::
    +
    +    dist = Exponential(rate)
    +    unconstrained = torch.zeros(100, requires_grad=True)
    +    sample = biject_to(dist.support)(unconstrained)
    +    potential_energy = -dist.log_prob(sample).sum()
    +
    +.. note::
    +
    +    An example where ``transform_to`` and ``biject_to`` differ is
    +    ``constraints.simplex``: ``transform_to(constraints.simplex)`` returns a
    +    :class:`~torch.distributions.transforms.SoftmaxTransform` that simply
    +    exponentiates and normalizes its inputs; this is a cheap and mostly
    +    coordinate-wise operation appropriate for algorithms like SVI. In
    +    contrast, ``biject_to(constraints.simplex)`` returns a
    +    :class:`~torch.distributions.transforms.StickBreakingTransform` that
+    bijects its input down to a one-fewer-dimensional space; this is a more
+    expensive, less numerically stable transform, but it is needed for algorithms
    +    like HMC.
    +
    +The ``biject_to`` and ``transform_to`` objects can be extended by user-defined
    +constraints and transforms using their ``.register()`` method either as a
    +function on singleton constraints::
    +
    +    transform_to.register(my_constraint, my_transform)
    +
    +or as a decorator on parameterized constraints::
    +
    +    @transform_to.register(MyConstraintClass)
    +    def my_factory(constraint):
    +        assert isinstance(constraint, MyConstraintClass)
    +        return MyTransform(constraint.param1, constraint.param2)
    +
    +You can create your own registry by creating a new :class:`ConstraintRegistry`
    +object.
    +"""
    +
    +import numbers
    +
    +from torch.distributions import constraints, transforms
    +
    +__all__ = [
    +    'ConstraintRegistry',
    +    'biject_to',
    +    'transform_to',
    +]
    +
    +
    +
+class ConstraintRegistry(object):
+    """
+    Registry to link constraints to transforms.
+    """
+    def __init__(self):
+        self._registry = {}
+        super(ConstraintRegistry, self).__init__()
+
+    def register(self, constraint, factory=None):
+        """
+        Registers a :class:`~torch.distributions.constraints.Constraint`
+        subclass in this registry. Usage::
+
+            @my_registry.register(MyConstraintClass)
+            def construct_transform(constraint):
+                assert isinstance(constraint, MyConstraint)
+                return MyTransform(constraint.arg_constraints)
+
+        Args:
+            constraint (subclass of :class:`~torch.distributions.constraints.Constraint`):
+                A subclass of :class:`~torch.distributions.constraints.Constraint`, or
+                a singleton object of the desired class.
+            factory (callable): A callable that inputs a constraint object and returns
+                a :class:`~torch.distributions.transforms.Transform` object.
+        """
+        # Support use as decorator.
+        if factory is None:
+            return lambda factory: self.register(constraint, factory)
+
+        # Support calling on singleton instances.
+        if isinstance(constraint, constraints.Constraint):
+            constraint = type(constraint)
+
+        if not isinstance(constraint, type) or not issubclass(constraint, constraints.Constraint):
+            raise TypeError('Expected constraint to be either a Constraint subclass or instance, '
+                            'but got {}'.format(constraint))
+
+        self._registry[constraint] = factory
+        return factory
+
+    def __call__(self, constraint):
+        """
+        Looks up a transform to constrained space, given a constraint object.
+        Usage::
+
+            constraint = Normal.arg_constraints['scale']
+            scale = transform_to(constraint)(torch.zeros(1))  # constrained
+            u = transform_to(constraint).inv(scale)           # unconstrained
+
+        Args:
+            constraint (:class:`~torch.distributions.constraints.Constraint`):
+                A constraint object.
+
+        Returns:
+            A :class:`~torch.distributions.transforms.Transform` object.
+
+        Raises:
+            `NotImplementedError` if no transform has been registered.
+        """
+        # Look up by Constraint subclass.
+        try:
+            factory = self._registry[type(constraint)]
+        except KeyError:
+            raise NotImplementedError(
+                'Cannot transform {} constraints'.format(type(constraint).__name__))
+        return factory(constraint)
+
+
+biject_to = ConstraintRegistry()
+transform_to = ConstraintRegistry()
+
+
+################################################################################
+# Registration Table
+################################################################################
+
+@biject_to.register(constraints.real)
+@biject_to.register(constraints.real_vector)
+@transform_to.register(constraints.real)
+@transform_to.register(constraints.real_vector)
+def _transform_to_real(constraint):
+    return transforms.identity_transform
+
+
+@biject_to.register(constraints.positive)
+@transform_to.register(constraints.positive)
+def _transform_to_positive(constraint):
+    return transforms.ExpTransform()
+
+
+@biject_to.register(constraints.greater_than)
+@biject_to.register(constraints.greater_than_eq)
+@transform_to.register(constraints.greater_than)
+@transform_to.register(constraints.greater_than_eq)
+def _transform_to_greater_than(constraint):
+    return transforms.ComposeTransform([transforms.ExpTransform(),
+                                        transforms.AffineTransform(constraint.lower_bound, 1)])
+
+
+@biject_to.register(constraints.less_than)
+@transform_to.register(constraints.less_than)
+def _transform_to_less_than(constraint):
+    return transforms.ComposeTransform([transforms.ExpTransform(),
+                                        transforms.AffineTransform(constraint.upper_bound, -1)])
+
+
+@biject_to.register(constraints.interval)
+@biject_to.register(constraints.half_open_interval)
+@transform_to.register(constraints.interval)
+@transform_to.register(constraints.half_open_interval)
+def _transform_to_interval(constraint):
+    # Handle the special case of the unit interval.
+    lower_is_0 = isinstance(constraint.lower_bound, numbers.Number) and constraint.lower_bound == 0
+    upper_is_1 = isinstance(constraint.upper_bound, numbers.Number) and constraint.upper_bound == 1
+    if lower_is_0 and upper_is_1:
+        return transforms.SigmoidTransform()
+
+    loc = constraint.lower_bound
+    scale = constraint.upper_bound - constraint.lower_bound
+    return transforms.ComposeTransform([transforms.SigmoidTransform(),
+                                        transforms.AffineTransform(loc, scale)])
+
+
+@biject_to.register(constraints.simplex)
+def _biject_to_simplex(constraint):
+    return transforms.StickBreakingTransform()
+
+
+@transform_to.register(constraints.simplex)
+def _transform_to_simplex(constraint):
+    return transforms.SoftmaxTransform()
+
+
+# TODO define a bijection for LowerCholeskyTransform
+@transform_to.register(constraints.lower_cholesky)
+def _transform_to_lower_cholesky(constraint):
+    return transforms.LowerCholeskyTransform()
+
+
+@biject_to.register(constraints.cat)
+def _biject_to_cat(constraint):
+    return transforms.CatTransform([biject_to(c)
+                                    for c in constraint.cseq],
+                                   constraint.dim,
+                                   constraint.lengths)
+
+
+@transform_to.register(constraints.cat)
+def _transform_to_cat(constraint):
+    return transforms.CatTransform([transform_to(c)
+                                    for c in constraint.cseq],
+                                   constraint.dim,
+                                   constraint.lengths)
+
+
+@biject_to.register(constraints.stack)
+def _biject_to_stack(constraint):
+    return transforms.StackTransform(
+        [biject_to(c)
+         for c in constraint.cseq], constraint.dim)
+
+
+@transform_to.register(constraints.stack)
+def _transform_to_stack(constraint):
+    return transforms.StackTransform(
+        [transform_to(c)
+         for c in constraint.cseq], constraint.dim)
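A minimal sketch of the registries in use; per the table above, ``constraints.positive`` maps to an ``ExpTransform``::

    import torch
    from torch.distributions import constraints, transform_to

    t = transform_to(constraints.positive)   # ExpTransform, per the registration table
    u = torch.tensor([-1.0, 0.0, 1.0])       # unconstrained values
    x = t(u)                                 # exp(u): strictly positive
    assert bool((x > 0).all())
    assert torch.allclose(t.inv(x), u)       # log round-trips back to u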
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/constraints.html b/docs/stable/_modules/torch/distributions/constraints.html
new file mode 100644
index 000000000000..901e7a218ac5
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/constraints.html
@@ -0,0 +1,879 @@

    Source code for torch.distributions.constraints

    +r"""
    +The following constraints are implemented:
    +
    +- ``constraints.boolean``
    +- ``constraints.cat``
    +- ``constraints.dependent``
    +- ``constraints.greater_than(lower_bound)``
    +- ``constraints.integer_interval(lower_bound, upper_bound)``
    +- ``constraints.interval(lower_bound, upper_bound)``
    +- ``constraints.lower_cholesky``
    +- ``constraints.lower_triangular``
    +- ``constraints.nonnegative_integer``
    +- ``constraints.positive``
    +- ``constraints.positive_definite``
    +- ``constraints.positive_integer``
    +- ``constraints.real``
    +- ``constraints.real_vector``
    +- ``constraints.simplex``
    +- ``constraints.stack``
    +- ``constraints.unit_interval``
    +"""
    +
    +import torch
    +
    +__all__ = [
    +    'Constraint',
    +    'boolean',
    +    'cat',
    +    'dependent',
    +    'dependent_property',
    +    'greater_than',
    +    'greater_than_eq',
    +    'integer_interval',
    +    'interval',
    +    'half_open_interval',
    +    'is_dependent',
    +    'less_than',
    +    'lower_cholesky',
    +    'lower_triangular',
    +    'nonnegative_integer',
    +    'positive',
    +    'positive_definite',
    +    'positive_integer',
    +    'real',
    +    'real_vector',
    +    'simplex',
    +    'stack',
    +    'unit_interval',
    +]
    +
    +
    +
+class Constraint(object):
+    """
+    Abstract base class for constraints.
+
+    A constraint object represents a region over which a variable is valid,
+    e.g. within which a variable can be optimized.
+    """
+    def check(self, value):
+        """
+        Returns a byte tensor of `sample_shape + batch_shape` indicating
+        whether each event in value satisfies this constraint.
+        """
+        raise NotImplementedError
+
+    def __repr__(self):
+        return self.__class__.__name__[1:] + '()'
+
+
+class _Dependent(Constraint):
+    """
+    Placeholder for variables whose support depends on other variables.
+    These variables obey no simple coordinate-wise constraints.
+    """
+    def check(self, x):
+        raise ValueError('Cannot determine validity of dependent constraint')
+
+
+def is_dependent(constraint):
+    return isinstance(constraint, _Dependent)
+
+
+class _DependentProperty(property, _Dependent):
+    """
+    Decorator that extends @property to act like a `Dependent` constraint when
+    called on a class and act like a property when called on an object.
+
+    Example::
+
+        class Uniform(Distribution):
+            def __init__(self, low, high):
+                self.low = low
+                self.high = high
+            @constraints.dependent_property
+            def support(self):
+                return constraints.interval(self.low, self.high)
+    """
+    pass
+
+
+class _Boolean(Constraint):
+    """
+    Constrain to the two values `{0, 1}`.
+    """
+    def check(self, value):
+        return (value == 0) | (value == 1)
+
+
+class _IntegerInterval(Constraint):
+    """
+    Constrain to an integer interval `[lower_bound, upper_bound]`.
+    """
+    def __init__(self, lower_bound, upper_bound):
+        self.lower_bound = lower_bound
+        self.upper_bound = upper_bound
+
+    def check(self, value):
+        return (value % 1 == 0) & (self.lower_bound <= value) & (value <= self.upper_bound)
+
+    def __repr__(self):
+        fmt_string = self.__class__.__name__[1:]
+        fmt_string += '(lower_bound={}, upper_bound={})'.format(self.lower_bound, self.upper_bound)
+        return fmt_string
+
+
+class _IntegerLessThan(Constraint):
+    """
+    Constrain to an integer interval `(-inf, upper_bound]`.
+    """
+    def __init__(self, upper_bound):
+        self.upper_bound = upper_bound
+
+    def check(self, value):
+        return (value % 1 == 0) & (value <= self.upper_bound)
+
+    def __repr__(self):
+        fmt_string = self.__class__.__name__[1:]
+        fmt_string += '(upper_bound={})'.format(self.upper_bound)
+        return fmt_string
+
+
+class _IntegerGreaterThan(Constraint):
+    """
+    Constrain to an integer interval `[lower_bound, inf)`.
+    """
+    def __init__(self, lower_bound):
+        self.lower_bound = lower_bound
+
+    def check(self, value):
+        return (value % 1 == 0) & (value >= self.lower_bound)
+
+    def __repr__(self):
+        fmt_string = self.__class__.__name__[1:]
+        fmt_string += '(lower_bound={})'.format(self.lower_bound)
+        return fmt_string
+
+
+class _Real(Constraint):
+    """
+    Trivially constrain to the extended real line `[-inf, inf]`.
+    """
+    def check(self, value):
+        return value == value  # False for NANs.
+
+
+class _GreaterThan(Constraint):
+    """
+    Constrain to a real half line `(lower_bound, inf]`.
+    """
+    def __init__(self, lower_bound):
+        self.lower_bound = lower_bound
+
+    def check(self, value):
+        return self.lower_bound < value
+
+    def __repr__(self):
+        fmt_string = self.__class__.__name__[1:]
+        fmt_string += '(lower_bound={})'.format(self.lower_bound)
+        return fmt_string
+
+
+class _GreaterThanEq(Constraint):
+    """
+    Constrain to a real half line `[lower_bound, inf)`.
+    """
+    def __init__(self, lower_bound):
+        self.lower_bound = lower_bound
+
+    def check(self, value):
+        return self.lower_bound <= value
+
+    def __repr__(self):
+        fmt_string = self.__class__.__name__[1:]
+        fmt_string += '(lower_bound={})'.format(self.lower_bound)
+        return fmt_string
+
+
+class _LessThan(Constraint):
+    """
+    Constrain to a real half line `[-inf, upper_bound)`.
+    """
+    def __init__(self, upper_bound):
+        self.upper_bound = upper_bound
+
+    def check(self, value):
+        return value < self.upper_bound
+
+    def __repr__(self):
+        fmt_string = self.__class__.__name__[1:]
+        fmt_string += '(upper_bound={})'.format(self.upper_bound)
+        return fmt_string
+
+
+class _Interval(Constraint):
+    """
+    Constrain to a real interval `[lower_bound, upper_bound]`.
+    """
+    def __init__(self, lower_bound, upper_bound):
+        self.lower_bound = lower_bound
+        self.upper_bound = upper_bound
+
+    def check(self, value):
+        return (self.lower_bound <= value) & (value <= self.upper_bound)
+
+    def __repr__(self):
+        fmt_string = self.__class__.__name__[1:]
+        fmt_string += '(lower_bound={}, upper_bound={})'.format(self.lower_bound, self.upper_bound)
+        return fmt_string
+
+
+class _HalfOpenInterval(Constraint):
+    """
+    Constrain to a real interval `[lower_bound, upper_bound)`.
+    """
+    def __init__(self, lower_bound, upper_bound):
+        self.lower_bound = lower_bound
+        self.upper_bound = upper_bound
+
+    def check(self, value):
+        return (self.lower_bound <= value) & (value < self.upper_bound)
+
+    def __repr__(self):
+        fmt_string = self.__class__.__name__[1:]
+        fmt_string += '(lower_bound={}, upper_bound={})'.format(self.lower_bound, self.upper_bound)
+        return fmt_string
+
+
+class _Simplex(Constraint):
+    """
+    Constrain to the unit simplex in the innermost (rightmost) dimension.
+    Specifically: `x >= 0` and `x.sum(-1) == 1`.
+    """
+    def check(self, value):
+        return torch.all(value >= 0, dim=-1) & ((value.sum(-1) - 1).abs() < 1e-6)
+
+
+class _LowerTriangular(Constraint):
+    """
+    Constrain to lower-triangular square matrices.
+    """
+    def check(self, value):
+        value_tril = value.tril()
+        return (value_tril == value).view(value.shape[:-2] + (-1,)).min(-1)[0]
+
+
+class _LowerCholesky(Constraint):
+    """
+    Constrain to lower-triangular square matrices with positive diagonals.
+    """
+    def check(self, value):
+        value_tril = value.tril()
+        lower_triangular = (value_tril == value).view(value.shape[:-2] + (-1,)).min(-1)[0]
+
+        positive_diagonal = (value.diagonal(dim1=-2, dim2=-1) > 0).min(-1)[0]
+        return lower_triangular & positive_diagonal
+
+
+class _PositiveDefinite(Constraint):
+    """
+    Constrain to positive-definite matrices.
+    """
+    def check(self, value):
+        matrix_shape = value.shape[-2:]
+        batch_shape = value.unsqueeze(0).shape[:-2]
+        # TODO: replace with batched linear algebra routine when one becomes available
+        # note that `symeig()` returns eigenvalues in ascending order
+        flattened_value = value.reshape((-1,) + matrix_shape)
+        return torch.stack([v.symeig(eigenvectors=False)[0][:1] > 0.0
+                            for v in flattened_value]).view(batch_shape)
+
+
+class _RealVector(Constraint):
+    """
+    Constrain to real-valued vectors. This is the same as `constraints.real`,
+    but additionally reduces across the `event_shape` dimension.
+    """
+    def check(self, value):
+        return torch.all(value == value, dim=-1)  # False for NANs.
+
+
+class _Cat(Constraint):
+    """
+    Constraint functor that applies a sequence of constraints
+    `cseq` at the submatrices at dimension `dim`,
+    each of size `lengths[dim]`, in a way compatible with :func:`torch.cat`.
+    """
+    def __init__(self, cseq, dim=0, lengths=None):
+        assert all(isinstance(c, Constraint) for c in cseq)
+        self.cseq = list(cseq)
+        if lengths is None:
+            lengths = [1] * len(self.cseq)
+        self.lengths = list(lengths)
+        assert len(self.lengths) == len(self.cseq)
+        self.dim = dim
+
+    def check(self, value):
+        assert -value.dim() <= self.dim < value.dim()
+        checks = []
+        start = 0
+        for constr, length in zip(self.cseq, self.lengths):
+            v = value.narrow(self.dim, start, length)
+            checks.append(constr.check(v))
+            start = start + length  # avoid += for jit compat
+        return torch.cat(checks, self.dim)
+
+
+class _Stack(Constraint):
+    """
+    Constraint functor that applies a sequence of constraints
+    `cseq` at the submatrices at dimension `dim`,
+    in a way compatible with :func:`torch.stack`.
+    """
+    def __init__(self, cseq, dim=0):
+        assert all(isinstance(c, Constraint) for c in cseq)
+        self.cseq = list(cseq)
+        self.dim = dim
+
+    def check(self, value):
+        assert -value.dim() <= self.dim < value.dim()
+        vs = [value.select(self.dim, i) for i in range(value.size(self.dim))]
+        return torch.stack([constr.check(v)
+                            for v, constr in zip(vs, self.cseq)], self.dim)
+
+
+# Public interface.
+dependent = _Dependent()
+dependent_property = _DependentProperty
+boolean = _Boolean()
+nonnegative_integer = _IntegerGreaterThan(0)
+positive_integer = _IntegerGreaterThan(1)
+integer_interval = _IntegerInterval
+real = _Real()
+real_vector = _RealVector()
+positive = _GreaterThan(0.)
+greater_than = _GreaterThan
+greater_than_eq = _GreaterThanEq
+less_than = _LessThan
+unit_interval = _Interval(0., 1.)
+interval = _Interval
+half_open_interval = _HalfOpenInterval
+simplex = _Simplex()
+lower_triangular = _LowerTriangular()
+lower_cholesky = _LowerCholesky()
+positive_definite = _PositiveDefinite()
+cat = _Cat
+stack = _Stack
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/dirichlet.html b/docs/stable/_modules/torch/distributions/dirichlet.html
new file mode 100644
index 000000000000..ea552c36d52b
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/dirichlet.html
@@ -0,0 +1,609 @@

    Source code for torch.distributions.dirichlet

    +import torch
    +from torch.autograd import Function
    +from torch.autograd.function import once_differentiable
    +from torch.distributions import constraints
    +from torch.distributions.exp_family import ExponentialFamily
    +
    +
    +# This helper is exposed for testing.
    +def _Dirichlet_backward(x, concentration, grad_output):
    +    total = concentration.sum(-1, True).expand_as(concentration)
    +    grad = torch._dirichlet_grad(x, concentration, total)
    +    return grad * (grad_output - (x * grad_output).sum(-1, True))
    +
    +
    +class _Dirichlet(Function):
    +    @staticmethod
    +    def forward(ctx, concentration):
    +        x = torch._sample_dirichlet(concentration)
    +        ctx.save_for_backward(x, concentration)
    +        return x
    +
    +    @staticmethod
    +    @once_differentiable
    +    def backward(ctx, grad_output):
    +        x, concentration = ctx.saved_tensors
    +        return _Dirichlet_backward(x, concentration, grad_output)
    +
    +
    +
+class Dirichlet(ExponentialFamily):
+    r"""
+    Creates a Dirichlet distribution parameterized by concentration :attr:`concentration`.
+
+    Example::
+
+        >>> m = Dirichlet(torch.tensor([0.5, 0.5]))
+        >>> m.sample()  # Dirichlet distributed with concentration [0.5, 0.5]
+        tensor([ 0.1046,  0.8954])
+
+    Args:
+        concentration (Tensor): concentration parameter of the distribution
+            (often referred to as alpha)
+    """
+    arg_constraints = {'concentration': constraints.positive}
+    support = constraints.simplex
+    has_rsample = True
+
+    def __init__(self, concentration, validate_args=None):
+        if concentration.dim() < 1:
+            raise ValueError("`concentration` parameter must be at least one-dimensional.")
+        self.concentration = concentration
+        batch_shape, event_shape = concentration.shape[:-1], concentration.shape[-1:]
+        super(Dirichlet, self).__init__(batch_shape, event_shape, validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(Dirichlet, _instance)
+        batch_shape = torch.Size(batch_shape)
+        new.concentration = self.concentration.expand(batch_shape + self.event_shape)
+        super(Dirichlet, new).__init__(batch_shape, self.event_shape, validate_args=False)
+        new._validate_args = self._validate_args
+        return new
+
+    def rsample(self, sample_shape=()):
+        shape = self._extended_shape(sample_shape)
+        concentration = self.concentration.expand(shape)
+        return _Dirichlet.apply(concentration)
+
+    def log_prob(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        return ((torch.log(value) * (self.concentration - 1.0)).sum(-1) +
+                torch.lgamma(self.concentration.sum(-1)) -
+                torch.lgamma(self.concentration).sum(-1))
+
+    @property
+    def mean(self):
+        return self.concentration / self.concentration.sum(-1, True)
+
+    @property
+    def variance(self):
+        con0 = self.concentration.sum(-1, True)
+        return self.concentration * (con0 - self.concentration) / (con0.pow(2) * (con0 + 1))
+
+    def entropy(self):
+        k = self.concentration.size(-1)
+        a0 = self.concentration.sum(-1)
+        return (torch.lgamma(self.concentration).sum(-1) - torch.lgamma(a0) -
+                (k - a0) * torch.digamma(a0) -
+                ((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1))
+
+    @property
+    def _natural_params(self):
+        return (self.concentration, )
+
+    def _log_normalizer(self, x):
+        return x.lgamma().sum(-1) - torch.lgamma(x.sum(-1))
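A minimal sketch: samples land on the simplex and the mean is the normalized concentration (the concentration values are chosen for illustration)::

    import torch
    from torch.distributions import Dirichlet

    m = Dirichlet(torch.tensor([2.0, 3.0, 5.0]))
    x = m.rsample((4,))                                # reparameterized (has_rsample = True)
    assert torch.allclose(x.sum(-1), torch.ones(4))    # rows lie on the simplex
    print(m.mean)                                      # tensor([0.2000, 0.3000, 0.5000])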
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/distribution.html b/docs/stable/_modules/torch/distributions/distribution.html
new file mode 100644
index 000000000000..e6140218bda4
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/distribution.html
@@ -0,0 +1,781 @@

    Source code for torch.distributions.distribution

    +import torch
    +import warnings
    +from torch.distributions import constraints
    +from torch.distributions.utils import lazy_property
    +
    +
    +
+class Distribution(object):
+    r"""
+    Distribution is the abstract base class for probability distributions.
+    """
+
+    has_rsample = False
+    has_enumerate_support = False
+    _validate_args = False
+    support = None
+    arg_constraints = {}
+
+    @staticmethod
+    def set_default_validate_args(value):
+        if value not in [True, False]:
+            raise ValueError
+        Distribution._validate_args = value
+
+    def __init__(self, batch_shape=torch.Size(), event_shape=torch.Size(), validate_args=None):
+        self._batch_shape = batch_shape
+        self._event_shape = event_shape
+        if validate_args is not None:
+            self._validate_args = validate_args
+        if self._validate_args:
+            for param, constraint in self.arg_constraints.items():
+                if constraints.is_dependent(constraint):
+                    continue  # skip constraints that cannot be checked
+                if param not in self.__dict__ and isinstance(getattr(type(self), param), lazy_property):
+                    continue  # skip checking lazily-constructed args
+                if not constraint.check(getattr(self, param)).all():
+                    raise ValueError("The parameter {} has invalid values".format(param))
+        super(Distribution, self).__init__()
+
+    def expand(self, batch_shape, _instance=None):
+        """
+        Returns a new distribution instance (or populates an existing instance
+        provided by a derived class) with batch dimensions expanded to
+        `batch_shape`. This method calls :class:`~torch.Tensor.expand` on
+        the distribution's parameters. As such, this does not allocate new
+        memory for the expanded distribution instance. Additionally,
+        this does not repeat any args checking or parameter broadcasting in
+        `__init__`, when an instance is first created.
+
+        Args:
+            batch_shape (torch.Size): the desired expanded size.
+            _instance: new instance provided by subclasses that
+                need to override `.expand`.
+
+        Returns:
+            New distribution instance with batch dimensions expanded to
+            `batch_shape`.
+        """
+        raise NotImplementedError
+
+    @property
+    def batch_shape(self):
+        """
+        Returns the shape over which parameters are batched.
+        """
+        return self._batch_shape
+
+    @property
+    def event_shape(self):
+        """
+        Returns the shape of a single sample (without batching).
+        """
+        return self._event_shape
+
+    @property
+    def arg_constraints(self):
+        """
+        Returns a dictionary from argument names to
+        :class:`~torch.distributions.constraints.Constraint` objects that
+        should be satisfied by each argument of this distribution. Args that
+        are not tensors need not appear in this dict.
+        """
+        raise NotImplementedError
+
+    @property
+    def support(self):
+        """
+        Returns a :class:`~torch.distributions.constraints.Constraint` object
+        representing this distribution's support.
+        """
+        raise NotImplementedError
+
+    @property
+    def mean(self):
+        """
+        Returns the mean of the distribution.
+        """
+        raise NotImplementedError
+
+    @property
+    def variance(self):
+        """
+        Returns the variance of the distribution.
+        """
+        raise NotImplementedError
+
+    @property
+    def stddev(self):
+        """
+        Returns the standard deviation of the distribution.
+        """
+        return self.variance.sqrt()
+
+    def sample(self, sample_shape=torch.Size()):
+        """
+        Generates a sample_shape shaped sample or sample_shape shaped batch of
+        samples if the distribution parameters are batched.
+        """
+        with torch.no_grad():
+            return self.rsample(sample_shape)
+
+    def rsample(self, sample_shape=torch.Size()):
+        """
+        Generates a sample_shape shaped reparameterized sample or sample_shape
+        shaped batch of reparameterized samples if the distribution parameters
+        are batched.
+        """
+        raise NotImplementedError
+
+    def sample_n(self, n):
+        """
+        Generates n samples or n batches of samples if the distribution
+        parameters are batched.
+        """
+        warnings.warn('sample_n will be deprecated. Use .sample((n,)) instead', UserWarning)
+        return self.sample(torch.Size((n,)))
+
+    def log_prob(self, value):
+        """
+        Returns the log of the probability density/mass function evaluated at
+        `value`.
+
+        Args:
+            value (Tensor):
+        """
+        raise NotImplementedError
+
+    def cdf(self, value):
+        """
+        Returns the cumulative density/mass function evaluated at
+        `value`.
+
+        Args:
+            value (Tensor):
+        """
+        raise NotImplementedError
+
+    def icdf(self, value):
+        """
+        Returns the inverse cumulative density/mass function evaluated at
+        `value`.
+
+        Args:
+            value (Tensor):
+        """
+        raise NotImplementedError
+
+    def enumerate_support(self, expand=True):
+        """
+        Returns tensor containing all values supported by a discrete
+        distribution. The result will enumerate over dimension 0, so the shape
+        of the result will be `(cardinality,) + batch_shape + event_shape`
+        (where `event_shape = ()` for univariate distributions).
+
+        Note that this enumerates over all batched tensors in lock-step
+        `[[0, 0], [1, 1], ...]`. With `expand=False`, enumeration happens
+        along dim 0, but with the remaining batch dimensions being
+        singleton dimensions, `[[0], [1], ...]`.
+
+        To iterate over the full Cartesian product use
+        `itertools.product(m.enumerate_support())`.
+
+        Args:
+            expand (bool): whether to expand the support over the
+                batch dims to match the distribution's `batch_shape`.
+
+        Returns:
+            Tensor iterating over dimension 0.
+        """
+        raise NotImplementedError
+
+    def entropy(self):
+        """
+        Returns entropy of distribution, batched over batch_shape.
+
+        Returns:
+            Tensor of shape batch_shape.
+        """
+        raise NotImplementedError
+
+    def perplexity(self):
+        """
+        Returns perplexity of distribution, batched over batch_shape.
+
+        Returns:
+            Tensor of shape batch_shape.
+        """
+        return torch.exp(self.entropy())
+
+    def _extended_shape(self, sample_shape=torch.Size()):
+        """
+        Returns the size of the sample returned by the distribution, given
+        a `sample_shape`. Note that the batch and event shapes of a distribution
+        instance are fixed at the time of construction. If this is empty, the
+        returned shape is upcast to (1,).
+
+        Args:
+            sample_shape (torch.Size): the size of the sample to be drawn.
+        """
+        if not isinstance(sample_shape, torch.Size):
+            sample_shape = torch.Size(sample_shape)
+        return sample_shape + self._batch_shape + self._event_shape
+
+    def _validate_sample(self, value):
+        """
+        Argument validation for distribution methods such as `log_prob`,
+        `cdf` and `icdf`. The rightmost dimensions of a value to be
+        scored via these methods must agree with the distribution's batch
+        and event shapes.
+
+        Args:
+            value (Tensor): the tensor whose log probability is to be
+                computed by the `log_prob` method.
+        Raises:
+            ValueError: when the rightmost dimensions of `value` do not match the
+                distribution's batch and event shapes.
+        """
+        if not isinstance(value, torch.Tensor):
+            raise ValueError('The value argument to log_prob must be a Tensor')
+
+        event_dim_start = len(value.size()) - len(self._event_shape)
+        if value.size()[event_dim_start:] != self._event_shape:
+            raise ValueError('The right-most size of value must match event_shape: {} vs {}.'.
+                             format(value.size(), self._event_shape))
+
+        actual_shape = value.size()
+        expected_shape = self._batch_shape + self._event_shape
+        for i, j in zip(reversed(actual_shape), reversed(expected_shape)):
+            if i != 1 and j != 1 and i != j:
+                raise ValueError('Value is not broadcastable with batch_shape+event_shape: {} vs {}.'.
+                                 format(actual_shape, expected_shape))
+
+        if not self.support.check(value).all():
+            raise ValueError('The value argument must be within the support')
+
+    def _get_checked_instance(self, cls, _instance=None):
+        if _instance is None and type(self).__init__ != cls.__init__:
+            raise NotImplementedError("Subclass {} of {} that defines a custom __init__ method "
+                                      "must also define a custom .expand() method.".
+                                      format(self.__class__.__name__, cls.__name__))
+        return self.__new__(type(self)) if _instance is None else _instance
+
+    def __repr__(self):
+        param_names = [k for k, _ in self.arg_constraints.items() if k in self.__dict__]
+        args_string = ', '.join(['{}: {}'.format(p, self.__dict__[p]
+                                if self.__dict__[p].numel() == 1
+                                else self.__dict__[p].size()) for p in param_names])
+        return self.__class__.__name__ + '(' + args_string + ')'
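To illustrate the contract above, a toy subclass wiring up the required hooks; the Laplace(0, 1) example is an assumption for illustration, not part of the library::

    import math
    import torch
    from torch.distributions import constraints
    from torch.distributions.distribution import Distribution

    class UnitLaplace(Distribution):
        """Laplace(0, 1), sampled by inverting its CDF."""
        support = constraints.real
        arg_constraints = {}

        def __init__(self):
            super(UnitLaplace, self).__init__(batch_shape=torch.Size())

        def sample(self, sample_shape=torch.Size()):
            shape = self._extended_shape(sample_shape)
            u = torch.rand(shape) - 0.5                   # uniform on (-0.5, 0.5)
            return -u.sign() * torch.log1p(-2 * u.abs())  # inverse CDF

        def log_prob(self, value):
            return -value.abs() - math.log(2.0)

    m = UnitLaplace()
    print(m.log_prob(m.sample((3,))))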
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/exp_family.html b/docs/stable/_modules/torch/distributions/exp_family.html
new file mode 100644
index 000000000000..120b44752599
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/exp_family.html
@@ -0,0 +1,574 @@

    Source code for torch.distributions.exp_family

    +import torch
    +from torch.distributions.distribution import Distribution
    +
    +
    +
+class ExponentialFamily(Distribution):
+    r"""
+    ExponentialFamily is the abstract base class for probability distributions belonging to an
+    exponential family, whose probability mass/density function has the form defined below
+
+    .. math::
+
+        p_{F}(x; \theta) = \exp(\langle t(x), \theta\rangle - F(\theta) + k(x))
+
+    where :math:`\theta` denotes the natural parameters, :math:`t(x)` denotes the sufficient statistic,
+    :math:`F(\theta)` is the log normalizer function for a given family and :math:`k(x)` is the carrier
+    measure.
+
+    Note:
+        This class is an intermediary between the `Distribution` class and distributions which belong
+        to an exponential family, mainly to check the correctness of the `.entropy()` and analytic KL
+        divergence methods. We use this class to compute the entropy and KL divergence using the AD
+        framework and Bregman divergences (courtesy of: Frank Nielsen and Richard Nock, Entropies and
+        Cross-entropies of Exponential Families).
+    """
+
+    @property
+    def _natural_params(self):
+        """
+        Abstract method for natural parameters. Returns a tuple of Tensors based
+        on the distribution.
+        """
+        raise NotImplementedError
+
+    def _log_normalizer(self, *natural_params):
+        """
+        Abstract method for the log normalizer function. Returns the log normalizer based on
+        the distribution and input.
+        """
+        raise NotImplementedError
+
+    @property
+    def _mean_carrier_measure(self):
+        """
+        Abstract method for the expected carrier measure, which is required for computing
+        entropy.
+        """
+        raise NotImplementedError
+
+    def entropy(self):
+        """
+        Method to compute the entropy using the Bregman divergence of the log normalizer.
+        """
+        result = -self._mean_carrier_measure
+        nparams = [p.detach().requires_grad_() for p in self._natural_params]
+        lg_normal = self._log_normalizer(*nparams)
+        gradients = torch.autograd.grad(lg_normal.sum(), nparams, create_graph=True)
+        result += lg_normal.clone()
+        for np, g in zip(nparams, gradients):
+            result -= np * g
+        return result
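In the docstring's notation, the ``entropy`` loop above evaluates the standard exponential-family identity, using :math:`\mathbb{E}[t(x)] = \nabla F(\theta)`:

.. math::

    H(p_F) = F(\theta) - \langle \theta, \nabla F(\theta) \rangle - \mathbb{E}_{p_F}[k(x)]

``torch.autograd.grad`` supplies :math:`\nabla F(\theta)`, so any subclass that defines ``_natural_params``, ``_log_normalizer`` and ``_mean_carrier_measure`` inherits ``entropy()`` for free.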
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/exponential.html b/docs/stable/_modules/torch/distributions/exponential.html
new file mode 100644
index 000000000000..0cf9a5bd53cb
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/exponential.html
@@ -0,0 +1,597 @@

    Source code for torch.distributions.exponential

    +from numbers import Number
    +
    +import torch
    +from torch.distributions import constraints
    +from torch.distributions.exp_family import ExponentialFamily
    +from torch.distributions.utils import broadcast_all
    +
    +
    +
+class Exponential(ExponentialFamily):
+    r"""
+    Creates an Exponential distribution parameterized by :attr:`rate`.
+
+    Example::
+
+        >>> m = Exponential(torch.tensor([1.0]))
+        >>> m.sample()  # Exponential distributed with rate=1
+        tensor([ 0.1046])
+
+    Args:
+        rate (float or Tensor): rate = 1 / scale of the distribution
+    """
+    arg_constraints = {'rate': constraints.positive}
+    support = constraints.positive
+    has_rsample = True
+    _mean_carrier_measure = 0
+
+    @property
+    def mean(self):
+        return self.rate.reciprocal()
+
+    @property
+    def stddev(self):
+        return self.rate.reciprocal()
+
+    @property
+    def variance(self):
+        return self.rate.pow(-2)
+
+    def __init__(self, rate, validate_args=None):
+        self.rate, = broadcast_all(rate)
+        batch_shape = torch.Size() if isinstance(rate, Number) else self.rate.size()
+        super(Exponential, self).__init__(batch_shape, validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(Exponential, _instance)
+        batch_shape = torch.Size(batch_shape)
+        new.rate = self.rate.expand(batch_shape)
+        super(Exponential, new).__init__(batch_shape, validate_args=False)
+        new._validate_args = self._validate_args
+        return new
+
+    def rsample(self, sample_shape=torch.Size()):
+        shape = self._extended_shape(sample_shape)
+        if torch._C._get_tracing_state():
+            # [JIT WORKAROUND] lack of support for ._exponential()
+            u = torch.rand(shape, dtype=self.rate.dtype, device=self.rate.device)
+            return -(-u).log1p() / self.rate
+        return self.rate.new(shape).exponential_() / self.rate
+
+    def log_prob(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        return self.rate.log() - self.rate * value
+
+    def cdf(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        return 1 - torch.exp(-self.rate * value)
+
+    def icdf(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        return -torch.log(1 - value) / self.rate
+
+    def entropy(self):
+        return 1.0 - torch.log(self.rate)
+
+    @property
+    def _natural_params(self):
+        return (-self.rate, )
+
+    def _log_normalizer(self, x):
+        return -torch.log(-x)
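The traced branch of ``rsample`` above is inverse-CDF sampling: for :math:`U \sim \mathrm{Uniform}(0, 1)`, ``-(-u).log1p() / rate`` equals ``icdf(u)`` with better numerical behavior near zero. A quick sketch::

    import torch
    from torch.distributions import Exponential

    m = Exponential(torch.tensor([2.0]))
    u = torch.rand(1000)
    x = -(-u).log1p() / m.rate           # same formula as the [JIT WORKAROUND] branch
    assert torch.allclose(x, m.icdf(u))  # -log(1 - u) / rate, written stably
    print(x.mean())                      # ~0.5 = 1 / rate, up to sampling noise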
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/fishersnedecor.html b/docs/stable/_modules/torch/distributions/fishersnedecor.html
new file mode 100644
index 000000000000..f28d66ad8547
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/fishersnedecor.html
@@ -0,0 +1,597 @@

    Source code for torch.distributions.fishersnedecor

    +from numbers import Number
    +import torch
    +from torch._six import nan
    +from torch.distributions import constraints
    +from torch.distributions.distribution import Distribution
    +from torch.distributions.gamma import Gamma
    +from torch.distributions.utils import broadcast_all
    +
    +
    +
+class FisherSnedecor(Distribution):
+    r"""
+    Creates a Fisher-Snedecor distribution parameterized by :attr:`df1` and :attr:`df2`.
+
+    Example::
+
+        >>> m = FisherSnedecor(torch.tensor([1.0]), torch.tensor([2.0]))
+        >>> m.sample()  # Fisher-Snedecor-distributed with df1=1 and df2=2
+        tensor([ 0.2453])
+
+    Args:
+        df1 (float or Tensor): degrees of freedom parameter 1
+        df2 (float or Tensor): degrees of freedom parameter 2
+    """
+    arg_constraints = {'df1': constraints.positive, 'df2': constraints.positive}
+    support = constraints.positive
+    has_rsample = True
+
+    def __init__(self, df1, df2, validate_args=None):
+        self.df1, self.df2 = broadcast_all(df1, df2)
+        self._gamma1 = Gamma(self.df1 * 0.5, self.df1)
+        self._gamma2 = Gamma(self.df2 * 0.5, self.df2)
+
+        if isinstance(df1, Number) and isinstance(df2, Number):
+            batch_shape = torch.Size()
+        else:
+            batch_shape = self.df1.size()
+        super(FisherSnedecor, self).__init__(batch_shape, validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(FisherSnedecor, _instance)
+        batch_shape = torch.Size(batch_shape)
+        new.df1 = self.df1.expand(batch_shape)
+        new.df2 = self.df2.expand(batch_shape)
+        new._gamma1 = self._gamma1.expand(batch_shape)
+        new._gamma2 = self._gamma2.expand(batch_shape)
+        super(FisherSnedecor, new).__init__(batch_shape, validate_args=False)
+        new._validate_args = self._validate_args
+        return new
+
+    @property
+    def mean(self):
+        df2 = self.df2.clone()
+        df2[df2 <= 2] = nan
+        return df2 / (df2 - 2)
+
+    @property
+    def variance(self):
+        df2 = self.df2.clone()
+        df2[df2 <= 4] = nan
+        return 2 * df2.pow(2) * (self.df1 + df2 - 2) / (self.df1 * (df2 - 2).pow(2) * (df2 - 4))
+
+    def rsample(self, sample_shape=torch.Size(())):
+        shape = self._extended_shape(sample_shape)
+        # X1 ~ Gamma(df1 / 2, 1 / df1), X2 ~ Gamma(df2 / 2, 1 / df2)
+        # Y = df2 * df1 * X1 / (df1 * df2 * X2) = X1 / X2 ~ F(df1, df2)
+        X1 = self._gamma1.rsample(sample_shape).view(shape)
+        X2 = self._gamma2.rsample(sample_shape).view(shape)
+        tiny = torch.finfo(X2.dtype).tiny
+        X2.clamp_(min=tiny)
+        Y = X1 / X2
+        Y.clamp_(min=tiny)
+        return Y
+
+    def log_prob(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        ct1 = self.df1 * 0.5
+        ct2 = self.df2 * 0.5
+        ct3 = self.df1 / self.df2
+        t1 = (ct1 + ct2).lgamma() - ct1.lgamma() - ct2.lgamma()
+        t2 = ct1 * ct3.log() + (ct1 - 1) * torch.log(value)
+        t3 = (ct1 + ct2) * torch.log1p(ct3 * value)
+        return t1 + t2 - t3
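A Monte Carlo sanity check of the ``mean`` property (df values are arbitrary; for ``df2 > 2`` the mean is ``df2 / (df2 - 2)``)::

    import torch
    from torch.distributions import FisherSnedecor

    m = FisherSnedecor(torch.tensor([5.0]), torch.tensor([10.0]))
    x = m.sample((100000,))
    print(x.mean(), m.mean)   # empirical ~1.25 vs exact tensor([1.2500])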
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/gamma.html b/docs/stable/_modules/torch/distributions/gamma.html
new file mode 100644
index 000000000000..dfa1d8217d97
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/gamma.html
@@ -0,0 +1,595 @@

    Source code for torch.distributions.gamma

    +from numbers import Number
    +
    +import torch
    +from torch.distributions import constraints
    +from torch.distributions.exp_family import ExponentialFamily
    +from torch.distributions.utils import broadcast_all
    +
    +
    +def _standard_gamma(concentration):
    +    return torch._standard_gamma(concentration)
    +
    +
    +
+class Gamma(ExponentialFamily):
+    r"""
+    Creates a Gamma distribution parameterized by shape :attr:`concentration` and :attr:`rate`.
+
+    Example::
+
+        >>> m = Gamma(torch.tensor([1.0]), torch.tensor([1.0]))
+        >>> m.sample()  # Gamma distributed with concentration=1 and rate=1
+        tensor([ 0.1046])
+
+    Args:
+        concentration (float or Tensor): shape parameter of the distribution
+            (often referred to as alpha)
+        rate (float or Tensor): rate = 1 / scale of the distribution
+            (often referred to as beta)
+    """
+    arg_constraints = {'concentration': constraints.positive, 'rate': constraints.positive}
+    support = constraints.positive
+    has_rsample = True
+    _mean_carrier_measure = 0
+
+    @property
+    def mean(self):
+        return self.concentration / self.rate
+
+    @property
+    def variance(self):
+        return self.concentration / self.rate.pow(2)
+
+    def __init__(self, concentration, rate, validate_args=None):
+        self.concentration, self.rate = broadcast_all(concentration, rate)
+        if isinstance(concentration, Number) and isinstance(rate, Number):
+            batch_shape = torch.Size()
+        else:
+            batch_shape = self.concentration.size()
+        super(Gamma, self).__init__(batch_shape, validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(Gamma, _instance)
+        batch_shape = torch.Size(batch_shape)
+        new.concentration = self.concentration.expand(batch_shape)
+        new.rate = self.rate.expand(batch_shape)
+        super(Gamma, new).__init__(batch_shape, validate_args=False)
+        new._validate_args = self._validate_args
+        return new
+
+    def rsample(self, sample_shape=torch.Size()):
+        shape = self._extended_shape(sample_shape)
+        value = _standard_gamma(self.concentration.expand(shape)) / self.rate.expand(shape)
+        value.detach().clamp_(min=torch.finfo(value.dtype).tiny)  # do not record in autograd graph
+        return value
+
+    def log_prob(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        return (self.concentration * torch.log(self.rate) +
+                (self.concentration - 1) * torch.log(value) -
+                self.rate * value - torch.lgamma(self.concentration))
+
+    def entropy(self):
+        return (self.concentration - torch.log(self.rate) + torch.lgamma(self.concentration) +
+                (1.0 - self.concentration) * torch.digamma(self.concentration))
+
+    @property
+    def _natural_params(self):
+        return (self.concentration - 1, -self.rate)
+
+    def _log_normalizer(self, x, y):
+        return torch.lgamma(x + 1) + (x + 1) * torch.log(-y.reciprocal())
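Since ``has_rsample = True``, samples from ``rsample`` are differentiable in both parameters (a minimal pathwise-gradient sketch)::

    import torch
    from torch.distributions import Gamma

    concentration = torch.tensor([2.0], requires_grad=True)
    rate = torch.tensor([3.0], requires_grad=True)
    loss = Gamma(concentration, rate).rsample((256,)).mean()
    loss.backward()
    print(concentration.grad, rate.grad)   # both non-None: gradients flow through the sample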
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/geometric.html b/docs/stable/_modules/torch/distributions/geometric.html
new file mode 100644
index 000000000000..bcde782a8b55
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/geometric.html
@@ -0,0 +1,610 @@

    Source code for torch.distributions.geometric

    +from numbers import Number
    +
    +import torch
    +from torch.distributions import constraints
    +from torch.distributions.distribution import Distribution
    +from torch.distributions.utils import broadcast_all, probs_to_logits, logits_to_probs, lazy_property
    +from torch.nn.functional import binary_cross_entropy_with_logits
    +
    +
    +
+class Geometric(Distribution):
+    r"""
+    Creates a Geometric distribution parameterized by :attr:`probs`,
+    where :attr:`probs` is the probability of success of Bernoulli trials.
+    It represents the probability that in :math:`k + 1` Bernoulli trials, the
+    first :math:`k` trials fail before a success is seen.
+
+    Samples are non-negative integers [0, :math:`\infty`).
+
+    Example::
+
+        >>> m = Geometric(torch.tensor([0.3]))
+        >>> m.sample()  # underlying Bernoulli has 30% chance 1; 70% chance 0
+        tensor([ 2.])
+
+    Args:
+        probs (Number, Tensor): the probability of sampling `1`. Must be in range (0, 1]
+        logits (Number, Tensor): the log-odds of sampling `1`.
+    """
+    arg_constraints = {'probs': constraints.unit_interval,
+                       'logits': constraints.real}
+    support = constraints.nonnegative_integer
+
+    def __init__(self, probs=None, logits=None, validate_args=None):
+        if (probs is None) == (logits is None):
+            raise ValueError("Either `probs` or `logits` must be specified, but not both.")
+        if probs is not None:
+            self.probs, = broadcast_all(probs)
+            if not self.probs.gt(0).all():
+                raise ValueError('All elements of probs must be greater than 0')
+        else:
+            self.logits, = broadcast_all(logits)
+        probs_or_logits = probs if probs is not None else logits
+        if isinstance(probs_or_logits, Number):
+            batch_shape = torch.Size()
+        else:
+            batch_shape = probs_or_logits.size()
+        super(Geometric, self).__init__(batch_shape, validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(Geometric, _instance)
+        batch_shape = torch.Size(batch_shape)
+        if 'probs' in self.__dict__:
+            new.probs = self.probs.expand(batch_shape)
+        if 'logits' in self.__dict__:
+            new.logits = self.logits.expand(batch_shape)
+        super(Geometric, new).__init__(batch_shape, validate_args=False)
+        new._validate_args = self._validate_args
+        return new
+
+    @property
+    def mean(self):
+        return 1. / self.probs - 1.
+
+    @property
+    def variance(self):
+        return (1. / self.probs - 1.) / self.probs
+
+    @lazy_property
+    def logits(self):
+        return probs_to_logits(self.probs, is_binary=True)
+
+    @lazy_property
+    def probs(self):
+        return logits_to_probs(self.logits, is_binary=True)
+
+    def sample(self, sample_shape=torch.Size()):
+        shape = self._extended_shape(sample_shape)
+        tiny = torch.finfo(self.probs.dtype).tiny
+        with torch.no_grad():
+            if torch._C._get_tracing_state():
+                # [JIT WORKAROUND] lack of support for .uniform_()
+                u = torch.rand(shape, dtype=self.probs.dtype, device=self.probs.device)
+                u = u.clamp(min=tiny)
+            else:
+                u = self.probs.new(shape).uniform_(tiny, 1)
+            return (u.log() / (-self.probs).log1p()).floor()
+
+    def log_prob(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        value, probs = broadcast_all(value, self.probs.clone())
+        probs[(probs == 1) & (value == 0)] = 0
+        return value * (-probs).log1p() + self.probs.log()
+
+    def entropy(self):
+        return binary_cross_entropy_with_logits(self.logits, self.probs, reduction='none') / self.probs
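`sample` above is inverse-CDF sampling: for `u ~ Uniform(0, 1]`, `floor(log(u) / log1p(-p))`
is Geometric(`p`). A minimal sanity check of that identity, assuming only the public
`torch.distributions` API::

    import torch
    from torch.distributions import Geometric

    p = torch.tensor([0.3])
    m = Geometric(p)
    samples = m.sample((100000,))
    # The empirical mean should approach 1 / p - 1 = 2.33... for p = 0.3
    print(samples.mean().item(), m.mean.item())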
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/gumbel.html b/docs/stable/_modules/torch/distributions/gumbel.html
new file mode 100644
index 000000000000..3ce77a7514de
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/gumbel.html
@@ -0,0 +1,582 @@
+ torch.distributions.gumbel — PyTorch master documentation

    Source code for torch.distributions.gumbel

    +from numbers import Number
    +import math
    +import torch
    +from torch.distributions import constraints
    +from torch.distributions.uniform import Uniform
    +from torch.distributions.transformed_distribution import TransformedDistribution
    +from torch.distributions.transforms import AffineTransform, ExpTransform
    +from torch.distributions.utils import broadcast_all
    +
    +euler_constant = 0.57721566490153286060  # Euler Mascheroni Constant
    +
    +
    +
+class Gumbel(TransformedDistribution):
+    r"""
+    Samples from a Gumbel Distribution.
+
+    Examples::
+
+        >>> m = Gumbel(torch.tensor([1.0]), torch.tensor([2.0]))
+        >>> m.sample()  # sample from Gumbel distribution with loc=1, scale=2
+        tensor([ 1.0124])
+
+    Args:
+        loc (float or Tensor): Location parameter of the distribution
+        scale (float or Tensor): Scale parameter of the distribution
+    """
+    arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
+    support = constraints.real
+
+    def __init__(self, loc, scale, validate_args=None):
+        self.loc, self.scale = broadcast_all(loc, scale)
+        finfo = torch.finfo(self.loc.dtype)
+        if isinstance(loc, Number) and isinstance(scale, Number):
+            base_dist = Uniform(finfo.tiny, 1 - finfo.eps)
+        else:
+            base_dist = Uniform(torch.full_like(self.loc, finfo.tiny),
+                                torch.full_like(self.loc, 1 - finfo.eps))
+        transforms = [ExpTransform().inv, AffineTransform(loc=0, scale=-torch.ones_like(self.scale)),
+                      ExpTransform().inv, AffineTransform(loc=loc, scale=-self.scale)]
+        super(Gumbel, self).__init__(base_dist, transforms, validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(Gumbel, _instance)
+        new.loc = self.loc.expand(batch_shape)
+        new.scale = self.scale.expand(batch_shape)
+        return super(Gumbel, self).expand(batch_shape, _instance=new)
+
+    # Explicitly defining the log probability function for Gumbel due to precision issues
+    def log_prob(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        y = (self.loc - value) / self.scale
+        return (y - y.exp()) - self.scale.log()
+
+    @property
+    def mean(self):
+        return self.loc + self.scale * euler_constant
+
+    @property
+    def stddev(self):
+        return (math.pi / math.sqrt(6)) * self.scale
+
+    @property
+    def variance(self):
+        return self.stddev.pow(2)
+
+    def entropy(self):
+        return self.scale.log() + (1 + euler_constant)
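The transform chain in `__init__` composes to the standard inverse-CDF sampler: for
`U ~ Uniform(0, 1)`, `loc - scale * log(-log(U))` is Gumbel(`loc`, `scale`). A short check of
that composition, assuming only the public API::

    import torch
    from torch.distributions import Gumbel

    loc, scale = torch.tensor(1.0), torch.tensor(2.0)
    u = torch.rand(100000).clamp(min=1e-12)
    manual = loc - scale * (-u.log()).log()
    m = Gumbel(loc, scale)
    # Both empirical means should approach loc + scale * euler_gamma ~ 2.15
    print(manual.mean().item(), m.sample((100000,)).mean().item(), m.mean.item())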
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/half_cauchy.html b/docs/stable/_modules/torch/distributions/half_cauchy.html
new file mode 100644
index 000000000000..54b7a3bcd3a0
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/half_cauchy.html
@@ -0,0 +1,577 @@
+ torch.distributions.half_cauchy — PyTorch master documentation

    Source code for torch.distributions.half_cauchy

    +import math
    +
    +from torch._six import inf
    +from torch.distributions import constraints
    +from torch.distributions.transforms import AbsTransform
    +from torch.distributions.cauchy import Cauchy
    +from torch.distributions.transformed_distribution import TransformedDistribution
    +
    +
    +
+class HalfCauchy(TransformedDistribution):
+    r"""
+    Creates a half-Cauchy distribution parameterized by `scale` where::
+
+        X ~ Cauchy(0, scale)
+        Y = |X| ~ HalfCauchy(scale)
+
+    Example::
+
+        >>> m = HalfCauchy(torch.tensor([1.0]))
+        >>> m.sample()  # half-cauchy distributed with scale=1
+        tensor([ 2.3214])
+
+    Args:
+        scale (float or Tensor): scale of the full Cauchy distribution
+    """
+    arg_constraints = {'scale': constraints.positive}
+    support = constraints.positive
+    has_rsample = True
+
+    def __init__(self, scale, validate_args=None):
+        base_dist = Cauchy(0, scale)
+        super(HalfCauchy, self).__init__(base_dist, AbsTransform(),
+                                         validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(HalfCauchy, _instance)
+        return super(HalfCauchy, self).expand(batch_shape, _instance=new)
+
+    @property
+    def scale(self):
+        return self.base_dist.scale
+
+    @property
+    def mean(self):
+        return self.base_dist.mean
+
+    @property
+    def variance(self):
+        return self.base_dist.variance
+
+    def log_prob(self, value):
+        log_prob = self.base_dist.log_prob(value) + math.log(2)
+        log_prob[value.expand(log_prob.shape) < 0] = -inf
+        return log_prob
+
+    def cdf(self, value):
+        return 2 * self.base_dist.cdf(value) - 1
+
+    def icdf(self, prob):
+        return self.base_dist.icdf((prob + 1) / 2)
+
+    def entropy(self):
+        return self.base_dist.entropy() - math.log(2)
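`cdf` and `icdf` above follow from the folding identity: if `Y = |X|` with `X` symmetric about
zero, then `F_Y(y) = 2 * F_X(y) - 1` for `y >= 0`. A round-trip check, assuming the public API::

    import torch
    from torch.distributions import HalfCauchy

    m = HalfCauchy(torch.tensor([1.0]))
    y = torch.tensor([2.0])
    print(m.icdf(m.cdf(y)))  # recovers ~2.0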
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/half_normal.html b/docs/stable/_modules/torch/distributions/half_normal.html
new file mode 100644
index 000000000000..425754967ac3
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/half_normal.html
@@ -0,0 +1,577 @@
+ torch.distributions.half_normal — PyTorch master documentation

    Source code for torch.distributions.half_normal

    +import math
    +
    +from torch._six import inf
    +from torch.distributions import constraints
    +from torch.distributions.transforms import AbsTransform
    +from torch.distributions.normal import Normal
    +from torch.distributions.transformed_distribution import TransformedDistribution
    +
    +
    +
+class HalfNormal(TransformedDistribution):
+    r"""
+    Creates a half-normal distribution parameterized by `scale` where::
+
+        X ~ Normal(0, scale)
+        Y = |X| ~ HalfNormal(scale)
+
+    Example::
+
+        >>> m = HalfNormal(torch.tensor([1.0]))
+        >>> m.sample()  # half-normal distributed with scale=1
+        tensor([ 0.1046])
+
+    Args:
+        scale (float or Tensor): scale of the full Normal distribution
+    """
+    arg_constraints = {'scale': constraints.positive}
+    support = constraints.positive
+    has_rsample = True
+
+    def __init__(self, scale, validate_args=None):
+        base_dist = Normal(0, scale)
+        super(HalfNormal, self).__init__(base_dist, AbsTransform(),
+                                         validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(HalfNormal, _instance)
+        return super(HalfNormal, self).expand(batch_shape, _instance=new)
+
+    @property
+    def scale(self):
+        return self.base_dist.scale
+
+    @property
+    def mean(self):
+        return self.scale * math.sqrt(2 / math.pi)
+
+    @property
+    def variance(self):
+        return self.scale.pow(2) * (1 - 2 / math.pi)
+
+    def log_prob(self, value):
+        log_prob = self.base_dist.log_prob(value) + math.log(2)
+        log_prob[value.expand(log_prob.shape) < 0] = -inf
+        return log_prob
+
+    def cdf(self, value):
+        return 2 * self.base_dist.cdf(value) - 1
+
+    def icdf(self, prob):
+        return self.base_dist.icdf((prob + 1) / 2)
+
+    def entropy(self):
+        return self.base_dist.entropy() - math.log(2)
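The closed-form moments above are those of the folded normal with zero location:
`E[Y] = scale * sqrt(2 / pi)` and `Var[Y] = scale^2 * (1 - 2 / pi)`. A quick empirical check,
assuming the public API::

    import math
    import torch
    from torch.distributions import HalfNormal

    m = HalfNormal(torch.tensor([2.0]))
    samples = m.sample((100000,))
    # Both should approach 2 * sqrt(2 / pi) ~ 1.60
    print(samples.mean().item(), m.mean.item(), 2.0 * math.sqrt(2 / math.pi))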
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/independent.html b/docs/stable/_modules/torch/distributions/independent.html
new file mode 100644
index 000000000000..006ccc38833f
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/independent.html
@@ -0,0 +1,612 @@
+ torch.distributions.independent — PyTorch master documentation

    Source code for torch.distributions.independent

    +import torch
    +from torch.distributions import constraints
    +from torch.distributions.distribution import Distribution
    +from torch.distributions.utils import _sum_rightmost
    +
    +
    +
+class Independent(Distribution):
+    r"""
+    Reinterprets some of the batch dims of a distribution as event dims.
+
+    This is mainly useful for changing the shape of the result of
+    :meth:`log_prob`. For example, to create a diagonal Normal distribution with
+    the same shape as a Multivariate Normal distribution (so they are
+    interchangeable), you can::
+
+        >>> loc = torch.zeros(3)
+        >>> scale = torch.ones(3)
+        >>> mvn = MultivariateNormal(loc, scale_tril=torch.diag(scale))
+        >>> [mvn.batch_shape, mvn.event_shape]
+        [torch.Size(()), torch.Size((3,))]
+        >>> normal = Normal(loc, scale)
+        >>> [normal.batch_shape, normal.event_shape]
+        [torch.Size((3,)), torch.Size(())]
+        >>> diagn = Independent(normal, 1)
+        >>> [diagn.batch_shape, diagn.event_shape]
+        [torch.Size(()), torch.Size((3,))]
+
+    Args:
+        base_distribution (torch.distributions.distribution.Distribution): a
+            base distribution
+        reinterpreted_batch_ndims (int): the number of batch dims to
+            reinterpret as event dims
+    """
+    arg_constraints = {}
+
+    def __init__(self, base_distribution, reinterpreted_batch_ndims, validate_args=None):
+        if reinterpreted_batch_ndims > len(base_distribution.batch_shape):
+            raise ValueError("Expected reinterpreted_batch_ndims <= len(base_distribution.batch_shape), "
+                             "actual {} vs {}".format(reinterpreted_batch_ndims,
+                                                      len(base_distribution.batch_shape)))
+        shape = base_distribution.batch_shape + base_distribution.event_shape
+        event_dim = reinterpreted_batch_ndims + len(base_distribution.event_shape)
+        batch_shape = shape[:len(shape) - event_dim]
+        event_shape = shape[len(shape) - event_dim:]
+        self.base_dist = base_distribution
+        self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
+        super(Independent, self).__init__(batch_shape, event_shape, validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(Independent, _instance)
+        batch_shape = torch.Size(batch_shape)
+        new.base_dist = self.base_dist.expand(batch_shape +
+                                              self.event_shape[:self.reinterpreted_batch_ndims])
+        new.reinterpreted_batch_ndims = self.reinterpreted_batch_ndims
+        super(Independent, new).__init__(batch_shape, self.event_shape, validate_args=False)
+        new._validate_args = self._validate_args
+        return new
+
+    @property
+    def has_rsample(self):
+        return self.base_dist.has_rsample
+
+    @property
+    def has_enumerate_support(self):
+        if self.reinterpreted_batch_ndims > 0:
+            return False
+        return self.base_dist.has_enumerate_support
+
+    @constraints.dependent_property
+    def support(self):
+        return self.base_dist.support
+
+    @property
+    def mean(self):
+        return self.base_dist.mean
+
+    @property
+    def variance(self):
+        return self.base_dist.variance
+
+    def sample(self, sample_shape=torch.Size()):
+        return self.base_dist.sample(sample_shape)
+
+    def rsample(self, sample_shape=torch.Size()):
+        return self.base_dist.rsample(sample_shape)
+
+    def log_prob(self, value):
+        log_prob = self.base_dist.log_prob(value)
+        return _sum_rightmost(log_prob, self.reinterpreted_batch_ndims)
+
+    def entropy(self):
+        entropy = self.base_dist.entropy()
+        return _sum_rightmost(entropy, self.reinterpreted_batch_ndims)
+
+    def enumerate_support(self, expand=True):
+        if self.reinterpreted_batch_ndims > 0:
+            raise NotImplementedError("Enumeration over cartesian product is not implemented")
+        return self.base_dist.enumerate_support(expand=expand)
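The effect on shapes is easiest to see directly: `log_prob` sums the last
`reinterpreted_batch_ndims` dimensions of the base distribution's result. A small illustration,
assuming the public API::

    import torch
    from torch.distributions import Independent, Normal

    normal = Normal(torch.zeros(3), torch.ones(3))
    diagn = Independent(normal, 1)
    x = torch.zeros(3)
    print(normal.log_prob(x).shape)  # torch.Size([3])
    print(diagn.log_prob(x).shape)   # torch.Size([])
    print(normal.log_prob(x).sum().item(), diagn.log_prob(x).item())  # equal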
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/kl.html b/docs/stable/_modules/torch/distributions/kl.html
new file mode 100644
index 000000000000..0e3638b2105c
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/kl.html
@@ -0,0 +1,1253 @@
+ torch.distributions.kl — PyTorch master documentation

    Source code for torch.distributions.kl

    +import math
    +import warnings
    +from functools import total_ordering
    +
    +import torch
    +from torch._six import inf
    +
    +from .bernoulli import Bernoulli
    +from .beta import Beta
    +from .binomial import Binomial
    +from .categorical import Categorical
    +from .dirichlet import Dirichlet
    +from .distribution import Distribution
    +from .exponential import Exponential
    +from .exp_family import ExponentialFamily
    +from .gamma import Gamma
    +from .geometric import Geometric
    +from .gumbel import Gumbel
    +from .half_normal import HalfNormal
    +from .independent import Independent
    +from .laplace import Laplace
    +from .lowrank_multivariate_normal import (LowRankMultivariateNormal, _batch_lowrank_logdet,
    +                                          _batch_lowrank_mahalanobis)
    +from .multivariate_normal import (MultivariateNormal, _batch_mahalanobis)
    +from .normal import Normal
    +from .one_hot_categorical import OneHotCategorical
    +from .pareto import Pareto
    +from .poisson import Poisson
    +from .transformed_distribution import TransformedDistribution
    +from .uniform import Uniform
    +from .utils import _sum_rightmost
    +
    +_KL_REGISTRY = {}  # Source of truth mapping a few general (type, type) pairs to functions.
    +_KL_MEMOIZE = {}  # Memoized version mapping many specific (type, type) pairs to functions.
    +
    +
    +
+def register_kl(type_p, type_q):
+    """
+    Decorator to register a pairwise function with :meth:`kl_divergence`.
+    Usage::
+
+        @register_kl(Normal, Normal)
+        def kl_normal_normal(p, q):
+            # insert implementation here
+
+    Lookup returns the most specific (type,type) match ordered by subclass. If
+    the match is ambiguous, a `RuntimeWarning` is raised. For example to
+    resolve the ambiguous situation::
+
+        @register_kl(BaseP, DerivedQ)
+        def kl_version1(p, q): ...
+        @register_kl(DerivedP, BaseQ)
+        def kl_version2(p, q): ...
+
+    you should register a third most-specific implementation, e.g.::
+
+        register_kl(DerivedP, DerivedQ)(kl_version1)  # Break the tie.
+
+    Args:
+        type_p (type): A subclass of :class:`~torch.distributions.Distribution`.
+        type_q (type): A subclass of :class:`~torch.distributions.Distribution`.
+    """
+    if not isinstance(type_p, type) or not issubclass(type_p, Distribution):
+        raise TypeError('Expected type_p to be a Distribution subclass but got {}'.format(type_p))
+    if not isinstance(type_q, type) or not issubclass(type_q, Distribution):
+        raise TypeError('Expected type_q to be a Distribution subclass but got {}'.format(type_q))
+
+    def decorator(fun):
+        _KL_REGISTRY[type_p, type_q] = fun
+        _KL_MEMOIZE.clear()  # reset since lookup order may have changed
+        return fun
+
+    return decorator
+
+
+@total_ordering
+class _Match(object):
+    __slots__ = ['types']
+
+    def __init__(self, *types):
+        self.types = types
+
+    def __eq__(self, other):
+        return self.types == other.types
+
+    def __le__(self, other):
+        for x, y in zip(self.types, other.types):
+            if not issubclass(x, y):
+                return False
+            if x is not y:
+                break
+        return True
+
+
+def _dispatch_kl(type_p, type_q):
+    """
+    Find the most specific approximate match, assuming single inheritance.
+    """
+    matches = [(super_p, super_q) for super_p, super_q in _KL_REGISTRY
+               if issubclass(type_p, super_p) and issubclass(type_q, super_q)]
+    if not matches:
+        return NotImplemented
+    # Check that the left- and right- lexicographic orders agree.
+    left_p, left_q = min(_Match(*m) for m in matches).types
+    right_q, right_p = min(_Match(*reversed(m)) for m in matches).types
+    left_fun = _KL_REGISTRY[left_p, left_q]
+    right_fun = _KL_REGISTRY[right_p, right_q]
+    if left_fun is not right_fun:
+        warnings.warn('Ambiguous kl_divergence({}, {}). Please register_kl({}, {})'.format(
+            type_p.__name__, type_q.__name__, left_p.__name__, right_q.__name__),
+            RuntimeWarning)
+    return left_fun
+
+
+def _infinite_like(tensor):
+    """
+    Helper function for obtaining infinite KL Divergence throughout
+    """
+    return torch.full_like(tensor, inf)
+
+
+def _x_log_x(tensor):
+    """
+    Utility function for calculating x log x
+    """
+    return tensor * tensor.log()
+
+
+def _batch_trace_XXT(bmat):
+    """
+    Utility function for calculating the trace of XX^{T} with X having arbitrary trailing batch dimensions
+    """
+    n = bmat.size(-1)
+    m = bmat.size(-2)
+    flat_trace = bmat.reshape(-1, m * n).pow(2).sum(-1)
+    return flat_trace.reshape(bmat.shape[:-2])
+
+
+def kl_divergence(p, q):
+    r"""
+    Compute Kullback-Leibler divergence :math:`KL(p \| q)` between two distributions.
+
+    .. math::
+
+        KL(p \| q) = \int p(x) \log\frac {p(x)} {q(x)} \,dx
+
+    Args:
+        p (Distribution): A :class:`~torch.distributions.Distribution` object.
+        q (Distribution): A :class:`~torch.distributions.Distribution` object.
+
+    Returns:
+        Tensor: A batch of KL divergences of shape `batch_shape`.
+
+    Raises:
+        NotImplementedError: If the distribution types have not been registered via
+            :meth:`register_kl`.
+    """
+    try:
+        fun = _KL_MEMOIZE[type(p), type(q)]
+    except KeyError:
+        fun = _dispatch_kl(type(p), type(q))
+        _KL_MEMOIZE[type(p), type(q)] = fun
+    if fun is NotImplemented:
+        raise NotImplementedError
+    return fun(p, q)
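In practice the dispatch is invisible to callers; `kl_divergence` simply routes to the matching
registered implementation below. For example, with two Normals (assuming the public API)::

    import torch
    from torch.distributions import Normal, kl_divergence

    p = Normal(torch.tensor(0.0), torch.tensor(1.0))
    q = Normal(torch.tensor(1.0), torch.tensor(2.0))
    print(kl_divergence(p, q).item())  # ~0.4431, via _kl_normal_normal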
+################################################################################
+# KL Divergence Implementations
+################################################################################
+
+_euler_gamma = 0.57721566490153286060
+
+# Same distributions
+
+
+@register_kl(Bernoulli, Bernoulli)
+def _kl_bernoulli_bernoulli(p, q):
+    t1 = p.probs * (p.probs / q.probs).log()
+    t1[q.probs == 0] = inf
+    t1[p.probs == 0] = 0
+    t2 = (1 - p.probs) * ((1 - p.probs) / (1 - q.probs)).log()
+    t2[q.probs == 1] = inf
+    t2[p.probs == 1] = 0
+    return t1 + t2
+
+
+@register_kl(Beta, Beta)
+def _kl_beta_beta(p, q):
+    sum_params_p = p.concentration1 + p.concentration0
+    sum_params_q = q.concentration1 + q.concentration0
+    t1 = q.concentration1.lgamma() + q.concentration0.lgamma() + (sum_params_p).lgamma()
+    t2 = p.concentration1.lgamma() + p.concentration0.lgamma() + (sum_params_q).lgamma()
+    t3 = (p.concentration1 - q.concentration1) * torch.digamma(p.concentration1)
+    t4 = (p.concentration0 - q.concentration0) * torch.digamma(p.concentration0)
+    t5 = (sum_params_q - sum_params_p) * torch.digamma(sum_params_p)
+    return t1 - t2 + t3 + t4 + t5
+
+
+@register_kl(Binomial, Binomial)
+def _kl_binomial_binomial(p, q):
+    # from https://math.stackexchange.com/questions/2214993/
+    # kullback-leibler-divergence-for-binomial-distributions-p-and-q
+    if (p.total_count < q.total_count).any():
+        raise NotImplementedError('KL between Binomials where q.total_count > p.total_count is not implemented')
+    kl = p.total_count * (p.probs * (p.logits - q.logits) + (-p.probs).log1p() - (-q.probs).log1p())
+    inf_idxs = p.total_count > q.total_count
+    kl[inf_idxs] = _infinite_like(kl[inf_idxs])
+    return kl
+
+
+@register_kl(Categorical, Categorical)
+def _kl_categorical_categorical(p, q):
+    t = p.probs * (p.logits - q.logits)
+    t[(q.probs == 0).expand_as(t)] = inf
+    t[(p.probs == 0).expand_as(t)] = 0
+    return t.sum(-1)
+
+
+@register_kl(Dirichlet, Dirichlet)
+def _kl_dirichlet_dirichlet(p, q):
+    # From http://bariskurt.com/kullback-leibler-divergence-between-two-dirichlet-and-beta-distributions/
+    sum_p_concentration = p.concentration.sum(-1)
+    sum_q_concentration = q.concentration.sum(-1)
+    t1 = sum_p_concentration.lgamma() - sum_q_concentration.lgamma()
+    t2 = (p.concentration.lgamma() - q.concentration.lgamma()).sum(-1)
+    t3 = p.concentration - q.concentration
+    t4 = p.concentration.digamma() - sum_p_concentration.digamma().unsqueeze(-1)
+    return t1 - t2 + (t3 * t4).sum(-1)
+
+
+@register_kl(Exponential, Exponential)
+def _kl_exponential_exponential(p, q):
+    rate_ratio = q.rate / p.rate
+    t1 = -rate_ratio.log()
+    return t1 + rate_ratio - 1
+
+
+@register_kl(ExponentialFamily, ExponentialFamily)
+def _kl_expfamily_expfamily(p, q):
+    if not type(p) == type(q):
+        raise NotImplementedError("The cross KL-divergence between different exponential "
+                                  "families cannot be computed using Bregman divergences")
+    p_nparams = [np.detach().requires_grad_() for np in p._natural_params]
+    q_nparams = q._natural_params
+    lg_normal = p._log_normalizer(*p_nparams)
+    gradients = torch.autograd.grad(lg_normal.sum(), p_nparams, create_graph=True)
+    result = q._log_normalizer(*q_nparams) - lg_normal.clone()
+    for pnp, qnp, g in zip(p_nparams, q_nparams, gradients):
+        term = (qnp - pnp) * g
+        result -= _sum_rightmost(term, len(q.event_shape))
+    return result
+
+
+@register_kl(Gamma, Gamma)
+def _kl_gamma_gamma(p, q):
+    t1 = q.concentration * (p.rate / q.rate).log()
+    t2 = torch.lgamma(q.concentration) - torch.lgamma(p.concentration)
+    t3 = (p.concentration - q.concentration) * torch.digamma(p.concentration)
+    t4 = (q.rate - p.rate) * (p.concentration / p.rate)
+    return t1 + t2 + t3 + t4
+
+
+@register_kl(Gumbel, Gumbel)
+def _kl_gumbel_gumbel(p, q):
+    ct1 = p.scale / q.scale
+    ct2 = q.loc / q.scale
+    ct3 = p.loc / q.scale
+    t1 = -ct1.log() - ct2 + ct3
+    t2 = ct1 * _euler_gamma
+    t3 = torch.exp(ct2 + (1 + ct1).lgamma() - ct3)
+    return t1 + t2 + t3 - (1 + _euler_gamma)
+
+
+@register_kl(Geometric, Geometric)
+def _kl_geometric_geometric(p, q):
+    return -p.entropy() - torch.log1p(-q.probs) / p.probs - q.logits
+
+
+@register_kl(HalfNormal, HalfNormal)
+def _kl_halfnormal_halfnormal(p, q):
+    return _kl_normal_normal(p.base_dist, q.base_dist)
+
+
+@register_kl(Laplace, Laplace)
+def _kl_laplace_laplace(p, q):
+    # From http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf
+    scale_ratio = p.scale / q.scale
+    loc_abs_diff = (p.loc - q.loc).abs()
+    t1 = -scale_ratio.log()
+    t2 = loc_abs_diff / q.scale
+    t3 = scale_ratio * torch.exp(-loc_abs_diff / p.scale)
+    return t1 + t2 + t3 - 1
+
+
+@register_kl(LowRankMultivariateNormal, LowRankMultivariateNormal)
+def _kl_lowrankmultivariatenormal_lowrankmultivariatenormal(p, q):
+    if p.event_shape != q.event_shape:
+        raise ValueError("KL-divergence between two Low Rank Multivariate Normals with "
+                         "different event shapes cannot be computed")
+
+    term1 = (_batch_lowrank_logdet(q._unbroadcasted_cov_factor, q._unbroadcasted_cov_diag,
+                                   q._capacitance_tril) -
+             _batch_lowrank_logdet(p._unbroadcasted_cov_factor, p._unbroadcasted_cov_diag,
+                                   p._capacitance_tril))
+    term3 = _batch_lowrank_mahalanobis(q._unbroadcasted_cov_factor, q._unbroadcasted_cov_diag,
+                                       q.loc - p.loc,
+                                       q._capacitance_tril)
+    # Expands term2 according to
+    # inv(qcov) @ pcov = [inv(qD) - inv(qD) @ qW @ inv(qC) @ qW.T @ inv(qD)] @ (pW @ pW.T + pD)
+    #                  = [inv(qD) - A.T @ A] @ (pD + pW @ pW.T)
+    qWt_qDinv = (q._unbroadcasted_cov_factor.transpose(-1, -2) /
+                 q._unbroadcasted_cov_diag.unsqueeze(-2))
+    A = torch.triangular_solve(qWt_qDinv, q._capacitance_tril, upper=False)[0]
+    term21 = (p._unbroadcasted_cov_diag / q._unbroadcasted_cov_diag).sum(-1)
+    term22 = _batch_trace_XXT(p._unbroadcasted_cov_factor *
+                              q._unbroadcasted_cov_diag.rsqrt().unsqueeze(-1))
+    term23 = _batch_trace_XXT(A * p._unbroadcasted_cov_diag.sqrt().unsqueeze(-2))
+    term24 = _batch_trace_XXT(A.matmul(p._unbroadcasted_cov_factor))
+    term2 = term21 + term22 - term23 - term24
+    return 0.5 * (term1 + term2 + term3 - p.event_shape[0])
+
+
+@register_kl(MultivariateNormal, LowRankMultivariateNormal)
+def _kl_multivariatenormal_lowrankmultivariatenormal(p, q):
+    if p.event_shape != q.event_shape:
+        raise ValueError("KL-divergence between two (Low Rank) Multivariate Normals with "
+                         "different event shapes cannot be computed")
+
+    term1 = (_batch_lowrank_logdet(q._unbroadcasted_cov_factor, q._unbroadcasted_cov_diag,
+                                   q._capacitance_tril) -
+             2 * p._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1))
+    term3 = _batch_lowrank_mahalanobis(q._unbroadcasted_cov_factor, q._unbroadcasted_cov_diag,
+                                       q.loc - p.loc,
+                                       q._capacitance_tril)
+    # Expands term2 according to
+    # inv(qcov) @ pcov = [inv(qD) - inv(qD) @ qW @ inv(qC) @ qW.T @ inv(qD)] @ p_tril @ p_tril.T
+    #                  = [inv(qD) - A.T @ A] @ p_tril @ p_tril.T
+    qWt_qDinv = (q._unbroadcasted_cov_factor.transpose(-1, -2) /
+                 q._unbroadcasted_cov_diag.unsqueeze(-2))
+    A = torch.triangular_solve(qWt_qDinv, q._capacitance_tril, upper=False)[0]
+    term21 = _batch_trace_XXT(p._unbroadcasted_scale_tril *
+                              q._unbroadcasted_cov_diag.rsqrt().unsqueeze(-1))
+    term22 = _batch_trace_XXT(A.matmul(p._unbroadcasted_scale_tril))
+    term2 = term21 - term22
+    return 0.5 * (term1 + term2 + term3 - p.event_shape[0])
+
+
+@register_kl(LowRankMultivariateNormal, MultivariateNormal)
+def _kl_lowrankmultivariatenormal_multivariatenormal(p, q):
+    if p.event_shape != q.event_shape:
+        raise ValueError("KL-divergence between two (Low Rank) Multivariate Normals with "
+                         "different event shapes cannot be computed")
+
+    term1 = (2 * q._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1) -
+             _batch_lowrank_logdet(p._unbroadcasted_cov_factor, p._unbroadcasted_cov_diag,
+                                   p._capacitance_tril))
+    term3 = _batch_mahalanobis(q._unbroadcasted_scale_tril, (q.loc - p.loc))
+    # Expands term2 according to
+    # inv(qcov) @ pcov = inv(q_tril @ q_tril.T) @ (pW @ pW.T + pD)
+    combined_batch_shape = torch._C._infer_size(q._unbroadcasted_scale_tril.shape[:-2],
+                                                p._unbroadcasted_cov_factor.shape[:-2])
+    n = p.event_shape[0]
+    q_scale_tril = q._unbroadcasted_scale_tril.expand(combined_batch_shape + (n, n))
+    p_cov_factor = p._unbroadcasted_cov_factor.expand(combined_batch_shape +
+                                                      (n, p.cov_factor.size(-1)))
+    p_cov_diag = (torch.diag_embed(p._unbroadcasted_cov_diag.sqrt())
+                  .expand(combined_batch_shape + (n, n)))
+    term21 = _batch_trace_XXT(torch.triangular_solve(p_cov_factor, q_scale_tril, upper=False)[0])
+    term22 = _batch_trace_XXT(torch.triangular_solve(p_cov_diag, q_scale_tril, upper=False)[0])
+    term2 = term21 + term22
+    return 0.5 * (term1 + term2 + term3 - p.event_shape[0])
+
+
+@register_kl(MultivariateNormal, MultivariateNormal)
+def _kl_multivariatenormal_multivariatenormal(p, q):
+    # From https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Kullback%E2%80%93Leibler_divergence
+    if p.event_shape != q.event_shape:
+        raise ValueError("KL-divergence between two Multivariate Normals with "
+                         "different event shapes cannot be computed")
+
+    half_term1 = (q._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1) -
+                  p._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1))
+    combined_batch_shape = torch._C._infer_size(q._unbroadcasted_scale_tril.shape[:-2],
+                                                p._unbroadcasted_scale_tril.shape[:-2])
+    n = p.event_shape[0]
+    q_scale_tril = q._unbroadcasted_scale_tril.expand(combined_batch_shape + (n, n))
+    p_scale_tril = p._unbroadcasted_scale_tril.expand(combined_batch_shape + (n, n))
+    term2 = _batch_trace_XXT(torch.triangular_solve(p_scale_tril, q_scale_tril, upper=False)[0])
+    term3 = _batch_mahalanobis(q._unbroadcasted_scale_tril, (q.loc - p.loc))
+    return half_term1 + 0.5 * (term2 + term3 - n)
+
+
+@register_kl(Normal, Normal)
+def _kl_normal_normal(p, q):
+    var_ratio = (p.scale / q.scale).pow(2)
+    t1 = ((p.loc - q.loc) / q.scale).pow(2)
+    return 0.5 * (var_ratio + t1 - 1 - var_ratio.log())
+
+
+@register_kl(OneHotCategorical, OneHotCategorical)
+def _kl_onehotcategorical_onehotcategorical(p, q):
+    return _kl_categorical_categorical(p._categorical, q._categorical)
+
+
+@register_kl(Pareto, Pareto)
+def _kl_pareto_pareto(p, q):
+    # From http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf
+    scale_ratio = p.scale / q.scale
+    alpha_ratio = q.alpha / p.alpha
+    t1 = q.alpha * scale_ratio.log()
+    t2 = -alpha_ratio.log()
+    result = t1 + t2 + alpha_ratio - 1
+    result[p.support.lower_bound < q.support.lower_bound] = inf
+    return result
+
+
+@register_kl(Poisson, Poisson)
+def _kl_poisson_poisson(p, q):
+    return p.rate * (p.rate.log() - q.rate.log()) - (p.rate - q.rate)
+
+
+@register_kl(TransformedDistribution, TransformedDistribution)
+def _kl_transformed_transformed(p, q):
+    if p.transforms != q.transforms:
+        raise NotImplementedError
+    if p.event_shape != q.event_shape:
+        raise NotImplementedError
+    # extra_event_dim = len(p.event_shape) - len(p.base_dist.event_shape)
+    extra_event_dim = len(p.event_shape)
+    base_kl_divergence = kl_divergence(p.base_dist, q.base_dist)
+    return _sum_rightmost(base_kl_divergence, extra_event_dim)
+
+
+@register_kl(Uniform, Uniform)
+def _kl_uniform_uniform(p, q):
+    result = ((q.high - q.low) / (p.high - p.low)).log()
+    result[(q.low > p.low) | (q.high < p.high)] = inf
+    return result
+
+
+# Different distributions
+@register_kl(Bernoulli, Poisson)
+def _kl_bernoulli_poisson(p, q):
+    return -p.entropy() - (p.probs * q.rate.log() - q.rate)
+
+
+@register_kl(Beta, Pareto)
+def _kl_beta_infinity(p, q):
+    return _infinite_like(p.concentration1)
+
+
+@register_kl(Beta, Exponential)
+def _kl_beta_exponential(p, q):
+    return -p.entropy() - q.rate.log() + q.rate * (p.concentration1 / (p.concentration1 + p.concentration0))
+
+
+@register_kl(Beta, Gamma)
+def _kl_beta_gamma(p, q):
+    t1 = -p.entropy()
+    t2 = q.concentration.lgamma() - q.concentration * q.rate.log()
+    t3 = (q.concentration - 1) * (p.concentration1.digamma() - (p.concentration1 + p.concentration0).digamma())
+    t4 = q.rate * p.concentration1 / (p.concentration1 + p.concentration0)
+    return t1 + t2 - t3 + t4
+
+# TODO: Add Beta-Laplace KL Divergence
+
+
+@register_kl(Beta, Normal)
+def _kl_beta_normal(p, q):
+    E_beta = p.concentration1 / (p.concentration1 + p.concentration0)
+    var_normal = q.scale.pow(2)
+    t1 = -p.entropy()
+    t2 = 0.5 * (var_normal * 2 * math.pi).log()
+    t3 = (E_beta * (1 - E_beta) / (p.concentration1 + p.concentration0 + 1) + E_beta.pow(2)) * 0.5
+    t4 = q.loc * E_beta
+    t5 = q.loc.pow(2) * 0.5
+    return t1 + t2 + (t3 - t4 + t5) / var_normal
+
+
+@register_kl(Beta, Uniform)
+def _kl_beta_uniform(p, q):
+    result = -p.entropy() + (q.high - q.low).log()
+    result[(q.low > p.support.lower_bound) | (q.high < p.support.upper_bound)] = inf
+    return result
+
+
+@register_kl(Exponential, Beta)
+@register_kl(Exponential, Pareto)
+@register_kl(Exponential, Uniform)
+def _kl_exponential_infinity(p, q):
+    return _infinite_like(p.rate)
+
+
+@register_kl(Exponential, Gamma)
+def _kl_exponential_gamma(p, q):
+    ratio = q.rate / p.rate
+    t1 = -q.concentration * torch.log(ratio)
+    return t1 + ratio + q.concentration.lgamma() + q.concentration * _euler_gamma - (1 + _euler_gamma)
+
+
+@register_kl(Exponential, Gumbel)
+def _kl_exponential_gumbel(p, q):
+    scale_rate_prod = p.rate * q.scale
+    loc_scale_ratio = q.loc / q.scale
+    t1 = scale_rate_prod.log() - 1
+    t2 = torch.exp(loc_scale_ratio) * scale_rate_prod / (scale_rate_prod + 1)
+    t3 = scale_rate_prod.reciprocal()
+    return t1 - loc_scale_ratio + t2 + t3
+
+# TODO: Add Exponential-Laplace KL Divergence
+
+
+@register_kl(Exponential, Normal)
+def _kl_exponential_normal(p, q):
+    var_normal = q.scale.pow(2)
+    rate_sqr = p.rate.pow(2)
+    t1 = 0.5 * torch.log(rate_sqr * var_normal * 2 * math.pi)
+    t2 = rate_sqr.reciprocal()
+    t3 = q.loc / p.rate
+    t4 = q.loc.pow(2) * 0.5
+    return t1 - 1 + (t2 - t3 + t4) / var_normal
+
+
+@register_kl(Gamma, Beta)
+@register_kl(Gamma, Pareto)
+@register_kl(Gamma, Uniform)
+def _kl_gamma_infinity(p, q):
+    return _infinite_like(p.concentration)
+
+
+@register_kl(Gamma, Exponential)
+def _kl_gamma_exponential(p, q):
+    return -p.entropy() - q.rate.log() + q.rate * p.concentration / p.rate
+
+
+@register_kl(Gamma, Gumbel)
+def _kl_gamma_gumbel(p, q):
+    beta_scale_prod = p.rate * q.scale
+    loc_scale_ratio = q.loc / q.scale
+    t1 = (p.concentration - 1) * p.concentration.digamma() - p.concentration.lgamma() - p.concentration
+    t2 = beta_scale_prod.log() + p.concentration / beta_scale_prod
+    t3 = torch.exp(loc_scale_ratio) * (1 + beta_scale_prod.reciprocal()).pow(-p.concentration) - loc_scale_ratio
+    return t1 + t2 + t3
+
+# TODO: Add Gamma-Laplace KL Divergence
+
+
+@register_kl(Gamma, Normal)
+def _kl_gamma_normal(p, q):
+    var_normal = q.scale.pow(2)
+    beta_sqr = p.rate.pow(2)
+    t1 = 0.5 * torch.log(beta_sqr * var_normal * 2 * math.pi) - p.concentration - p.concentration.lgamma()
+    t2 = 0.5 * (p.concentration.pow(2) + p.concentration) / beta_sqr
+    t3 = q.loc * p.concentration / p.rate
+    t4 = 0.5 * q.loc.pow(2)
+    return t1 + (p.concentration - 1) * p.concentration.digamma() + (t2 - t3 + t4) / var_normal
+
+
+@register_kl(Gumbel, Beta)
+@register_kl(Gumbel, Exponential)
+@register_kl(Gumbel, Gamma)
+@register_kl(Gumbel, Pareto)
+@register_kl(Gumbel, Uniform)
+def _kl_gumbel_infinity(p, q):
+    return _infinite_like(p.loc)
+
+# TODO: Add Gumbel-Laplace KL Divergence
+
+
+@register_kl(Gumbel, Normal)
+def _kl_gumbel_normal(p, q):
+    param_ratio = p.scale / q.scale
+    t1 = (param_ratio / math.sqrt(2 * math.pi)).log()
+    t2 = (math.pi * param_ratio * 0.5).pow(2) / 3
+    t3 = ((p.loc + p.scale * _euler_gamma - q.loc) / q.scale).pow(2) * 0.5
+    return -t1 + t2 + t3 - (_euler_gamma + 1)
+
+
+@register_kl(Laplace, Beta)
+@register_kl(Laplace, Exponential)
+@register_kl(Laplace, Gamma)
+@register_kl(Laplace, Pareto)
+@register_kl(Laplace, Uniform)
+def _kl_laplace_infinity(p, q):
+    return _infinite_like(p.loc)
+
+
+@register_kl(Laplace, Normal)
+def _kl_laplace_normal(p, q):
+    var_normal = q.scale.pow(2)
+    scale_sqr_var_ratio = p.scale.pow(2) / var_normal
+    t1 = 0.5 * torch.log(2 * scale_sqr_var_ratio / math.pi)
+    t2 = 0.5 * p.loc.pow(2)
+    t3 = p.loc * q.loc
+    t4 = 0.5 * q.loc.pow(2)
+    return -t1 + scale_sqr_var_ratio + (t2 - t3 + t4) / var_normal - 1
+
+
+@register_kl(Normal, Beta)
+@register_kl(Normal, Exponential)
+@register_kl(Normal, Gamma)
+@register_kl(Normal, Pareto)
+@register_kl(Normal, Uniform)
+def _kl_normal_infinity(p, q):
+    return _infinite_like(p.loc)
+
+
+@register_kl(Normal, Gumbel)
+def _kl_normal_gumbel(p, q):
+    mean_scale_ratio = p.loc / q.scale
+    var_scale_sqr_ratio = (p.scale / q.scale).pow(2)
+    loc_scale_ratio = q.loc / q.scale
+    t1 = var_scale_sqr_ratio.log() * 0.5
+    t2 = mean_scale_ratio - loc_scale_ratio
+    t3 = torch.exp(-mean_scale_ratio + 0.5 * var_scale_sqr_ratio + loc_scale_ratio)
+    return -t1 + t2 + t3 - (0.5 * (1 + math.log(2 * math.pi)))
+
+# TODO: Add Normal-Laplace KL Divergence
+
+
+@register_kl(Pareto, Beta)
+@register_kl(Pareto, Uniform)
+def _kl_pareto_infinity(p, q):
+    return _infinite_like(p.scale)
+
+
+@register_kl(Pareto, Exponential)
+def _kl_pareto_exponential(p, q):
+    scale_rate_prod = p.scale * q.rate
+    t1 = (p.alpha / scale_rate_prod).log()
+    t2 = p.alpha.reciprocal()
+    t3 = p.alpha * scale_rate_prod / (p.alpha - 1)
+    result = t1 - t2 + t3 - 1
+    result[p.alpha <= 1] = inf
+    return result
+
+
+@register_kl(Pareto, Gamma)
+def _kl_pareto_gamma(p, q):
+    common_term = p.scale.log() + p.alpha.reciprocal()
+    t1 = p.alpha.log() - common_term
+    t2 = q.concentration.lgamma() - q.concentration * q.rate.log()
+    t3 = (1 - q.concentration) * common_term
+    t4 = q.rate * p.alpha * p.scale / (p.alpha - 1)
+    result = t1 + t2 + t3 + t4 - 1
+    result[p.alpha <= 1] = inf
+    return result
+
+# TODO: Add Pareto-Laplace KL Divergence
+
+
+@register_kl(Pareto, Normal)
+def _kl_pareto_normal(p, q):
+    var_normal = 2 * q.scale.pow(2)
+    common_term = p.scale / (p.alpha - 1)
+    t1 = (math.sqrt(2 * math.pi) * q.scale * p.alpha / p.scale).log()
+    t2 = p.alpha.reciprocal()
+    t3 = p.alpha * common_term.pow(2) / (p.alpha - 2)
+    t4 = (p.alpha * common_term - q.loc).pow(2)
+    result = t1 - t2 + (t3 + t4) / var_normal - 1
+    result[p.alpha <= 2] = inf
+    return result
+
+
+@register_kl(Poisson, Bernoulli)
+@register_kl(Poisson, Binomial)
+def _kl_poisson_infinity(p, q):
+    return _infinite_like(p.rate)
+
+
+@register_kl(Uniform, Beta)
+def _kl_uniform_beta(p, q):
+    common_term = p.high - p.low
+    t1 = torch.log(common_term)
+    t2 = (q.concentration1 - 1) * (_x_log_x(p.high) - _x_log_x(p.low) - common_term) / common_term
+    t3 = (q.concentration0 - 1) * (_x_log_x((1 - p.high)) - _x_log_x((1 - p.low)) + common_term) / common_term
+    t4 = q.concentration1.lgamma() + q.concentration0.lgamma() - (q.concentration1 + q.concentration0).lgamma()
+    result = t3 + t4 - t1 - t2
+    result[(p.high > q.support.upper_bound) | (p.low < q.support.lower_bound)] = inf
+    return result
+
+
+@register_kl(Uniform, Exponential)
+def _kl_uniform_exponential(p, q):
+    result = q.rate * (p.high + p.low) / 2 - ((p.high - p.low) * q.rate).log()
+    result[p.low < q.support.lower_bound] = inf
+    return result
+
+
+@register_kl(Uniform, Gamma)
+def _kl_uniform_gamma(p, q):
+    common_term = p.high - p.low
+    t1 = common_term.log()
+    t2 = q.concentration.lgamma() - q.concentration * q.rate.log()
+    t3 = (1 - q.concentration) * (_x_log_x(p.high) - _x_log_x(p.low) - common_term) / common_term
+    t4 = q.rate * (p.high + p.low) / 2
+    result = -t1 + t2 + t3 + t4
+    result[p.low < q.support.lower_bound] = inf
+    return result
+
+
+@register_kl(Uniform, Gumbel)
+def _kl_uniform_gumbel(p, q):
+    common_term = q.scale / (p.high - p.low)
+    high_loc_diff = (p.high - q.loc) / q.scale
+    low_loc_diff = (p.low - q.loc) / q.scale
+    t1 = common_term.log() + 0.5 * (high_loc_diff + low_loc_diff)
+    t2 = common_term * (torch.exp(-high_loc_diff) - torch.exp(-low_loc_diff))
+    return t1 - t2
+
+# TODO: Uniform-Laplace KL Divergence
+
+
+@register_kl(Uniform, Normal)
+def _kl_uniform_normal(p, q):
+    common_term = p.high - p.low
+    t1 = (math.sqrt(math.pi * 2) * q.scale / common_term).log()
+    t2 = (common_term).pow(2) / 12
+    t3 = ((p.high + p.low - 2 * q.loc) / 2).pow(2)
+    return t1 + 0.5 * (t2 + t3) / q.scale.pow(2)
+
+
+@register_kl(Uniform, Pareto)
+def _kl_uniform_pareto(p, q):
+    support_uniform = p.high - p.low
+    t1 = (q.alpha * q.scale.pow(q.alpha) * (support_uniform)).log()
+    t2 = (_x_log_x(p.high) - _x_log_x(p.low) - support_uniform) / support_uniform
+    result = t2 * (q.alpha + 1) - t1
+    result[p.low < q.support.lower_bound] = inf
+    return result
+
+
+@register_kl(Independent, Independent)
+def _kl_independent_independent(p, q):
+    if p.reinterpreted_batch_ndims != q.reinterpreted_batch_ndims:
+        raise NotImplementedError
+    result = kl_divergence(p.base_dist, q.base_dist)
+    return _sum_rightmost(result, p.reinterpreted_batch_ndims)
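Any of the closed forms above can be cross-checked against the Monte Carlo estimate
`E_p[log p(x) - log q(x)]`; a minimal sketch for the Normal-Normal case, assuming the public
API::

    import torch
    from torch.distributions import Normal, kl_divergence

    p = Normal(torch.tensor(0.0), torch.tensor(1.0))
    q = Normal(torch.tensor(1.0), torch.tensor(2.0))
    x = p.sample((200000,))
    mc = (p.log_prob(x) - q.log_prob(x)).mean()
    print(mc.item(), kl_divergence(p, q).item())  # both ~0.4431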
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/laplace.html b/docs/stable/_modules/torch/distributions/laplace.html
new file mode 100644
index 000000000000..7fb26209e7ac
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/laplace.html
@@ -0,0 +1,598 @@
+ torch.distributions.laplace — PyTorch master documentation

    Source code for torch.distributions.laplace

    +from numbers import Number
    +import torch
    +from torch.distributions import constraints
    +from torch.distributions.distribution import Distribution
    +from torch.distributions.utils import broadcast_all
    +
    +
    +
+class Laplace(Distribution):
+    r"""
+    Creates a Laplace distribution parameterized by :attr:`loc` and :attr:`scale`.
+
+    Example::
+
+        >>> m = Laplace(torch.tensor([0.0]), torch.tensor([1.0]))
+        >>> m.sample()  # Laplace distributed with loc=0, scale=1
+        tensor([ 0.1046])
+
+    Args:
+        loc (float or Tensor): mean of the distribution
+        scale (float or Tensor): scale of the distribution
+    """
+    arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
+    support = constraints.real
+    has_rsample = True
+
+    @property
+    def mean(self):
+        return self.loc
+
+    @property
+    def variance(self):
+        return 2 * self.scale.pow(2)
+
+    @property
+    def stddev(self):
+        return (2 ** 0.5) * self.scale
+
+    def __init__(self, loc, scale, validate_args=None):
+        self.loc, self.scale = broadcast_all(loc, scale)
+        if isinstance(loc, Number) and isinstance(scale, Number):
+            batch_shape = torch.Size()
+        else:
+            batch_shape = self.loc.size()
+        super(Laplace, self).__init__(batch_shape, validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(Laplace, _instance)
+        batch_shape = torch.Size(batch_shape)
+        new.loc = self.loc.expand(batch_shape)
+        new.scale = self.scale.expand(batch_shape)
+        super(Laplace, new).__init__(batch_shape, validate_args=False)
+        new._validate_args = self._validate_args
+        return new
+
+    def rsample(self, sample_shape=torch.Size()):
+        shape = self._extended_shape(sample_shape)
+        finfo = torch.finfo(self.loc.dtype)
+        if torch._C._get_tracing_state():
+            # [JIT WORKAROUND] lack of support for .uniform_()
+            u = torch.rand(shape, dtype=self.loc.dtype, device=self.loc.device) * 2 - 1
+            return self.loc - self.scale * u.sign() * torch.log1p(-u.abs().clamp(min=finfo.tiny))
+        u = self.loc.new(shape).uniform_(finfo.eps - 1, 1)
+        # TODO: If we ever implement tensor.nextafter, below is what we want ideally.
+        # u = self.loc.new(shape).uniform_(self.loc.nextafter(-.5, 0), .5)
+        return self.loc - self.scale * u.sign() * torch.log1p(-u.abs())
+
+    def log_prob(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        return -torch.log(2 * self.scale) - torch.abs(value - self.loc) / self.scale
+
+    def cdf(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        return 0.5 - 0.5 * (value - self.loc).sign() * torch.expm1(-(value - self.loc).abs() / self.scale)
+
+    def icdf(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        term = value - 0.5
+        return self.loc - self.scale * (term).sign() * torch.log1p(-2 * term.abs())
+
+    def entropy(self):
+        return 1 + torch.log(2 * self.scale)
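`cdf` and `icdf` above are exact inverses, both written with `log1p`/`expm1` for numerical
stability in the tails. A round-trip check, assuming the public API::

    import torch
    from torch.distributions import Laplace

    m = Laplace(torch.tensor([0.0]), torch.tensor([1.0]))
    v = torch.tensor([0.9])
    print(m.cdf(m.icdf(v)))  # recovers ~0.9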
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/log_normal.html b/docs/stable/_modules/torch/distributions/log_normal.html
new file mode 100644
index 000000000000..5ce4382f3404
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/log_normal.html
@@ -0,0 +1,568 @@
+ torch.distributions.log_normal — PyTorch master documentation

    Source code for torch.distributions.log_normal

    +from torch.distributions import constraints
    +from torch.distributions.transforms import ExpTransform
    +from torch.distributions.normal import Normal
    +from torch.distributions.transformed_distribution import TransformedDistribution
    +
    +
    +
+class LogNormal(TransformedDistribution):
+    r"""
+    Creates a log-normal distribution parameterized by
+    :attr:`loc` and :attr:`scale` where::
+
+        X ~ Normal(loc, scale)
+        Y = exp(X) ~ LogNormal(loc, scale)
+
+    Example::
+
+        >>> m = LogNormal(torch.tensor([0.0]), torch.tensor([1.0]))
+        >>> m.sample()  # log-normal distributed with mean=0 and stddev=1
+        tensor([ 0.1046])
+
+    Args:
+        loc (float or Tensor): mean of log of distribution
+        scale (float or Tensor): standard deviation of log of the distribution
+    """
+    arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
+    support = constraints.positive
+    has_rsample = True
+
+    def __init__(self, loc, scale, validate_args=None):
+        base_dist = Normal(loc, scale)
+        super(LogNormal, self).__init__(base_dist, ExpTransform(), validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(LogNormal, _instance)
+        return super(LogNormal, self).expand(batch_shape, _instance=new)
+
+    @property
+    def loc(self):
+        return self.base_dist.loc
+
+    @property
+    def scale(self):
+        return self.base_dist.scale
+
+    @property
+    def mean(self):
+        return (self.loc + self.scale.pow(2) / 2).exp()
+
+    @property
+    def variance(self):
+        return (self.scale.pow(2).exp() - 1) * (2 * self.loc + self.scale.pow(2)).exp()
+
+    def entropy(self):
+        return self.base_dist.entropy() + self.loc
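The `mean` and `variance` properties implement the usual log-normal moments
`exp(loc + scale^2 / 2)` and `(exp(scale^2) - 1) * exp(2 * loc + scale^2)`. A quick empirical
check, assuming the public API::

    import torch
    from torch.distributions import LogNormal

    m = LogNormal(torch.tensor([0.0]), torch.tensor([0.5]))
    samples = m.sample((200000,))
    print(samples.mean().item(), m.mean.item())     # both ~1.133
    print(samples.var().item(), m.variance.item())  # both ~0.365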
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/lowrank_multivariate_normal.html b/docs/stable/_modules/torch/distributions/lowrank_multivariate_normal.html
new file mode 100644
index 000000000000..e32b19bc50c5
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/lowrank_multivariate_normal.html
@@ -0,0 +1,714 @@
+ torch.distributions.lowrank_multivariate_normal — PyTorch master documentation

    Source code for torch.distributions.lowrank_multivariate_normal

    +import math
    +
    +import torch
    +from torch.distributions import constraints
    +from torch.distributions.distribution import Distribution
    +from torch.distributions.multivariate_normal import _batch_mahalanobis, _batch_mv
    +from torch.distributions.utils import _standard_normal, lazy_property
    +
    +
    +def _batch_capacitance_tril(W, D):
    +    r"""
    +    Computes Cholesky of :math:`I + W.T @ inv(D) @ W` for a batch of matrices :math:`W`
    +    and a batch of vectors :math:`D`.
    +    """
    +    m = W.size(-1)
    +    Wt_Dinv = W.transpose(-1, -2) / D.unsqueeze(-2)
    +    K = torch.matmul(Wt_Dinv, W).contiguous()
    +    K.view(-1, m * m)[:, ::m + 1] += 1  # add identity matrix to K
    +    return torch.cholesky(K)
    +
    +
    +def _batch_lowrank_logdet(W, D, capacitance_tril):
    +    r"""
    +    Uses "matrix determinant lemma"::
    +        log|W @ W.T + D| = log|C| + log|D|,
    +    where :math:`C` is the capacitance matrix :math:`I + W.T @ inv(D) @ W`, to compute
    +    the log determinant.
    +    """
    +    return 2 * capacitance_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1) + D.log().sum(-1)
    +
    +
    +def _batch_lowrank_mahalanobis(W, D, x, capacitance_tril):
    +    r"""
    +    Uses "Woodbury matrix identity"::
    +        inv(W @ W.T + D) = inv(D) - inv(D) @ W @ inv(C) @ W.T @ inv(D),
    +    where :math:`C` is the capacitance matrix :math:`I + W.T @ inv(D) @ W`, to compute the squared
    +    Mahalanobis distance :math:`x.T @ inv(W @ W.T + D) @ x`.
    +    """
    +    Wt_Dinv = W.transpose(-1, -2) / D.unsqueeze(-2)
    +    Wt_Dinv_x = _batch_mv(Wt_Dinv, x)
    +    mahalanobis_term1 = (x.pow(2) / D).sum(-1)
    +    mahalanobis_term2 = _batch_mahalanobis(capacitance_tril, Wt_Dinv_x)
    +    return mahalanobis_term1 - mahalanobis_term2
    +
    +
    +
+class LowRankMultivariateNormal(Distribution):
+    r"""
+    Creates a multivariate normal distribution with covariance matrix having a low-rank form
+    parameterized by :attr:`cov_factor` and :attr:`cov_diag`::
+
+        covariance_matrix = cov_factor @ cov_factor.T + cov_diag
+
+    Example:
+
+        >>> m = LowRankMultivariateNormal(torch.zeros(2), torch.tensor([[1.], [0.]]), torch.ones(2))
+        >>> m.sample()  # normally distributed with mean=`[0,0]`, cov_factor=`[[1],[0]]`, cov_diag=`[1,1]`
+        tensor([-0.2102, -0.5429])
+
+    Args:
+        loc (Tensor): mean of the distribution with shape `batch_shape + event_shape`
+        cov_factor (Tensor): factor part of low-rank form of covariance matrix with shape
+            `batch_shape + event_shape + (rank,)`
+        cov_diag (Tensor): diagonal part of low-rank form of covariance matrix with shape
+            `batch_shape + event_shape`
+
+    Note:
+        The computation for determinant and inverse of covariance matrix is avoided when
+        `cov_factor.shape[1] << cov_factor.shape[0]` thanks to the `Woodbury matrix identity
+        <https://en.wikipedia.org/wiki/Woodbury_matrix_identity>`_ and the
+        `matrix determinant lemma <https://en.wikipedia.org/wiki/Matrix_determinant_lemma>`_.
+        Thanks to these formulas, we just need to compute the determinant and inverse of
+        the small size "capacitance" matrix::
+
+            capacitance = I + cov_factor.T @ inv(cov_diag) @ cov_factor
+    """
+    arg_constraints = {"loc": constraints.real,
+                       "cov_factor": constraints.real,
+                       "cov_diag": constraints.positive}
+    support = constraints.real
+    has_rsample = True
+
+    def __init__(self, loc, cov_factor, cov_diag, validate_args=None):
+        if loc.dim() < 1:
+            raise ValueError("loc must be at least one-dimensional.")
+        event_shape = loc.shape[-1:]
+        if cov_factor.dim() < 2:
+            raise ValueError("cov_factor must be at least two-dimensional, "
+                             "with optional leading batch dimensions")
+        if cov_factor.shape[-2:-1] != event_shape:
+            raise ValueError("cov_factor must be a batch of matrices with shape {} x m"
+                             .format(event_shape[0]))
+        if cov_diag.shape[-1:] != event_shape:
+            raise ValueError("cov_diag must be a batch of vectors with shape {}".format(event_shape))
+
+        loc_ = loc.unsqueeze(-1)
+        cov_diag_ = cov_diag.unsqueeze(-1)
+        try:
+            loc_, self.cov_factor, cov_diag_ = torch.broadcast_tensors(loc_, cov_factor, cov_diag_)
+        except RuntimeError:
+            raise ValueError("Incompatible batch shapes: loc {}, cov_factor {}, cov_diag {}"
+                             .format(loc.shape, cov_factor.shape, cov_diag.shape))
+        self.loc = loc_[..., 0]
+        self.cov_diag = cov_diag_[..., 0]
+        batch_shape = self.loc.shape[:-1]
+
+        self._unbroadcasted_cov_factor = cov_factor
+        self._unbroadcasted_cov_diag = cov_diag
+        self._capacitance_tril = _batch_capacitance_tril(cov_factor, cov_diag)
+        super(LowRankMultivariateNormal, self).__init__(batch_shape, event_shape,
+                                                        validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(LowRankMultivariateNormal, _instance)
+        batch_shape = torch.Size(batch_shape)
+        loc_shape = batch_shape + self.event_shape
+        new.loc = self.loc.expand(loc_shape)
+        new.cov_diag = self.cov_diag.expand(loc_shape)
+        new.cov_factor = self.cov_factor.expand(loc_shape + self.cov_factor.shape[-1:])
+        new._unbroadcasted_cov_factor = self._unbroadcasted_cov_factor
+        new._unbroadcasted_cov_diag = self._unbroadcasted_cov_diag
+        new._capacitance_tril = self._capacitance_tril
+        super(LowRankMultivariateNormal, new).__init__(batch_shape,
+                                                       self.event_shape,
+                                                       validate_args=False)
+        new._validate_args = self._validate_args
+        return new
+
+    @property
+    def mean(self):
+        return self.loc
+
+    @lazy_property
+    def variance(self):
+        return (self._unbroadcasted_cov_factor.pow(2).sum(-1)
+                + self._unbroadcasted_cov_diag).expand(self._batch_shape + self._event_shape)
+
+    @lazy_property
+    def scale_tril(self):
+        # The following identity is used to increase the numerical stability of the
+        # Cholesky decomposition (see http://www.gaussianprocess.org/gpml/, Section 3.4.3):
+        #     W @ W.T + D = D^(1/2) @ (I + D^(-1/2) @ W @ W.T @ D^(-1/2)) @ D^(1/2)
+        # The matrix "I + D^(-1/2) @ W @ W.T @ D^(-1/2)" has eigenvalues bounded from below by 1,
+        # hence it is well-conditioned and safe to take Cholesky decomposition.
+        n = self._event_shape[0]
+        cov_diag_sqrt_unsqueeze = self._unbroadcasted_cov_diag.sqrt().unsqueeze(-1)
+        Dinvsqrt_W = self._unbroadcasted_cov_factor / cov_diag_sqrt_unsqueeze
+        K = torch.matmul(Dinvsqrt_W, Dinvsqrt_W.transpose(-1, -2)).contiguous()
+        K.view(-1, n * n)[:, ::n + 1] += 1  # add identity matrix to K
+        scale_tril = cov_diag_sqrt_unsqueeze * torch.cholesky(K)
+        return scale_tril.expand(self._batch_shape + self._event_shape + self._event_shape)
+
+    @lazy_property
+    def covariance_matrix(self):
+        covariance_matrix = (torch.matmul(self._unbroadcasted_cov_factor,
+                                          self._unbroadcasted_cov_factor.transpose(-1, -2))
+                             + torch.diag_embed(self._unbroadcasted_cov_diag))
+        return covariance_matrix.expand(self._batch_shape + self._event_shape +
+                                        self._event_shape)
+
+    @lazy_property
+    def precision_matrix(self):
+        # We use the Woodbury matrix identity to take advantage of the low-rank form:
+        #     inv(W @ W.T + D) = inv(D) - inv(D) @ W @ inv(C) @ W.T @ inv(D)
+        # where :math:`C` is the capacitance matrix.
+        Wt_Dinv = (self._unbroadcasted_cov_factor.transpose(-1, -2)
+                   / self._unbroadcasted_cov_diag.unsqueeze(-2))
+        A = torch.triangular_solve(Wt_Dinv, self._capacitance_tril, upper=False)[0]
+        precision_matrix = (torch.diag_embed(self._unbroadcasted_cov_diag.reciprocal())
+                            - torch.matmul(A.transpose(-1, -2), A))
+        return precision_matrix.expand(self._batch_shape + self._event_shape +
+                                       self._event_shape)
+
+    def rsample(self, sample_shape=torch.Size()):
+        shape = self._extended_shape(sample_shape)
+        W_shape = shape[:-1] + self.cov_factor.shape[-1:]
+        eps_W = _standard_normal(W_shape, dtype=self.loc.dtype, device=self.loc.device)
+        eps_D = _standard_normal(shape, dtype=self.loc.dtype, device=self.loc.device)
+        return (self.loc + _batch_mv(self._unbroadcasted_cov_factor, eps_W)
+                + self._unbroadcasted_cov_diag.sqrt() * eps_D)
+
+    def log_prob(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        diff = value - self.loc
+        M = _batch_lowrank_mahalanobis(self._unbroadcasted_cov_factor,
+                                       self._unbroadcasted_cov_diag,
+                                       diff,
+                                       self._capacitance_tril)
+        log_det = _batch_lowrank_logdet(self._unbroadcasted_cov_factor,
+                                        self._unbroadcasted_cov_diag,
+                                        self._capacitance_tril)
+        return -0.5 * (self._event_shape[0] * math.log(2 * math.pi) + log_det + M)
+
+    def entropy(self):
+        log_det = _batch_lowrank_logdet(self._unbroadcasted_cov_factor,
+                                        self._unbroadcasted_cov_diag,
+                                        self._capacitance_tril)
+        H = 0.5 * (self._event_shape[0] * (1.0 + math.log(2 * math.pi)) + log_det)
+        if len(self._batch_shape) == 0:
+            return H
+        else:
+            return H.expand(self._batch_shape)
    +
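A short usage sketch (illustrative values): the dense `covariance_matrix` property recovers `cov_factor @ cov_factor.T + diag(cov_diag)` exactly, while sampling and `log_prob` never materialize the full inverse:

    import torch
    from torch.distributions import LowRankMultivariateNormal

    loc = torch.zeros(3)
    cov_factor = torch.tensor([[1.0], [0.5], [0.0]])  # event_shape x rank
    cov_diag = torch.ones(3)

    m = LowRankMultivariateNormal(loc, cov_factor, cov_diag)
    expected = cov_factor @ cov_factor.t() + torch.diag(cov_diag)
    print(torch.allclose(m.covariance_matrix, expected))  # True
    print(m.rsample().shape)  # torch.Size([3])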
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/multinomial.html b/docs/stable/_modules/torch/distributions/multinomial.html
new file mode 100644
index 000000000000..693088eb88d9
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/multinomial.html
@@ -0,0 +1,623 @@

    Source code for torch.distributions.multinomial

    +import torch
    +from torch._six import inf
    +from torch.distributions.distribution import Distribution
    +from torch.distributions import Categorical
    +from numbers import Number
    +from torch.distributions import constraints
    +from torch.distributions.utils import broadcast_all
    +
    +
    +
+class Multinomial(Distribution):
+    r"""
+    Creates a Multinomial distribution parameterized by :attr:`total_count` and
+    either :attr:`probs` or :attr:`logits` (but not both). The innermost dimension of
+    :attr:`probs` indexes over categories. All other dimensions index over batches.
+
+    Note that :attr:`total_count` need not be specified if only :meth:`log_prob` is
+    called (see example below)
+
+    .. note:: :attr:`probs` must be non-negative, finite and have a non-zero sum,
+              and it will be normalized to sum to 1.
+
+    -   :meth:`sample` requires a single shared `total_count` for all
+        parameters and samples.
+    -   :meth:`log_prob` allows different `total_count` for each parameter and
+        sample.
+
+    Example::
+
+        >>> m = Multinomial(100, torch.tensor([ 1., 1., 1., 1.]))
+        >>> x = m.sample()  # equal probability of 0, 1, 2, 3
+        tensor([ 21.,  24.,  30.,  25.])
+
+        >>> Multinomial(probs=torch.tensor([1., 1., 1., 1.])).log_prob(x)
+        tensor([-4.1338])
+
+    Args:
+        total_count (int): number of trials
+        probs (Tensor): event probabilities
+        logits (Tensor): event log probabilities
+    """
+    arg_constraints = {'probs': constraints.simplex,
+                       'logits': constraints.real}
+
+    @property
+    def mean(self):
+        return self.probs * self.total_count
+
+    @property
+    def variance(self):
+        return self.total_count * self.probs * (1 - self.probs)
+
+    def __init__(self, total_count=1, probs=None, logits=None, validate_args=None):
+        if not isinstance(total_count, Number):
+            raise NotImplementedError('inhomogeneous total_count is not supported')
+        self.total_count = total_count
+        self._categorical = Categorical(probs=probs, logits=logits)
+        batch_shape = self._categorical.batch_shape
+        event_shape = self._categorical.param_shape[-1:]
+        super(Multinomial, self).__init__(batch_shape, event_shape, validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(Multinomial, _instance)
+        batch_shape = torch.Size(batch_shape)
+        new.total_count = self.total_count
+        new._categorical = self._categorical.expand(batch_shape)
+        super(Multinomial, new).__init__(batch_shape, self.event_shape, validate_args=False)
+        new._validate_args = self._validate_args
+        return new
+
+    def _new(self, *args, **kwargs):
+        return self._categorical._new(*args, **kwargs)
+
+    @constraints.dependent_property
+    def support(self):
+        return constraints.integer_interval(0, self.total_count)
+
+    @property
+    def logits(self):
+        return self._categorical.logits
+
+    @property
+    def probs(self):
+        return self._categorical.probs
+
+    @property
+    def param_shape(self):
+        return self._categorical.param_shape
+
+    def sample(self, sample_shape=torch.Size()):
+        sample_shape = torch.Size(sample_shape)
+        samples = self._categorical.sample(torch.Size((self.total_count,)) + sample_shape)
+        # samples.shape is (total_count, sample_shape, batch_shape), need to change it to
+        # (sample_shape, batch_shape, total_count)
+        shifted_idx = list(range(samples.dim()))
+        shifted_idx.append(shifted_idx.pop(0))
+        samples = samples.permute(*shifted_idx)
+        counts = samples.new(self._extended_shape(sample_shape)).zero_()
+        counts.scatter_add_(-1, samples, torch.ones_like(samples))
+        return counts.type_as(self.probs)
+
+    def log_prob(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        logits, value = broadcast_all(self.logits.clone(), value)
+        log_factorial_n = torch.lgamma(value.sum(-1) + 1)
+        log_factorial_xs = torch.lgamma(value + 1).sum(-1)
+        logits[(value == 0) & (logits == -inf)] = 0
+        log_powers = (logits * value).sum(-1)
+        return log_factorial_n - log_factorial_xs + log_powers
    +
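For reference, a usage sketch: counts returned by `sample` always sum to `total_count`, and `log_prob` infers the trial count from the value itself, so the evaluating distribution need not share `total_count` with the sampling one:

    import torch
    from torch.distributions import Multinomial

    m = Multinomial(100, torch.tensor([1., 1., 1., 1.]))  # probs normalize to 0.25 each
    x = m.sample()
    print(x.sum())  # tensor(100.): counts always sum to total_count
    print(Multinomial(probs=torch.tensor([1., 1., 1., 1.])).log_prob(x))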
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/multivariate_normal.html b/docs/stable/_modules/torch/distributions/multivariate_normal.html
new file mode 100644
index 000000000000..bec19b220da9
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/multivariate_normal.html
@@ -0,0 +1,732 @@

    Source code for torch.distributions.multivariate_normal

    +import math
    +
    +import torch
    +from torch.distributions import constraints
    +from torch.distributions.distribution import Distribution
    +from torch.distributions.utils import _standard_normal, lazy_property
    +
    +
    +def _batch_mv(bmat, bvec):
    +    r"""
    +    Performs a batched matrix-vector product, with compatible but different batch shapes.
    +
    +    This function takes as input `bmat`, containing :math:`n \times n` matrices, and
    +    `bvec`, containing length :math:`n` vectors.
    +
    +    Both `bmat` and `bvec` may have any number of leading dimensions, which correspond
    +    to a batch shape. They are not necessarily assumed to have the same batch shape,
    +    just ones which can be broadcasted.
    +    """
    +    return torch.matmul(bmat, bvec.unsqueeze(-1)).squeeze(-1)
    +
    +
    +def _batch_mahalanobis(bL, bx):
    +    r"""
    +    Computes the squared Mahalanobis distance :math:`\mathbf{x}^\top\mathbf{M}^{-1}\mathbf{x}`
    +    for a factored :math:`\mathbf{M} = \mathbf{L}\mathbf{L}^\top`.
    +
+    Accepts batches for both `bL` and `bx`. They are not necessarily assumed to have the same
+    batch shape; the batch shape of `bL` just has to be broadcastable to that of `bx`.
    +    """
    +    n = bx.size(-1)
    +    bx_batch_shape = bx.shape[:-1]
    +
    +    # Assume that bL.shape = (i, 1, n, n), bx.shape = (..., i, j, n),
    +    # we are going to make bx have shape (..., 1, j,  i, 1, n) to apply batched tri.solve
    +    bx_batch_dims = len(bx_batch_shape)
    +    bL_batch_dims = bL.dim() - 2
    +    outer_batch_dims = bx_batch_dims - bL_batch_dims
    +    old_batch_dims = outer_batch_dims + bL_batch_dims
    +    new_batch_dims = outer_batch_dims + 2 * bL_batch_dims
    +    # Reshape bx with the shape (..., 1, i, j, 1, n)
    +    bx_new_shape = bx.shape[:outer_batch_dims]
    +    for (sL, sx) in zip(bL.shape[:-2], bx.shape[outer_batch_dims:-1]):
    +        bx_new_shape += (sx // sL, sL)
    +    bx_new_shape += (n,)
    +    bx = bx.reshape(bx_new_shape)
    +    # Permute bx to make it have shape (..., 1, j, i, 1, n)
    +    permute_dims = (list(range(outer_batch_dims)) +
    +                    list(range(outer_batch_dims, new_batch_dims, 2)) +
    +                    list(range(outer_batch_dims + 1, new_batch_dims, 2)) +
    +                    [new_batch_dims])
    +    bx = bx.permute(permute_dims)
    +
    +    flat_L = bL.reshape(-1, n, n)  # shape = b x n x n
    +    flat_x = bx.reshape(-1, flat_L.size(0), n)  # shape = c x b x n
    +    flat_x_swap = flat_x.permute(1, 2, 0)  # shape = b x n x c
    +    M_swap = torch.triangular_solve(flat_x_swap, flat_L, upper=False)[0].pow(2).sum(-2)  # shape = b x c
    +    M = M_swap.t()  # shape = c x b
    +
    +    # Now we revert the above reshape and permute operators.
    +    permuted_M = M.reshape(bx.shape[:-1])  # shape = (..., 1, j, i, 1)
    +    permute_inv_dims = list(range(outer_batch_dims))
    +    for i in range(bL_batch_dims):
    +        permute_inv_dims += [outer_batch_dims + i, old_batch_dims + i]
    +    reshaped_M = permuted_M.permute(permute_inv_dims)  # shape = (..., 1, i, j, 1)
    +    return reshaped_M.reshape(bx_batch_shape)
    +
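The reshaping and permuting above only serve to batch the triangular solves; numerically the function agrees with the direct quadratic form. A sketch (it imports the private helper, and uses `torch.cholesky` as this module does):

    import torch
    from torch.distributions.multivariate_normal import _batch_mahalanobis

    torch.manual_seed(0)
    n = 4
    A = torch.randn(n, n)
    cov = A @ A.t() + n * torch.eye(n)  # a well-conditioned SPD matrix
    L = torch.cholesky(cov)             # cov = L @ L.T
    x = torch.randn(2, 3, n)            # batched inputs broadcast against L

    direct = (x.unsqueeze(-2) @ torch.inverse(cov) @ x.unsqueeze(-1)).reshape(2, 3)
    print(torch.allclose(_batch_mahalanobis(L, x), direct, atol=1e-4))  # True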
    +
    +def _precision_to_scale_tril(P):
    +    # Ref: https://nbviewer.jupyter.org/gist/fehiepsi/5ef8e09e61604f10607380467eb82006#Precision-to-scale_tril
    +    Lf = torch.cholesky(torch.flip(P, (-2, -1)))
    +    L_inv = torch.transpose(torch.flip(Lf, (-2, -1)), -2, -1)
    +    L = torch.triangular_solve(torch.eye(P.shape[-1], dtype=P.dtype, device=P.device),
    +                               L_inv, upper=False)[0]
    +    return L
    +
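The flip-and-transpose trick above avoids inverting `P` directly; the result is a lower-triangular `L` with `L @ L.T = inv(P)`. A check sketch (private helper import assumed):

    import torch
    from torch.distributions.multivariate_normal import _precision_to_scale_tril

    torch.manual_seed(0)
    A = torch.randn(3, 3)
    P = A @ A.t() + 3 * torch.eye(3)  # an SPD precision matrix
    L = _precision_to_scale_tril(P)
    print(torch.allclose(L @ L.t(), torch.inverse(P), atol=1e-5))  # True
    print(torch.allclose(L, torch.tril(L)))                        # lower-triangular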
    +
    +
+class MultivariateNormal(Distribution):
+    r"""
+    Creates a multivariate normal (also called Gaussian) distribution
+    parameterized by a mean vector and a covariance matrix.
+
+    The multivariate normal distribution can be parameterized either
+    in terms of a positive definite covariance matrix :math:`\mathbf{\Sigma}`
+    or a positive definite precision matrix :math:`\mathbf{\Sigma}^{-1}`
+    or a lower-triangular matrix :math:`\mathbf{L}` with positive-valued
+    diagonal entries, such that
+    :math:`\mathbf{\Sigma} = \mathbf{L}\mathbf{L}^\top`. This triangular matrix
+    can be obtained via e.g. Cholesky decomposition of the covariance.
+
+    Example:
+
+        >>> m = MultivariateNormal(torch.zeros(2), torch.eye(2))
+        >>> m.sample()  # normally distributed with mean=`[0,0]` and covariance_matrix=`I`
+        tensor([-0.2102, -0.5429])
+
+    Args:
+        loc (Tensor): mean of the distribution
+        covariance_matrix (Tensor): positive-definite covariance matrix
+        precision_matrix (Tensor): positive-definite precision matrix
+        scale_tril (Tensor): lower-triangular factor of covariance, with positive-valued diagonal
+
+    Note:
+        Only one of :attr:`covariance_matrix` or :attr:`precision_matrix` or
+        :attr:`scale_tril` can be specified.
+
+        Using :attr:`scale_tril` will be more efficient: all computations internally
+        are based on :attr:`scale_tril`. If :attr:`covariance_matrix` or
+        :attr:`precision_matrix` is passed instead, it is only used to compute
+        the corresponding lower triangular matrices using a Cholesky decomposition.
+    """
+    arg_constraints = {'loc': constraints.real_vector,
+                       'covariance_matrix': constraints.positive_definite,
+                       'precision_matrix': constraints.positive_definite,
+                       'scale_tril': constraints.lower_cholesky}
+    support = constraints.real
+    has_rsample = True
+
+    def __init__(self, loc, covariance_matrix=None, precision_matrix=None, scale_tril=None, validate_args=None):
+        if loc.dim() < 1:
+            raise ValueError("loc must be at least one-dimensional.")
+        if (covariance_matrix is not None) + (scale_tril is not None) + (precision_matrix is not None) != 1:
+            raise ValueError("Exactly one of covariance_matrix or precision_matrix or scale_tril may be specified.")
+
+        loc_ = loc.unsqueeze(-1)  # temporarily add dim on right
+        if scale_tril is not None:
+            if scale_tril.dim() < 2:
+                raise ValueError("scale_tril matrix must be at least two-dimensional, "
+                                 "with optional leading batch dimensions")
+            self.scale_tril, loc_ = torch.broadcast_tensors(scale_tril, loc_)
+        elif covariance_matrix is not None:
+            if covariance_matrix.dim() < 2:
+                raise ValueError("covariance_matrix must be at least two-dimensional, "
+                                 "with optional leading batch dimensions")
+            self.covariance_matrix, loc_ = torch.broadcast_tensors(covariance_matrix, loc_)
+        else:
+            if precision_matrix.dim() < 2:
+                raise ValueError("precision_matrix must be at least two-dimensional, "
+                                 "with optional leading batch dimensions")
+            self.precision_matrix, loc_ = torch.broadcast_tensors(precision_matrix, loc_)
+        self.loc = loc_[..., 0]  # drop rightmost dim
+
+        batch_shape, event_shape = self.loc.shape[:-1], self.loc.shape[-1:]
+        super(MultivariateNormal, self).__init__(batch_shape, event_shape, validate_args=validate_args)
+
+        if scale_tril is not None:
+            self._unbroadcasted_scale_tril = scale_tril
+        elif covariance_matrix is not None:
+            self._unbroadcasted_scale_tril = torch.cholesky(covariance_matrix)
+        else:  # precision_matrix is not None
+            self._unbroadcasted_scale_tril = _precision_to_scale_tril(precision_matrix)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(MultivariateNormal, _instance)
+        batch_shape = torch.Size(batch_shape)
+        loc_shape = batch_shape + self.event_shape
+        cov_shape = batch_shape + self.event_shape + self.event_shape
+        new.loc = self.loc.expand(loc_shape)
+        new._unbroadcasted_scale_tril = self._unbroadcasted_scale_tril
+        if 'covariance_matrix' in self.__dict__:
+            new.covariance_matrix = self.covariance_matrix.expand(cov_shape)
+        if 'scale_tril' in self.__dict__:
+            new.scale_tril = self.scale_tril.expand(cov_shape)
+        if 'precision_matrix' in self.__dict__:
+            new.precision_matrix = self.precision_matrix.expand(cov_shape)
+        super(MultivariateNormal, new).__init__(batch_shape,
+                                                self.event_shape,
+                                                validate_args=False)
+        new._validate_args = self._validate_args
+        return new
+
+    @lazy_property
+    def scale_tril(self):
+        return self._unbroadcasted_scale_tril.expand(
+            self._batch_shape + self._event_shape + self._event_shape)
+
+    @lazy_property
+    def covariance_matrix(self):
+        return (torch.matmul(self._unbroadcasted_scale_tril,
+                             self._unbroadcasted_scale_tril.transpose(-1, -2))
+                .expand(self._batch_shape + self._event_shape + self._event_shape))
+
+    @lazy_property
+    def precision_matrix(self):
+        # TODO: use `torch.potri` on `scale_tril` once a backwards pass is implemented.
+        scale_tril_inv = torch.inverse(self._unbroadcasted_scale_tril)
+        return torch.matmul(scale_tril_inv.transpose(-1, -2), scale_tril_inv).expand(
+            self._batch_shape + self._event_shape + self._event_shape)
+
+    @property
+    def mean(self):
+        return self.loc
+
+    @property
+    def variance(self):
+        return self._unbroadcasted_scale_tril.pow(2).sum(-1).expand(
+            self._batch_shape + self._event_shape)
+
+    def rsample(self, sample_shape=torch.Size()):
+        shape = self._extended_shape(sample_shape)
+        eps = _standard_normal(shape, dtype=self.loc.dtype, device=self.loc.device)
+        return self.loc + _batch_mv(self._unbroadcasted_scale_tril, eps)
+
+    def log_prob(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        diff = value - self.loc
+        M = _batch_mahalanobis(self._unbroadcasted_scale_tril, diff)
+        half_log_det = self._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1)
+        return -0.5 * (self._event_shape[0] * math.log(2 * math.pi) + M) - half_log_det
+
+    def entropy(self):
+        half_log_det = self._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1)
+        H = 0.5 * self._event_shape[0] * (1.0 + math.log(2 * math.pi)) + half_log_det
+        if len(self._batch_shape) == 0:
+            return H
+        else:
+            return H.expand(self._batch_shape)
    +
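A sketch showing that the three parameterizations define the same density (illustrative values):

    import torch
    from torch.distributions import MultivariateNormal

    torch.manual_seed(0)
    loc = torch.zeros(3)
    A = torch.randn(3, 3)
    cov = A @ A.t() + 3 * torch.eye(3)

    m_cov = MultivariateNormal(loc, covariance_matrix=cov)
    m_prec = MultivariateNormal(loc, precision_matrix=torch.inverse(cov))
    m_tril = MultivariateNormal(loc, scale_tril=torch.cholesky(cov))  # cheapest path internally

    x = torch.randn(3)
    # All three log-densities agree up to numerical error.
    print(m_cov.log_prob(x), m_prec.log_prob(x), m_tril.log_prob(x))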
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/negative_binomial.html b/docs/stable/_modules/torch/distributions/negative_binomial.html
new file mode 100644
index 000000000000..5498d1a81df9
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/negative_binomial.html
@@ -0,0 +1,612 @@

    Source code for torch.distributions.negative_binomial

    +import torch
    +import torch.nn.functional as F
    +from torch.distributions import constraints
    +from torch.distributions.distribution import Distribution
    +from torch.distributions.utils import broadcast_all, probs_to_logits, lazy_property, logits_to_probs
    +
    +
    +
+class NegativeBinomial(Distribution):
+    r"""
+    Creates a Negative Binomial distribution, i.e. the distribution
+    of the number of successful independent and identical Bernoulli trials
+    before :attr:`total_count` failures are achieved. The probability
+    of success of each Bernoulli trial is :attr:`probs`.
+
+    Args:
+        total_count (float or Tensor): non-negative number of negative Bernoulli
+            trials to stop, although the distribution is still valid for real
+            valued count
+        probs (Tensor): Event probabilities of success in the half open interval [0, 1)
+        logits (Tensor): Event log-odds for probabilities of success
+    """
+    arg_constraints = {'total_count': constraints.greater_than_eq(0),
+                       'probs': constraints.half_open_interval(0., 1.),
+                       'logits': constraints.real}
+    support = constraints.nonnegative_integer
+
+    def __init__(self, total_count, probs=None, logits=None, validate_args=None):
+        if (probs is None) == (logits is None):
+            raise ValueError("Either `probs` or `logits` must be specified, but not both.")
+        if probs is not None:
+            self.total_count, self.probs, = broadcast_all(total_count, probs)
+            self.total_count = self.total_count.type_as(self.probs)
+        else:
+            self.total_count, self.logits, = broadcast_all(total_count, logits)
+            self.total_count = self.total_count.type_as(self.logits)
+
+        self._param = self.probs if probs is not None else self.logits
+        batch_shape = self._param.size()
+        super(NegativeBinomial, self).__init__(batch_shape, validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(NegativeBinomial, _instance)
+        batch_shape = torch.Size(batch_shape)
+        new.total_count = self.total_count.expand(batch_shape)
+        if 'probs' in self.__dict__:
+            new.probs = self.probs.expand(batch_shape)
+            new._param = new.probs
+        if 'logits' in self.__dict__:
+            new.logits = self.logits.expand(batch_shape)
+            new._param = new.logits
+        super(NegativeBinomial, new).__init__(batch_shape, validate_args=False)
+        new._validate_args = self._validate_args
+        return new
+
+    def _new(self, *args, **kwargs):
+        return self._param.new(*args, **kwargs)
+
+    @property
+    def mean(self):
+        return self.total_count * torch.exp(self.logits)
+
+    @property
+    def variance(self):
+        return self.mean / torch.sigmoid(-self.logits)
+
+    @lazy_property
+    def logits(self):
+        return probs_to_logits(self.probs, is_binary=True)
+
+    @lazy_property
+    def probs(self):
+        return logits_to_probs(self.logits, is_binary=True)
+
+    @property
+    def param_shape(self):
+        return self._param.size()
+
+    @lazy_property
+    def _gamma(self):
+        return torch.distributions.Gamma(concentration=self.total_count,
+                                         rate=torch.exp(-self.logits))
+
+    def sample(self, sample_shape=torch.Size()):
+        with torch.no_grad():
+            rate = self._gamma.sample(sample_shape=sample_shape)
+            return torch.poisson(rate)
+
+    def log_prob(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+
+        log_unnormalized_prob = (self.total_count * F.logsigmoid(-self.logits) +
+                                 value * F.logsigmoid(self.logits))
+
+        log_normalization = (-torch.lgamma(self.total_count + value) + torch.lgamma(1. + value) +
+                             torch.lgamma(self.total_count))
+
+        return log_unnormalized_prob - log_normalization
    +
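`sample` above draws from a Gamma-Poisson mixture (a Gamma-distributed rate, then a Poisson count); a sketch checking the sample mean against the closed form `total_count * p / (1 - p)`:

    import torch
    from torch.distributions import NegativeBinomial

    m = NegativeBinomial(total_count=10., probs=torch.tensor(0.25))
    samples = m.sample((100000,))
    print(samples.mean())  # approximately 10 * 0.25 / 0.75 = 3.33
    print(m.mean)          # tensor(3.3333)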
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/normal.html b/docs/stable/_modules/torch/distributions/normal.html
new file mode 100644
index 000000000000..8a1dace8b205
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/normal.html
@@ -0,0 +1,610 @@

    Source code for torch.distributions.normal

    +import math
    +from numbers import Number
    +
    +import torch
    +from torch.distributions import constraints
    +from torch.distributions.exp_family import ExponentialFamily
    +from torch.distributions.utils import _standard_normal, broadcast_all
    +
    +
    +
+class Normal(ExponentialFamily):
+    r"""
+    Creates a normal (also called Gaussian) distribution parameterized by
+    :attr:`loc` and :attr:`scale`.
+
+    Example::
+
+        >>> m = Normal(torch.tensor([0.0]), torch.tensor([1.0]))
+        >>> m.sample()  # normally distributed with loc=0 and scale=1
+        tensor([ 0.1046])
+
+    Args:
+        loc (float or Tensor): mean of the distribution (often referred to as mu)
+        scale (float or Tensor): standard deviation of the distribution
+            (often referred to as sigma)
+    """
+    arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
+    support = constraints.real
+    has_rsample = True
+    _mean_carrier_measure = 0
+
+    @property
+    def mean(self):
+        return self.loc
+
+    @property
+    def stddev(self):
+        return self.scale
+
+    @property
+    def variance(self):
+        return self.stddev.pow(2)
+
+    def __init__(self, loc, scale, validate_args=None):
+        self.loc, self.scale = broadcast_all(loc, scale)
+        if isinstance(loc, Number) and isinstance(scale, Number):
+            batch_shape = torch.Size()
+        else:
+            batch_shape = self.loc.size()
+        super(Normal, self).__init__(batch_shape, validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(Normal, _instance)
+        batch_shape = torch.Size(batch_shape)
+        new.loc = self.loc.expand(batch_shape)
+        new.scale = self.scale.expand(batch_shape)
+        super(Normal, new).__init__(batch_shape, validate_args=False)
+        new._validate_args = self._validate_args
+        return new
+
+    def sample(self, sample_shape=torch.Size()):
+        shape = self._extended_shape(sample_shape)
+        with torch.no_grad():
+            return torch.normal(self.loc.expand(shape), self.scale.expand(shape))
+
+    def rsample(self, sample_shape=torch.Size()):
+        shape = self._extended_shape(sample_shape)
+        eps = _standard_normal(shape, dtype=self.loc.dtype, device=self.loc.device)
+        return self.loc + eps * self.scale
+
+    def log_prob(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        # compute the variance
+        var = (self.scale ** 2)
+        log_scale = math.log(self.scale) if isinstance(self.scale, Number) else self.scale.log()
+        return -((value - self.loc) ** 2) / (2 * var) - log_scale - math.log(math.sqrt(2 * math.pi))
+
+    def cdf(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        return 0.5 * (1 + torch.erf((value - self.loc) * self.scale.reciprocal() / math.sqrt(2)))
+
+    def icdf(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        return self.loc + self.scale * torch.erfinv(2 * value - 1) * math.sqrt(2)
+
+    def entropy(self):
+        return 0.5 + 0.5 * math.log(2 * math.pi) + torch.log(self.scale)
+
+    @property
+    def _natural_params(self):
+        return (self.loc / self.scale.pow(2), -0.5 * self.scale.pow(2).reciprocal())
+
+    def _log_normalizer(self, x, y):
+        return -0.25 * x.pow(2) / y + 0.5 * torch.log(-math.pi / y)
    +
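`rsample` is reparameterized (`loc + eps * scale`), so Monte Carlo estimates built from it are differentiable in the parameters, unlike `sample`. A sketch:

    import torch
    from torch.distributions import Normal

    loc = torch.tensor(0.0, requires_grad=True)
    scale = torch.tensor(1.0, requires_grad=True)
    m = Normal(loc, scale)

    # Differentiable Monte Carlo estimate of E[x^2]:
    y = m.rsample((1000,)).pow(2).mean()
    y.backward()
    print(loc.grad, scale.grad)  # both non-None: gradients reach the parameters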
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/one_hot_categorical.html b/docs/stable/_modules/torch/distributions/one_hot_categorical.html
new file mode 100644
index 000000000000..9af78369412a
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/one_hot_categorical.html
@@ -0,0 +1,612 @@

    Source code for torch.distributions.one_hot_categorical

    +import torch
    +from torch.distributions import constraints
    +from torch.distributions.categorical import Categorical
    +from torch.distributions.distribution import Distribution
    +
    +
    +
+class OneHotCategorical(Distribution):
+    r"""
+    Creates a one-hot categorical distribution parameterized by :attr:`probs` or
+    :attr:`logits`.
+
+    Samples are one-hot coded vectors of size ``probs.size(-1)``.
+
+    .. note:: :attr:`probs` must be non-negative, finite and have a non-zero sum,
+              and it will be normalized to sum to 1.
+
+    See also: :func:`torch.distributions.Categorical` for specifications of
+    :attr:`probs` and :attr:`logits`.
+
+    Example::
+
+        >>> m = OneHotCategorical(torch.tensor([ 0.25, 0.25, 0.25, 0.25 ]))
+        >>> m.sample()  # equal probability of 0, 1, 2, 3
+        tensor([ 0.,  0.,  0.,  1.])
+
+    Args:
+        probs (Tensor): event probabilities
+        logits (Tensor): event log probabilities
+    """
+    arg_constraints = {'probs': constraints.simplex,
+                       'logits': constraints.real}
+    support = constraints.simplex
+    has_enumerate_support = True
+
+    def __init__(self, probs=None, logits=None, validate_args=None):
+        self._categorical = Categorical(probs, logits)
+        batch_shape = self._categorical.batch_shape
+        event_shape = self._categorical.param_shape[-1:]
+        super(OneHotCategorical, self).__init__(batch_shape, event_shape, validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(OneHotCategorical, _instance)
+        batch_shape = torch.Size(batch_shape)
+        new._categorical = self._categorical.expand(batch_shape)
+        super(OneHotCategorical, new).__init__(batch_shape, self.event_shape, validate_args=False)
+        new._validate_args = self._validate_args
+        return new
+
+    def _new(self, *args, **kwargs):
+        return self._categorical._new(*args, **kwargs)
+
+    @property
+    def _param(self):
+        return self._categorical._param
+
+    @property
+    def probs(self):
+        return self._categorical.probs
+
+    @property
+    def logits(self):
+        return self._categorical.logits
+
+    @property
+    def mean(self):
+        return self._categorical.probs
+
+    @property
+    def variance(self):
+        return self._categorical.probs * (1 - self._categorical.probs)
+
+    @property
+    def param_shape(self):
+        return self._categorical.param_shape
+
+    def sample(self, sample_shape=torch.Size()):
+        sample_shape = torch.Size(sample_shape)
+        probs = self._categorical.probs
+        num_events = self._categorical._num_events
+        indices = self._categorical.sample(sample_shape)
+        return torch.nn.functional.one_hot(indices, num_events).to(probs)
+
+    def log_prob(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        indices = value.max(-1)[1]
+        return self._categorical.log_prob(indices)
+
+    def entropy(self):
+        return self._categorical.entropy()
+
+    def enumerate_support(self, expand=True):
+        n = self.event_shape[0]
+        values = torch.eye(n, dtype=self._param.dtype, device=self._param.device)
+        values = values.view((n,) + (1,) * len(self.batch_shape) + (n,))
+        if expand:
+            values = values.expand((n,) + self.batch_shape + (n,))
+        return values
    +
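A usage sketch: samples are one-hot vectors, and `log_prob` is exactly the underlying `Categorical` log-probability of the hot index:

    import torch
    from torch.distributions import Categorical, OneHotCategorical

    probs = torch.tensor([0.1, 0.2, 0.3, 0.4])
    m = OneHotCategorical(probs)
    v = m.sample()       # e.g. tensor([0., 0., 1., 0.])
    idx = v.max(-1)[1]   # index of the hot entry
    print(torch.allclose(m.log_prob(v), Categorical(probs).log_prob(idx)))  # True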
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/pareto.html b/docs/stable/_modules/torch/distributions/pareto.html
new file mode 100644
index 000000000000..be3d7c8442ad
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/pareto.html
@@ -0,0 +1,567 @@

    Source code for torch.distributions.pareto

    +from torch.distributions import constraints
    +from torch.distributions.exponential import Exponential
    +from torch.distributions.transformed_distribution import TransformedDistribution
    +from torch.distributions.transforms import AffineTransform, ExpTransform
    +from torch.distributions.utils import broadcast_all
    +
    +
    +
+class Pareto(TransformedDistribution):
+    r"""
+    Samples from a Pareto Type 1 distribution.
+
+    Example::
+
+        >>> m = Pareto(torch.tensor([1.0]), torch.tensor([1.0]))
+        >>> m.sample()  # sample from a Pareto distribution with scale=1 and alpha=1
+        tensor([ 1.5623])
+
+    Args:
+        scale (float or Tensor): Scale parameter of the distribution
+        alpha (float or Tensor): Shape parameter of the distribution
+    """
+    arg_constraints = {'alpha': constraints.positive, 'scale': constraints.positive}
+
+    def __init__(self, scale, alpha, validate_args=None):
+        self.scale, self.alpha = broadcast_all(scale, alpha)
+        base_dist = Exponential(self.alpha)
+        transforms = [ExpTransform(), AffineTransform(loc=0, scale=self.scale)]
+        super(Pareto, self).__init__(base_dist, transforms, validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(Pareto, _instance)
+        new.scale = self.scale.expand(batch_shape)
+        new.alpha = self.alpha.expand(batch_shape)
+        return super(Pareto, self).expand(batch_shape, _instance=new)
+
+    @property
+    def mean(self):
+        # mean is inf for alpha <= 1
+        a = self.alpha.clone().clamp(min=1)
+        return a * self.scale / (a - 1)
+
+    @property
+    def variance(self):
+        # var is inf for alpha <= 2
+        a = self.alpha.clone().clamp(min=2)
+        return self.scale.pow(2) * a / ((a - 1).pow(2) * (a - 2))
+
+    @constraints.dependent_property
+    def support(self):
+        return constraints.greater_than(self.scale)
+
+    def entropy(self):
+        return ((self.scale / self.alpha).log() + (1 + self.alpha.reciprocal()))
    +
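The construction above relies on `scale * exp(E)` being Pareto-distributed when `E ~ Exponential(alpha)`; a sketch checking the density against the closed form `alpha * scale**alpha / x**(alpha + 1)`:

    import torch
    from torch.distributions import Pareto

    scale, alpha = torch.tensor(2.0), torch.tensor(3.0)
    m = Pareto(scale, alpha)
    x = torch.tensor(4.0)
    closed_form = (alpha * scale.pow(alpha) / x.pow(alpha + 1)).log()
    print(torch.allclose(m.log_prob(x), closed_form))  # True
    print(bool((m.sample((5,)) >= scale).all()))       # samples lie above `scale`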
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/poisson.html b/docs/stable/_modules/torch/distributions/poisson.html
new file mode 100644
index 000000000000..ef5719614d7c
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/poisson.html
@@ -0,0 +1,584 @@

    Source code for torch.distributions.poisson

    +from numbers import Number
    +
    +import torch
    +from torch.distributions import constraints
    +from torch.distributions.exp_family import ExponentialFamily
    +from torch.distributions.utils import broadcast_all
    +
    +
    +
+class Poisson(ExponentialFamily):
+    r"""
+    Creates a Poisson distribution parameterized by :attr:`rate`, the rate parameter.
+
+    Samples are nonnegative integers, with a pmf given by
+
+    .. math::
+        \mathrm{rate}^k \frac{e^{-\mathrm{rate}}}{k!}
+
+    Example::
+
+        >>> m = Poisson(torch.tensor([4]))
+        >>> m.sample()
+        tensor([ 3.])
+
+    Args:
+        rate (Number, Tensor): the rate parameter
+    """
+    arg_constraints = {'rate': constraints.positive}
+    support = constraints.nonnegative_integer
+
+    @property
+    def mean(self):
+        return self.rate
+
+    @property
+    def variance(self):
+        return self.rate
+
+    def __init__(self, rate, validate_args=None):
+        self.rate, = broadcast_all(rate)
+        if isinstance(rate, Number):
+            batch_shape = torch.Size()
+        else:
+            batch_shape = self.rate.size()
+        super(Poisson, self).__init__(batch_shape, validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(Poisson, _instance)
+        batch_shape = torch.Size(batch_shape)
+        new.rate = self.rate.expand(batch_shape)
+        super(Poisson, new).__init__(batch_shape, validate_args=False)
+        new._validate_args = self._validate_args
+        return new
+
+    def sample(self, sample_shape=torch.Size()):
+        shape = self._extended_shape(sample_shape)
+        with torch.no_grad():
+            return torch.poisson(self.rate.expand(shape))
+
+    def log_prob(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        rate, value = broadcast_all(self.rate, value)
+        return (rate.log() * value) - rate - (value + 1).lgamma()
+
+    @property
+    def _natural_params(self):
+        return (torch.log(self.rate), )
+
+    def _log_normalizer(self, x):
+        return torch.exp(x)
    +
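A usage sketch; mean and variance of a Poisson are both equal to `rate`:

    import torch
    from torch.distributions import Poisson

    m = Poisson(torch.tensor(4.0))
    s = m.sample((100000,))
    print(s.mean(), s.var())              # both approximately 4.0
    print(m.log_prob(torch.tensor(3.0)))  # log pmf at k = 3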
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/relaxed_bernoulli.html b/docs/stable/_modules/torch/distributions/relaxed_bernoulli.html
new file mode 100644
index 000000000000..857e43bba7f4
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/relaxed_bernoulli.html
@@ -0,0 +1,651 @@

    Source code for torch.distributions.relaxed_bernoulli

    +import torch
    +from numbers import Number
    +from torch.distributions import constraints
    +from torch.distributions.distribution import Distribution
    +from torch.distributions.transformed_distribution import TransformedDistribution
    +from torch.distributions.transforms import SigmoidTransform
    +from torch.distributions.utils import broadcast_all, probs_to_logits, logits_to_probs, lazy_property, clamp_probs
    +
    +
    +
+class LogitRelaxedBernoulli(Distribution):
+    r"""
+    Creates a LogitRelaxedBernoulli distribution parameterized by :attr:`probs`
+    or :attr:`logits` (but not both), which is the logit of a RelaxedBernoulli
+    distribution.
+
+    Samples are logits of values in (0, 1). See [1] for more details.
+
+    Args:
+        temperature (Tensor): relaxation temperature
+        probs (Number, Tensor): the probability of sampling `1`
+        logits (Number, Tensor): the log-odds of sampling `1`
+
+    [1] The Concrete Distribution: A Continuous Relaxation of Discrete Random
+    Variables (Maddison et al, 2017)
+
+    [2] Categorical Reparametrization with Gumbel-Softmax
+    (Jang et al, 2017)
+    """
+    arg_constraints = {'probs': constraints.unit_interval,
+                       'logits': constraints.real}
+    support = constraints.real
+
+    def __init__(self, temperature, probs=None, logits=None, validate_args=None):
+        self.temperature = temperature
+        if (probs is None) == (logits is None):
+            raise ValueError("Either `probs` or `logits` must be specified, but not both.")
+        if probs is not None:
+            is_scalar = isinstance(probs, Number)
+            self.probs, = broadcast_all(probs)
+        else:
+            is_scalar = isinstance(logits, Number)
+            self.logits, = broadcast_all(logits)
+        self._param = self.probs if probs is not None else self.logits
+        if is_scalar:
+            batch_shape = torch.Size()
+        else:
+            batch_shape = self._param.size()
+        super(LogitRelaxedBernoulli, self).__init__(batch_shape, validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(LogitRelaxedBernoulli, _instance)
+        batch_shape = torch.Size(batch_shape)
+        new.temperature = self.temperature
+        if 'probs' in self.__dict__:
+            new.probs = self.probs.expand(batch_shape)
+            new._param = new.probs
+        if 'logits' in self.__dict__:
+            new.logits = self.logits.expand(batch_shape)
+            new._param = new.logits
+        super(LogitRelaxedBernoulli, new).__init__(batch_shape, validate_args=False)
+        new._validate_args = self._validate_args
+        return new
+
+    def _new(self, *args, **kwargs):
+        return self._param.new(*args, **kwargs)
+
+    @lazy_property
+    def logits(self):
+        return probs_to_logits(self.probs, is_binary=True)
+
+    @lazy_property
+    def probs(self):
+        return logits_to_probs(self.logits, is_binary=True)
+
+    @property
+    def param_shape(self):
+        return self._param.size()
+
+    def rsample(self, sample_shape=torch.Size()):
+        shape = self._extended_shape(sample_shape)
+        probs = clamp_probs(self.probs.expand(shape))
+        uniforms = clamp_probs(torch.rand(shape, dtype=probs.dtype, device=probs.device))
+        return (uniforms.log() - (-uniforms).log1p() + probs.log() - (-probs).log1p()) / self.temperature
+
+    def log_prob(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        logits, value = broadcast_all(self.logits, value)
+        diff = logits - value.mul(self.temperature)
+        return self.temperature.log() + diff - 2 * diff.exp().log1p()
+
+
+class RelaxedBernoulli(TransformedDistribution):
+    r"""
+    Creates a RelaxedBernoulli distribution, parametrized by
+    :attr:`temperature`, and either :attr:`probs` or :attr:`logits`
+    (but not both). This is a relaxed version of the `Bernoulli` distribution,
+    so the values are in (0, 1), and it has reparametrizable samples.
+
+    Example::
+
+        >>> m = RelaxedBernoulli(torch.tensor([2.2]),
+                                 torch.tensor([0.1, 0.2, 0.3, 0.99]))
+        >>> m.sample()
+        tensor([ 0.2951,  0.3442,  0.8918,  0.9021])
+
+    Args:
+        temperature (Tensor): relaxation temperature
+        probs (Number, Tensor): the probability of sampling `1`
+        logits (Number, Tensor): the log-odds of sampling `1`
+    """
+    arg_constraints = {'probs': constraints.unit_interval,
+                       'logits': constraints.real}
+    support = constraints.unit_interval
+    has_rsample = True
+
+    def __init__(self, temperature, probs=None, logits=None, validate_args=None):
+        base_dist = LogitRelaxedBernoulli(temperature, probs, logits)
+        super(RelaxedBernoulli, self).__init__(base_dist,
+                                               SigmoidTransform(),
+                                               validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(RelaxedBernoulli, _instance)
+        return super(RelaxedBernoulli, self).expand(batch_shape, _instance=new)
+
+    @property
+    def temperature(self):
+        return self.base_dist.temperature
+
+    @property
+    def logits(self):
+        return self.base_dist.logits
+
+    @property
+    def probs(self):
+        return self.base_dist.probs
    +
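Temperature controls how close the relaxation is to a discrete Bernoulli: low temperatures concentrate samples near {0, 1}, high temperatures pull them toward the interior of (0, 1). A sketch:

    import torch
    from torch.distributions import RelaxedBernoulli

    probs = torch.tensor([0.1, 0.2, 0.3, 0.99])
    cold = RelaxedBernoulli(torch.tensor([0.1]), probs).sample((1000,))
    hot = RelaxedBernoulli(torch.tensor([5.0]), probs).sample((1000,))
    # Cold samples sit near 0 or 1, hot samples cluster toward the middle:
    print((cold - 0.5).abs().mean() > (hot - 0.5).abs().mean())  # True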
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/relaxed_categorical.html b/docs/stable/_modules/torch/distributions/relaxed_categorical.html
new file mode 100644
index 000000000000..01ff74d9fd6e
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/relaxed_categorical.html
@@ -0,0 +1,645 @@

    Source code for torch.distributions.relaxed_categorical

    +import torch
    +from torch.distributions import constraints
    +from torch.distributions.categorical import Categorical
    +from torch.distributions.utils import clamp_probs, broadcast_all
    +from torch.distributions.distribution import Distribution
    +from torch.distributions.transformed_distribution import TransformedDistribution
    +from torch.distributions.transforms import ExpTransform
    +
    +
    +class ExpRelaxedCategorical(Distribution):
    +    r"""
+    Creates an ExpRelaxedCategorical distribution parameterized by
    +    :attr:`temperature`, and either :attr:`probs` or :attr:`logits` (but not both).
    +    Returns the log of a point in the simplex. Based on the interface to
    +    :class:`OneHotCategorical`.
    +
    +    Implementation based on [1].
    +
    +    See also: :func:`torch.distributions.OneHotCategorical`
    +
    +    Args:
    +        temperature (Tensor): relaxation temperature
    +        probs (Tensor): event probabilities
    +        logits (Tensor): the log probability of each event.
    +
    +    [1] The Concrete Distribution: A Continuous Relaxation of Discrete Random Variables
    +    (Maddison et al, 2017)
    +
    +    [2] Categorical Reparametrization with Gumbel-Softmax
    +    (Jang et al, 2017)
    +    """
    +    arg_constraints = {'probs': constraints.simplex,
    +                       'logits': constraints.real}
    +    support = constraints.real
    +    has_rsample = True
    +
    +    def __init__(self, temperature, probs=None, logits=None, validate_args=None):
    +        self._categorical = Categorical(probs, logits)
    +        self.temperature = temperature
    +        batch_shape = self._categorical.batch_shape
    +        event_shape = self._categorical.param_shape[-1:]
    +        super(ExpRelaxedCategorical, self).__init__(batch_shape, event_shape, validate_args=validate_args)
    +
    +    def expand(self, batch_shape, _instance=None):
    +        new = self._get_checked_instance(ExpRelaxedCategorical, _instance)
    +        batch_shape = torch.Size(batch_shape)
    +        new.temperature = self.temperature
    +        new._categorical = self._categorical.expand(batch_shape)
    +        super(ExpRelaxedCategorical, new).__init__(batch_shape, self.event_shape, validate_args=False)
    +        new._validate_args = self._validate_args
    +        return new
    +
    +    def _new(self, *args, **kwargs):
    +        return self._categorical._new(*args, **kwargs)
    +
    +    @property
    +    def param_shape(self):
    +        return self._categorical.param_shape
    +
    +    @property
    +    def logits(self):
    +        return self._categorical.logits
    +
    +    @property
    +    def probs(self):
    +        return self._categorical.probs
    +
    +    def rsample(self, sample_shape=torch.Size()):
    +        shape = self._extended_shape(sample_shape)
    +        uniforms = clamp_probs(torch.rand(shape, dtype=self.logits.dtype, device=self.logits.device))
    +        gumbels = -((-(uniforms.log())).log())
    +        scores = (self.logits + gumbels) / self.temperature
    +        return scores - scores.logsumexp(dim=-1, keepdim=True)
    +
    +    def log_prob(self, value):
    +        K = self._categorical._num_events
    +        if self._validate_args:
    +            self._validate_sample(value)
    +        logits, value = broadcast_all(self.logits, value)
    +        log_scale = (torch.full_like(self.temperature, float(K)).lgamma() -
    +                     self.temperature.log().mul(-(K - 1)))
    +        score = logits - value.mul(self.temperature)
    +        score = (score - score.logsumexp(dim=-1, keepdim=True)).sum(-1)
    +        return score + log_scale
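The `rsample` above is the Gumbel-softmax trick in log space: Gumbel noise `-log(-log(U))` perturbs the logits, the temperature divides, and `logsumexp` renormalizes so the output exponentiates to a point on the simplex. A standalone sketch of the same computation:

    import torch

    torch.manual_seed(0)
    logits = torch.tensor([0.1, 0.2, 0.3, 0.4]).log()
    temperature = torch.tensor(0.5)

    u = torch.rand(4).clamp(1e-10, 1 - 1e-10)  # stand-in for clamp_probs
    gumbels = -((-u.log()).log())
    scores = (logits + gumbels) / temperature
    log_sample = scores - scores.logsumexp(dim=-1, keepdim=True)
    print(log_sample.exp().sum())  # tensor(1.): the sample lies on the simplex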
    +
    +
    +
+class RelaxedOneHotCategorical(TransformedDistribution):
+    r"""
+    Creates a RelaxedOneHotCategorical distribution parametrized by
+    :attr:`temperature`, and either :attr:`probs` or :attr:`logits`.
+    This is a relaxed version of the :class:`OneHotCategorical` distribution, so
+    its samples are on the simplex, and are reparametrizable.
+
+    Example::
+
+        >>> m = RelaxedOneHotCategorical(torch.tensor([2.2]),
+                                         torch.tensor([0.1, 0.2, 0.3, 0.4]))
+        >>> m.sample()
+        tensor([ 0.1294,  0.2324,  0.3859,  0.2523])
+
+    Args:
+        temperature (Tensor): relaxation temperature
+        probs (Tensor): event probabilities
+        logits (Tensor): the log probability of each event.
+    """
+    arg_constraints = {'probs': constraints.simplex,
+                       'logits': constraints.real}
+    support = constraints.simplex
+    has_rsample = True
+
+    def __init__(self, temperature, probs=None, logits=None, validate_args=None):
+        base_dist = ExpRelaxedCategorical(temperature, probs, logits)
+        super(RelaxedOneHotCategorical, self).__init__(base_dist,
+                                                       ExpTransform(),
+                                                       validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(RelaxedOneHotCategorical, _instance)
+        return super(RelaxedOneHotCategorical, self).expand(batch_shape, _instance=new)
+
+    @property
+    def temperature(self):
+        return self.base_dist.temperature
+
+    @property
+    def logits(self):
+        return self.base_dist.logits
+
+    @property
+    def probs(self):
+        return self.base_dist.probs
    +
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/studentT.html b/docs/stable/_modules/torch/distributions/studentT.html
new file mode 100644
index 000000000000..a224558d8033
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/studentT.html
@@ -0,0 +1,605 @@

    Source code for torch.distributions.studentT

    +import math
    +
    +import torch
    +from torch._six import inf, nan
    +from torch.distributions import Chi2, constraints
    +from torch.distributions.distribution import Distribution
    +from torch.distributions.utils import _standard_normal, broadcast_all
    +
    +
    +
+class StudentT(Distribution):
+    r"""
+    Creates a Student's t-distribution parameterized by degrees of
+    freedom :attr:`df`, mean :attr:`loc` and scale :attr:`scale`.
+
+    Example::
+
+        >>> m = StudentT(torch.tensor([2.0]))
+        >>> m.sample()  # Student's t-distributed with degrees of freedom=2
+        tensor([ 0.1046])
+
+    Args:
+        df (float or Tensor): degrees of freedom
+        loc (float or Tensor): mean of the distribution
+        scale (float or Tensor): scale of the distribution
+    """
+    arg_constraints = {'df': constraints.positive, 'loc': constraints.real, 'scale': constraints.positive}
+    support = constraints.real
+    has_rsample = True
+
+    @property
+    def mean(self):
+        m = self.loc.clone()
+        m[self.df <= 1] = nan
+        return m
+
+    @property
+    def variance(self):
+        m = self.df.clone()
+        m[self.df > 2] = self.scale[self.df > 2].pow(2) * self.df[self.df > 2] / (self.df[self.df > 2] - 2)
+        m[(self.df <= 2) & (self.df > 1)] = inf
+        m[self.df <= 1] = nan
+        return m
+
+    def __init__(self, df, loc=0., scale=1., validate_args=None):
+        self.df, self.loc, self.scale = broadcast_all(df, loc, scale)
+        self._chi2 = Chi2(self.df)
+        batch_shape = self.df.size()
+        super(StudentT, self).__init__(batch_shape, validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(StudentT, _instance)
+        batch_shape = torch.Size(batch_shape)
+        new.df = self.df.expand(batch_shape)
+        new.loc = self.loc.expand(batch_shape)
+        new.scale = self.scale.expand(batch_shape)
+        new._chi2 = self._chi2.expand(batch_shape)
+        super(StudentT, new).__init__(batch_shape, validate_args=False)
+        new._validate_args = self._validate_args
+        return new
+
+    def rsample(self, sample_shape=torch.Size()):
+        # NOTE: This does not agree with the scipy implementation as closely as
+        # other distributions do (see
+        # https://github.com/fritzo/notebooks/blob/master/debug-student-t.ipynb).
+        # Using DoubleTensor parameters seems to help.
+
+        #   X ~ Normal(0, 1)
+        #   Z ~ Chi2(df)
+        #   Y = X / sqrt(Z / df) ~ StudentT(df)
+        shape = self._extended_shape(sample_shape)
+        X = _standard_normal(shape, dtype=self.df.dtype, device=self.df.device)
+        Z = self._chi2.rsample(sample_shape)
+        Y = X * torch.rsqrt(Z / self.df)
+        return self.loc + self.scale * Y
+
+    def log_prob(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        y = (value - self.loc) / self.scale
+        Z = (self.scale.log() +
+             0.5 * self.df.log() +
+             0.5 * math.log(math.pi) +
+             torch.lgamma(0.5 * self.df) -
+             torch.lgamma(0.5 * (self.df + 1.)))
+        return -0.5 * (self.df + 1.) * torch.log1p(y**2. / self.df) - Z
+
+    def entropy(self):
+        lbeta = torch.lgamma(0.5 * self.df) + math.lgamma(0.5) - torch.lgamma(0.5 * (self.df + 1))
+        return (self.scale.log() +
+                0.5 * (self.df + 1) *
+                (torch.digamma(0.5 * (self.df + 1)) - torch.digamma(0.5 * self.df)) +
+                0.5 * self.df.log() + lbeta)
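As a rough sanity check of the construction in `rsample` above, the Monte Carlo variance should approach the closed form df / (df - 2) for df > 2 (a sketch only; the sample size and df are arbitrary):

    import torch
    from torch.distributions import Chi2, Normal, StudentT

    df = torch.tensor(5.0)
    print(StudentT(df).rsample((100000,)).var())   # ~ 5 / 3

    # the same X / sqrt(Z / df) construction by hand:
    x = Normal(0., 1.).sample((100000,))
    z = Chi2(df).sample((100000,))
    print((x * torch.rsqrt(z / df)).var())         # also ~ 5 / 3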
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/transformed_distribution.html b/docs/stable/_modules/torch/distributions/transformed_distribution.html
new file mode 100644
index 000000000000..8be5a4fa3788
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/transformed_distribution.html
@@ -0,0 +1,670 @@
+ torch.distributions.transformed_distribution — PyTorch master documentation

    Source code for torch.distributions.transformed_distribution

    +import torch
    +from torch.distributions import constraints
    +from torch.distributions.distribution import Distribution
    +from torch.distributions.transforms import Transform
    +from torch.distributions.utils import _sum_rightmost
    +
    +
    +
    [docs]class TransformedDistribution(Distribution): + r""" + Extension of the Distribution class, which applies a sequence of Transforms + to a base distribution. Let f be the composition of transforms applied:: + + X ~ BaseDistribution + Y = f(X) ~ TransformedDistribution(BaseDistribution, f) + log p(Y) = log p(X) + log |det (dX/dY)| + + Note that the ``.event_shape`` of a :class:`TransformedDistribution` is the + maximum shape of its base distribution and its transforms, since transforms + can introduce correlations among events. + + An example for the usage of :class:`TransformedDistribution` would be:: + + # Building a Logistic Distribution + # X ~ Uniform(0, 1) + # f = a + b * logit(X) + # Y ~ f(X) ~ Logistic(a, b) + base_distribution = Uniform(0, 1) + transforms = [SigmoidTransform().inv, AffineTransform(loc=a, scale=b)] + logistic = TransformedDistribution(base_distribution, transforms) + + For more examples, please look at the implementations of + :class:`~torch.distributions.gumbel.Gumbel`, + :class:`~torch.distributions.half_cauchy.HalfCauchy`, + :class:`~torch.distributions.half_normal.HalfNormal`, + :class:`~torch.distributions.log_normal.LogNormal`, + :class:`~torch.distributions.pareto.Pareto`, + :class:`~torch.distributions.weibull.Weibull`, + :class:`~torch.distributions.relaxed_bernoulli.RelaxedBernoulli` and + :class:`~torch.distributions.relaxed_categorical.RelaxedOneHotCategorical` + """ + arg_constraints = {} + + def __init__(self, base_distribution, transforms, validate_args=None): + self.base_dist = base_distribution + if isinstance(transforms, Transform): + self.transforms = [transforms, ] + elif isinstance(transforms, list): + if not all(isinstance(t, Transform) for t in transforms): + raise ValueError("transforms must be a Transform or a list of Transforms") + self.transforms = transforms + else: + raise ValueError("transforms must be a Transform or list, but was {}".format(transforms)) + shape = self.base_dist.batch_shape + self.base_dist.event_shape + event_dim = max([len(self.base_dist.event_shape)] + [t.event_dim for t in self.transforms]) + batch_shape = shape[:len(shape) - event_dim] + event_shape = shape[len(shape) - event_dim:] + super(TransformedDistribution, self).__init__(batch_shape, event_shape, validate_args=validate_args) + +
    [docs] def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(TransformedDistribution, _instance) + batch_shape = torch.Size(batch_shape) + base_dist_batch_shape = batch_shape + self.base_dist.batch_shape[len(self.batch_shape):] + new.base_dist = self.base_dist.expand(base_dist_batch_shape) + new.transforms = self.transforms + super(TransformedDistribution, new).__init__(batch_shape, self.event_shape, validate_args=False) + new._validate_args = self._validate_args + return new
    + + @constraints.dependent_property + def support(self): + return self.transforms[-1].codomain if self.transforms else self.base_dist.support + + @property + def has_rsample(self): + return self.base_dist.has_rsample + +
    [docs] def sample(self, sample_shape=torch.Size()): + """ + Generates a sample_shape shaped sample or sample_shape shaped batch of + samples if the distribution parameters are batched. Samples first from + base distribution and applies `transform()` for every transform in the + list. + """ + with torch.no_grad(): + x = self.base_dist.sample(sample_shape) + for transform in self.transforms: + x = transform(x) + return x
    + +
    [docs] def rsample(self, sample_shape=torch.Size()): + """ + Generates a sample_shape shaped reparameterized sample or sample_shape + shaped batch of reparameterized samples if the distribution parameters + are batched. Samples first from base distribution and applies + `transform()` for every transform in the list. + """ + x = self.base_dist.rsample(sample_shape) + for transform in self.transforms: + x = transform(x) + return x
    + +
    [docs] def log_prob(self, value): + """ + Scores the sample by inverting the transform(s) and computing the score + using the score of the base distribution and the log abs det jacobian. + """ + event_dim = len(self.event_shape) + log_prob = 0.0 + y = value + for transform in reversed(self.transforms): + x = transform.inv(y) + log_prob = log_prob - _sum_rightmost(transform.log_abs_det_jacobian(x, y), + event_dim - transform.event_dim) + y = x + + log_prob = log_prob + _sum_rightmost(self.base_dist.log_prob(y), + event_dim - len(self.base_dist.event_shape)) + return log_prob
    + + def _monotonize_cdf(self, value): + """ + This conditionally flips ``value -> 1-value`` to ensure :meth:`cdf` is + monotone increasing. + """ + sign = 1 + for transform in self.transforms: + sign = sign * transform.sign + if isinstance(sign, int) and sign == 1: + return value + return sign * (value - 0.5) + 0.5 + +
    [docs] def cdf(self, value): + """ + Computes the cumulative distribution function by inverting the + transform(s) and computing the score of the base distribution. + """ + for transform in self.transforms[::-1]: + value = transform.inv(value) + if self._validate_args: + self.base_dist._validate_sample(value) + value = self.base_dist.cdf(value) + value = self._monotonize_cdf(value) + return value
    + +
    [docs] def icdf(self, value): + """ + Computes the inverse cumulative distribution function using + transform(s) and computing the score of the base distribution. + """ + value = self._monotonize_cdf(value) + if self._validate_args: + self.base_dist._validate_sample(value) + value = self.base_dist.icdf(value) + for transform in self.transforms: + value = transform(value) + return value
    +
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/transforms.html b/docs/stable/_modules/torch/distributions/transforms.html
new file mode 100644
index 000000000000..84aeb98bd118
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/transforms.html
@@ -0,0 +1,1212 @@
+ torch.distributions.transforms — PyTorch master documentation

    Source code for torch.distributions.transforms

    +import math
    +import numbers
    +import weakref
    +
    +import torch
    +import torch.nn.functional as F
    +from torch.distributions import constraints
    +from torch.distributions.utils import (_sum_rightmost, broadcast_all,
    +                                       lazy_property)
    +from torch.nn.functional import pad
    +
    +__all__ = [
    +    'AbsTransform',
    +    'AffineTransform',
    +    'CatTransform',
    +    'ComposeTransform',
    +    'ExpTransform',
    +    'LowerCholeskyTransform',
    +    'PowerTransform',
    +    'SigmoidTransform',
    +    'SoftmaxTransform',
    +    'StackTransform',
    +    'StickBreakingTransform',
    +    'Transform',
    +    'identity_transform',
    +]
    +
    +
    +
+class Transform(object):
+    """
+    Abstract class for invertible transformations with computable log
+    det jacobians. They are primarily used in
+    :class:`torch.distributions.TransformedDistribution`.
+
+    Caching is useful for transforms whose inverses are either expensive or
+    numerically unstable. Note that care must be taken with memoized values
+    since the autograd graph may be reversed. For example while the following
+    works with or without caching::
+
+        y = t(x)
+        t.log_abs_det_jacobian(x, y).backward()  # x will receive gradients.
+
+    However the following will error when caching due to dependency reversal::
+
+        y = t(x)
+        z = t.inv(y)
+        grad(z.sum(), [y])  # error because z is x
+
+    Derived classes should implement one or both of :meth:`_call` or
+    :meth:`_inverse`. Derived classes that set `bijective=True` should also
+    implement :meth:`log_abs_det_jacobian`.
+
+    Args:
+        cache_size (int): Size of cache. If zero, no caching is done. If one,
+            the latest single value is cached. Only 0 and 1 are supported.
+
+    Attributes:
+        domain (:class:`~torch.distributions.constraints.Constraint`):
+            The constraint representing valid inputs to this transform.
+        codomain (:class:`~torch.distributions.constraints.Constraint`):
+            The constraint representing valid outputs to this transform
+            which are inputs to the inverse transform.
+        bijective (bool): Whether this transform is bijective. A transform
+            ``t`` is bijective iff ``t.inv(t(x)) == x`` and
+            ``t(t.inv(y)) == y`` for every ``x`` in the domain and ``y`` in
+            the codomain. Transforms that are not bijective should at least
+            maintain the weaker pseudoinverse properties
+            ``t(t.inv(t(x))) == t(x)`` and ``t.inv(t(t.inv(y))) == t.inv(y)``.
+        sign (int or Tensor): For bijective univariate transforms, this
+            should be +1 or -1 depending on whether transform is monotone
+            increasing or decreasing.
+        event_dim (int): Number of dimensions that are correlated together in
+            the transform ``event_shape``. This should be 0 for pointwise
+            transforms, 1 for transforms that act jointly on vectors, 2 for
+            transforms that act jointly on matrices, etc.
+    """
+    bijective = False
+    event_dim = 0
+
+    def __init__(self, cache_size=0):
+        self._cache_size = cache_size
+        self._inv = None
+        if cache_size == 0:
+            pass  # default behavior
+        elif cache_size == 1:
+            self._cached_x_y = None, None
+        else:
+            raise ValueError('cache_size must be 0 or 1')
+        super(Transform, self).__init__()
+
+    @property
+    def inv(self):
+        """
+        Returns the inverse :class:`Transform` of this transform.
+        This should satisfy ``t.inv.inv is t``.
+        """
+        inv = None
+        if self._inv is not None:
+            inv = self._inv()
+        if inv is None:
+            inv = _InverseTransform(self)
+            self._inv = weakref.ref(inv)
+        return inv
+
+    @property
+    def sign(self):
+        """
+        Returns the sign of the determinant of the Jacobian, if applicable.
+        In general this only makes sense for bijective transforms.
+        """
+        raise NotImplementedError
+
+    def __eq__(self, other):
+        return self is other
+
+    def __ne__(self, other):
+        # Necessary for Python2
+        return not self.__eq__(other)
+
+    def __call__(self, x):
+        """
+        Computes the transform `x => y`.
+        """
+        if self._cache_size == 0:
+            return self._call(x)
+        x_old, y_old = self._cached_x_y
+        if x is x_old:
+            return y_old
+        y = self._call(x)
+        self._cached_x_y = x, y
+        return y
+
+    def _inv_call(self, y):
+        """
+        Inverts the transform `y => x`.
+        """
+        if self._cache_size == 0:
+            return self._inverse(y)
+        x_old, y_old = self._cached_x_y
+        if y is y_old:
+            return x_old
+        x = self._inverse(y)
+        self._cached_x_y = x, y
+        return x
+
+    def _call(self, x):
+        """
+        Abstract method to compute forward transformation.
+        """
+        raise NotImplementedError
+
+    def _inverse(self, y):
+        """
+        Abstract method to compute inverse transformation.
+        """
+        raise NotImplementedError
+
+    def log_abs_det_jacobian(self, x, y):
+        """
+        Computes the log det jacobian `log |dy/dx|` given input and output.
+        """
+        raise NotImplementedError
+
+    def __repr__(self):
+        return self.__class__.__name__ + '()'
    + + +class _InverseTransform(Transform): + """ + Inverts a single :class:`Transform`. + This class is private; please instead use the ``Transform.inv`` property. + """ + def __init__(self, transform): + super(_InverseTransform, self).__init__() + self._inv = transform + + @constraints.dependent_property + def domain(self): + return self._inv.codomain + + @constraints.dependent_property + def codomain(self): + return self._inv.domain + + @property + def bijective(self): + return self._inv.bijective + + @property + def sign(self): + return self._inv.sign + + @property + def event_dim(self): + return self._inv.event_dim + + @property + def inv(self): + return self._inv + + def __eq__(self, other): + if not isinstance(other, _InverseTransform): + return False + return self._inv == other._inv + + def __call__(self, x): + return self._inv._inv_call(x) + + def log_abs_det_jacobian(self, x, y): + return -self._inv.log_abs_det_jacobian(y, x) + + +
    [docs]class ComposeTransform(Transform): + """ + Composes multiple transforms in a chain. + The transforms being composed are responsible for caching. + + Args: + parts (list of :class:`Transform`): A list of transforms to compose. + """ + def __init__(self, parts): + super(ComposeTransform, self).__init__() + self.parts = parts + + def __eq__(self, other): + if not isinstance(other, ComposeTransform): + return False + return self.parts == other.parts + + @constraints.dependent_property + def domain(self): + if not self.parts: + return constraints.real + return self.parts[0].domain + + @constraints.dependent_property + def codomain(self): + if not self.parts: + return constraints.real + return self.parts[-1].codomain + + @lazy_property + def bijective(self): + return all(p.bijective for p in self.parts) + + @lazy_property + def sign(self): + sign = 1 + for p in self.parts: + sign = sign * p.sign + return sign + + @lazy_property + def event_dim(self): + return max(p.event_dim for p in self.parts) if self.parts else 0 + + @property + def inv(self): + inv = None + if self._inv is not None: + inv = self._inv() + if inv is None: + inv = ComposeTransform([p.inv for p in reversed(self.parts)]) + self._inv = weakref.ref(inv) + inv._inv = weakref.ref(self) + return inv + + def __call__(self, x): + for part in self.parts: + x = part(x) + return x + + def log_abs_det_jacobian(self, x, y): + if not self.parts: + return torch.zeros_like(x) + result = 0 + for part in self.parts[:-1]: + y_tmp = part(x) + result = result + _sum_rightmost(part.log_abs_det_jacobian(x, y_tmp), + self.event_dim - part.event_dim) + x = y_tmp + part = self.parts[-1] + result = result + _sum_rightmost(part.log_abs_det_jacobian(x, y), + self.event_dim - part.event_dim) + return result + + def __repr__(self): + fmt_string = self.__class__.__name__ + '(\n ' + fmt_string += ',\n '.join([p.__repr__() for p in self.parts]) + fmt_string += '\n)' + return fmt_string
    + + +identity_transform = ComposeTransform([]) + + +
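A small illustrative sketch of `ComposeTransform`: the inverse composes the parts' inverses in reverse order, and the log-det-Jacobian terms accumulate across parts (values here are arbitrary):

    import torch
    from torch.distributions.transforms import (AffineTransform,
                                                ComposeTransform, ExpTransform)

    t = ComposeTransform([AffineTransform(loc=1., scale=2.), ExpTransform()])
    x = torch.randn(5)
    y = t(x)                                       # exp(1 + 2 * x)
    print(torch.allclose(t.inv(y), x, atol=1e-5))  # True
    print(t.log_abs_det_jacobian(x, y))            # log 2 + (1 + 2 * x), elementwise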
    [docs]class ExpTransform(Transform): + r""" + Transform via the mapping :math:`y = \exp(x)`. + """ + domain = constraints.real + codomain = constraints.positive + bijective = True + sign = +1 + + def __eq__(self, other): + return isinstance(other, ExpTransform) + + def _call(self, x): + return x.exp() + + def _inverse(self, y): + return y.log() + + def log_abs_det_jacobian(self, x, y): + return x
    + + +
    [docs]class PowerTransform(Transform): + r""" + Transform via the mapping :math:`y = x^{\text{exponent}}`. + """ + domain = constraints.positive + codomain = constraints.positive + bijective = True + sign = +1 + + def __init__(self, exponent, cache_size=0): + super(PowerTransform, self).__init__(cache_size=cache_size) + self.exponent, = broadcast_all(exponent) + + def __eq__(self, other): + if not isinstance(other, PowerTransform): + return False + return self.exponent.eq(other.exponent).all().item() + + def _call(self, x): + return x.pow(self.exponent) + + def _inverse(self, y): + return y.pow(1 / self.exponent) + + def log_abs_det_jacobian(self, x, y): + return (self.exponent * y / x).abs().log()
    + + +def _clipped_sigmoid(x): + finfo = torch.finfo(x.dtype) + return torch.clamp(torch.sigmoid(x), min=finfo.tiny, max=1. - finfo.eps) + + +
    [docs]class SigmoidTransform(Transform): + r""" + Transform via the mapping :math:`y = \frac{1}{1 + \exp(-x)}` and :math:`x = \text{logit}(y)`. + """ + domain = constraints.real + codomain = constraints.unit_interval + bijective = True + sign = +1 + + def __eq__(self, other): + return isinstance(other, SigmoidTransform) + + def _call(self, x): + return _clipped_sigmoid(x) + + def _inverse(self, y): + finfo = torch.finfo(y.dtype) + y = y.clamp(min=finfo.tiny, max=1. - finfo.eps) + return y.log() - (-y).log1p() + + def log_abs_det_jacobian(self, x, y): + return -F.softplus(-x) - F.softplus(x)
    + + +
    [docs]class AbsTransform(Transform): + r""" + Transform via the mapping :math:`y = |x|`. + """ + domain = constraints.real + codomain = constraints.positive + + def __eq__(self, other): + return isinstance(other, AbsTransform) + + def _call(self, x): + return x.abs() + + def _inverse(self, y): + return y
    + + +
    [docs]class AffineTransform(Transform): + r""" + Transform via the pointwise affine mapping :math:`y = \text{loc} + \text{scale} \times x`. + + Args: + loc (Tensor or float): Location parameter. + scale (Tensor or float): Scale parameter. + event_dim (int): Optional size of `event_shape`. This should be zero + for univariate random variables, 1 for distributions over vectors, + 2 for distributions over matrices, etc. + """ + domain = constraints.real + codomain = constraints.real + bijective = True + + def __init__(self, loc, scale, event_dim=0, cache_size=0): + super(AffineTransform, self).__init__(cache_size=cache_size) + self.loc = loc + self.scale = scale + self.event_dim = event_dim + + def __eq__(self, other): + if not isinstance(other, AffineTransform): + return False + + if isinstance(self.loc, numbers.Number) and isinstance(other.loc, numbers.Number): + if self.loc != other.loc: + return False + else: + if not (self.loc == other.loc).all().item(): + return False + + if isinstance(self.scale, numbers.Number) and isinstance(other.scale, numbers.Number): + if self.scale != other.scale: + return False + else: + if not (self.scale == other.scale).all().item(): + return False + + return True + + @property + def sign(self): + if isinstance(self.scale, numbers.Number): + return 1 if self.scale > 0 else -1 if self.scale < 0 else 0 + return self.scale.sign() + + def _call(self, x): + return self.loc + self.scale * x + + def _inverse(self, y): + return (y - self.loc) / self.scale + + def log_abs_det_jacobian(self, x, y): + shape = x.shape + scale = self.scale + if isinstance(scale, numbers.Number): + result = torch.full_like(x, math.log(abs(scale))) + else: + result = torch.abs(scale).log() + if self.event_dim: + result_size = result.size()[:-self.event_dim] + (-1,) + result = result.view(result_size).sum(-1) + shape = shape[:-self.event_dim] + return result.expand(shape)
    + + +
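One easy-to-miss detail of `AffineTransform.log_abs_det_jacobian` above: with `event_dim=1` the per-element terms are summed over the last dimension. An illustrative shape check:

    import torch
    from torch.distributions.transforms import AffineTransform

    x = torch.zeros(4, 3)
    t0 = AffineTransform(loc=0., scale=2.)               # pointwise
    t1 = AffineTransform(loc=0., scale=2., event_dim=1)  # acts on vectors
    print(t0.log_abs_det_jacobian(x, t0(x)).shape)  # torch.Size([4, 3])
    print(t1.log_abs_det_jacobian(x, t1(x)).shape)  # torch.Size([4])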
    [docs]class SoftmaxTransform(Transform): + r""" + Transform from unconstrained space to the simplex via :math:`y = \exp(x)` then + normalizing. + + This is not bijective and cannot be used for HMC. However this acts mostly + coordinate-wise (except for the final normalization), and thus is + appropriate for coordinate-wise optimization algorithms. + """ + domain = constraints.real + codomain = constraints.simplex + event_dim = 1 + + def __eq__(self, other): + return isinstance(other, SoftmaxTransform) + + def _call(self, x): + logprobs = x + probs = (logprobs - logprobs.max(-1, True)[0]).exp() + return probs / probs.sum(-1, True) + + def _inverse(self, y): + probs = y + return probs.log()
    + + +
    [docs]class StickBreakingTransform(Transform): + """ + Transform from unconstrained space to the simplex of one additional + dimension via a stick-breaking process. + + This transform arises as an iterated sigmoid transform in a stick-breaking + construction of the `Dirichlet` distribution: the first logit is + transformed via sigmoid to the first probability and the probability of + everything else, and then the process recurses. + + This is bijective and appropriate for use in HMC; however it mixes + coordinates together and is less appropriate for optimization. + """ + domain = constraints.real + codomain = constraints.simplex + bijective = True + event_dim = 1 + + def __eq__(self, other): + return isinstance(other, StickBreakingTransform) + + def _call(self, x): + offset = x.shape[-1] + 1 - x.new_ones(x.shape[-1]).cumsum(-1) + z = _clipped_sigmoid(x - offset.log()) + z_cumprod = (1 - z).cumprod(-1) + y = pad(z, (0, 1), value=1) * pad(z_cumprod, (1, 0), value=1) + return y + + def _inverse(self, y): + y_crop = y[..., :-1] + offset = y.shape[-1] - y.new_ones(y_crop.shape[-1]).cumsum(-1) + sf = 1 - y_crop.cumsum(-1) + # we clamp to make sure that sf is positive which sometimes does not + # happen when y[-1] ~ 0 or y[:-1].sum() ~ 1 + sf = torch.clamp(sf, min=torch.finfo(y.dtype).tiny) + x = y_crop.log() - sf.log() + offset.log() + return x + + def log_abs_det_jacobian(self, x, y): + offset = x.shape[-1] + 1 - x.new_ones(x.shape[-1]).cumsum(-1) + x = x - offset.log() + # use the identity 1 - sigmoid(x) = exp(-x) * sigmoid(x) + detJ = (-x + F.logsigmoid(x) + y[..., :-1].log()).sum(-1) + return detJ
    + + +
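An illustrative check of the stick-breaking map: an unconstrained vector in R^n lands on the simplex with n + 1 coordinates, and the inverse recovers the input:

    import torch
    from torch.distributions.transforms import StickBreakingTransform

    t = StickBreakingTransform()
    x = torch.randn(3)
    y = t(x)
    print(y.shape, y.sum())                        # torch.Size([4]) tensor(1.)
    print(torch.allclose(t.inv(y), x, atol=1e-4))  # True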
    [docs]class LowerCholeskyTransform(Transform): + """ + Transform from unconstrained matrices to lower-triangular matrices with + nonnegative diagonal entries. + + This is useful for parameterizing positive definite matrices in terms of + their Cholesky factorization. + """ + domain = constraints.real + codomain = constraints.lower_cholesky + event_dim = 2 + + def __eq__(self, other): + return isinstance(other, LowerCholeskyTransform) + + def _call_on_event(self, x): + return x.tril(-1) + x.diag().exp().diag() + + def _inverse_on_event(self, y): + return y.tril(-1) + y.diag().log().diag() + + def _call(self, x): + flat_x = x.reshape((-1,) + x.shape[-2:]) + return torch.stack([self._call_on_event(flat_x[i]) for i in range(flat_x.size(0))]).view(x.shape) + + def _inverse(self, y): + flat_y = y.reshape((-1,) + y.shape[-2:]) + return torch.stack([self._inverse_on_event(flat_y[i]) for i in range(flat_y.size(0))]).view(y.shape)
    + + +
    [docs]class CatTransform(Transform): + """ + Transform functor that applies a sequence of transforms `tseq` + component-wise to each submatrix at `dim`, of length `lengths[dim]`, + in a way compatible with :func:`torch.cat`. + + Example:: + x0 = torch.cat([torch.range(1, 10), torch.range(1, 10)], dim=0) + x = torch.cat([x0, x0], dim=0) + t0 = CatTransform([ExpTransform(), identity_transform], dim=0, lengths=[10, 10]) + t = CatTransform([t0, t0], dim=0, lengths=[20, 20]) + y = t(x) + """ + def __init__(self, tseq, dim=0, lengths=None): + assert all(isinstance(t, Transform) for t in tseq) + super(CatTransform, self).__init__() + self.transforms = list(tseq) + if lengths is None: + lengths = [1] * len(self.transforms) + self.lengths = list(lengths) + assert len(self.lengths) == len(self.transforms) + self.dim = dim + + @lazy_property + def length(self): + return sum(self.lengths) + + def _call(self, x): + assert -x.dim() <= self.dim < x.dim() + assert x.size(self.dim) == self.length + yslices = [] + start = 0 + for trans, length in zip(self.transforms, self.lengths): + xslice = x.narrow(self.dim, start, length) + yslices.append(trans(xslice)) + start = start + length # avoid += for jit compat + return torch.cat(yslices, dim=self.dim) + + def _inverse(self, y): + assert -y.dim() <= self.dim < y.dim() + assert y.size(self.dim) == self.length + xslices = [] + start = 0 + for trans, length in zip(self.transforms, self.lengths): + yslice = y.narrow(self.dim, start, length) + xslices.append(trans.inv(yslice)) + start = start + length # avoid += for jit compat + return torch.cat(xslices, dim=self.dim) + + def log_abs_det_jacobian(self, x, y): + assert -x.dim() <= self.dim < x.dim() + assert x.size(self.dim) == self.length + assert -y.dim() <= self.dim < y.dim() + assert y.size(self.dim) == self.length + logdetjacs = [] + start = 0 + for trans, length in zip(self.transforms, self.lengths): + xslice = x.narrow(self.dim, start, length) + yslice = y.narrow(self.dim, start, length) + logdetjacs.append(trans.log_abs_det_jacobian(xslice, yslice)) + start = start + length # avoid += for jit compat + return torch.cat(logdetjacs, dim=self.dim) + + @property + def bijective(self): + return all(t.bijective for t in self.transforms) + + @constraints.dependent_property + def domain(self): + return constraints.cat([t.domain for t in self.transforms], + self.dim, self.lengths) + + @constraints.dependent_property + def codomain(self): + return constraints.cat([t.codomain for t in self.transforms], + self.dim, self.lengths)
    + + +
    [docs]class StackTransform(Transform): + """ + Transform functor that applies a sequence of transforms `tseq` + component-wise to each submatrix at `dim` + in a way compatible with :func:`torch.stack`. + + Example:: + x = torch.stack([torch.range(1, 10), torch.range(1, 10)], dim=1) + t = StackTransform([ExpTransform(), identity_transform], dim=1) + y = t(x) + """ + def __init__(self, tseq, dim=0): + assert all(isinstance(t, Transform) for t in tseq) + super(StackTransform, self).__init__() + self.transforms = list(tseq) + self.dim = dim + + def _slice(self, z): + return [z.select(self.dim, i) for i in range(z.size(self.dim))] + + def _call(self, x): + assert -x.dim() <= self.dim < x.dim() + assert x.size(self.dim) == len(self.transforms) + yslices = [] + for xslice, trans in zip(self._slice(x), self.transforms): + yslices.append(trans(xslice)) + return torch.stack(yslices, dim=self.dim) + + def _inverse(self, y): + assert -y.dim() <= self.dim < y.dim() + assert y.size(self.dim) == len(self.transforms) + xslices = [] + for yslice, trans in zip(self._slice(y), self.transforms): + xslices.append(trans.inv(yslice)) + return torch.stack(xslices, dim=self.dim) + + def log_abs_det_jacobian(self, x, y): + assert -x.dim() <= self.dim < x.dim() + assert x.size(self.dim) == len(self.transforms) + assert -y.dim() <= self.dim < y.dim() + assert y.size(self.dim) == len(self.transforms) + logdetjacs = [] + yslices = self._slice(y) + xslices = self._slice(x) + for xslice, yslice, trans in zip(xslices, yslices, self.transforms): + logdetjacs.append(trans.log_abs_det_jacobian(xslice, yslice)) + return torch.stack(logdetjacs, dim=self.dim) + + @property + def bijective(self): + return all(t.bijective for t in self.transforms) + + @constraints.dependent_property + def domain(self): + return constraints.stack([t.domain for t in self.transforms], self.dim) + + @constraints.dependent_property + def codomain(self): + return constraints.stack([t.codomain for t in self.transforms], self.dim)
    +
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/uniform.html b/docs/stable/_modules/torch/distributions/uniform.html
new file mode 100644
index 000000000000..dd6ef6502c1b
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/uniform.html
@@ -0,0 +1,604 @@
+ torch.distributions.uniform — PyTorch master documentation

    Source code for torch.distributions.uniform

    +from numbers import Number
    +
    +import torch
    +from torch.distributions import constraints
    +from torch.distributions.distribution import Distribution
    +from torch.distributions.utils import broadcast_all
    +
    +
    +
+class Uniform(Distribution):
+    r"""
+    Generates uniformly distributed random samples from the half-open interval
+    ``[low, high)``.
+
+    Example::
+
+        >>> m = Uniform(torch.tensor([0.0]), torch.tensor([5.0]))
+        >>> m.sample()  # uniformly distributed in the range [0.0, 5.0)
+        tensor([ 2.3418])
+
+    Args:
+        low (float or Tensor): lower range (inclusive).
+        high (float or Tensor): upper range (exclusive).
+    """
+    # TODO allow (loc,scale) parameterization to allow independent constraints.
+    arg_constraints = {'low': constraints.dependent, 'high': constraints.dependent}
+    has_rsample = True
+
+    @property
+    def mean(self):
+        return (self.high + self.low) / 2
+
+    @property
+    def stddev(self):
+        return (self.high - self.low) / 12**0.5
+
+    @property
+    def variance(self):
+        return (self.high - self.low).pow(2) / 12
+
+    def __init__(self, low, high, validate_args=None):
+        self.low, self.high = broadcast_all(low, high)
+
+        if isinstance(low, Number) and isinstance(high, Number):
+            batch_shape = torch.Size()
+        else:
+            batch_shape = self.low.size()
+        super(Uniform, self).__init__(batch_shape, validate_args=validate_args)
+
+        if self._validate_args and not torch.lt(self.low, self.high).all():
+            raise ValueError("Uniform is not defined when low >= high")
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(Uniform, _instance)
+        batch_shape = torch.Size(batch_shape)
+        new.low = self.low.expand(batch_shape)
+        new.high = self.high.expand(batch_shape)
+        super(Uniform, new).__init__(batch_shape, validate_args=False)
+        new._validate_args = self._validate_args
+        return new
+
+    @constraints.dependent_property
+    def support(self):
+        return constraints.interval(self.low, self.high)
+
+    def rsample(self, sample_shape=torch.Size()):
+        shape = self._extended_shape(sample_shape)
+        rand = torch.rand(shape, dtype=self.low.dtype, device=self.low.device)
+        return self.low + rand * (self.high - self.low)
+
+    def log_prob(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        lb = value.ge(self.low).type_as(self.low)
+        ub = value.lt(self.high).type_as(self.low)
+        return torch.log(lb.mul(ub)) - torch.log(self.high - self.low)
+
+    def cdf(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        result = (value - self.low) / (self.high - self.low)
+        return result.clamp(min=0, max=1)
+
+    def icdf(self, value):
+        if self._validate_args:
+            self._validate_sample(value)
+        result = value * (self.high - self.low) + self.low
+        return result
+
+    def entropy(self):
+        return torch.log(self.high - self.low)
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/distributions/weibull.html b/docs/stable/_modules/torch/distributions/weibull.html
new file mode 100644
index 000000000000..8a503aceb0bf
--- /dev/null
+++ b/docs/stable/_modules/torch/distributions/weibull.html
@@ -0,0 +1,576 @@
+ torch.distributions.weibull — PyTorch master documentation

    Source code for torch.distributions.weibull

    +import torch
    +from torch.distributions import constraints
    +from torch.distributions.exponential import Exponential
    +from torch.distributions.transformed_distribution import TransformedDistribution
    +from torch.distributions.transforms import AffineTransform, PowerTransform
    +from torch.distributions.utils import broadcast_all
    +from torch.distributions.gumbel import euler_constant
    +
    +
    +
+class Weibull(TransformedDistribution):
+    r"""
+    Samples from a two-parameter Weibull distribution.
+
+    Example::
+
+        >>> m = Weibull(torch.tensor([1.0]), torch.tensor([1.0]))
+        >>> m.sample()  # sample from a Weibull distribution with scale=1, concentration=1
+        tensor([ 0.4784])
+
+    Args:
+        scale (float or Tensor): Scale parameter of distribution (lambda).
+        concentration (float or Tensor): Concentration parameter of distribution (k/shape).
+    """
+    arg_constraints = {'scale': constraints.positive, 'concentration': constraints.positive}
+    support = constraints.positive
+
+    def __init__(self, scale, concentration, validate_args=None):
+        self.scale, self.concentration = broadcast_all(scale, concentration)
+        self.concentration_reciprocal = self.concentration.reciprocal()
+        base_dist = Exponential(torch.ones_like(self.scale))
+        transforms = [PowerTransform(exponent=self.concentration_reciprocal),
+                      AffineTransform(loc=0, scale=self.scale)]
+        super(Weibull, self).__init__(base_dist,
+                                      transforms,
+                                      validate_args=validate_args)
+
+    def expand(self, batch_shape, _instance=None):
+        new = self._get_checked_instance(Weibull, _instance)
+        new.scale = self.scale.expand(batch_shape)
+        new.concentration = self.concentration.expand(batch_shape)
+        new.concentration_reciprocal = new.concentration.reciprocal()
+        base_dist = self.base_dist.expand(batch_shape)
+        transforms = [PowerTransform(exponent=new.concentration_reciprocal),
+                      AffineTransform(loc=0, scale=new.scale)]
+        super(Weibull, new).__init__(base_dist,
+                                     transforms,
+                                     validate_args=False)
+        new._validate_args = self._validate_args
+        return new
+
+    @property
+    def mean(self):
+        return self.scale * torch.exp(torch.lgamma(1 + self.concentration_reciprocal))
+
+    @property
+    def variance(self):
+        return self.scale.pow(2) * (torch.exp(torch.lgamma(1 + 2 * self.concentration_reciprocal)) -
+                                    torch.exp(2 * torch.lgamma(1 + self.concentration_reciprocal)))
+
+    def entropy(self):
+        return euler_constant * (1 - self.concentration_reciprocal) + \
+            torch.log(self.scale * self.concentration_reciprocal) + 1
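The closed-form `mean` above, scale * Γ(1 + 1/concentration), can be checked roughly by Monte Carlo (illustrative values and sample size):

    import torch
    from torch.distributions import Weibull

    w = Weibull(scale=torch.tensor(2.0), concentration=torch.tensor(1.5))
    print(w.mean)                      # 2 * Γ(1 + 2/3) ≈ 1.8055
    print(w.sample((200000,)).mean())  # ≈ 1.8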
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/functional.html b/docs/stable/_modules/torch/functional.html
new file mode 100644
index 000000000000..a39f8d5a50f6
--- /dev/null
+++ b/docs/stable/_modules/torch/functional.html
@@ -0,0 +1,1341 @@
+ torch.functional — PyTorch master documentation

    Source code for torch.functional

    +import torch
    +import torch.nn.functional as F
    +from torch._six import inf
    +from itertools import product
    +import warnings
    +
    +__all__ = [
    +    'broadcast_tensors',
    +    'cartesian_prod',
    +    'chain_matmul',
    +    'einsum',
    +    'gels',
    +    'isfinite',
    +    'isinf',
    +    'lu',
    +    'lu_unpack',
    +    'norm',
    +    'meshgrid',
    +    'split',
    +    'stft',
    +    'tensordot',
    +    'unique',
    +    'unique_consecutive',
    +]
    +
    +
    +
    [docs]def broadcast_tensors(*tensors): + r"""broadcast_tensors(*tensors) -> List of Tensors + + Broadcasts the given tensors according to :ref:`broadcasting-semantics`. + + Args: + *tensors: any number of tensors of the same type + + .. warning:: + + More than one element of a broadcasted tensor may refer to a single + memory location. As a result, in-place operations (especially ones that + are vectorized) may result in incorrect behavior. If you need to write + to the tensors, please clone them first. + + Example:: + + >>> x = torch.arange(3).view(1, 3) + >>> y = torch.arange(2).view(2, 1) + >>> a, b = torch.broadcast_tensors(x, y) + >>> a.size() + torch.Size([2, 3]) + >>> a + tensor([[0, 1, 2], + [0, 1, 2]]) + """ + return torch._C._VariableFunctions.broadcast_tensors(tensors)
    + + +
    [docs]def split(tensor, split_size_or_sections, dim=0): + r"""Splits the tensor into chunks. + + If :attr:`split_size_or_sections` is an integer type, then :attr:`tensor` will + be split into equally sized chunks (if possible). Last chunk will be smaller if + the tensor size along the given dimension :attr:`dim` is not divisible by + :attr:`split_size`. + + If :attr:`split_size_or_sections` is a list, then :attr:`tensor` will be split + into ``len(split_size_or_sections)`` chunks with sizes in :attr:`dim` according + to :attr:`split_size_or_sections`. + + Arguments: + tensor (Tensor): tensor to split. + split_size_or_sections (int) or (list(int)): size of a single chunk or + list of sizes for each chunk + dim (int): dimension along which to split the tensor. + """ + # Overwriting reason: + # This dispatches to two ATen functions depending on the type of + # split_size_or_sections. The branching code is in tensor.py, which we + # call here. + return tensor.split(split_size_or_sections, dim)
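A short illustration of the two dispatch branches described above, an integer chunk size versus a list of section sizes (purely illustrative):

    import torch

    x = torch.arange(10)
    print(torch.split(x, 4))       # sizes 4, 4, 2: the last chunk is smaller
    print(torch.split(x, [3, 7]))  # sizes 3 and 7, exactly as given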
    + + +
    [docs]def lu_unpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True): + r"""Unpacks the data and pivots from a LU factorization of a tensor. + + Returns a tuple of tensors as ``(the pivots, the L tensor, the U tensor)``. + + Arguments: + LU_data (Tensor): the packed LU factorization data + LU_pivots (Tensor): the packed LU factorization pivots + unpack_data (bool): flag indicating if the data should be unpacked + unpack_pivots (bool): flag indicating if the pivots should be unpacked + + Example:: + + >>> A = torch.randn(2, 3, 3) + >>> A_LU, pivots = A.lu() + >>> P, A_L, A_U = torch.lu_unpack(A_LU, pivots) + >>> + >>> # can recover A from factorization + >>> A_ = torch.bmm(P, torch.bmm(A_L, A_U)) + """ + + sz = LU_data.size(-1) + + if unpack_data: + U = LU_data.triu() + L = LU_data.tril() + L.diagonal(dim1=-2, dim2=-1).fill_(1) + else: + L = U = None + + if unpack_pivots: + LU_pivots_zero_idx = LU_pivots - 1 + if LU_data.dim() > 2: + P = torch.eye(sz, device=LU_data.device, dtype=LU_data.dtype).expand_as(LU_data).clone() + for idx in product(*map(lambda x: list(range(x)), LU_data.shape[:-2])): + final_order = list(range(sz)) + for k, j in enumerate(LU_pivots_zero_idx[idx]): + final_order[k], final_order[j] = final_order[j], final_order[k] + P[idx] = P[idx].index_select(1, torch.as_tensor(final_order, device=LU_pivots.device)) + else: + P = torch.eye(sz, device=LU_data.device, dtype=LU_data.dtype) + final_order = list(range(sz)) + for k, j, in enumerate(LU_pivots_zero_idx): + final_order[k], final_order[j] = final_order[j], final_order[k] + P = P.index_select(1, torch.as_tensor(final_order, device=LU_pivots.device)) + else: + P = None + + return P, L, U
    + + +
+def einsum(equation, *operands):
+    r"""einsum(equation, *operands) -> Tensor
+
+This function provides a way of computing multilinear expressions (i.e. sums of products) using the
+Einstein summation convention.
+
+Args:
+    equation (string): The equation is given in terms of lower case letters (indices) to be associated
+        with each dimension of the operands and result. The left hand side lists the operands
+        dimensions, separated by commas. There should be one index letter per tensor dimension.
+        The right hand side follows after `->` and gives the indices for the output.
+        If the `->` and right hand side are omitted, it is implicitly defined as the alphabetically
+        sorted list of all indices appearing exactly once in the left hand side.
+        The indices not appearing in the output are summed over after multiplying the operands
+        entries.
+        If an index appears several times for the same operand, a diagonal is taken.
+        Ellipses `...` represent a fixed number of dimensions. If the right hand side is inferred,
+        the ellipsis dimensions are at the beginning of the output.
+    operands (list of Tensors): The operands to compute the Einstein sum of.
+
+Examples::
+
+    >>> x = torch.randn(5)
+    >>> y = torch.randn(4)
+    >>> torch.einsum('i,j->ij', x, y)  # outer product
+    tensor([[-0.0570, -0.0286, -0.0231,  0.0197],
+            [ 1.2616,  0.6335,  0.5113, -0.4351],
+            [ 1.4452,  0.7257,  0.5857, -0.4984],
+            [-0.4647, -0.2333, -0.1883,  0.1603],
+            [-1.1130, -0.5588, -0.4510,  0.3838]])
+
+    >>> A = torch.randn(3,5,4)
+    >>> l = torch.randn(2,5)
+    >>> r = torch.randn(2,4)
+    >>> torch.einsum('bn,anm,bm->ba', l, A, r)  # compare torch.nn.functional.bilinear
+    tensor([[-0.3430, -5.2405,  0.4494],
+            [ 0.3311,  5.5201, -3.0356]])
+
+    >>> As = torch.randn(3,2,5)
+    >>> Bs = torch.randn(3,5,4)
+    >>> torch.einsum('bij,bjk->bik', As, Bs)  # batch matrix multiplication
+    tensor([[[-1.0564, -1.5904,  3.2023,  3.1271],
+             [-1.6706, -0.8097, -0.8025, -2.1183]],
+
+            [[ 4.2239,  0.3107, -0.5756, -0.2354],
+             [-1.4558, -0.3460,  1.5087, -0.8530]],
+
+            [[ 2.8153,  1.8787, -4.3839, -1.2112],
+             [ 0.3728, -2.1131,  0.0921,  0.8305]]])
+
+    >>> A = torch.randn(3, 3)
+    >>> torch.einsum('ii->i', A)  # diagonal
+    tensor([-0.7825,  0.8291, -0.1936])
+
+    >>> A = torch.randn(4, 3, 3)
+    >>> torch.einsum('...ii->...i', A)  # batch diagonal
+    tensor([[-1.0864,  0.7292,  0.0569],
+            [-0.9725, -1.0270,  0.6493],
+            [ 0.5832, -1.1716, -1.5084],
+            [ 0.4041, -1.1690,  0.8570]])
+
+    >>> A = torch.randn(2, 3, 4, 5)
+    >>> torch.einsum('...ij->...ji', A).shape  # batch permute
+    torch.Size([2, 3, 5, 4])
+"""
+    if len(operands) == 1 and isinstance(operands[0], (list, tuple)):
+        # the old interface of passing the operands as one list argument
+        operands = operands[0]
+    return torch._C._VariableFunctions.einsum(equation, operands)
    + + +
    [docs]def isfinite(tensor): + r"""Returns a new tensor with boolean elements representing if each element is `Finite` or not. + + Arguments: + tensor (Tensor): A tensor to check + + Returns: + Tensor: A ``torch.ByteTensor`` containing a 1 at each location of finite elements and 0 otherwise + + Example:: + + >>> torch.isfinite(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')])) + tensor([ 1, 0, 1, 0, 0], dtype=torch.uint8) + """ + if not isinstance(tensor, torch.Tensor): + raise TypeError("The argument is not a tensor: {}".format(repr(tensor))) + + # Support int input, nan and inf are concepts in floating point numbers. + # Numpy uses type 'Object' when the int overflows long, but we don't + # have a similar concept. It's safe to assume any created LongTensor doesn't + # overflow and it's finite. + if not tensor.is_floating_point(): + return torch.ones_like(tensor, dtype=torch.uint8) + return (tensor == tensor) & (tensor.abs() != inf)
    + + +
    [docs]def isinf(tensor): + r"""Returns a new tensor with boolean elements representing if each element is `+/-INF` or not. + + Arguments: + tensor (Tensor): A tensor to check + + Returns: + Tensor: A ``torch.ByteTensor`` containing a 1 at each location of `+/-INF` elements and 0 otherwise + + Example:: + + >>> torch.isinf(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')])) + tensor([ 0, 1, 0, 1, 0], dtype=torch.uint8) + """ + if not isinstance(tensor, torch.Tensor): + raise TypeError("The argument is not a tensor: {}".format(repr(tensor))) + if tensor.dtype in [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]: + return torch.zeros_like(tensor, dtype=torch.uint8) + return tensor.abs() == inf
    + + +
    [docs]def meshgrid(*tensors, **kwargs): + r"""Take :math:`N` tensors, each of which can be either scalar or 1-dimensional +vector, and create :math:`N` N-dimensional grids, where the :math:`i` :sup:`th` grid is defined by +expanding the :math:`i` :sup:`th` input over dimensions defined by other inputs. + + + Args: + tensors (list of Tensor): list of scalars or 1 dimensional tensors. Scalars will be + treated as tensors of size :math:`(1,)` automatically + + Returns: + seq (sequence of Tensors): If the input has :math:`k` tensors of size + :math:`(N_1,), (N_2,), \ldots , (N_k,)`, then the output would also have :math:`k` tensors, + where all tensors are of size :math:`(N_1, N_2, \ldots , N_k)`. + + Example:: + + >>> x = torch.tensor([1, 2, 3]) + >>> y = torch.tensor([4, 5, 6]) + >>> grid_x, grid_y = torch.meshgrid(x, y) + >>> grid_x + tensor([[1, 1, 1], + [2, 2, 2], + [3, 3, 3]]) + >>> grid_y + tensor([[4, 5, 6], + [4, 5, 6], + [4, 5, 6]]) + """ + if kwargs: + raise TypeError("meshgrid() got an unexpected keyword argument '%s'" % (list(kwargs)[0],)) + if len(tensors) == 1 and isinstance(tensors[0], (list, tuple)): + # the old interface of passing the operands as one list argument + tensors = tensors[0] + return torch._C._VariableFunctions.meshgrid(tensors)
    + + +
    [docs]def stft(input, n_fft, hop_length=None, win_length=None, window=None, + center=True, pad_mode='reflect', normalized=False, onesided=True): + # type: (Tensor, int, Optional[int], Optional[int], Optional[Tensor], bool, str, bool, bool) -> Tensor + r"""Short-time Fourier transform (STFT). + + Ignoring the optional batch dimension, this method computes the following + expression: + + .. math:: + X[m, \omega] = \sum_{k = 0}^{\text{win\_length-1}}% + \text{window}[k]\ \text{input}[m \times \text{hop\_length} + k]\ % + \exp\left(- j \frac{2 \pi \cdot \omega k}{\text{win\_length}}\right), + + where :math:`m` is the index of the sliding window, and :math:`\omega` is + the frequency that :math:`0 \leq \omega < \text{n\_fft}`. When + :attr:`onesided` is the default value ``True``, + + * :attr:`input` must be either a 1-D time sequence or a 2-D batch of time + sequences. + + * If :attr:`hop_length` is ``None`` (default), it is treated as equal to + ``floor(n_fft / 4)``. + + * If :attr:`win_length` is ``None`` (default), it is treated as equal to + :attr:`n_fft`. + + * :attr:`window` can be a 1-D tensor of size :attr:`win_length`, e.g., from + :meth:`torch.hann_window`. If :attr:`window` is ``None`` (default), it is + treated as if having :math:`1` everywhere in the window. If + :math:`\text{win\_length} < \text{n\_fft}`, :attr:`window` will be padded on + both sides to length :attr:`n_fft` before being applied. + + * If :attr:`center` is ``True`` (default), :attr:`input` will be padded on + both sides so that the :math:`t`-th frame is centered at time + :math:`t \times \text{hop\_length}`. Otherwise, the :math:`t`-th frame + begins at time :math:`t \times \text{hop\_length}`. + + * :attr:`pad_mode` determines the padding method used on :attr:`input` when + :attr:`center` is ``True``. See :meth:`torch.nn.functional.pad` for + all available options. Default is ``"reflect"``. + + * If :attr:`onesided` is ``True`` (default), only values for :math:`\omega` + in :math:`\left[0, 1, 2, \dots, \left\lfloor \frac{\text{n\_fft}}{2} \right\rfloor + 1\right]` + are returned because the real-to-complex Fourier transform satisfies the + conjugate symmetry, i.e., :math:`X[m, \omega] = X[m, \text{n\_fft} - \omega]^*`. + + * If :attr:`normalized` is ``True`` (default is ``False``), the function + returns the normalized STFT results, i.e., multiplied by :math:`(\text{frame\_length})^{-0.5}`. + + Returns the real and the imaginary parts together as one tensor of size + :math:`(* \times N \times T \times 2)`, where :math:`*` is the optional + batch size of :attr:`input`, :math:`N` is the number of frequencies where + STFT is applied, :math:`T` is the total number of frames used, and each pair + in the last dimension represents a complex number as the real part and the + imaginary part. + + .. warning:: + This function changed signature at version 0.4.1. Calling with the + previous signature may cause error or return incorrect result. + + Arguments: + input (Tensor): the input tensor + n_fft (int): size of Fourier transform + hop_length (int, optional): the distance between neighboring sliding window + frames. Default: ``None`` (treated as equal to ``floor(n_fft / 4)``) + win_length (int, optional): the size of window frame and STFT filter. + Default: ``None`` (treated as equal to :attr:`n_fft`) + window (Tensor, optional): the optional window function. 
+ Default: ``None`` (treated as window of all :math:`1` s) + center (bool, optional): whether to pad :attr:`input` on both sides so + that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`. + Default: ``True`` + pad_mode (string, optional): controls the padding method used when + :attr:`center` is ``True``. Default: ``"reflect"`` + normalized (bool, optional): controls whether to return the normalized STFT results + Default: ``False`` + onesided (bool, optional): controls whether to return half of results to + avoid redundancy Default: ``True`` + + Returns: + Tensor: A tensor containing the STFT result with shape described above + + """ + # TODO: after having proper ways to map Python strings to ATen Enum, move + # this and F.pad to ATen. + if center: + signal_dim = input.dim() + extended_shape = [1] * (3 - signal_dim) + list(input.size()) + pad = int(n_fft // 2) + input = F.pad(input.view(extended_shape), (pad, pad), pad_mode) + input = input.view(input.shape[-signal_dim:]) + return torch._C._VariableFunctions.stft(input, n_fft, hop_length, win_length, window, normalized, onesided)
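An illustrative shape check for the description above, using the signature documented here (note: recent PyTorch releases additionally require a `return_complex` argument, so this sketch follows the 1.2-era API):

    import torch

    x = torch.randn(1000)
    s = torch.stft(x, n_fft=256)  # center=True, hop_length defaults to n_fft // 4
    print(s.shape)  # torch.Size([129, 16, 2]): n_fft // 2 + 1 freqs, 16 frames, (re, im)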
    + + +del torch.unique_dim + + +
    [docs]def unique(input, sorted=True, return_inverse=False, return_counts=False, dim=None): + r"""Returns the unique elements of the input tensor. + + Arguments: + input (Tensor): the input tensor + sorted (bool): Whether to sort the unique elements in ascending order + before returning as output. + return_inverse (bool): Whether to also return the indices for where + elements in the original input ended up in the returned unique list. + return_counts (bool): Whether to also return the counts for each unique + element. + dim (int): the dimension to apply unique. If ``None``, the unique of the + flattened input is returned. default: ``None`` + + Returns: + (Tensor, Tensor (optional), Tensor (optional)): A tensor or a tuple of tensors containing + + - **output** (*Tensor*): the output list of unique scalar elements. + - **inverse_indices** (*Tensor*): (optional) if + :attr:`return_inverse` is True, there will be an additional + returned tensor (same shape as input) representing the indices + for where elements in the original input map to in the output; + otherwise, this function will only return a single tensor. + - **counts** (*Tensor*): (optional) if + :attr:`return_counts` is True, there will be an additional + returned tensor (same shape as output or output.size(dim), + if dim was specified) representing the number of occurrences + for each unique value or tensor. + + Example:: + + >>> output = torch.unique(torch.tensor([1, 3, 2, 3], dtype=torch.long)) + >>> output + tensor([ 2, 3, 1]) + + >>> output, inverse_indices = torch.unique( + torch.tensor([1, 3, 2, 3], dtype=torch.long), sorted=True, return_inverse=True) + >>> output + tensor([ 1, 2, 3]) + >>> inverse_indices + tensor([ 0, 2, 1, 2]) + + >>> output, inverse_indices = torch.unique( + torch.tensor([[1, 3], [2, 3]], dtype=torch.long), sorted=True, return_inverse=True) + >>> output + tensor([ 1, 2, 3]) + >>> inverse_indices + tensor([[ 0, 2], + [ 1, 2]]) + + """ + if dim is not None: + output, inverse_indices, counts = torch._C._VariableFunctions.unique_dim( + input, + dim, + sorted=sorted, + return_inverse=return_inverse, + return_counts=return_counts, + ) + else: + output, inverse_indices, counts = torch._unique2( + input, + sorted=sorted, + return_inverse=return_inverse, + return_counts=return_counts, + ) + if return_inverse and return_counts: + return output, inverse_indices, counts + elif return_inverse: + return output, inverse_indices + elif return_counts: + return output, counts + else: + return output
    + + +
[docs]def unique_consecutive(input, return_inverse=False, return_counts=False, dim=None): + r"""Eliminates all but the first element from every consecutive group of equivalent elements. + + .. note:: This function is different from :func:`torch.unique` in the sense that this function + only eliminates consecutive duplicate values. Its semantics are similar to those of `std::unique` + in C++. + + Arguments: + input (Tensor): the input tensor + return_inverse (bool): Whether to also return the indices for where + elements in the original input ended up in the returned unique list. + return_counts (bool): Whether to also return the counts for each unique + element. + dim (int): the dimension to apply unique. If ``None``, the unique of the + flattened input is returned. Default: ``None`` + + Returns: + (Tensor, Tensor (optional), Tensor (optional)): A tensor or a tuple of tensors containing + + - **output** (*Tensor*): the output list of unique scalar elements. + - **inverse_indices** (*Tensor*): (optional) if + :attr:`return_inverse` is True, there will be an additional + returned tensor (same shape as input) representing the indices + for where elements in the original input map to in the output; + otherwise, this function will only return a single tensor. + - **counts** (*Tensor*): (optional) if + :attr:`return_counts` is True, there will be an additional + returned tensor (same shape as output or output.size(dim), + if dim was specified) representing the number of occurrences + for each unique value or tensor. + + Example:: + + >>> x = torch.tensor([1, 1, 2, 2, 3, 1, 1, 2]) + >>> output = torch.unique_consecutive(x) + >>> output + tensor([1, 2, 3, 1, 2]) + + >>> output, inverse_indices = torch.unique_consecutive(x, return_inverse=True) + >>> output + tensor([1, 2, 3, 1, 2]) + >>> inverse_indices + tensor([0, 0, 1, 1, 2, 3, 3, 4]) + + >>> output, counts = torch.unique_consecutive(x, return_counts=True) + >>> output + tensor([1, 2, 3, 1, 2]) + >>> counts + tensor([2, 2, 1, 2, 1]) + """ + output, inverse_indices, counts = torch._C._VariableFunctions.unique_consecutive( + input, return_inverse=return_inverse, return_counts=return_counts, dim=dim) + if return_inverse and return_counts: + return output, inverse_indices, counts + if return_inverse: + return output, inverse_indices + if return_counts: + return output, counts + return output
    + + +
[docs]def tensordot(a, b, dims=2): + r"""Returns a contraction of a and b over multiple dimensions. + + :attr:`tensordot` implements a generalized matrix product. + + Args: + a (Tensor): Left tensor to contract + b (Tensor): Right tensor to contract + dims (int or tuple of two lists of integers): number of dimensions to + contract or explicit lists of dimensions for :attr:`a` and + :attr:`b` respectively + + When called with an integer argument :attr:`dims` = :math:`d`, and the number of + dimensions of :attr:`a` and :attr:`b` is :math:`m` and :math:`n`, respectively, + it computes + + .. math:: + r_{i_0,...,i_{m-d}, i_d,...,i_n} + = \sum_{k_0,...,k_{d-1}} a_{i_0,...,i_{m-d},k_0,...,k_{d-1}} \times b_{k_0,...,k_{d-1}, i_d,...,i_n}. + + When called with :attr:`dims` of the list form, the given dimensions will be contracted + in place of the last :math:`d` of :attr:`a` and the first :math:`d` of :attr:`b`. The sizes + in these dimensions must match, but :attr:`tensordot` will deal with broadcasted + dimensions. + + Examples:: + + >>> a = torch.arange(60.).reshape(3, 4, 5) + >>> b = torch.arange(24.).reshape(4, 3, 2) + >>> torch.tensordot(a, b, dims=([1, 0], [0, 1])) + tensor([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) + + >>> a = torch.randn(3, 4, 5, device='cuda') + >>> b = torch.randn(4, 5, 6, device='cuda') + >>> torch.tensordot(a, b, dims=2).cpu() + tensor([[ 8.3504, -2.5436, 6.2922, 2.7556, -1.0732, 3.2741], + [ 3.3161, 0.0704, 5.0187, -0.4079, -4.3126, 4.8744], + [ 0.8223, 3.9445, 3.2168, -0.2400, 3.4117, 1.7780]]) + + """ + if isinstance(dims, (list, tuple)) or \ + (isinstance(dims, torch.Tensor) and dims.numel() > 1): + dims_a, dims_b = dims + else: + if isinstance(dims, torch.Tensor): + dims = dims.item() + dims_a = list(range(-dims, 0)) + dims_b = list(range(dims)) + return torch._C._VariableFunctions.tensordot(a, b, dims_a, dims_b)
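A sketch of the equivalence described above: for an integer ``dims=d`` the contraction runs over the last ``d`` dimensions of ``a`` and the first ``d`` dimensions of ``b``, so ``dims=2`` here matches the explicit list form.

    >>> a = torch.randn(3, 4, 5)
    >>> b = torch.randn(4, 5, 6)
    >>> torch.allclose(torch.tensordot(a, b, dims=2),
    ...                torch.tensordot(a, b, dims=([1, 2], [0, 1])))
    True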
    + + +
[docs]def cartesian_prod(*tensors): + """Computes the Cartesian product of the given sequence of tensors. The behavior is similar to + Python's `itertools.product`. + + Arguments: + *tensors: any number of 1-dimensional tensors. + + Returns: + Tensor: A tensor equivalent to converting all the input tensors into lists, + doing `itertools.product` on these lists, and finally converting the resulting + list into a tensor. + + Example:: + + >>> import itertools + >>> a = [1, 2, 3] + >>> b = [4, 5] + >>> list(itertools.product(a, b)) + [(1, 4), (1, 5), (2, 4), (2, 5), (3, 4), (3, 5)] + >>> tensor_a = torch.tensor(a) + >>> tensor_b = torch.tensor(b) + >>> torch.cartesian_prod(tensor_a, tensor_b) + tensor([[1, 4], + [1, 5], + [2, 4], + [2, 5], + [3, 4], + [3, 5]]) + """ + return torch._C._VariableFunctions.cartesian_prod(tensors)
    + + +
    [docs]def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None): + r"""Returns the matrix norm or vector norm of a given tensor. + + Args: + input (Tensor): the input tensor + p (int, float, inf, -inf, 'fro', 'nuc', optional): the order of norm. Default: ``'fro'`` + The following norms can be calculated: + + ===== ============================ ========================== + ord matrix norm vector norm + ===== ============================ ========================== + None Frobenius norm 2-norm + 'fro' Frobenius norm -- + 'nuc' nuclear norm -- + Other as vec norm when dim is None sum(abs(x)**ord)**(1./ord) + ===== ============================ ========================== + + dim (int, 2-tuple of ints, 2-list of ints, optional): If it is an int, + vector norm will be calculated, if it is 2-tuple of ints, matrix norm + will be calculated. If the value is None, matrix norm will be calculated + when the input tensor only has two dimensions, vector norm will be + calculated when the input tensor only has one dimension. If the input + tensor has more than two dimensions, the vector norm will be applied to + last dimension. + keepdim (bool, optional): whether the output tensors have :attr:`dim` + retained or not. Ignored if :attr:`dim` = ``None`` and + :attr:`out` = ``None``. Default: ``False`` + out (Tensor, optional): the output tensor. Ignored if + :attr:`dim` = ``None`` and :attr:`out` = ``None``. + dtype (:class:`torch.dtype`, optional): the desired data type of + returned tensor. If specified, the input tensor is casted to + :attr:'dtype' while performing the operation. Default: None. + + + Example:: + + >>> import torch + >>> a = torch.arange(9, dtype= torch.float) - 4 + >>> b = a.reshape((3, 3)) + >>> torch.norm(a) + tensor(7.7460) + >>> torch.norm(b) + tensor(7.7460) + >>> torch.norm(a, float('inf')) + tensor(4.) + >>> torch.norm(b, float('inf')) + tensor(4.) 
+ >>> c = torch.tensor([[ 1, 2, 3],[-1, 1, 4]] , dtype= torch.float) + >>> torch.norm(c, dim=0) + tensor([1.4142, 2.2361, 5.0000]) + >>> torch.norm(c, dim=1) + tensor([3.7417, 4.2426]) + >>> torch.norm(c, p=1, dim=1) + tensor([6., 6.]) + >>> d = torch.arange(8, dtype= torch.float).reshape(2,2,2) + >>> torch.norm(d, dim=(1,2)) + tensor([ 3.7417, 11.2250]) + >>> torch.norm(d[0, :, :]), torch.norm(d[1, :, :]) + (tensor(3.7417), tensor(11.2250)) + """ + ndim = input.dim() + + # catch default case + if dim is None and out is None and dtype is None: + if p == "fro": + return torch._C._VariableFunctions.frobenius_norm(input) + elif p != "nuc": + return torch._C._VariableFunctions.norm(input, p) + + if p == "fro": + if dtype is not None: + raise ValueError("dtype argument is not supported in frobenius norm") + if dim is None: + dim = tuple(range(ndim)) + if out is None: + return torch._C._VariableFunctions.frobenius_norm(input, dim, keepdim=keepdim) + return torch._C._VariableFunctions.frobenius_norm(input, dim, keepdim=keepdim, out=out) + elif p == "nuc": + if dtype is not None: + raise ValueError("dtype argument is not supported in nuclear norm") + if dim is None: + if out is None: + return torch._C._VariableFunctions.nuclear_norm(input, keepdim=keepdim) + return torch._C._VariableFunctions.nuclear_norm(input, keepdim=keepdim, out=out) + return torch._C._VariableFunctions.nuclear_norm(input, dim, keepdim=keepdim, out=out) + else: + if dim is None: + dim = tuple(range(ndim)) + if out is None and dtype is None: + return torch._C._VariableFunctions.norm(input, p, dim, keepdim=keepdim) + elif out is None: + return torch._C._VariableFunctions.norm(input, p, dim, keepdim=keepdim, dtype=dtype) + elif dtype is None: + return torch._C._VariableFunctions.norm(input, p, dim, keepdim=keepdim, out=out) + return torch._C._VariableFunctions.norm(input, p, dim, keepdim=keepdim, dtype=dtype, out=out)
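``keepdim`` is documented above but not demonstrated; a short sketch showing that it retains the reduced dimension with size 1, which allows broadcasting the norm back against the input:

    >>> c = torch.tensor([[1., 2., 3.], [-1., 1., 4.]])
    >>> torch.norm(c, dim=1).shape
    torch.Size([2])
    >>> torch.norm(c, dim=1, keepdim=True).shape
    torch.Size([2, 1])
    >>> normalized_rows = c / torch.norm(c, dim=1, keepdim=True)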
    + + +
[docs]def chain_matmul(*matrices): + r"""Returns the matrix product of the :math:`N` 2-D tensors. This product is efficiently computed + using the matrix chain order algorithm, which selects the order that incurs the lowest cost in terms + of arithmetic operations (`[CLRS]`_). Note that since this is a function to compute the product, :math:`N` + needs to be greater than or equal to 2; if equal to 2 then a trivial matrix-matrix product is returned. + If :math:`N` is 1, then this is a no-op - the original matrix is returned as is. + + + Args: + matrices (Tensors...): a sequence of 2 or more 2-D tensors whose product is to be determined. + + + Returns: + Tensor: if the :math:`i^{th}` tensor was of dimensions :math:`p_{i} \times p_{i + 1}`, then the product + would be of dimensions :math:`p_{1} \times p_{N + 1}`. + + Example:: + + >>> a = torch.randn(3, 4) + >>> b = torch.randn(4, 5) + >>> c = torch.randn(5, 6) + >>> d = torch.randn(6, 7) + >>> torch.chain_matmul(a, b, c, d) + tensor([[ -2.3375, -3.9790, -4.1119, -6.6577, 9.5609, -11.5095, -3.2614], + [ 21.4038, 3.3378, -8.4982, -5.2457, -10.2561, -2.4684, 2.7163], + [ -0.9647, -5.8917, -2.3213, -5.2284, 12.8615, -12.2816, -2.5095]]) + + .. _`[CLRS]`: https://mitpress.mit.edu/books/introduction-algorithms-third-edition + """ + return torch._C._VariableFunctions.chain_matmul(matrices)
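A sketch of the guarantee above: the result equals the plain left-to-right product; only the multiplication order (and hence the arithmetic cost) differs.

    >>> a, b, c, d = torch.randn(3, 4), torch.randn(4, 5), torch.randn(5, 6), torch.randn(6, 7)
    >>> torch.allclose(torch.chain_matmul(a, b, c, d), a @ b @ c @ d, atol=1e-5)
    True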
    + + +
[docs]def lu(A, pivot=True, get_infos=False, out=None): + r"""Computes the LU factorization of a square matrix or batches of square matrices + :attr:`A`. Returns a tuple containing the LU factorization and pivots of :attr:`A`. + Pivoting is done if :attr:`pivot` is set to ``True``. + + .. note:: + The pivots returned by the function are 1-indexed. If :attr:`pivot` is ``False``, + then the returned pivot tensor is filled with zeros of the appropriate size. + + .. note:: + LU factorization with :attr:`pivot` = ``False`` is not available for CPU, and attempting + to do so will throw an error. However, LU factorization with :attr:`pivot` = ``False`` is + available for CUDA. + + .. note:: + This function does not check if the factorization was successful or not if + :attr:`get_infos` is ``True`` since the status of the factorization is present in the + third element of the return tuple. + + Arguments: + A (Tensor): the tensor to factor of size :math:`(*, m, m)` + pivot (bool, optional): controls whether pivoting is done. Default: ``True`` + get_infos (bool, optional): if set to ``True``, returns an info IntTensor. + Default: ``False`` + out (tuple, optional): optional output tuple. If :attr:`get_infos` is ``True``, + then the elements in the tuple are Tensor, IntTensor, + and IntTensor. If :attr:`get_infos` is ``False``, then the + elements in the tuple are Tensor, IntTensor. Default: ``None`` + + Returns: + (Tensor, IntTensor, IntTensor (optional)): A tuple of tensors containing + + - **factorization** (*Tensor*): the factorization of size :math:`(*, m, m)` + + - **pivots** (*IntTensor*): the pivots of size :math:`(*, m)` + + - **infos** (*IntTensor*, *optional*): if :attr:`get_infos` is ``True``, this is a tensor of + size :math:`(*)` where non-zero values indicate whether factorization for the matrix or + each minibatch has succeeded or failed + + Example:: + + >>> A = torch.randn(2, 3, 3) + >>> A_LU, pivots = torch.lu(A) + >>> A_LU + tensor([[[ 1.3506, 2.5558, -0.0816], + [ 0.1684, 1.1551, 0.1940], + [ 0.1193, 0.6189, -0.5497]], + + [[ 0.4526, 1.2526, -0.3285], + [-0.7988, 0.7175, -0.9701], + [ 0.2634, -0.9255, -0.3459]]]) + >>> pivots + tensor([[ 3, 3, 3], + [ 3, 3, 3]], dtype=torch.int32) + >>> A_LU, pivots, info = torch.lu(A, get_infos=True) + >>> if info.nonzero().size(0) == 0: + ... print('LU factorization succeeded for all samples!') + LU factorization succeeded for all samples! + """ + # If get_infos is True, then we don't need to check for errors and vice versa + result = torch._lu_with_info(A, pivot=pivot, check_errors=(not get_infos)) + if out is not None: + if not isinstance(out, (tuple, list)): + raise TypeError("argument 'out' must be a tuple of Tensors, not {}" + .format(type(out).__name__)) + if len(out) - int(get_infos) != 2: + raise TypeError("expected tuple of {} elements but got {}" + .format(2 + int(get_infos), len(out))) + # Materialize the results as a tuple; returning the bare generator + # expression would hand callers a generator instead of tensors. + return tuple(out[i].resize_as_(result[i]).copy_(result[i]) for i in range(len(out))) + if get_infos: + return result # A_LU, pivots, infos + else: + return result[0], result[1] # A_LU, pivots
    + + +
    [docs]def gels(input, A, out=None): + r"""Computes the solution to the least squares and least norm problems for a full + rank matrix :math:`A` of size :math:`(m \times n)` and a matrix :math:`B` of + size :math:`(m \times k)`. + + For more information regarding :func:`torch.gels`, please check :func:`torch.lstsq`. + + .. warning:: + :func:`torch.gels` is deprecated in favour of :func:`torch.lstsq` and will be removed in the + next release. Please use :func:`torch.lstsq` instead. + """ + warnings.warn("torch.gels is deprecated in favour of torch.lstsq and will be removed in " + "the next release. Please use torch.lstsq instead.", stacklevel=2) + return torch.lstsq(input, A, out=out)
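A minimal migration sketch for the deprecation above, assuming a full-rank ``A``: ``torch.lstsq`` takes the same arguments and returns the same ``(solution, QR)`` pair, so it is a drop-in replacement.

    >>> B = torch.randn(5, 2)      # the m x k right-hand sides
    >>> A = torch.randn(5, 3)      # m x n, assumed full rank
    >>> X, qr = torch.lstsq(B, A)  # previously: torch.gels(B, A)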
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/hub.html b/docs/stable/_modules/torch/hub.html
new file mode 100644
index 000000000000..9659dfe8538f
--- /dev/null
+++ b/docs/stable/_modules/torch/hub.html
@@ -0,0 +1,972 @@
+ torch.hub — PyTorch master documentation

    Source code for torch.hub

    +from __future__ import absolute_import, division, print_function, unicode_literals
    +import errno
    +import hashlib
    +import os
    +import re
    +import shutil
    +import sys
    +import tempfile
    +import torch
    +import warnings
    +import zipfile
    +
    +if sys.version_info[0] == 2:
    +    from urlparse import urlparse
+    from urllib2 import urlopen  # noqa: F811
    +else:
    +    from urllib.request import urlopen
    +    from urllib.parse import urlparse  # noqa: F401
    +
    +try:
    +    from tqdm import tqdm
    +except ImportError:
    +    # fake tqdm if it's not installed
    +    class tqdm(object):
    +
    +        def __init__(self, total=None, disable=False,
    +                     unit=None, unit_scale=None, unit_divisor=None):
    +            self.total = total
    +            self.disable = disable
    +            self.n = 0
    +            # ignore unit, unit_scale, unit_divisor; they're just for real tqdm
    +
    +        def update(self, n):
    +            if self.disable:
    +                return
    +
    +            self.n += n
    +            if self.total is None:
    +                sys.stderr.write("\r{0:.1f} bytes".format(self.n))
    +            else:
    +                sys.stderr.write("\r{0:.1f}%".format(100 * self.n / float(self.total)))
    +            sys.stderr.flush()
    +
    +        def __enter__(self):
    +            return self
    +
    +        def __exit__(self, exc_type, exc_val, exc_tb):
    +            if self.disable:
    +                return
    +
    +            sys.stderr.write('\n')
    +
    +# matches bfd8deac from resnet18-bfd8deac.pth
    +HASH_REGEX = re.compile(r'-([a-f0-9]*)\.')
    +
    +MASTER_BRANCH = 'master'
    +ENV_TORCH_HOME = 'TORCH_HOME'
    +ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
    +DEFAULT_CACHE_DIR = '~/.cache'
    +VAR_DEPENDENCY = 'dependencies'
    +MODULE_HUBCONF = 'hubconf.py'
    +READ_DATA_CHUNK = 8192
    +hub_dir = None
    +
    +
    +# Copied from tools/shared/module_loader to be included in torch package
    +def import_module(name, path):
    +    if sys.version_info >= (3, 5):
    +        import importlib.util
    +        spec = importlib.util.spec_from_file_location(name, path)
    +        module = importlib.util.module_from_spec(spec)
    +        spec.loader.exec_module(module)
    +        return module
    +    elif sys.version_info >= (3, 0):
    +        from importlib.machinery import SourceFileLoader
    +        return SourceFileLoader(name, path).load_module()
    +    else:
    +        import imp
    +        return imp.load_source(name, path)
    +
    +
    +def _remove_if_exists(path):
    +    if os.path.exists(path):
    +        if os.path.isfile(path):
    +            os.remove(path)
    +        else:
    +            shutil.rmtree(path)
    +
    +
    +def _git_archive_link(repo_owner, repo_name, branch):
    +    return 'https://github.com/{}/{}/archive/{}.zip'.format(repo_owner, repo_name, branch)
    +
    +
    +def _download_archive_zip(url, filename):
    +    sys.stderr.write('Downloading: \"{}\" to {}\n'.format(url, filename))
    +    response = urlopen(url)
    +    with open(filename, 'wb') as f:
    +        while True:
    +            data = response.read(READ_DATA_CHUNK)
    +            if len(data) == 0:
    +                break
    +            f.write(data)
    +
    +
    +def _load_attr_from_module(module, func_name):
    +    # Check if callable is defined in the module
    +    if func_name not in dir(module):
    +        return None
    +    return getattr(module, func_name)
    +
    +
    +def _get_torch_home():
    +    torch_home = os.path.expanduser(
    +        os.getenv(ENV_TORCH_HOME,
    +                  os.path.join(os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch')))
    +    return torch_home
    +
    +
    +def _setup_hubdir():
    +    global hub_dir
    +    # Issue warning to move data if old env is set
    +    if os.getenv('TORCH_HUB'):
    +        warnings.warn('TORCH_HUB is deprecated, please use env TORCH_HOME instead')
    +
    +    if hub_dir is None:
    +        torch_home = _get_torch_home()
    +        hub_dir = os.path.join(torch_home, 'hub')
    +
    +    if not os.path.exists(hub_dir):
    +        os.makedirs(hub_dir)
    +
    +
    +def _parse_repo_info(github):
    +    branch = MASTER_BRANCH
    +    if ':' in github:
    +        repo_info, branch = github.split(':')
    +    else:
    +        repo_info = github
    +    repo_owner, repo_name = repo_info.split('/')
    +    return repo_owner, repo_name, branch
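+# Illustrative note (not part of the original source): for example,
+# _parse_repo_info('pytorch/vision:v0.3.0') returns
+# ('pytorch', 'vision', 'v0.3.0'), while _parse_repo_info('pytorch/vision')
+# falls back to the default branch and returns ('pytorch', 'vision', 'master').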
    +
    +
    +def _get_cache_or_reload(github, force_reload):
    +    # Parse github repo information
    +    repo_owner, repo_name, branch = _parse_repo_info(github)
    +
    +    # Github renames folder repo-v1.x.x to repo-1.x.x
+    # We don't know the repo name until we download the zip file
+    # and inspect its name.
+    # To check whether a cached repo exists, we need to normalize folder names.
    +    repo_dir = os.path.join(hub_dir, '_'.join([repo_owner, repo_name, branch]))
    +
    +    use_cache = (not force_reload) and os.path.exists(repo_dir)
    +
    +    if use_cache:
    +        sys.stderr.write('Using cache found in {}\n'.format(repo_dir))
    +    else:
    +        cached_file = os.path.join(hub_dir, branch + '.zip')
    +        _remove_if_exists(cached_file)
    +
    +        url = _git_archive_link(repo_owner, repo_name, branch)
    +        _download_archive_zip(url, cached_file)
    +
    +        with zipfile.ZipFile(cached_file) as cached_zipfile:
+            extracted_repo_name = cached_zipfile.infolist()[0].filename
+            extracted_repo = os.path.join(hub_dir, extracted_repo_name)
    +            _remove_if_exists(extracted_repo)
    +            # Unzip the code and rename the base folder
    +            cached_zipfile.extractall(hub_dir)
    +
    +        _remove_if_exists(cached_file)
    +        _remove_if_exists(repo_dir)
    +        shutil.move(extracted_repo, repo_dir)  # rename the repo
    +
    +    return repo_dir
    +
    +
    +def _check_module_exists(name):
    +    if sys.version_info >= (3, 4):
    +        import importlib.util
    +        return importlib.util.find_spec(name) is not None
    +    elif sys.version_info >= (3, 3):
    +        # Special case for python3.3
+        import importlib  # find_loader is a function on importlib, not a submodule
    +        return importlib.find_loader(name) is not None
    +    else:
    +        # NB: Python2.7 imp.find_module() doesn't respect PEP 302,
    +        #     it cannot find a package installed as .egg(zip) file.
    +        #     Here we use workaround from:
    +        #     https://stackoverflow.com/questions/28962344/imp-find-module-which-supports-zipped-eggs?lq=1
+        #     Also imp doesn't handle hierarchical module names (names containing dots).
    +        try:
    +            # 1. Try imp.find_module(), which searches sys.path, but does
    +            # not respect PEP 302 import hooks.
    +            import imp
    +            result = imp.find_module(name)
    +            if result:
    +                return True
    +        except ImportError:
    +            pass
    +        path = sys.path
    +        for item in path:
    +            # 2. Scan path for import hooks. sys.path_importer_cache maps
+            # path items to optional "importer" objects that implement
    +            # find_module() etc.  Note that path must be a subset of
    +            # sys.path for this to work.
    +            importer = sys.path_importer_cache.get(item)
    +            if importer:
    +                try:
    +                    result = importer.find_module(name, [item])
    +                    if result:
    +                        return True
    +                except ImportError:
    +                    pass
    +        return False
    +
    +def _check_dependencies(m):
    +    dependencies = _load_attr_from_module(m, VAR_DEPENDENCY)
    +
    +    if dependencies is not None:
    +        missing_deps = [pkg for pkg in dependencies if not _check_module_exists(pkg)]
    +        if len(missing_deps):
    +            raise RuntimeError('Missing dependencies: {}'.format(', '.join(missing_deps)))
    +
    +
    +def _load_entry_from_hubconf(m, model):
    +    if not isinstance(model, str):
    +        raise ValueError('Invalid input: model should be a string of function name')
    +
    +    # Note that if a missing dependency is imported at top level of hubconf, it will
    +    # throw before this function. It's a chicken and egg situation where we have to
+    # load hubconf to know what the dependencies are, but importing hubconf requires
    +    # a missing package. This is fine, Python will throw proper error message for users.
    +    _check_dependencies(m)
    +
    +    func = _load_attr_from_module(m, model)
    +
    +    if func is None or not callable(func):
    +        raise RuntimeError('Cannot find callable {} in hubconf'.format(model))
    +
    +    return func
    +
    +
    +
[docs]def set_dir(d): + r""" + Optionally set hub_dir to a local dir to save downloaded models & weights. + + If ``set_dir`` is not called, default path is ``$TORCH_HOME/hub`` where + environment variable ``$TORCH_HOME`` defaults to ``$XDG_CACHE_HOME/torch``. + ``$XDG_CACHE_HOME`` follows the X Desktop Group specification of the Linux + filesystem layout, with a default value ``~/.cache`` if the environment + variable is not set. + + + Args: + d: path to a local folder to save downloaded models & weights. + """ + global hub_dir + hub_dir = d
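A minimal usage sketch (the path below is illustrative, not a PyTorch default):

    >>> import torch
    >>> torch.hub.set_dir('/tmp/my_hub_cache')
    >>> entrypoints = torch.hub.list('pytorch/vision')  # now cached under /tmp/my_hub_cache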
    + + +
[docs]def list(github, force_reload=False): + r""" + List all entrypoints available in `github` hubconf. + + Args: + github: Required, a string with format "repo_owner/repo_name[:tag_name]" with an optional + tag/branch. The default branch is `master` if not specified. + Example: 'pytorch/vision[:hub]' + force_reload: Optional, whether to discard the existing cache and force a fresh download. + Default is `False`. + Returns: + entrypoints: a list of available entrypoint names + + Example: + >>> entrypoints = torch.hub.list('pytorch/vision', force_reload=True) + """ + # Setup hub_dir to save downloaded files + _setup_hubdir() + + repo_dir = _get_cache_or_reload(github, force_reload) + + sys.path.insert(0, repo_dir) + + hub_module = import_module(MODULE_HUBCONF, repo_dir + '/' + MODULE_HUBCONF) + + sys.path.remove(repo_dir) + + # We treat functions that start with '_' as internal helpers and skip them + entrypoints = [f for f in dir(hub_module) if callable(getattr(hub_module, f)) and not f.startswith('_')] + + return entrypoints
    + + +
    [docs]def help(github, model, force_reload=False): + r""" + Show the docstring of entrypoint `model`. + + Args: + github: Required, a string with format <repo_owner/repo_name[:tag_name]> with an optional + tag/branch. The default branch is `master` if not specified. + Example: 'pytorch/vision[:hub]' + model: Required, a string of entrypoint name defined in repo's hubconf.py + force_reload: Optional, whether to discard the existing cache and force a fresh download. + Default is `False`. + Example: + >>> print(torch.hub.help('pytorch/vision', 'resnet18', force_reload=True)) + """ + # Setup hub_dir to save downloaded files + _setup_hubdir() + + repo_dir = _get_cache_or_reload(github, force_reload) + + sys.path.insert(0, repo_dir) + + hub_module = import_module(MODULE_HUBCONF, repo_dir + '/' + MODULE_HUBCONF) + + sys.path.remove(repo_dir) + + entry = _load_entry_from_hubconf(hub_module, model) + + return entry.__doc__
+ + +# Ideally this should be `def load(github, model, *args, force_reload=False, **kwargs):`, +# but Python 2 raises a syntax error for it. We have to skip force_reload in the function +# signature here and detect it in kwargs instead. +# TODO: fix it after Python2 EOL +
    [docs]def load(github, model, *args, **kwargs): + r""" + Load a model from a github repo, with pretrained weights. + + Args: + github: Required, a string with format "repo_owner/repo_name[:tag_name]" with an optional + tag/branch. The default branch is `master` if not specified. + Example: 'pytorch/vision[:hub]' + model: Required, a string of entrypoint name defined in repo's hubconf.py + *args: Optional, the corresponding args for callable `model`. + force_reload: Optional, whether to force a fresh download of github repo unconditionally. + Default is `False`. + **kwargs: Optional, the corresponding kwargs for callable `model`. + + Returns: + a single model with corresponding pretrained weights. + + Example: + >>> model = torch.hub.load('pytorch/vision', 'resnet50', pretrained=True) + """ + # Setup hub_dir to save downloaded files + _setup_hubdir() + + force_reload = kwargs.get('force_reload', False) + kwargs.pop('force_reload', None) + + repo_dir = _get_cache_or_reload(github, force_reload) + + sys.path.insert(0, repo_dir) + + hub_module = import_module(MODULE_HUBCONF, repo_dir + '/' + MODULE_HUBCONF) + + entry = _load_entry_from_hubconf(hub_module, model) + + model = entry(*args, **kwargs) + + sys.path.remove(repo_dir) + + return model
    + + +def _download_url_to_file(url, dst, hash_prefix, progress): + file_size = None + u = urlopen(url) + meta = u.info() + if hasattr(meta, 'getheaders'): + content_length = meta.getheaders("Content-Length") + else: + content_length = meta.get_all("Content-Length") + if content_length is not None and len(content_length) > 0: + file_size = int(content_length[0]) + + f = tempfile.NamedTemporaryFile(delete=False) + try: + if hash_prefix is not None: + sha256 = hashlib.sha256() + with tqdm(total=file_size, disable=not progress, + unit='B', unit_scale=True, unit_divisor=1024) as pbar: + while True: + buffer = u.read(8192) + if len(buffer) == 0: + break + f.write(buffer) + if hash_prefix is not None: + sha256.update(buffer) + pbar.update(len(buffer)) + + f.close() + if hash_prefix is not None: + digest = sha256.hexdigest() + if digest[:len(hash_prefix)] != hash_prefix: + raise RuntimeError('invalid hash value (expected "{}", got "{}")' + .format(hash_prefix, digest)) + shutil.move(f.name, dst) + finally: + f.close() + if os.path.exists(f.name): + os.remove(f.name) + + +def load_state_dict_from_url(url, model_dir=None, map_location=None, progress=True): + r"""Loads the Torch serialized object at the given URL. + + If the object is already present in `model_dir`, it's deserialized and + returned. The filename part of the URL should follow the naming convention + ``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more + digits of the SHA256 hash of the contents of the file. The hash is used to + ensure unique names and to verify the contents of the file. + + The default value of `model_dir` is ``$TORCH_HOME/checkpoints`` where + environment variable ``$TORCH_HOME`` defaults to ``$XDG_CACHE_HOME/torch``. + ``$XDG_CACHE_HOME`` follows the X Design Group specification of the Linux + filesytem layout, with a default value ``~/.cache`` if not set. + + Args: + url (string): URL of the object to download + model_dir (string, optional): directory in which to save the object + map_location (optional): a function or a dict specifying how to remap storage locations (see torch.load) + progress (bool, optional): whether or not to display a progress bar to stderr + + Example: + >>> state_dict = torch.hub.load_state_dict_from_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth') + + """ + # Issue warning to move data if old env is set + if os.getenv('TORCH_MODEL_ZOO'): + warnings.warn('TORCH_MODEL_ZOO is deprecated, please use env TORCH_HOME instead') + + if model_dir is None: + torch_home = _get_torch_home() + model_dir = os.path.join(torch_home, 'checkpoints') + + try: + os.makedirs(model_dir) + except OSError as e: + if e.errno == errno.EEXIST: + # Directory already exists, ignore. + pass + else: + # Unexpected OSError, re-raise. + raise + + parts = urlparse(url) + filename = os.path.basename(parts.path) + cached_file = os.path.join(model_dir, filename) + if not os.path.exists(cached_file): + sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file)) + hash_prefix = HASH_REGEX.search(filename).group(1) + _download_url_to_file(url, cached_file, hash_prefix, progress=progress) + return torch.load(cached_file, map_location=map_location) +
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/jit.html b/docs/stable/_modules/torch/jit.html
new file mode 100644
index 000000000000..3b4db4b09840
--- /dev/null
+++ b/docs/stable/_modules/torch/jit.html
@@ -0,0 +1,2644 @@
+ torch.jit — PyTorch master documentation

    Source code for torch.jit

    +import torch._C
    +from torch.autograd import Variable, function
    +from torch.serialization import validate_cuda_device
    +from torch.nn import Module, ModuleList, Parameter, Sequential
    +from torch.jit.frontend import get_jit_class_def, get_jit_def, get_default_args
    +import torch.backends.cudnn as cudnn
    +import torch.jit.annotations
    +import torch._jit_internal as _jit_internal
    +from torch._jit_internal import _qualified_name
    +from torch._six import PY2, PY37, with_metaclass, get_function_from_type, \
    +    string_classes
    +from ..nn.modules.utils import _single, _pair, _triple, _quadruple, \
    +    _list_with_default
    +import torch.testing
    +
    +import math
    +from collections import OrderedDict, namedtuple
    +import textwrap
    +import sys
    +import warnings
    +import weakref
    +import types
    +import contextlib
    +import os
    +import functools
    +import copy
    +import collections
    +import inspect
    +import pickle
    +
    +# These are imported so users can access them from the `torch.jit` module
    +from torch._jit_internal import Final  # noqa: F401
    +from torch._jit_internal import ignore, export  # noqa: F401
    +
    +if sys.version_info[0] > 2:
    +    import pathlib
    +
    +
    +def _parse_env(name, default, true_message, false_message):
    +    value = os.environ.get(name)
    +    if value is None:
    +        return default
    +    if value.lower() in {'1', 'true', 'yes'}:
    +        return True
    +    elif value.lower() in {'0', 'false', 'no'}:
    +        return False
    +    if value == '1v':
    +        print(true_message)
    +        return True
    +    elif value == '0v':
    +        print(false_message)
    +        return False
    +    raise ValueError('Unknown setting of {}. Try using 0 or 1.'.format(name))
    +
    +
    +_enabled = _parse_env('PYTORCH_JIT', True, "> Using PyTorch JIT", "> PyTorch JIT DISABLED")
    +_flatten = torch._C._jit_flatten
    +_unflatten = torch._C._jit_unflatten
    +_jit_script_class_compile = torch._C._jit_script_class_compile
    +
    +# The Python CompilationUnit. All functions and modules defined in Python will
    +# live in here. It's defined in Python because doing in cpp creates static
    +# destruction order issues.
    +_python_cu = torch._C.CompilationUnit()
    +
    +Future = torch._C.Future
    +_fork = torch._C.fork
    +_wait = torch._C.wait
    +
    +
    +@contextlib.contextmanager
    +def scope(scope_name):
    +    tracing_state = torch._C._get_tracing_state()
    +    if tracing_state:
    +        tracing_state.push_scope(scope_name)
    +    try:
    +        yield
    +    finally:
    +        if tracing_state:
    +            tracing_state.pop_scope()
    +
    +@contextlib.contextmanager
    +def optimized_execution(should_optimize):
    +    """
    +    A context manager that controls whether the JIT's executor will run
    +    optimizations before executing a function.
    +    """
    +    stored_flag = torch._C._get_graph_executor_optimize()
    +    torch._C._set_graph_executor_optimize(should_optimize)
    +    try:
    +        yield
    +    finally:
    +        torch._C._set_graph_executor_optimize(stored_flag)
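+# Usage sketch (illustrative, not part of the original source):
+#
+#     traced = torch.jit.trace(my_module, example_input)
+#     with torch.jit.optimized_execution(False):
+#         out = traced(example_input)  # runs without executor optimizations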
    +
    +
    +DEFAULT_EXTRA_FILES_MAP = torch._C.ExtraFilesMap()
    +
    +
    +
[docs]def load(f, map_location=None, _extra_files=DEFAULT_EXTRA_FILES_MAP): + r""" + Load a ``ScriptModule`` previously saved with :func:`save <torch.jit.save>` + + All previously saved modules, no matter their device, are first loaded onto CPU, + and then moved to the devices they were saved from. If this fails (e.g. because + the runtime system doesn't have certain devices), an exception is raised. + However, storages can be dynamically remapped to an alternative set of devices + using the `map_location` argument. Compared to :func:`torch.load`, `map_location` + in this function is simplified: it only accepts a string (e.g., 'cpu', 'cuda:0') + or torch.device (e.g., torch.device('cpu')) + + Arguments: + f: a file-like object (has to implement read, readline, tell, and seek), + or a string containing a file name + map_location: can be a string (e.g., 'cpu', 'cuda:0'), a device (e.g., + torch.device('cpu')) + _extra_files: map from filename to content. The extra + filenames given in the map will be loaded and their content + will be stored in the provided map. + + + Returns: + A ``ScriptModule`` object. + + Example: :: + + torch.jit.load('scriptmodule.pt') + + # Load ScriptModule from io.BytesIO object + with open('scriptmodule.pt', 'rb') as f: + buffer = io.BytesIO(f.read()) + + # Load all tensors to the original device + torch.jit.load(buffer) + + # Load all tensors onto CPU, using a device + torch.jit.load(buffer, map_location=torch.device('cpu')) + + # Load all tensors onto CPU, using a string + torch.jit.load(buffer, map_location='cpu') + + # Load with extra files. + files = {'metadata.json' : ''} + torch.jit.load('scriptmodule.pt', _extra_files = files) + print(files['metadata.json']) + """ + if isinstance(f, string_classes): + if not os.path.exists(f): + raise ValueError("The provided filename {} does not exist".format(f)) + if isinstance(map_location, string_classes): + map_location = torch.device(map_location) + elif not (map_location is None or + isinstance(map_location, torch.device)): + raise ValueError("map_location should be either None, string or torch.device, " + "but got type: " + str(type(map_location))) + if (str(map_location).startswith('cuda')): + validate_cuda_device(map_location) + + cu = torch._C.CompilationUnit() + if isinstance(f, str) or \ + (sys.version_info[0] == 2 and isinstance(f, unicode)) or \ + (sys.version_info[0] == 3 and isinstance(f, pathlib.Path)): + cpp_module = torch._C.import_ir_module(cu, f, map_location, _extra_files) + else: + cpp_module = torch._C.import_ir_module_from_buffer(cu, f.read(), map_location, _extra_files) + + return ScriptModule(_cpp_module=cpp_module)
    + + +
    [docs]def save(m, f, _extra_files=DEFAULT_EXTRA_FILES_MAP): + """ + Save an offline version of this module for use in a separate process. The saved + module serializes all of the methods, submodules, parameters, and attributes of this + module. It can be loaded into the C++ API using ``torch::jit::load(filename)`` or into the Python + API with :func:`load <torch.jit.load>`. + + To be able to save a module, it must not make any calls to native Python functions. + This means that all submodules must be subclasses of ``torch.jit.ScriptModule`` as well. + + .. DANGER:: + All modules, no matter their device, are always loaded onto the CPU during loading. + This is different from :func:`load <torch.jit.load>`'s semantics and may change in the future. + + Arguments: + m: a ScriptModule to save + f: a file-like object (has to implement write and flush) or a string + containing a file name + _extra_files: Map from filename to contents which will be stored as part of 'f' + + .. warning:: + If you are using Python 2, ``torch.save`` does NOT support ``StringIO.StringIO`` + as a valid file-like object. This is because the write method should return + the number of bytes written; ``StringIO.write()`` does not do this. + + Please use something like ``io.BytesIO`` instead. + + Example: :: + + import torch + import io + + + class MyModule(torch.nn.Module): + def forward(self, x): + return x + 10 + + m = torch.jit.script(MyModule()) + + # Save to file + torch.jit.save(m, 'scriptmodule.pt') + + # Save to io.BytesIO buffer + buffer = io.BytesIO() + torch.jit.save(m, buffer) + + # Save with extra files + extra_files = torch._C.ExtraFilesMap() + extra_files['foo.txt'] = 'bar' + torch.jit.save(m, 'scriptmodule.pt', _extra_files=extra_files) + """ + if isinstance(f, str) or \ + (sys.version_info[0] == 2 and isinstance(f, unicode)) or \ + (sys.version_info[0] == 3 and isinstance(f, pathlib.Path)): + m.save(f, _extra_files=_extra_files) + else: + ret = m.save_to_buffer(_extra_files=_extra_files) + f.write(ret)
+ + +def get_trace_graph(f, args=(), kwargs=None, _force_outplace=False, return_inputs=False): + """ + Trace a function or model, returning a tuple consisting of both the + *trace* of an execution and the original return value. If ``return_inputs`` + is true, the trace inputs are also returned as part of the tuple. + + Tracing is guaranteed not to change the semantics of the function/module + that is traced. + + Arguments: + f (torch.nn.Module or function): the function or module + to be traced. + args (tuple or Tensor): the positional arguments to pass to the + function/module to be traced. A non-tuple is assumed to + be a single positional argument to be passed to the model. + kwargs (dict): the keyword arguments to pass to the function/module + to be traced. + + Example: Trace a cell. + + >>> trace, out = jit.trace(nn.LSTMCell(), (input, hidden)) + >>> print(trace) + """ + if kwargs is None: + kwargs = {} + if not isinstance(args, tuple): + args = (args,) + return LegacyTracedModule(f, _force_outplace, return_inputs)(*args, **kwargs) + + +def _unique_state_dict(module, keep_vars=False): + # since Parameter.data always creates a new torch.Tensor instance, + # id(v) doesn't work with it. So we always get the Parameter or Buffer + # as values, and deduplicate the params using Parameters and Buffers + state_dict = module.state_dict(keep_vars=True) + filtered_dict = type(state_dict)() + seen_ids = set() + for k, v in state_dict.items(): + if id(v) in seen_ids: + continue + seen_ids.add(id(v)) + if keep_vars: + filtered_dict[k] = v + else: + filtered_dict[k] = v.data + return filtered_dict + + +def _create_interpreter_name_lookup_fn(frames_up=1): + def _get_interpreter_name_for_var(var): + frame = inspect.currentframe() + i = 0 + while i < frames_up + 1: + frame = frame.f_back + i += 1 + + f_locals = frame.f_locals + f_globals = frame.f_globals + + for k, v in f_locals.items(): + if isinstance(v, torch.Tensor) and var is v: + return k if k != 'self' else '' + for k, v in f_globals.items(): + if isinstance(v, torch.Tensor) and var is v: + return k if k != 'self' else '' + return '' + return _get_interpreter_name_for_var + + +class LegacyTracedModule(Module): + def __init__(self, inner, force_outplace=False, return_inputs=False): + super(LegacyTracedModule, self).__init__() + # inner may be a Module, or it may be an arbitrary callable + # If it's a Module, we get its parameters automatically, which lets + # us avoid special-casing functions versus modules. + self.inner = inner + self._force_outplace = force_outplace + self._return_inputs = return_inputs + + def forward(self, *args): + in_vars, in_desc = _flatten(args) + # NOTE: use full state, because we need it for BatchNorm export + # This differs from the compiler path, which doesn't support it at the moment.
+ module_state = list(_unique_state_dict(self, keep_vars=True).values()) + try: + trace, all_trace_inputs = torch._C._tracer_enter(*(in_vars + module_state)) + except Exception as e: + torch._C._tracer_abandon() + raise e + ret_inputs = tuple(x.clone() for x in all_trace_inputs) + torch._C._tracer_set_force_outplace(self._force_outplace) + torch._C._tracer_set_get_unique_name_fn(_create_interpreter_name_lookup_fn()) + try: + trace_inputs = _unflatten(all_trace_inputs[:len(in_vars)], in_desc) + out = self.inner(*trace_inputs) + out_vars, _ = _flatten(out) + torch._C._tracer_exit(tuple(out_vars)) + except Exception: + torch._C._tracer_abandon() + raise + if self._return_inputs: + return trace, out, ret_inputs + else: + return trace, out + + +def _clone_inputs(args): + def clone_input(a): + if a is None: + return None + elif isinstance(a, torch.Tensor): + # TODO: figure out one liner to .clone() and set requires_grad + v = Variable(a.data.clone(), requires_grad=a.requires_grad) + if a.grad is not None: + v.grad = clone_input(v.grad) + return v + else: + return a.clone() + return function._nested_map(lambda x: isinstance(x, torch.Tensor), + clone_input, condition_msg="tensors")(args) + + +# This is purely for developer debugging. We are not going to advertise it. +_JIT_DUMP = os.environ.get('PYTORCH_JIT_DUMP', False) +_JIT_TIME = os.environ.get('PYTORCH_JIT_TIME', False) # CUDA-only timing +_JIT_DISABLE = os.environ.get('PYTORCH_JIT_DISABLE', False) +_JIT_STATS = os.environ.get('PYTORCH_JIT_STATS', False) + + +def _dump_trace(trace_name, pass_name, input_key, trace): + if not _JIT_DUMP: + return + + import torch.contrib._graph_vis as graph_vis + + filename = "{}_{}".format(trace_name, pass_name) + # TODO: Also paste out the backtrace when the trace was compiled + # (and maybe also when it was run?) + with open(filename + ".ir", "w") as f: + f.write("Input key: {}\n\n{}".format(input_key, str(trace))) + graph_vis.write(trace.graph(), filename + ".html") + + +@contextlib.contextmanager +def _time(trace_name, name, time=True): + if (not _JIT_TIME and not time) or not torch.cuda.is_available(): + yield + return + stream = torch.cuda.current_stream() + start = torch.cuda.Event(enable_timing=True) + end = torch.cuda.Event(enable_timing=True) + stream.record_event(start) + try: + yield + finally: + stream.record_event(end) + end.synchronize() + print("{} {} time: {} ms".format(trace_name, name, start.elapsed_time(end))) + + +def verify(model, args, loss_fn=torch.sum, devices=None): + """ + Verify that a JIT compiled model has the same behavior as its uncompiled + version along with its backwards pass. If your model returns multiple + outputs, you must also specify a `loss_fn` to produce a loss for which + the backwards will be computed. + + This function has side-effects (e.g., it executes your model / saves and loads + parameters), so don't expect the model to come out exactly the same as what + you passed in. + + Arguments: + model (compiled torch.nn.Module or function): the module/function to be + verified. The module/function definition MUST have been decorated with + `@torch.jit.compile`. + args (tuple or Tensor): the positional arguments to pass to the + compiled function/module to be verified. A non-tuple is assumed to + be a single positional argument to be passed to the model. + loss_fn (function, optional): the loss function to be applied to + the output of the model, before backwards is invoked. 
By default, + we assume that a model returns a single result, and we apply :func:`torch.sum` + to it before calling backwards; if this is inappropriate, you can pass your + own loss function. Note that if a model returns a tuple of results, + these are passed as separate positional arguments to `loss_fn`. + devices (iterable of device IDs, optional): the GPU devices which the + compiled module will be run on. This determines the RNG state we + must save when running both compiled and uncompiled versions of the model. + """ + # TODO: In principle, we track device information in our trace, so it + # should be possible to check if our execution actually obeyed the 'devices' + # the user provided. + + # TODO: Consider adding a utility function to torch.jit to test + # for this case + if not isinstance(model, torch._C.CompiledFunction): + raise TypeError("Cannot verify an uncompiled module. Add @torch.jit.compile to compile it") + is_module = isinstance(model, Module) + + if not isinstance(args, tuple): + args = (args,) + + saved_args = _clone_inputs(args) + if is_module: + saved_state = copy.deepcopy(model.state_dict()) + + def run_fwd_bwd(args, force_trace=False, assert_compiled=False): + params = list(model.parameters()) if is_module else [] + in_vars, _ = _flatten((args, params)) + # We use a special API to reset the trace and compile it from scratch. + compiled_fn = model + if force_trace: + compiled_fn.clear_cache() + if assert_compiled: + hits = compiled_fn.hits + out = model(*args) + if assert_compiled and compiled_fn.hits == hits: + raise RuntimeError("failed to use the compiled function") + if not isinstance(out, tuple): + out = (out, ) + if loss_fn == torch.sum and len(out) != 1: + raise ValueError(("Model returns {} outputs, but default loss function " + "(torch.sum) can only handle a single output").format(len(out))) + out_vars, _ = _flatten(out) + saved_outs = [v.data.clone() for v in out_vars] + loss = loss_fn(*out) + grads = torch.autograd.grad([loss], in_vars) + # TODO: I'm not sure if the clone here is necessary but it is safer + saved_grads = [v.data.clone() for v in grads] + return (saved_outs, saved_grads) + + with torch.random.fork_rng(devices, _caller="torch.jit.verify"): + uncompiled_outs, uncompiled_grads = run_fwd_bwd(args, force_trace=True) + assert model.has_trace_for(*args) + + if is_module: + model.load_state_dict(saved_state) + compiled_outs, compiled_grads = run_fwd_bwd(args, assert_compiled=True) + + _verify_equal(uncompiled_outs, compiled_outs) + _verify_equal(uncompiled_grads, compiled_grads) + + +def _verify_equal(xs, ys): + for x, y in zip(xs, ys): + if x.sub(y).abs().max() > 1e-6: + raise RuntimeError("JIT and real computation mismatch") + + +def indent(s): + return '\n'.join(['\t' + line for line in s.splitlines()]) + + +class TracingCheckError(Exception): + def __init__(self, graph_diff_error, tensor_compare_error, extra_msg=None): + self.message = 'Tracing failed sanity checks!\n' + if extra_msg is not None: + self.message += extra_msg + '\n' + if graph_diff_error is not None: + self.message += 'ERROR: Graphs differed across invocations!\n' + self.message += indent(graph_diff_error) + '\n' + if tensor_compare_error is not None: + self.message += 'ERROR: Tensor-valued Constant nodes differed in value ' \ + 'across invocations.
This often indicates that the tracer has' \ + ' encountered untraceable code.\n' + self.message += indent(tensor_compare_error) + '\n' + super(TracingCheckError, self).__init__(self.message) + + +# Check the traced module against a set of user-provided validation inputs +@torch.no_grad() +def _check_trace(check_inputs, func, traced_func, check_tolerance, + force_outplace, is_trace_module, _module_class): + # Note: tracing is independent of optimizations, which consume the trace + for inputs in check_inputs: + + if isinstance(inputs, torch.Tensor): + inputs = (inputs,) + + if is_trace_module: + copied_dict = {} + for name, data in inputs.items(): + copied_dict[name] = _clone_inputs(data) + check_mod = torch.jit.trace_module( + func.__self__ if hasattr(func, '__self__') else func, + copied_dict, + check_trace=False, + _force_outplace=force_outplace, + _module_class=_module_class, + _compilation_unit=torch._C.CompilationUnit(), + ) + check_mod_func = check_mod._c._get_method(traced_func.name) + inputs = inputs[traced_func.name] + if isinstance(inputs, (torch.Tensor, dict)): + inputs = (inputs,) + else: + check_mod = torch.jit.trace( + func, + _clone_inputs(inputs), + check_trace=False, + _force_outplace=force_outplace, + _module_class=_module_class, + ) + check_mod_func = check_mod + + def graph_diagnostic_info(): + mod_canonicalized = torch._C._jit_pass_canonicalize(traced_func.graph) + torch._C._jit_pass_erase_shape_information(mod_canonicalized) + check_canonicalized = torch._C._jit_pass_canonicalize(check_mod_func.graph) + torch._C._jit_pass_erase_shape_information(check_canonicalized) + + graph_diff_errors = None + if str(mod_canonicalized) != str(check_canonicalized): + import difflib + graph_diff = difflib.ndiff(str(mod_canonicalized).splitlines(True), + str(check_canonicalized).splitlines(True)) + graph_diff_errors = 'Graph diff:\n' + indent(''.join(graph_diff)) + '\n' + + for n_mod, n_check in zip(mod_canonicalized.nodes(), check_canonicalized.nodes()): + if str(n_mod) != str(n_check): + graph_diff_errors += 'First diverging operator:\n' + node_diff = difflib.ndiff(str(n_mod).splitlines(True), + str(n_check).splitlines(True)) + source_printout = 'Node diff:\n' + indent(''.join(node_diff)) + '\n' + mod_stack = n_mod.sourceRange() + if mod_stack: + source_printout += 'Trace source location:\n' + indent(mod_stack) + '\n' + check_stack = n_check.sourceRange() + if check_stack: + source_printout += 'Check source location:\n' + indent(check_stack) + '\n' + graph_diff_errors += source_printout + + break # For now, only print out the first pair of nodes that diverges + + tensor_compare_errors = None + # Check Tensor-valued constant nodes + for n_mod, n_check in zip(mod_canonicalized.nodes(), check_canonicalized.nodes()): + if n_mod.kind() != n_check.kind(): + break # Graphs have already diverged + + if n_mod.kind() == 'prim::Constant' and not (n_mod.mustBeNone() or n_check.mustBeNone()): + if n_mod.kindOf('value') != 't' or n_check.kindOf('value') != 't': + continue + + mod_tensor_val = n_mod.t('value') + check_tensor_val = n_check.t('value') + + try: + torch.testing.assert_allclose(mod_tensor_val, check_tensor_val) + except (RuntimeError, AssertionError) as e: + if tensor_compare_errors is None: + tensor_compare_errors = '' + tensor_compare_errors += 'Node:\n' + indent(str(n_mod)) + '\n' + compare_stack = n_mod.sourceRange() + if compare_stack: + tensor_compare_errors += 'Source Location:\n' + indent(compare_stack) + '\n' + tensor_compare_errors += 'Comparison exception: ' + indent(str(e)) 
+ + break  # For now, only print the first diverging pair + + return graph_diff_errors, tensor_compare_errors + + def wrap_retval(x): + return x if isinstance(x, tuple) else (x,) + + def run_mod_and_filter_tensor_outputs(mod, inputs, running_what): + try: + outs = wrap_retval(mod(*_clone_inputs(inputs))) + outs = [out for out in outs if isinstance(out, torch.Tensor)] + return outs + except Exception as e: + raise TracingCheckError(*graph_diagnostic_info(), + extra_msg='Encountered an exception while running the ' + running_what + + ' with test inputs.\nException:\n' + indent(str(e))) + + has_warned = [False] + + def maybe_warn_nondeterministic(): + if has_warned[0]: + return + has_warned[0] = True + nondeterm_ops = [op for op in traced_func.graph.nodes() if op.isNondeterministic()] + if len(nondeterm_ops) > 0: + nondeterministic_ops_warning = "Trace had nondeterministic nodes. " + nondeterministic_ops_warning += "Did you forget to call .eval() on your model? Nodes:\n" + nondeterministic_ops_warning += "\n".join([indent(str(op)) for op in nondeterm_ops][:20]) + nondeterministic_ops_warning += "\nThis may cause errors in trace checking. To disable trace checking,"\ + " pass check_trace=False to torch.jit.trace()" + warnings.warn(nondeterministic_ops_warning, category=TracerWarning, stacklevel=5) + + def compare_outputs(original, reference, match_what): + all_ok = True + for i, (orig, ref) in enumerate(zip(original, reference)): + try: + torch.testing.assert_allclose(orig.double(), ref.double(), rtol=check_tolerance, + atol=torch.testing._get_default_tolerance(orig, ref)[1]) + except AssertionError as e: + maybe_warn_nondeterministic() + warnings.warn('Output nr ' + str(i + 1) + '. of the traced function does not match ' + 'the corresponding output of the ' + match_what + '. Detailed error:\n' + str(e), + category=TracerWarning, stacklevel=4) + all_ok = False + + return all_ok + + traced_outs = run_mod_and_filter_tensor_outputs(traced_func, inputs, 'trace') + fn_outs = run_mod_and_filter_tensor_outputs(func, inputs, 'Python function') + if compare_outputs(traced_outs, fn_outs, 'Python function'): + check_outs = run_mod_and_filter_tensor_outputs(check_mod_func, inputs, 'repeated trace') + compare_outputs(traced_outs, check_outs, 'repeated trace') + + diag_info = graph_diagnostic_info() + if any(info is not None for info in diag_info): + raise TracingCheckError(*diag_info) + + +class TracerWarning(Warning): + @staticmethod + def ignore_lib_warnings(): + # We ignore warnings from all submodules excluding the JIT, because we need them e.g. for _check_trace + warnings.filterwarnings('ignore', category=TracerWarning, module='torch.(?!jit)') + + +# We ignore the tracer warnings coming from inside the library, because all our shape +# checks in nn will trigger them. +TracerWarning.ignore_lib_warnings() +torch._C._tracer_warn_use_python() + + +def make_tuple(example_inputs): + if isinstance(example_inputs, (torch.Tensor, dict)): + return (example_inputs,) + # done primarily so that weird iterables fail here and not pybind11 code + if not isinstance(example_inputs, tuple): + return tuple(example_inputs) + return example_inputs + + +def make_module(mod, _module_class, _compilation_unit): + if _module_class is None: + _module_class = TopLevelTracedModule + return _module_class(mod, _compilation_unit=_compilation_unit) + +def wrap_check_inputs(check_inputs): + if check_inputs is None: + return None + + return [{'forward' : c} for c in check_inputs] +
+def trace(func,
+          example_inputs,
+          optimize=None,
+          check_trace=True,
+          check_inputs=None,
+          check_tolerance=1e-5,
+          _force_outplace=False,
+          _module_class=None,
+          _compilation_unit=_python_cu):
+    """
+    Trace a function and return an executable ``ScriptModule`` or ``torch._C.Function``
+    that will be optimized using just-in-time compilation.
+
+    .. warning::
+
+        Tracing only correctly records functions and modules which are not data
+        dependent (e.g., do not have conditionals on data in tensors) and do not have
+        any untracked external dependencies (e.g., perform input/output or
+        access global variables). If you trace such models, you may silently get
+        incorrect results on subsequent invocations of the model. The tracer
+        will try to emit warnings when doing something that may cause an
+        incorrect trace to be produced.
+
+    Arguments:
+        func (callable or torch.nn.Module): a Python function or ``torch.nn.Module``
+            that will be run with ``example_inputs``. Arguments and return values
+            of ``func`` must be tensors or (possibly nested) tuples that contain tensors.
+        example_inputs (tuple): a tuple of example inputs that will be passed to the function
+            while tracing. The resulting trace can be run with
+            inputs of different types and shapes assuming the traced operations
+            support those types and shapes. ``example_inputs`` may also be a single
+            Tensor, in which case it is automatically wrapped in a tuple.
+
+    Keyword arguments:
+        check_trace (bool, optional): check if the same inputs run through
+            traced code produce the same outputs. Default: ``True``. You might want
+            to disable this if, for example, your network contains non-
+            deterministic ops or if you are sure that the network is correct despite
+            a checker failure.
+
+        check_inputs (list of tuples, optional): A list of tuples of input arguments that should be used
+            to check the trace against what is expected. Each tuple
+            is equivalent to a set of input arguments that would
+            be specified in ``example_inputs``. For best results, pass in a
+            set of checking inputs representative of the space of
+            shapes and types of inputs you expect the network to see.
+            If not specified, the original ``example_inputs`` are used for checking.
+        check_tolerance (float, optional): Floating-point comparison tolerance to use in the checker procedure.
+            This can be used to relax the checker strictness in the event that
+            results diverge numerically for a known reason, such as operator fusion.
+
+    Returns:
+        If ``func`` is an ``nn.Module`` or the ``forward()`` method of an ``nn.Module``, ``trace`` returns
+        a ``ScriptModule`` object with a single ``forward()`` method containing the traced code.
+        The returned ``ScriptModule`` will have the same set of sub-modules and parameters as the
+        original ``nn.Module``.
+        If ``func`` is a standalone function, ``trace`` returns a ``torch._C.Function``.
+
+    Example::
+
+        class Net(nn.Module):
+            def __init__(self):
+                super(Net, self).__init__()
+                self.conv = nn.Conv2d(1, 1, 3)
+
+            def forward(self, x):
+                return self.conv(x)
+
+            def weighted_kernel_sum(self, weight):
+                return weight * self.conv.weight
+
+        example_weight = torch.rand(1, 1, 3, 3)
+        example_forward_input = torch.rand(1, 1, 3, 3)
+        n = Net()
+        # the following two calls are equivalent
+        module = torch.jit.trace(n, example_forward_input)
+        module = torch.jit.trace(n.forward, example_forward_input)
+
+    """
+    if not _enabled:
+        return func
+    if optimize is not None:
+        warnings.warn("`optimize` is deprecated and has no effect. "
+                      "Use `with torch.jit.optimized_execution()` instead")
+
+    if isinstance(func, torch.jit.ScriptModule):
+        # The forward method on a ScriptModule is already defined, so tracing
+        # it again would result in an error.
+        warnings.warn('The input to trace is already a ScriptModule, tracing it is a no-op. Returning the object as is.')
+        return func
+
+    if isinstance(func, torch.nn.Module):
+        return trace_module(func, {'forward': example_inputs}, None,
+                            check_trace, wrap_check_inputs(check_inputs),
+                            check_tolerance, _force_outplace, _module_class)
+
+    if (hasattr(func, '__self__') and isinstance(func.__self__, torch.nn.Module) and
+            func.__name__ == 'forward'):
+
+        return trace_module(func.__self__, {'forward': example_inputs}, None,
+                            check_trace, wrap_check_inputs(check_inputs),
+                            check_tolerance, _force_outplace, _module_class)
+
+    # Special case for common case of passing a single Tensor
+    if isinstance(example_inputs, (torch.Tensor, dict)):
+        example_inputs = (example_inputs,)
+    # done primarily so that weird iterables fail here and not pybind11 code
+    elif not isinstance(example_inputs, tuple):
+        example_inputs = tuple(example_inputs)
+
+    var_lookup_fn = _create_interpreter_name_lookup_fn(0)
+
+    if (hasattr(func, '__self__') and isinstance(func.__self__, torch.nn.Module)):
+        raise AttributeError("trace doesn't support compiling individual module's functions.\n"
+                             "Please use trace_module")
+
+    name = _qualified_name(func)
+    if name == '<lambda>':
+        name = '_lambda'  # make name a valid identifier
+    traced = torch._C._create_function_from_trace(name, func, example_inputs,
+                                                  var_lookup_fn,
+                                                  _force_outplace)
+
+    # Check the trace against new traces created from user-specified inputs
+    if check_trace:
+        if check_inputs is not None:
+            _check_trace(check_inputs, func, traced, check_tolerance, _force_outplace, False, _module_class)
+        else:
+            _check_trace([example_inputs], func, traced, check_tolerance, _force_outplace, False, _module_class)
+
+    return traced
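A minimal sketch of the checking machinery above; ``loop_sum`` is an illustrative function whose trace bakes in the loop length, so a ``check_inputs`` entry with a different first dimension should trigger a ``TracerWarning``::

    import torch

    def loop_sum(x):
        out = torch.zeros(x.size(1))
        for i in range(x.size(0)):   # unrolled at trace time
            out = out + x[i]
        return out

    # The checker re-runs the Python function on check_inputs and
    # compares the results against the traced graph's outputs.
    traced = torch.jit.trace(loop_sum, torch.rand(3, 4),
                             check_inputs=[(torch.rand(5, 4),)])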
+
+
+def trace_module(mod,
+                 inputs,
+                 optimize=None,
+                 check_trace=True,
+                 check_inputs=None,
+                 check_tolerance=1e-5,
+                 _force_outplace=False,
+                 _module_class=None,
+                 _compilation_unit=_python_cu):
+    """
+    Trace a module and return an executable ``ScriptModule`` that will be optimized
+    using just-in-time compilation.
+
+    .. warning::
+
+        Tracing only correctly records functions and modules which are not data
+        dependent (e.g., do not have conditionals on data in tensors) and do not have
+        any untracked external dependencies (e.g., perform input/output or
+        access global variables). If you trace such models, you may silently get
+        incorrect results on subsequent invocations of the model. The tracer
+        will try to emit warnings when doing something that may cause an
+        incorrect trace to be produced.
+
+    Arguments:
+        mod (torch.nn.Module): a ``torch.nn.Module`` containing methods whose names are
+            specified in ``inputs``. The given methods will be compiled
+            as a part of a single `ScriptModule`.
+        inputs (dict): a dict containing sample inputs indexed by method names in ``mod``.
+            The inputs will be passed while tracing to the methods whose names correspond
+            to the inputs' keys, e.g.
+            ``{'forward' : example_forward_input, 'method2': example_method2_input}``.
+
+    Keyword arguments:
+        check_trace (bool, optional): check if the same inputs run through
+            traced code produce the same outputs. Default: ``True``. You might want
+            to disable this if, for example, your network contains non-
+            deterministic ops or if you are sure that the network is correct despite
+            a checker failure.
+
+        check_inputs (list of dicts, optional): A list of dicts of input arguments that should be used
+            to check the trace against what is expected. Each dict
+            is equivalent to a set of input arguments that would
+            be specified in ``inputs``. For best results, pass in a
+            set of checking inputs representative of the space of
+            shapes and types of inputs you expect the network to see.
+            If not specified, the original ``inputs`` are used for checking.
+        check_tolerance (float, optional): Floating-point comparison tolerance to use in the checker procedure.
+            This can be used to relax the checker strictness in the event that
+            results diverge numerically for a known reason, such as operator fusion.
+
+    Returns:
+        A ``ScriptModule`` object with a single ``forward()`` method containing the traced code.
+        When ``mod`` is a ``torch.nn.Module``, the returned ``ScriptModule`` will have the same set of
+        sub-modules and parameters as ``mod``.
+
+    Example::
+
+        class Net(nn.Module):
+            def __init__(self):
+                super(Net, self).__init__()
+                self.conv = nn.Conv2d(1, 1, 3)
+
+            def forward(self, x):
+                return self.conv(x)
+
+            def weighted_kernel_sum(self, weight):
+                return weight * self.conv.weight
+
+        example_weight = torch.rand(1, 1, 3, 3)
+        example_forward_input = torch.rand(1, 1, 3, 3)
+        inputs = {'forward' : example_forward_input, 'weighted_kernel_sum' : example_weight}
+        n = Net()
+        module = torch.jit.trace_module(n, inputs)
+
+    """
+    if not _enabled:
+        return mod
+    if optimize is not None:
+        warnings.warn("`optimize` is deprecated and has no effect. "
+                      "Use `with torch.jit.optimized_execution()` instead")
+
+    var_lookup_fn = _create_interpreter_name_lookup_fn(0)
+
+    if not isinstance(mod, torch.nn.Module):
+        raise AttributeError("expected torch.nn.Module as the first argument")
+
+    if not isinstance(inputs, dict):
+        raise AttributeError("expected a dictionary of (method_name, input) pairs")
+
+    module = make_module(mod, _module_class, _compilation_unit)
+
+    for method_name, example_inputs in inputs.items():
+        # this is needed since Module.__call__ sets up some extra tracing
+        func = mod if method_name == "forward" else getattr(mod, method_name)
+        example_inputs = make_tuple(example_inputs)
+        module._c._create_method_from_trace(method_name, func, example_inputs, var_lookup_fn, _force_outplace)
+        check_trace_method = module._c._get_method(method_name)
+
+        # Check the trace against new traces created from user-specified inputs
+        if check_trace:
+            if check_inputs is not None:
+                _check_trace(check_inputs, func, check_trace_method,
+                             check_tolerance, _force_outplace, True, _module_class)
+            else:
+                _check_trace([inputs], func, check_trace_method,
+                             check_tolerance, _force_outplace, True, _module_class)
+
+    return module
+
+
+class CompilationUnit(object):
+    def __init__(self, lang=None, _frames_up=0):
+        self._c = torch._C.CompilationUnit()
+        if lang is not None:
+            self.define(lang, _frames_up=_frames_up + 1)
+
+    def define(self, lang, rcb=None, _frames_up=0):
+        if not rcb:
+            rcb = _jit_internal.createResolutionCallback(_frames_up + 1)
+        self._c.define(lang, rcb)
+
+    def __getattr__(self, attr):
+        r = self._c.find_function(attr)
+        if r is None:
+            raise AttributeError("'CompilationUnit' has no attribute '{}'".format(attr))
+        return r
+
+    def _import(self, src, constants, op_version_set=1):
+        """ test import logic for single function, use only for testing """
+        src = "op_version_set = {}\n{}".format(op_version_set, src)
+        torch._C._jit_import_functions(self._c, src, constants)
+        return self
+
+
+def _try_get_dispatched_fn(fn):
+    if not callable(fn):
+        return None
+    return _jit_internal.boolean_dispatched.get(fn)
+
+
+def _try_get_overloaded_fn(mod, field):
+    return mod._overloads.get(field, None) if isinstance(mod, ScriptModule) else None
+
+
+class ScriptWarning(Warning):
+    pass
+
+
+def _create_constant_iterable_module(module):
+    modules = OrderedDict()
+
+    for key, submodule in module._modules.items():
+        if isinstance(submodule, (ModuleList, Sequential)):
+            # Make each item in the module a constant
+            modules[key] = _create_constant_iterable_module(submodule)
+        else:
+            modules[key] = _convert_to_script_module(submodule)
+
+    if isinstance(module, Sequential):
+        return _ConstSequential(Sequential(modules))
+    elif isinstance(module, ModuleList):
+        return _ConstModuleList(modules)
+    else:
+        raise RuntimeError("Only nn.ModuleList and nn.Sequential can be made "
+                           "into constant modules, found {}".format(module))
+
+
+def _make_strong_submodule(field, module, parent):
+    if field not in parent._modules:
+        # It's not a submodule, don't do anything
+        return None
+
+    # Convert the module to a ScriptModule
+    new_strong_submodule = _convert_to_script_module(module)
+
+    # Install the ScriptModule on the python side
+    parent._modules._python_modules[field] = new_strong_submodule
+
+    return new_strong_submodule
+
+
+def _try_compile_fn(fn, loc):
+    if _jit_internal.is_ignored_fn(fn):
+        # Don't do anything for @ignore'd functions
+        return None
+
+    if isinstance(fn, torch.nn.Module):
+        # Since modules are callable pybind recognizes them as functions, but
+        # don't do anything for them
+        return None
+
+    if not inspect.isfunction(fn) and not inspect.ismethod(fn):
+        raise RuntimeError("`{}` is not a function. Recursive scripting only supports "
+                           "Python functions or methods currently.\n"
+                           "Consider manually annotating `{}` with @torch.jit.script.".format(fn, fn))
+
+    # We don't have the actual scope where the function was defined, but we can
+    # extract the necessary info from the closed over variables on the function
+    # object
+    rcb = _jit_internal.createResolutionCallbackFromClosure(fn)
+    return torch.jit.script(fn, _rcb=rcb)
+
+
+@contextlib.contextmanager
+def _disable_emit_hooks():
+    hooks = torch._C._jit_get_emit_hooks()
+    torch._C._jit_set_emit_hooks(None, None)
+    # Restore the hooks even if the body raises, so a compilation error does
+    # not leave the emit hooks permanently disabled.
+    try:
+        yield
+    finally:
+        torch._C._jit_set_emit_hooks(hooks[0], hooks[1])
+
+
+def _create_method_from_fn(module, fn):
+    if _jit_internal.is_ignored_fn(fn):
+        return None
+    if not inspect.ismethod(fn):
+        return None
+    stub = script_method(fn, _jit_internal.createResolutionCallbackFromClosure(fn))
+    with _disable_emit_hooks():
+        # We don't want to call the hooks here since the graph that is calling
+        # this function is not yet complete
+        _create_methods_from_stubs(module, (stub,))
+    return stub
+
+
+# ScriptClasses must be new-style classes because we construct them using their
+# __new__ method.
+def _is_new_style_class(cls):
+    if hasattr(cls, '__class__'):
+        return ('__dict__' in dir(cls) or hasattr(cls, '__slots__'))
+
+
+def whichmodule(obj):
+    """Find the module an object belongs to."""
+    module_name = getattr(obj, '__module__', None)
+    # Protect the iteration by using a list copy of sys.modules against dynamic
+    # modules that trigger imports of other modules upon calls to getattr.
+    for name, module in list(sys.modules.items()):
+        if name == '__main__' or module is None:
+            continue
+        try:
+            if _getattribute(module, name)[0] is obj:
+                return module_name
+        except AttributeError:
+            pass
+    return '__main__'
+
+
+def _compile_and_register_class(obj, rcb, qualified_name):
+    ast = get_jit_class_def(obj, obj.__name__)
+    _jit_script_class_compile(qualified_name, ast, rcb)
+    _add_script_class(obj, qualified_name)
+
+
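A minimal sketch of the ``CompilationUnit`` defined above (the function name ``add_one`` is illustrative; the TorchScript frontend dedents the source string)::

    import torch

    cu = torch.jit.CompilationUnit('''
    def add_one(x):
        return x + 1
    ''')
    print(cu.add_one(torch.zeros(2)))  # tensor([1., 1.])

And, since ``_compile_and_register_class`` above is the hook reached when ``script`` (defined next) is applied to a class, a companion sketch of a TorchScript class (``Pair`` is illustrative; unannotated ``__init__`` parameters default to ``Tensor``)::

    import torch

    @torch.jit.script
    class Pair(object):
        def __init__(self, first, second):
            self.first = first
            self.second = second

    @torch.jit.script
    def make_and_sum(x, y):
        p = Pair(x, y)  # the class is constructed inside TorchScript
        return p.first + p.second

    print(make_and_sum(torch.ones(2), torch.ones(2)))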
+def script(obj, optimize=None, _frames_up=0, _rcb=None):
+    r"""
+    Scripting a function or ``nn.Module`` will inspect the source code, compile
+    it as TorchScript code using the TorchScript compiler, and return a ``ScriptModule`` or
+    ``torch._C.Function``.
+
+    **Scripting a function**
+    The ``@torch.jit.script`` decorator will construct a ``torch._C.Function``.
+
+    Example (scripting a function)::
+
+        import torch
+        @torch.jit.script
+        def foo(x, y):
+            if x.max() > y.max():
+                r = x
+            else:
+                r = y
+            return r
+
+    **Scripting an nn.Module**
+    Scripting an ``nn.Module`` by default will compile the ``forward`` method and recursively
+    compile any methods, submodules, and functions called by ``forward``. If an ``nn.Module`` only uses
+    features supported in TorchScript, no changes to the original module code should be necessary.
+
+    Example (scripting a simple module with a Parameter)::
+
+        import torch
+
+        class MyModule(torch.nn.Module):
+            def __init__(self, N, M):
+                super(MyModule, self).__init__()
+                # This parameter will be copied to the new ScriptModule
+                self.weight = torch.nn.Parameter(torch.rand(N, M))
+
+                # When this submodule is used, it will be compiled
+                self.linear = torch.nn.Linear(N, M)
+
+            def forward(self, input):
+                output = self.weight.mv(input)
+
+                # This calls the `forward` method of the `nn.Linear` module, which will
+                # cause the `self.linear` submodule to be compiled to a `ScriptModule` here
+                output = self.linear(output)
+                return output
+
+        scripted_module = torch.jit.script(MyModule())
+
+    Example (scripting a module with traced submodules)::
+
+        import torch
+        import torch.nn as nn
+        import torch.nn.functional as F
+
+        class MyModule(nn.Module):
+            def __init__(self):
+                super(MyModule, self).__init__()
+                # torch.jit.trace returns ScriptModules, so conv1 and conv2
+                # are traced, compiled submodules
+                self.conv1 = torch.jit.trace(nn.Conv2d(1, 20, 5), torch.rand(1, 1, 16, 16))
+                self.conv2 = torch.jit.trace(nn.Conv2d(20, 20, 5), torch.rand(1, 20, 16, 16))
+
+            def forward(self, input):
+                input = F.relu(self.conv1(input))
+                input = F.relu(self.conv2(input))
+                return input
+
+        scripted_module = torch.jit.script(MyModule())
+
+    To compile a method other than ``forward`` (and recursively compile anything it calls), add
+    the ``@torch.jit.export`` decorator to the method.
+    """
+    if not _enabled:
+        return obj
+    if optimize is not None:
+        warnings.warn("`optimize` is deprecated and has no effect. "
+                      "Use `with torch.jit.optimized_execution()` instead")
+
+    if isinstance(obj, torch.nn.Module):
+        return _convert_to_script_module(obj)
+
+    qualified_name = _qualified_name(obj)
+    if inspect.isclass(obj):
+        # If this type is a `nn.Module` subclass, they probably meant to pass
+        # an instance instead of a Module
+        if issubclass(obj, torch.nn.Module):
+            raise RuntimeError("Type '{}' cannot be compiled since it inherits"
+                               " from nn.Module,"
+                               " pass an instance instead".format(obj))
+
+        if not _is_new_style_class(obj):
+            raise RuntimeError("TorchScript classes must be new-style classes. "
+                               "Please inherit from 'object'")
+        if _rcb is None:
+            _rcb = _jit_internal.createResolutionCallback(_frames_up + 1)
+        _compile_and_register_class(obj, _rcb, qualified_name)
+        return obj
+    else:
+        ast = get_jit_def(obj)
+        if _rcb is None:
+            closure_rcb = _jit_internal.createResolutionCallbackFromClosure(obj)
+            stack_rcb = _jit_internal.createResolutionCallback(_frames_up + 1)
+
+            def _rcb(name):
+                # since type comments aren't captured in the function's closures,
+                # we still need to try the rcb based on stack frames if the
+                # closure rcb fails
+                result = closure_rcb(name)
+                if result:
+                    return result
+                return stack_rcb(name)
+        fn = torch._C._jit_script_compile(qualified_name, ast, _rcb, get_default_args(obj))
+        # Forward docstrings
+        fn.__doc__ = obj.__doc__
+        return fn
    + + +ScriptMethodStub = namedtuple('ScriptMethodStub', ('resolution_callback', 'def_', 'original_method')) + + +def script_method(fn, _rcb=None): + if not _enabled: + return fn + # NOTE: we need to traverse two frames here because the meta-class frame + # for ScriptModule will be present, as opposed to invoking @script on a + # a function or invoking define() on a CompilationUnit. + # The stack will look like: + # + # 0. createResolutionCallback() + # 1. script_method() + # 2. ScriptModule metaclass frame + # 3. Surrounding scope + # + # createResolutionCallback internally adds 1 to get us to the scope of this + # function (the calling function). Adding 2 gets us to the proper surrounding scope. + if _rcb is None: + _rcb = _jit_internal.createResolutionCallback(frames_up=2) + ast = get_jit_def(fn, self_name="ScriptModule") + return ScriptMethodStub(_rcb, ast, fn) + + + +# These OrderedDictWrapper classes replace the actual OrderedDicts in +# module with versions that get/set properties inside of script::Module. +# This allows us to reuse most of nn.Module while still storing the +# data in C++. +# Each OrderedDict needs to support: +# x not in view +# x in view +# view[name] = ... +# view.values() +# del view[name] +# view.items() +# view.keys() +# len(view) + +class OrderedDictWrapper(object): + def __init__(self, module): + self.module = module + + def keys(self): + return [k for k, v in self.items()] + + def values(self): + return [v for k, v in self.items()] + + def __len__(self): + return len(self.values()) + + def __delitem__(self, k): + raise RuntimeError("cannot delete methods or parameters of a script module") + + def items(self): + raise NotImplementedError + + def __contains__(self, k): + raise NotImplementedError + + def __getitem__(self, k): + raise NotImplementedError + + def __setitem__(self, k, v): + raise NotImplementedError + + +class OrderedModuleDict(OrderedDictWrapper): + def __init__(self, module): + super(OrderedModuleDict, self).__init__(module) + # contains _both_ script modules and non-script python-only modules + + # because script modules are subclassed in python and the + # C++ script::Module class will not hold references to them, + # to ensure that you always get the same python value here + # we store it in the python dict as well + self._python_modules = OrderedDict() + + def items(self): + r = self._python_modules.items() + return r + + def __contains__(self, k): + return k in self._python_modules + + def __setitem__(self, k, v): + if k in self._python_modules: + raise RuntimeError("Cannot re-assign modules in a ScriptModule, " + "tried to replace existing module '{}': {}".format(k, v)) + if isinstance(v, ScriptModule): + self.module._register_module(k, v._c) + + self._python_modules[k] = v + + def __getitem__(self, k): + return self._python_modules[k] + + +class OrderedParameterDict(OrderedDictWrapper): + def __init__(self, module): + super(OrderedParameterDict, self).__init__(module) + + def items(self): + return [(name, param) for name, param in self.module._get_parameters()] + + def __setitem__(self, k, v): + self.module._register_parameter(k, v, False) + + def __contains__(self, k): + return self.module._has_parameter(k) + + def __getitem__(self, k): + if k not in self: + raise KeyError(k) + return self.module._get_parameter(k) + + +class OrderedBufferDict(OrderedDictWrapper): + def __init__(self, module): + super(OrderedBufferDict, self).__init__(module) + + def items(self): + return [(name, param) for name, _, param in + 
self.module._get_attributes() if isinstance(param, torch.Tensor)] + + def __setitem__(self, k, v): + self.module._register_buffer(k, v) + + def __contains__(self, k): + return self.module._has_buffer(k) + + def __getitem__(self, k): + if k not in self: + raise KeyError(k) + return self.module._get_buffer(k) + +# base types that can be constants +# in addition, tuples and lists of these base types are also considered constants +# If you edit this list, then you also need to edit the handlers in +# ConstantValue in jit/script/init.cpp +_constant_types = (bool, float, int, str, type(None), types.FunctionType, torch.device, torch.layout, torch.dtype) + + +def _get_valid_constant(attr, v): + if isinstance(v, _constant_types): + return v + elif isinstance(v, tuple) or isinstance(v, list): + return tuple(_get_valid_constant(attr, x) for x in v) + constants = ", ".join(typ.__name__ for typ in _constant_types) + raise TypeError(textwrap.dedent(""" + '{}' object for attribute '{}' is not a valid constant. + Valid constants are: + 1. a nn.ModuleList + 2. a value of type {{{}}} + 3. a list or tuple of (2) + """.format(type(v).__name__, attr, constants))) + + +def _create_methods_from_stubs(self, stubs): + defs = [m.def_ for m in stubs] + rcbs = [m.resolution_callback for m in stubs] + defaults = [get_default_args(m.original_method) for m in stubs] + self._c._create_methods(self, defs, rcbs, defaults) + +# For each user-defined class that subclasses ScriptModule this meta-class, +# (1) finds all the methods annotated with @script_method +# in a ScriptModule and removes them from the class attributes, and +# (2) puts a wrapper around the class's __init__ method to register +# all of the script_methods with the module after the original __init__ +# has run. This has to occur after the user-defined __init__ so that +# submodules and parameters are initialized _before_ the script compiler +# resolve references to `self.param` or `self.module`. 
+
+
+class ScriptMeta(type):
+    # this has to inherit from pybind11's metaclass otherwise we get
+    # issues because ScriptModule inherits from torch._C.ScriptModule,
+    # a pybind11 type
+    def __init__(cls, name, bases, attrs):
+        # initialize inherited properties
+        cls._methods = {}
+        cls._constants_set = set(getattr(cls, '__constants__', ()))
+        for base in reversed(bases):
+            for k, v in getattr(base, '_methods', {}).items():
+                cls._methods[k] = v
+            base_constants = getattr(base, '_constants_set', set())
+            cls._constants_set = cls._constants_set.union(base_constants)
+
+        # find all the script methods of the current class
+        for k, v in sorted(attrs.items()):
+            if isinstance(v, ScriptMethodStub):
+                delattr(cls, k)
+                cls._methods[v.original_method.__name__] = v
+
+        original_init = getattr(cls, '__init__', lambda self: None)
+        cls._overloads = dict(getattr(cls, '__overloads__', {}))
+
+        # after the user's __init__ register all the script methods
+        # with the module
+        @functools.wraps(original_init)
+        def init_then_register(self, *args, **kwargs):
+            original_init(self, *args, **kwargs)
+            if type(self) == cls:
+                # this is the init of the concrete type of self,
+                # we have already resolved all _methods
+                methods = [v for k, v in sorted(cls._methods.items())]
+                _create_methods_from_stubs(self, methods)
+
+        cls.__init__ = init_then_register
+        return super(ScriptMeta, cls).__init__(name, bases, attrs)
+
+
+if _enabled:
+
+    # this is a Python 'non-data descriptor' that causes the first access
+    # to ScriptModule's forward to look up the forward method and stash
+    # it in the object's dict. Due to the standard rules for attribute lookup,
+    # subsequent lookups will just directly return the previously looked up method.
+    # This is necessary because nn.Module defines forward as a method. If we
+    # did nothing, __getattr__ would not be called. Instead we'd get nn.Module.forward
+    # which always throws an exception.
+    class _CachedForward(object):
+        def __get__(self, obj, cls):
+            return self.__getattr__('forward')
+
+    class ScriptModule(with_metaclass(ScriptMeta, Module)):
+        r"""
+        The core data structure in TorchScript is the ``ScriptModule``. It is an
+        analogue of torch's ``nn.Module`` and represents an entire model as a tree of
+        submodules. Like normal modules, each individual module in a ``ScriptModule`` can
+        have submodules, parameters, and methods. In ``nn.Module``\s methods are implemented
+        as Python functions, but in ``ScriptModule``\s methods are implemented as
+        TorchScript functions, a statically-typed subset of Python that contains all
+        of PyTorch's built-in Tensor operations. This difference allows your
+        ``ScriptModule``'s code to run without the need for a Python interpreter.
+
+        ``ScriptModule``\s can be created in two ways:
+
+        **Tracing:**
+
+            Using ``torch.jit.trace`` and ``torch.jit.trace_module``, you can turn an existing module or Python
+            function into a TorchScript ``torch._C.Function`` or ``ScriptModule``. You must provide example inputs,
+            and we run the function, recording the operations performed on all the tensors.
+
+            * Tracing a standalone function produces a ``torch._C.Function``.
+            * Tracing the ``forward`` method of an ``nn.Module``, or an ``nn.Module`` itself,
+              produces a ``ScriptModule``. This module also contains any parameters the
+              original module had.
+
+            Example (tracing a function)::
+
+                import torch
+                def foo(x, y):
+                    return 2 * x + y
+                traced_foo = torch.jit.trace(foo, (torch.rand(3), torch.rand(3)))
+
+            .. note::
+                Tracing a standalone function will construct a ``torch._C.Function``,
+                whereas tracing an ``nn.Module``'s ``forward`` will construct a ``ScriptModule``.
+
+            Example (tracing an existing module)::
+
+                import torch
+                class Net(nn.Module):
+                    def __init__(self):
+                        super(Net, self).__init__()
+                        self.conv = nn.Conv2d(1, 1, 3)
+
+                    def forward(self, x):
+                        return self.conv(x)
+
+                    def weighted_kernel_sum(self, weight):
+                        return weight * self.conv.weight
+
+
+                n = Net()
+                example_weight = torch.rand(1, 1, 3, 3)
+                example_forward_input = torch.rand(1, 1, 3, 3)
+
+                # the three trace calls below are equivalent and construct a
+                # `ScriptModule` with a single `forward` method
+                module = torch.jit.trace(n.forward, example_forward_input)
+                module = torch.jit.trace(n, example_forward_input)
+                module = torch.jit.trace_module(n, {'forward': example_forward_input})
+
+                inputs = {'forward' : example_forward_input, 'weighted_kernel_sum' : example_weight}
+                # trace_module produces a `ScriptModule` with two methods:
+                # `forward` and `weighted_kernel_sum`
+                module = torch.jit.trace_module(n, inputs)
+
+            .. note::
+
+                * The first three trace/trace_module calls are equivalent and return a ``ScriptModule``
+                  with a single ``forward`` method.
+                * The last ``trace_module`` call produces a ``ScriptModule`` with two methods.
+
+                Tracing only records operations done when the given function is run on the given
+                tensors. Therefore, the returned ``ScriptModule`` will always run the same traced
+                graph on any input. This has some important implications when your module is
+                expected to run different sets of operations, depending on the input and/or the
+                module state. For example,
+
+                + Tracing will not record any control-flow like if-statements or loops. When
+                  this control-flow is constant across your module, this is fine and it often
+                  inlines the control-flow decisions. But sometimes the control-flow is
+                  actually part of the model itself. For instance, a recurrent network is
+                  a loop over the (possibly dynamic) length of an input sequence.
+
+                + In the returned ``ScriptModule``, operations that have different behaviors
+                  in ``training`` and ``eval`` modes will always behave as if the module were in the
+                  mode it was in during tracing, no matter which mode the ``ScriptModule``
+                  is in.
+
+                In cases like these, tracing would not be appropriate and scripting is a better
+                choice.
+
+        **Scripting:**
+
+            You can write TorchScript code directly using Python syntax. You do this
+            using the ``@torch.jit.script`` decorator for functions and modules. You can
+            also call ``torch.jit.script`` directly with the function or module you wish to
+            compile. On functions, the body of the function is compiled to TorchScript. If
+            applied to an ``nn.Module``, by default the ``forward`` method and any methods it
+            calls are compiled, and all buffers and Parameters of the original module are copied
+            to a new ``ScriptModule``. You should not need to construct a ``ScriptModule`` manually.
+            TorchScript itself is a subset of the Python language, so not all
+            features in Python work, but we provide enough functionality to compute on
+            tensors and do control-dependent operations.
+ """ + def __init__(self, optimize=None, _qualified_name=None, _compilation_unit=None, _cpp_module=None): + if _qualified_name is None: + _qualified_name = type(self).__name__ + if _compilation_unit is None: + _compilation_unit = _python_cu + if optimize is not None: + warnings.warn("`optimize` is deprecated and has no effect. Use `with torch.jit.optimized_execution() instead") + + # If we were give a _cpp_module, use that one as the backing cpp + # module instead of creating a fresh one. + if _cpp_module is not None: + self.__dict__['_c'] = _cpp_module + else: + self.__dict__['_c'] = torch._C.ScriptModule(_qualified_name, _compilation_unit, True) + + Module.__init__(self) + self._parameters = OrderedParameterDict(self._c) + self._buffers = OrderedBufferDict(self._c) + self._modules = OrderedModuleDict(self._c) + + # If we were given a _cpp_module, recursively create Python + # ScriptModules that mirror the submodule hierarchy. + # This has to go last due to quirks in module initialization. + if _cpp_module is not None: + for (name, cpp_mod) in self._c._get_modules(): + setattr(self, name, ScriptModule(_cpp_module=cpp_mod)) + + @property + def graph(self): + return self.forward.graph + + @property + def code(self): + return self.forward.code + + def save(self, *args, **kwargs): + return self._c.save(*args, **kwargs) + + def save_to_buffer(self, *args, **kwargs): + return self._c.save_to_buffer(*args, **kwargs) + + def get_debug_state(self, *args, **kwargs): + return self._c.get_debug_state() + + forward = _CachedForward() + + def __getattr__(self, attr): + if '_c' not in self.__dict__: + raise RuntimeError("ScriptModule has not been initialized, did you forget to call super's init?") + if self._c._has_attribute(attr): + return self._c._get_attribute(attr) + if self._c._has_method(attr): + if attr in self.__class__._methods: + original_method = self.__class__._methods[attr].original_method + script_method = self._c._get_method(attr) + script_method = functools.wraps(original_method)(script_method) + else: + script_method = self._c._get_method(attr) + # cache method so future calls do not go through __getattr__ + # to improve invocation performance + self.__dict__[attr] = script_method + return script_method + return Module.__getattr__(self, attr) + + def __setattr__(self, attr, value): + if attr not in self._constants_set: + if attr == 'training': + if self._c._has_attribute('training'): + self.__dict__['training'] = value + self._c._set_attribute('training', value) + return + if isinstance(value, Attribute): + the_type = torch.jit.annotations.ann_to_type(value.type) + try: + self._c._register_attribute(attr, the_type, value.value) + except RuntimeError: + raise RuntimeError("Could not register attribute '{}' of type '{}' for a value of type '{}'" + .format(attr, value.type, type(value.value))) + return + return super(ScriptModule, self).__setattr__(attr, value) + + if hasattr(self, attr): + raise RuntimeError("attempting to re-assign constant '{}' in {}".format(attr, type(self).__name__)) + + def conv_module_to_const(module_value): + if not isinstance(module_value, (ModuleList, Sequential)): + return module_value + for i in range(len(module_value)): + module_value[i] = conv_module_to_const(module_value[i]) + if isinstance(module_value, Sequential): + return _ConstSequential(module_value) + else: + return _ConstModuleList(module_value) + + if isinstance(value, (ModuleList, Sequential)): + # special case for list of modules. Modules need to be registered with their + # parent module. 
To do this, we create a ConstModuleList, which is itself a module, that + # contains each of these modules as submodules. The ConstModuleList then + # is set as an attribute of the parent module. + super(ScriptModule, self).__setattr__(attr, conv_module_to_const(value)) + else: + super(ScriptModule, self).__setattr__(attr, _get_valid_constant(attr, value)) + + def __dir__(self): + return sorted(Module.__dir__(self) + self._c._method_names()) + + def define(self, lang): + # We use frames_up=1 to get to the proper surrounding scope. The stack + # will look like: + # 0. createResolutionCallback + # 1. define() + # 2. surrounding scope. + # + # createResolutionCallback internally adds 1 to get us to our frame, then + # we add 1 to get to the proper surrounding scope. + rcb = _jit_internal.createResolutionCallback(frames_up=1) + self._c._define(self, lang, rcb) + + def copy(self): + m = ScriptModule() + + def module_lookup(names): + curr = m + for name in names: + if not hasattr(curr, name): + setattr(curr, name, ScriptModule()) + curr = getattr(curr, name) + return curr._c + self._c._copy_into(module_lookup, {}, []) + return m + + def __getstate__(self): + raise pickle.PickleError( + "ScriptModules cannot be deepcopied using copy.deepcopy or saved using torch.save. " + + "Mixed serialization of script and non-script modules is not supported. " + + "For purely script modules use my_script_module.save(<filename>) instead.") + + def graph_for(self, *args, **kwargs): + return self.forward.graph_for(*args, **kwargs) + + class WeakScriptModuleProxy(ScriptModule): + # TODO: [weak script refactor] + # WeakScriptModule proxy should be deleted since its functionality is + # subsumed by recursive scripting, and the copying code in init moved + # to a function to create a ScriptModule from an nn.Module without + # making a WeakScriptModuleProxy + """ + Copies the parameters, buffers, constants, attributes, and submodules + of an nn.Module into itself. + """ + def __init__(self, original, stubs): + # Guards behavior of __setattr__ and __getattr__ so ScriptModule + # __init__ can run correctly + self.__dict__['_initialized'] = False + super(WeakScriptModuleProxy, self).__init__(_qualified_name=_qualified_name(type(original))) + # Store a weak reference to the original module + self.__dict__["_original"] = weakref.ref(original) + + constants_set = set(getattr(original, "__constants__", [])) + self.__dict__["_constants_set"] = {} + + if not hasattr(original, '_parameters'): + raise RuntimeError("'{}' has not been initialized, did you forget to call 'super()'?" 
+ .format(type(original).__name__)) + + # Copy Parameters and Modules + for name in dir(original): + item = getattr(original, name) + if item is None and name in original._parameters: + # XXX: treat None value simply as module attributes instead of adding them to the parameter list + # TODO: need to handle this more generally when non-tensor attributes added to module + object.__setattr__(self, name, item) + elif item is self: + continue + elif isinstance(item, (Parameter, Module, Attribute)): + ScriptModule.__setattr__(self, name, item) + + # Copy buffers + for name in original._buffers: + if original._buffers[name] is None: + object.__setattr__(self, name, None) + else: + self.register_buffer(name, original._buffers[name]) + + # Constants annotated via `Final[T]` rather than being added to `__constants__` + for name, ann in getattr(original, '__annotations__', {}).items(): + if torch._jit_internal.is_final(ann): + constants_set.add(name) + + # Copy constants + self.__dict__["_constants_set"] = constants_set + for name in self.__dict__["_constants_set"]: + if hasattr(original, name): + if (name in original._parameters or name in original._buffers) and item is not None: + # for 'None' parameters/buffers, don't actually add their values if it exists + continue + ScriptModule.__setattr__(self, name, getattr(original, name)) + + # Copy annotations, pull types from `__annotations__` or try to infer + # the type if possible + class_annotations = getattr(original, '__annotations__', {}) + for name in dir(original): + if name in ("training", "__dict__"): + # TODO: removing this skip should let us remove the code to add training as an + # attribute in python_sugared_value.cpp + continue + if hasattr(self, name): + # Don't re-copy properties + continue + item = getattr(original, name) + if name in class_annotations: + the_type = torch.jit.annotations.ann_to_type(class_annotations[name]) + else: + the_type = torch._C._jit_try_infer_type(item) + if the_type is not None: + self._c._register_attribute(name, the_type, item) + + # Copy overloads + self.__dict__["_overloads"] = dict(getattr(original, "__overloads__", {})) + + self.__dict__["_initialized"] = True + self.__dict__["_original_type"] = type(original) + _create_methods_from_stubs(self, stubs) + + def __getattr__(self, attr): + # Try to get the attribute directly, if that fails, fall back to the + # weak module itself + try: + return ScriptModule.__getattr__(self, attr) + except AttributeError as e: + # unwrap the original + original_module = self.__dict__["_original"]() + if original_module and self.__dict__["_initialized"]: + # get attr from original if it is still alive + return getattr(original_module, attr) + elif self.__dict__["_initialized"]: + # original module is dead, try looking up the value on the + # original type + fn = getattr(self.__dict__["_original_type"], attr, None) + if fn is not None and inspect.isroutine(fn): + # bind the function to this instance and return it + return fn.__get__(self, self.__dict__["_original_type"]) + # If it's not on this module and it wasn't on the original + # module (or the original is dead), throw the exception + raise e + + def __setattr__(self, attr, value): + # Once constructed, no new properties can be set + + if not self.__dict__["_initialized"]: + # If constructing, don't fall back to original module + return ScriptModule.__setattr__(self, attr, value) + + if hasattr(self, attr): + return ScriptModule.__setattr__(self, attr, value) + else: + raise AttributeError("Cannot set new attribute '{}' 
on " + "weak script module once it has been " + "created".format(attr)) + +else: +
+    class ScriptModule(torch.nn.Module):
+        def __init__(self):
+            super(ScriptModule, self).__init__()
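A minimal sketch of the constant machinery above (the ``Gain`` module and its value are illustrative; ``float`` is among the valid constant types, and the assignment is vetted by ``_get_valid_constant`` via ``ScriptModule.__setattr__``)::

    import torch

    class Gain(torch.jit.ScriptModule):
        __constants__ = ['scale']  # folded into the compiled graph as a constant

        def __init__(self):
            super(Gain, self).__init__()
            self.scale = 2.0  # must be one of the valid constant types

        @torch.jit.script_method
        def forward(self, x):
            return x * self.scale

    print(Gain()(torch.ones(3)))  # tensor([2., 2., 2.])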
    + + +def _convert_to_script_module(mod): + """ + Makes a ScriptModule from an nn.Module. If `_methods` is provided, + these methods are treated as @script_methods. If not, it defaults to + `('forward',)`. Methods accessed in forward are scripted on demand. + """ + if isinstance(mod, ScriptModule): + return mod + + if isinstance(mod, (ModuleList, Sequential)): + # Create constant versions for the iterable modules + return _create_constant_iterable_module(mod) + + methods = () + if hasattr(mod, 'forward'): + if mod.forward.__func__ == torch.nn.Module.forward: + raise RuntimeError("No forward method was defined on {}".format(mod)) + if not _jit_internal.is_ignored_fn(mod.forward): + methods = ('forward',) + exported = [] + for name in dir(mod): + item = getattr(mod, name) + if callable(item): + if _jit_internal.get_torchscript_modifier(item) is _jit_internal.FunctionModifiers.EXPORT: + exported.append(name) + methods = methods + tuple(exported) + + def make_stub(method): + func = get_function_from_type(type(mod), method) + return script_method(func, _jit_internal.createResolutionCallbackFromClosure(func)) + + stubs = list(map(make_stub, methods)) + return WeakScriptModuleProxy(mod, stubs) + + +def _get_methods(cls): + import inspect + # In Python 3 unbound methods are functions, but in Python 2 they are methods + return inspect.getmembers(cls, predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x)) + + +_compiled_methods_whitelist = { + 'forward', 'register_buffer', 'register_parameter', 'add_module', + '_apply', 'apply', 'cuda', 'cpu', 'to', 'type', 'float', 'double', 'half', + 'state_dict', '_save_to_state_dict', 'load_state_dict', + '_load_from_state_dict', '_named_members', 'parameters', 'named_parameters', + 'buffers', 'named_buffers', 'children', 'named_children', 'modules', + 'named_modules', 'zero_grad', 'share_memory', '_get_name', 'extra_repr', + '_slow_forward', '_tracing_name', 'eval', 'train', +} + + +def _make_fail(name): + def fail(self, *args, **kwargs): + raise RuntimeError(name + " is not supported on ScriptModules") + return fail + + +for name, method in _get_methods(torch.nn.Module): + if name.startswith('__'): + continue + if name not in ScriptModule.__dict__ and name not in _compiled_methods_whitelist: + setattr(ScriptModule, method.__name__, _make_fail(name)) + + +class TracedModule(ScriptModule): + __frozen = False + + def __init__(self, orig, id_set=None, _compilation_unit=None): + # XXX: orig can be a nn.Module or a function! 
+ super(TracedModule, self).__init__(_qualified_name=_jit_internal._qualified_name(orig.__class__), + _compilation_unit=_compilation_unit) + if id_set is None: + id_set = set() + + assert(isinstance(orig, torch.nn.Module)) + self._name = 'TracedModule[' + type(orig).__name__ + ']' + + def check_unique(param): + if param in id_set: + raise ValueError("TracedModules don't support parameter sharing between modules") + id_set.add(param) + + self.training = orig.training + + for name, param in orig._parameters.items(): + if param is not None: + self._parameters[name] = param + check_unique(param) + for name, buf in orig._buffers.items(): + if buf is not None: + self._buffers[name] = buf + check_unique(buf) + + if orig._backward_hooks or orig._forward_hooks or orig._forward_pre_hooks: + raise ValueError("Modules that have hooks assigned can't be compiled") + + for name, submodule in orig._modules.items(): + if isinstance(submodule, ScriptModule): + self._modules[name] = submodule + else: + self._modules[name] = TracedModule(submodule, id_set) + + self._freeze() + + def forward(self, *args, **kwargs): + raise RuntimeError('Trace submodules cannot be called.') + + def _freeze(self): + self.__frozen = True + + def _get_name(self): + return self._name + + def __setattr__(self, attr, value): + if not self.__frozen or hasattr(self, attr): + return super(TracedModule, self).__setattr__(attr, value) + raise RuntimeError("Cannot set new properties on a traced module.") + + +if _enabled: + class TopLevelTracedModule(TracedModule): + forward = _CachedForward() + + +class _ConstModuleList(ScriptModule): + def __init__(self, modules): + super(_ConstModuleList, self).__init__() + + if isinstance(modules, OrderedDict): + for key, module in modules.items(): + if isinstance(module, torch.nn.Module): + module = _convert_to_script_module(module) + self.add_module(key, module) + else: + for i, module in enumerate(modules): + if isinstance(module, torch.nn.Module): + module = _convert_to_script_module(module) + self.add_module(str(i), module) + + def __getitem__(self, idx): + if isinstance(idx, slice): + return _ConstModuleList(list(self._modules.values())[idx]) + else: + if not (-len(self) <= idx < len(self)): + raise IndexError('index {} is out of range'.format(idx)) + if idx < 0: + idx += len(self) + return self._modules[str(idx)] + + def __len__(self): + return len(self._modules) + + def __iter__(self): + return iter(self._modules.values()) + + def __dir__(self): + keys = super(_ConstModuleList, self).__dir__() + keys = [key for key in keys if not key.isdigit()] + return keys + + +class _ConstSequential(_ConstModuleList): + __constants__ = ['mods'] + + def __init__(self, mods): + super(_ConstSequential, self).__init__(mods._modules) + + # we define the forward method via self.define rather than + # making it a direct class member (with a @script) annotation + # because, in optimized runtime environments where only .pyc files + # are shipped, we cant retrieve the source code. 
+ # TODO: find a workaround for this and remove this hack + self.define(""" + def forward(self, input): + for m in self: + input = m(input) + return input + """) + + +_builtin_table = None + +_modules_containing_builtins = (torch, torch._C._nn) + + +def _unwrap_optional(x): + assert x is not None, "Unwrapping null optional" + return x + + +# lazily built to ensure the correct initialization order +def _get_builtin_table(): + global _builtin_table + if _builtin_table is not None: + return _builtin_table + _builtin_table = {} + + def register_all(mod): + for name in dir(mod): + v = getattr(mod, name) + if callable(v): + _builtin_table[id(v)] = "aten::" + name + for mod in _modules_containing_builtins: + register_all(mod) + + builtin_ops = [ + # Pairs of (function, op_name) + (_list_with_default, "aten::list_with_default"), + (_pair, "aten::_pair"), + (_quadruple, "aten::_quadruple"), + (_single, "aten::_single"), + (_triple, "aten::_triple"), + (_unwrap_optional, "aten::_unwrap_optional"), + (_wait, 'aten::wait'), + (cudnn.is_acceptable, "aten::cudnn_is_acceptable"), + (math.ceil, "aten::ceil"), + (math.copysign, "aten::copysign"), + (math.erf, "aten::erf"), + (math.erfc, "aten::erfc"), + (math.exp, "aten::exp"), + (math.expm1, "aten::expm1"), + (math.fabs, "aten::fabs"), + (math.floor, "aten::floor"), + (math.gamma, "aten::gamma"), + (math.lgamma, "aten::lgamma"), + (math.log, "aten::log"), + (math.log10, "aten::log10"), + (math.log1p, "aten::log1p"), + (math.pow, "aten::pow"), + (math.sqrt, "aten::sqrt"), + (math.isnan, "aten::isnan"), + (math.asinh, "aten::asinh"), + (math.atanh, "aten::atanh"), + (math.cosh, "aten::cosh"), + (math.sinh, "aten::sinh"), + (math.tanh, "aten::tanh"), + (math.acos, "aten::acos"), + (math.asin, "aten::asin"), + (math.atan, "aten::atan"), + (math.atan2, "aten::atan2"), + (math.cos, "aten::cos"), + (math.sin, "aten::sin"), + (math.tan, "aten::tan"), + (math.asinh, "aten::asinh"), + (math.atanh, "aten::atanh"), + (math.acosh, "aten::acosh"), + (math.sinh, "aten::sinh"), + (math.cosh, "aten::cosh"), + (math.tanh, "aten::tanh"), + (math.fmod, "aten::fmod"), + (math.modf, "aten::modf"), + (math.factorial, "aten::factorial"), + (math.frexp, "aten::frexp"), + (math.isnan, "aten::isnan"), + (math.isinf, "aten::isinf"), + (math.degrees, "aten::degrees"), + (math.radians, "aten::radians"), + (math.ldexp, "aten::ldexp"), + (torch._C._infer_size, "aten::_infer_size"), + (torch.nn.functional._no_grad_embedding_renorm_, "aten::_no_grad_embedding_renorm_"), + (torch.nn.functional.assert_int_or_pair, "aten::_assert_int_or_pair"), + (torch.nn.functional.interpolate, "aten::__interpolate"), + (torch.nn.functional.upsample_bilinear, "aten::__upsample_bilinear"), + (torch.nn.functional.upsample_nearest, "aten::__upsample_nearest"), + (torch.nn.functional.upsample, "aten::__upsample"), + (torch.nn.init._no_grad_fill_, "aten::_no_grad_fill_"), + (torch.nn.init._no_grad_normal_, "aten::_no_grad_normal_"), + (torch.nn.init._no_grad_uniform_, "aten::_no_grad_uniform_"), + (torch.nn.init._no_grad_zero_, "aten::_no_grad_zero_"), + (torch.nn.utils.rnn.get_packed_sequence, "aten::_pack_sequence"), + (torch._C._get_tracing_state, "aten::_get_tracing_state"), + (warnings.warn, "aten::warn"), + ] + + for builtin, aten_op in builtin_ops: + _builtin_table[id(builtin)] = aten_op + if not PY2: + _builtin_table[id(math.gcd)] = "aten::gcd" + _builtin_table[id(math.isfinite)] = "aten::isfinite" + if PY37: + _builtin_table[id(math.remainder)] = "aten::mathremainder" + + return _builtin_table + + +def 
_register_builtin(fn, op): + _get_builtin_table()[id(fn)] = op + + +def _find_builtin(fn): + return _get_builtin_table().get(id(fn)) + +# qualified_name => ScriptClass mapping +_script_classes = {} + + +def _add_script_class(cls, name): + cls.__torch_script_class__ = True + global _script_classes + _script_classes[name] = cls + + +def _get_script_class(name): + global _script_classes + if name not in _script_classes: + raise RuntimeError("Unknown reference to ScriptClass '{}'. " + "Did you forget to import it?".format(name)) + return _script_classes[name] + +# torch.jit.Error +Error = torch._C.JITException + +def _get_named_tuple_properties(obj): + assert issubclass(obj, tuple) and hasattr(obj, '_fields') + fields = list(obj._fields) + annotations = [] + has_annotations = hasattr(obj, '__annotations__') + for field in fields: + if has_annotations and field in obj.__annotations__: + annotations.append(torch.jit.annotations.ann_to_type(obj.__annotations__[field])) + else: + annotations.append(torch._C.TensorType.get()) + return type(obj).__name__, fields, annotations + +def _create_named_tuple(t, unqual_name, field_names): + TupleType = collections.namedtuple(unqual_name, field_names) + return TupleType(*t) + +class _disable_tracing(object): + def __enter__(self): + self.state = torch._C._get_tracing_state() + torch._C._set_tracing_state(None) + + def __exit__(self, *args): + torch._C._set_tracing_state(self.state) + self.state = None + + +# for use in python if using annotate +def annotate(the_type, the_value): + # noop in python + return the_value + + +Attribute = collections.namedtuple('Attribute', ['value', 'type']) + +last_executed_optimized_graph = torch._C._last_executed_optimized_graph + + +def _graph_for(self, *args, **kwargs): + self(*args, **kwargs) + return last_executed_optimized_graph() + +torch._C.ScriptMethod.graph_for = _graph_for +torch._C.Function.graph_for = _graph_for +Function = torch._C.Function + +if not torch._C._jit_init(): + raise RuntimeError("JIT initialization failed") +
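A minimal sketch of the ``annotate`` helper defined above: it is a no-op in Python, but inside scripted code it pins down types the compiler cannot infer, the empty-container case being the classic use::

    import torch
    from typing import List

    @torch.jit.script
    def positives(xs):
        # type: (List[int]) -> List[int]
        out = torch.jit.annotate(List[int], [])  # a plain [] would be inferred as List[Tensor]
        for x in xs:
            if x > 0:
                out.append(x)
        return out

    print(positives([-1, 2, 3]))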
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/multiprocessing.html b/docs/stable/_modules/torch/multiprocessing.html
new file mode 100644
index 000000000000..47d2adf9ab5f
--- /dev/null
+++ b/docs/stable/_modules/torch/multiprocessing.html
@@ -0,0 +1,592 @@
+torch.multiprocessing — PyTorch master documentation
    Source code for torch.multiprocessing

    +"""
    +torch.multiprocessing is a wrapper around the native :mod:`multiprocessing`
+module. It registers custom reducers that use shared memory to provide shared
    +views on the same data in different processes. Once the tensor/storage is moved
+to shared memory (see :func:`~torch.Tensor.share_memory_`), it will be possible
    +to send it to other processes without making any copies.
    +
    +The API is 100% compatible with the original module - it's enough to change
    +``import multiprocessing`` to ``import torch.multiprocessing`` to have all the
+tensors sent through the queues or shared via other mechanisms moved to shared
    +memory.
    +
+Because of the similarity of the APIs, we do not document most of this package's
+contents, and we recommend referring to the very good docs of the original module.
    +"""
    +import torch
    +import sys
    +from .reductions import init_reductions
    +import multiprocessing
    +
    +__all__ = ['set_sharing_strategy', 'get_sharing_strategy',
    +           'get_all_sharing_strategies']
    +
    +
    +from multiprocessing import *  # noqa: F401
    +
    +
    +__all__ += multiprocessing.__all__
    +
    +
    +# This call adds a Linux specific prctl(2) wrapper function to this module.
    +# See https://github.com/pytorch/pytorch/pull/14391 for more information.
    +torch._C._multiprocessing_init()
    +
    +
    +if sys.version_info < (3, 3):
+    """Override basic classes in Python versions below 3.3 to use ForkingPickler
    +    for serialization. Later versions of Python already use ForkingPickler."""
    +    from .queue import Queue, SimpleQueue  # noqa: F401
    +    from .pool import Pool  # noqa: F401
    +
    +
    +"""Add helper function to spawn N processes and wait for completion of any of
+them. This depends on `mp.get_context`, which was added in Python 3.4."""
    +from .spawn import spawn, SpawnContext, _supports_context  # noqa: F401
    +
    +
    +if sys.platform == 'darwin' or sys.platform == 'win32':
    +    _sharing_strategy = 'file_system'
    +    _all_sharing_strategies = {'file_system'}
    +else:
    +    _sharing_strategy = 'file_descriptor'
    +    _all_sharing_strategies = {'file_descriptor', 'file_system'}
    +
    +
    +
+def set_sharing_strategy(new_strategy):
+    """Sets the strategy for sharing CPU tensors.
+
+    Arguments:
+        new_strategy (str): Name of the selected strategy. Should be one of
+            the values returned by :func:`get_all_sharing_strategies()`.
+    """
+    global _sharing_strategy
+    assert new_strategy in _all_sharing_strategies
+    _sharing_strategy = new_strategy
    + + +
+def get_sharing_strategy():
+    """Returns the current strategy for sharing CPU tensors."""
+    return _sharing_strategy
    + + +
+def get_all_sharing_strategies():
+    """Returns a set of sharing strategies supported on the current system."""
+    return _all_sharing_strategies
    + + +init_reductions() +
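A minimal sketch of the strategy getters and setters above (``file_system`` is typically chosen to work around file-descriptor limits, and is available on every platform per the defaults just shown)::

    import torch.multiprocessing as mp

    print(mp.get_all_sharing_strategies())  # e.g. {'file_descriptor', 'file_system'}
    mp.set_sharing_strategy('file_system')
    assert mp.get_sharing_strategy() == 'file_system'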
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/multiprocessing/spawn.html b/docs/stable/_modules/torch/multiprocessing/spawn.html
new file mode 100644
index 000000000000..90a91b61a7d7
--- /dev/null
+++ b/docs/stable/_modules/torch/multiprocessing/spawn.html
@@ -0,0 +1,688 @@
+torch.multiprocessing.spawn — PyTorch master documentation
    Source code for torch.multiprocessing.spawn

    +from __future__ import absolute_import, division, print_function, unicode_literals
    +
    +import multiprocessing
    +import multiprocessing.connection
    +import signal
    +import sys
    +
    +from . import _prctl_pr_set_pdeathsig
    +
    +
    +def _wrap(fn, i, args, error_queue):
    +    # prctl(2) is a Linux specific system call.
    +    # On other systems the following function call has no effect.
    +    # This is set to ensure that non-daemonic child processes can
    +    # terminate if their parent terminates before they do.
    +    _prctl_pr_set_pdeathsig(signal.SIGINT)
    +
    +    try:
    +        fn(i, *args)
    +    except KeyboardInterrupt:
    +        pass  # SIGINT; Killed by parent, do nothing
    +    except Exception:
    +        # Propagate exception to parent process, keeping original traceback
    +        import traceback
    +        error_queue.put(traceback.format_exc())
    +        sys.exit(1)
    +
    +
+# Multiprocessing contexts were introduced in Python 3.4.
    +_supports_context = sys.version_info >= (3, 4)
    +
    +
    +def _python_version_check():
    +    if not _supports_context:
    +        raise RuntimeError("Requires python 3.4 or higher to use "
    +                           "torch.multiprocessing.spawn and "
    +                           "torch.multiprocessing.SpawnContext helper "
    +                           "to launch multiple processes. If you are using "
    +                           "this for distributed training and have a lower "
    +                           "version of python, please use "
    +                           "torch.distributed.launch instead.")
    +
    +
    +
+class SpawnContext:
+    def __init__(self, processes, error_queues):
+        _python_version_check()
+        self.error_queues = error_queues
+        self.processes = processes
+        self.sentinels = {
+            process.sentinel: index
+            for index, process in enumerate(processes)
+        }
+
+    def pids(self):
+        return [int(process.pid) for process in self.processes]
+
+    def join(self, timeout=None):
+        r"""
+        Tries to join one or more processes in this spawn context.
+        If one of them exited with a non-zero exit status, this function
+        kills the remaining processes and raises an exception with the cause
+        of the first process exiting.
+
+        Returns ``True`` if all processes have been joined successfully,
+        ``False`` if there are more processes that need to be joined.
+
+        Arguments:
+            timeout (float): Wait this long before giving up on waiting.
+        """
+        # Ensure this function can be called even when we're done.
+        if len(self.sentinels) == 0:
+            return True
+
+        # Wait for any process to fail or all of them to succeed.
+        ready = multiprocessing.connection.wait(
+            self.sentinels.keys(),
+            timeout=timeout,
+        )
+
+        error_index = None
+        for sentinel in ready:
+            index = self.sentinels.pop(sentinel)
+            process = self.processes[index]
+            process.join()
+            if process.exitcode != 0:
+                error_index = index
+                break
+
+        # Return if there was no error.
+        if error_index is None:
+            # Return whether or not all processes have been joined.
+            return len(self.sentinels) == 0
+
+        # Assume failure. Terminate processes that are still alive.
+        for process in self.processes:
+            if process.is_alive():
+                process.terminate()
+            process.join()
+
+        # There won't be an error on the queue if the process crashed.
+        if self.error_queues[error_index].empty():
+            exitcode = self.processes[error_index].exitcode
+            if exitcode < 0:
+                name = signal.Signals(-exitcode).name
+                raise Exception(
+                    "process %d terminated with signal %s" %
+                    (error_index, name)
+                )
+            else:
+                raise Exception(
+                    "process %d terminated with exit code %d" %
+                    (error_index, exitcode)
+                )
+
+        original_trace = self.error_queues[error_index].get()
+        msg = "\n\n-- Process %d terminated with the following error:\n" % error_index
+        msg += original_trace
+        raise Exception(msg)
+
+
+def spawn(fn, args=(), nprocs=1, join=True, daemon=False):
+    r"""Spawns ``nprocs`` processes that run ``fn`` with ``args``.
+
+    If one of the processes exits with a non-zero exit status, the
+    remaining processes are killed and an exception is raised with the
+    cause of termination. If an exception was caught in the child
+    process, it is forwarded and its traceback is included in the
+    exception raised in the parent process.
+
+    Arguments:
+        fn (function): Function is called as the entrypoint of the
+            spawned process. This function must be defined at the top
+            level of a module so it can be pickled and spawned. This
+            is a requirement imposed by multiprocessing.
+
+            The function is called as ``fn(i, *args)``, where ``i`` is
+            the process index and ``args`` is the passed through tuple
+            of arguments.
+
+        args (tuple): Arguments passed to ``fn``.
+        nprocs (int): Number of processes to spawn.
+        join (bool): Perform a blocking join on all processes.
+        daemon (bool): The spawned processes' daemon flag. If set to True,
+            daemonic processes will be created.
+
+    Returns:
+        None if ``join`` is ``True``,
+        :class:`~SpawnContext` if ``join`` is ``False``
+
+    """
+    _python_version_check()
+    mp = multiprocessing.get_context('spawn')
+    error_queues = []
+    processes = []
+    for i in range(nprocs):
+        error_queue = mp.SimpleQueue()
+        process = mp.Process(
+            target=_wrap,
+            args=(fn, i, args, error_queue),
+            daemon=daemon,
+        )
+        process.start()
+        error_queues.append(error_queue)
+        processes.append(process)
+
+    spawn_context = SpawnContext(processes, error_queues)
+    if not join:
+        return spawn_context
+
+    # Loop on join until it returns True or raises an exception.
+    while not spawn_context.join():
+        pass
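+
+
+# A minimal usage sketch (illustrative; in real use ``run`` must live at the
+# top level of an importable module so it can be pickled):
+#
+#     >>> import torch.multiprocessing as mp
+#     >>> def run(i, offset):
+#     ...     print('worker', i + offset)
+#     >>> mp.spawn(run, args=(10,), nprocs=2)   # runs run(0, 10) and run(1, 10)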
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/functional.html b/docs/stable/_modules/torch/nn/functional.html
new file mode 100644
index 000000000000..1b1178548399
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/functional.html
@@ -0,0 +1,3788 @@
+torch.nn.functional — PyTorch master documentation
    Source code for torch.nn.functional

    +r"""Functional interface"""
    +from __future__ import division
    +
    +import warnings
    +import math
    +
    +import torch
    +from torch._C import _infer_size, _add_docstr
    +from . import _reduction as _Reduction
    +from .modules import utils
    +from ._functions import vision
    +from .modules.utils import _single, _pair, _triple, _list_with_default
    +from . import grad  # noqa: F401
    +from . import _VF
    +from .._jit_internal import boolean_dispatch, List
    +
    +
    +conv1d = _add_docstr(torch.conv1d, r"""
    +conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
    +
    +Applies a 1D convolution over an input signal composed of several input
    +planes.
    +
    +See :class:`~torch.nn.Conv1d` for details and output shape.
    +
    +.. include:: cudnn_deterministic.rst
    +
    +Args:
    +    input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)`
    +    weight: filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kW)`
    +    bias: optional bias of shape :math:`(\text{out\_channels})`. Default: ``None``
    +    stride: the stride of the convolving kernel. Can be a single number or
    +      a one-element tuple `(sW,)`. Default: 1
    +    padding: implicit paddings on both sides of the input. Can be a
    +      single number or a one-element tuple `(padW,)`. Default: 0
    +    dilation: the spacing between kernel elements. Can be a single number or
    +      a one-element tuple `(dW,)`. Default: 1
    +    groups: split input into groups, :math:`\text{in\_channels}` should be divisible by
    +      the number of groups. Default: 1
    +
    +Examples::
    +
    +    >>> filters = torch.randn(33, 16, 3)
    +    >>> inputs = torch.randn(20, 16, 50)
    +    >>> F.conv1d(inputs, filters)
    +""")
    +
    +conv2d = _add_docstr(torch.conv2d, r"""
    +conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
    +
    +Applies a 2D convolution over an input image composed of several input
    +planes.
    +
    +See :class:`~torch.nn.Conv2d` for details and output shape.
    +
    +.. include:: cudnn_deterministic.rst
    +
    +Args:
    +    input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
    +    weight: filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kH , kW)`
    +    bias: optional bias tensor of shape :math:`(\text{out\_channels})`. Default: ``None``
    +    stride: the stride of the convolving kernel. Can be a single number or a
    +      tuple `(sH, sW)`. Default: 1
    +    padding: implicit paddings on both sides of the input. Can be a
    +      single number or a tuple `(padH, padW)`. Default: 0
    +    dilation: the spacing between kernel elements. Can be a single number or
    +      a tuple `(dH, dW)`. Default: 1
    +    groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
    +      number of groups. Default: 1
    +
    +Examples::
    +
    +    >>> # With square kernels and equal stride
    +    >>> filters = torch.randn(8,4,3,3)
    +    >>> inputs = torch.randn(1,4,5,5)
    +    >>> F.conv2d(inputs, filters, padding=1)
    +""")  # noqa: E501
    +
    +conv3d = _add_docstr(torch.conv3d, r"""
    +conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
    +
    +Applies a 3D convolution over an input image composed of several input
    +planes.
    +
    +See :class:`~torch.nn.Conv3d` for details and output shape.
    +
    +.. include:: cudnn_deterministic.rst
    +
    +Args:
    +    input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iT , iH , iW)`
    +    weight: filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kT , kH , kW)`
    +    bias: optional bias tensor of shape :math:`(\text{out\_channels})`. Default: None
    +    stride: the stride of the convolving kernel. Can be a single number or a
    +      tuple `(sT, sH, sW)`. Default: 1
    +    padding: implicit paddings on both sides of the input. Can be a
    +      single number or a tuple `(padT, padH, padW)`. Default: 0
    +    dilation: the spacing between kernel elements. Can be a single number or
    +      a tuple `(dT, dH, dW)`. Default: 1
    +    groups: split input into groups, :math:`\text{in\_channels}` should be divisible by
    +      the number of groups. Default: 1
    +
    +Examples::
    +
    +    >>> filters = torch.randn(33, 16, 3, 3, 3)
    +    >>> inputs = torch.randn(20, 16, 50, 10, 20)
    +    >>> F.conv3d(inputs, filters)
    +""")  # noqa: E501
    +
    +conv_transpose1d = _add_docstr(torch.conv_transpose1d, r"""
    +conv_transpose1d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor
    +
    +Applies a 1D transposed convolution operator over an input signal
    +composed of several input planes, sometimes also called "deconvolution".
    +
    +See :class:`~torch.nn.ConvTranspose1d` for details and output shape.
    +
    +.. include:: cudnn_deterministic.rst
    +
    +Args:
    +    input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)`
    +    weight: filters of shape :math:`(\text{in\_channels} , \frac{\text{out\_channels}}{\text{groups}} , kW)`
    +    bias: optional bias of shape :math:`(\text{out\_channels})`. Default: None
    +    stride: the stride of the convolving kernel. Can be a single number or a
    +      tuple ``(sW,)``. Default: 1
    +    padding: ``dilation * (kernel_size - 1) - padding`` zero-padding will be added to both
    +      sides of each dimension in the input. Can be a single number or a tuple
    +      ``(padW,)``. Default: 0
    +    output_padding: additional size added to one side of each dimension in the
+      output shape. Can be a single number or a tuple ``(out_padW,)``. Default: 0
    +    groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
    +      number of groups. Default: 1
    +    dilation: the spacing between kernel elements. Can be a single number or
    +      a tuple ``(dW,)``. Default: 1
    +
    +Examples::
    +
    +    >>> inputs = torch.randn(20, 16, 50)
    +    >>> weights = torch.randn(16, 33, 5)
    +    >>> F.conv_transpose1d(inputs, weights)
    +""")
    +
    +conv_transpose2d = _add_docstr(torch.conv_transpose2d, r"""
    +conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor
    +
    +Applies a 2D transposed convolution operator over an input image
    +composed of several input planes, sometimes also called "deconvolution".
    +
    +See :class:`~torch.nn.ConvTranspose2d` for details and output shape.
    +
    +.. include:: cudnn_deterministic.rst
    +
    +Args:
    +    input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
    +    weight: filters of shape :math:`(\text{in\_channels} , \frac{\text{out\_channels}}{\text{groups}} , kH , kW)`
    +    bias: optional bias of shape :math:`(\text{out\_channels})`. Default: None
    +    stride: the stride of the convolving kernel. Can be a single number or a
    +      tuple ``(sH, sW)``. Default: 1
    +    padding: ``dilation * (kernel_size - 1) - padding`` zero-padding will be added to both
    +      sides of each dimension in the input. Can be a single number or a tuple
    +      ``(padH, padW)``. Default: 0
    +    output_padding: additional size added to one side of each dimension in the
    +      output shape. Can be a single number or a tuple ``(out_padH, out_padW)``.
    +      Default: 0
    +    groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
    +      number of groups. Default: 1
    +    dilation: the spacing between kernel elements. Can be a single number or
    +      a tuple ``(dH, dW)``. Default: 1
    +
    +Examples::
    +
    +    >>> # With square kernels and equal stride
    +    >>> inputs = torch.randn(1, 4, 5, 5)
    +    >>> weights = torch.randn(4, 8, 3, 3)
    +    >>> F.conv_transpose2d(inputs, weights, padding=1)
    +""")  # noqa: E501
    +
    +conv_transpose3d = _add_docstr(torch.conv_transpose3d, r"""
    +conv_transpose3d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor
    +
    +Applies a 3D transposed convolution operator over an input image
+composed of several input planes, sometimes also called "deconvolution".
    +
    +See :class:`~torch.nn.ConvTranspose3d` for details and output shape.
    +
    +.. include:: cudnn_deterministic.rst
    +
    +Args:
    +    input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iT , iH , iW)`
    +    weight: filters of shape :math:`(\text{in\_channels} , \frac{\text{out\_channels}}{\text{groups}} , kT , kH , kW)`
    +    bias: optional bias of shape :math:`(\text{out\_channels})`. Default: None
    +    stride: the stride of the convolving kernel. Can be a single number or a
    +      tuple ``(sT, sH, sW)``. Default: 1
    +    padding: ``dilation * (kernel_size - 1) - padding`` zero-padding will be added to both
    +      sides of each dimension in the input. Can be a single number or a tuple
    +      ``(padT, padH, padW)``. Default: 0
    +    output_padding: additional size added to one side of each dimension in the
    +      output shape. Can be a single number or a tuple
    +      ``(out_padT, out_padH, out_padW)``. Default: 0
    +    groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
    +      number of groups. Default: 1
    +    dilation: the spacing between kernel elements. Can be a single number or
    +      a tuple `(dT, dH, dW)`. Default: 1
    +
    +Examples::
    +
    +    >>> inputs = torch.randn(20, 16, 50, 10, 20)
    +    >>> weights = torch.randn(16, 33, 3, 3, 3)
    +    >>> F.conv_transpose3d(inputs, weights)
    +""")  # noqa: E501
    +
    +conv_tbc = _add_docstr(torch.conv_tbc, r"""
    +Applies a 1-dimensional sequence convolution over an input sequence.
    +Input and output dimensions are (Time, Batch, Channels) - hence TBC.
    +
    +Args:
+    input: input tensor of shape :math:`(\text{sequence length} \times \text{batch} \times \text{in\_channels})`
    +    weight: filter of shape (:math:`\text{kernel width} \times \text{in\_channels} \times \text{out\_channels}`)
    +    bias: bias of shape (:math:`\text{out\_channels}`)
    +    pad: number of timesteps to pad. Default: 0
    +""")
    +
    +
    +# Pooling
    +avg_pool1d = _add_docstr(torch.avg_pool1d, r"""
    +avg_pool1d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True) -> Tensor
    +
    +Applies a 1D average pooling over an input signal composed of several
    +input planes.
    +
    +See :class:`~torch.nn.AvgPool1d` for details and output shape.
    +
    +Args:
    +    input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)`
    +    kernel_size: the size of the window. Can be a single number or a
    +      tuple `(kW,)`
    +    stride: the stride of the window. Can be a single number or a tuple
    +      `(sW,)`. Default: :attr:`kernel_size`
    +    padding: implicit zero paddings on both sides of the input. Can be a
    +      single number or a tuple `(padW,)`. Default: 0
    +    ceil_mode: when True, will use `ceil` instead of `floor` to compute the
    +        output shape. Default: ``False``
    +    count_include_pad: when True, will include the zero-padding in the
    +        averaging calculation. Default: ``True``
    +
    +Examples::
    +
    +    >>> # pool of square window of size=3, stride=2
    +    >>> input = torch.tensor([[[1, 2, 3, 4, 5, 6, 7]]], dtype=torch.float32)
    +    >>> F.avg_pool1d(input, kernel_size=3, stride=2)
    +    tensor([[[ 2.,  4.,  6.]]])
    +
    +""")
    +
    +
    +avg_pool2d = _add_docstr(torch._C._nn.avg_pool2d, r"""
    +avg_pool2d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None) -> Tensor
    +
    +Applies 2D average-pooling operation in :math:`kH \times kW` regions by step size
    +:math:`sH \times sW` steps. The number of output features is equal to the number of
    +input planes.
    +
    +See :class:`~torch.nn.AvgPool2d` for details and output shape.
    +
    +Args:
    +    input: input tensor :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
    +    kernel_size: size of the pooling region. Can be a single number or a
    +      tuple `(kH, kW)`
    +    stride: stride of the pooling operation. Can be a single number or a
    +      tuple `(sH, sW)`. Default: :attr:`kernel_size`
    +    padding: implicit zero paddings on both sides of the input. Can be a
    +      single number or a tuple `(padH, padW)`. Default: 0
    +    ceil_mode: when True, will use `ceil` instead of `floor` in the formula
    +        to compute the output shape. Default: ``False``
    +    count_include_pad: when True, will include the zero-padding in the
    +        averaging calculation. Default: ``True``
    +    divisor_override: if specified, it will be used as divisor, otherwise
    +         size of the pooling region will be used. Default: None
    +""")
    +
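+# A short, deterministic sketch of avg_pool2d (illustrative; assumes
+# ``import torch.nn.functional as F``, as in the doctests above):
+#
+#     >>> x = torch.ones(1, 1, 4, 4)
+#     >>> F.avg_pool2d(x, kernel_size=2)   # non-overlapping 2x2 means of ones
+#     tensor([[[[1., 1.],
+#               [1., 1.]]]])
+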
    +avg_pool3d = _add_docstr(torch._C._nn.avg_pool3d, r"""
    +avg_pool3d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None) -> Tensor
    +
+Applies 3D average-pooling operation in :math:`kT \times kH \times kW` regions by step
+size :math:`sT \times sH \times sW` steps. The number of output features is equal to
+the number of input planes.
    +
    +See :class:`~torch.nn.AvgPool3d` for details and output shape.
    +
    +Args:
+    input: input tensor :math:`(\text{minibatch} , \text{in\_channels} , iT , iH , iW)`
+    kernel_size: size of the pooling region. Can be a single number or a
+      tuple `(kT, kH, kW)`
+    stride: stride of the pooling operation. Can be a single number or a
+      tuple `(sT, sH, sW)`. Default: :attr:`kernel_size`
+    padding: implicit zero paddings on both sides of the input. Can be a
+      single number or a tuple `(padT, padH, padW)`. Default: 0
    +    ceil_mode: when True, will use `ceil` instead of `floor` in the formula
    +        to compute the output shape
    +    count_include_pad: when True, will include the zero-padding in the
    +        averaging calculation
    +    divisor_override: if specified, it will be used as divisor, otherwise
    +        size of the pooling region will be used. Default: None
    +""")
    +
    +
    +def fractional_max_pool2d_with_indices(input, kernel_size, output_size=None,
    +                                       output_ratio=None, return_indices=False,
    +                                       _random_samples=None):
    +    # type: (Tensor, BroadcastingList2[int], Optional[BroadcastingList2[int]], Optional[BroadcastingList2[float]], bool, Optional[Tensor]) -> Tuple[Tensor, Tensor]  # noqa
    +    r"""Applies 2D fractional max pooling over an input signal composed of several input planes.
    +
    +    Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham
    +
    +    The max-pooling operation is applied in :math:`kH \times kW` regions by a stochastic
    +    step size determined by the target output size.
    +    The number of output features is equal to the number of input planes.
    +
    +    Args:
    +        kernel_size: the size of the window to take a max over.
    +                     Can be a single number :math:`k` (for a square kernel of :math:`k \times k`)
    +                     or a tuple `(kH, kW)`
    +        output_size: the target output size of the image of the form :math:`oH \times oW`.
    +                     Can be a tuple `(oH, oW)` or a single number :math:`oH` for a square image :math:`oH \times oH`
    +        output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
    +                      This has to be a number or tuple in the range (0, 1)
    +        return_indices: if ``True``, will return the indices along with the outputs.
    +                        Useful to pass to :func:`~torch.nn.functional.max_unpool2d`.
    +
    +    Examples::
    +        >>> input = torch.randn(20, 16, 50, 32)
    +        >>> # pool of square window of size=3, and target output size 13x12
    +        >>> F.fractional_max_pool2d(input, 3, output_size=(13, 12))
    +        >>> # pool of square window and target output size being half of input image size
    +        >>> F.fractional_max_pool2d(input, 3, output_ratio=(0.5, 0.5))
    +
    +    .. _Fractional MaxPooling:
    +        http://arxiv.org/abs/1412.6071
    +    """
    +    if output_size is None and output_ratio is None:
    +        raise ValueError("fractional_max_pool2d requires specifying either "
    +                         "an output_size or an output_ratio")
    +    if output_size is None:
    +        _output_ratio = _pair(torch.jit._unwrap_optional(output_ratio))
    +        output_size = [int(input.size(2) * _output_ratio[0]),
    +                       int(input.size(3) * _output_ratio[1])]
    +
    +    if _random_samples is None:
    +        _random_samples = torch.rand(input.size(0), input.size(1), 2, dtype=input.dtype, device=input.device)
    +    return torch._C._nn.fractional_max_pool2d(input, kernel_size, output_size, _random_samples)
    +
    +
    +def _fractional_max_pool2d(input, kernel_size, output_size=None,
    +                           output_ratio=None, return_indices=False,
    +                           _random_samples=None):
    +    # type: (Tensor, BroadcastingList2[int], Optional[BroadcastingList2[int]], Optional[BroadcastingList2[float]], bool, Optional[Tensor]) -> Tensor  # noqa
    +    return fractional_max_pool2d_with_indices(input, kernel_size, output_size,
    +                                              output_ratio, return_indices,
    +                                              _random_samples)[0]
    +
    +fractional_max_pool2d = boolean_dispatch(
    +    arg_name='return_indices',
    +    arg_index=4,
    +    default=False,
    +    if_true=fractional_max_pool2d_with_indices,
    +    if_false=_fractional_max_pool2d,
    +    module_name=__name__,
    +    func_name='fractional_max_pool2d')
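+
+# A sketch of what the boolean_dispatch above buys us (illustrative): the
+# public name picks an implementation based on ``return_indices``.
+#
+#     >>> x = torch.randn(1, 1, 8, 8)
+#     >>> out = F.fractional_max_pool2d(x, 2, output_size=(4, 4))
+#     >>> out, idx = F.fractional_max_pool2d(x, 2, output_size=(4, 4),
+#     ...                                    return_indices=True)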
    +
    +
    +def fractional_max_pool3d_with_indices(input, kernel_size, output_size=None,
    +                                       output_ratio=None, return_indices=False,
    +                                       _random_samples=None):
    +    # type: (Tensor, BroadcastingList3[int], Optional[BroadcastingList3[int]], Optional[BroadcastingList3[float]], bool, Optional[Tensor]) -> Tuple[Tensor, Tensor]  # noqa
    +    r"""Applies 3D fractional max pooling over an input signal composed of several input planes.
    +
    +    Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham
    +
    +    The max-pooling operation is applied in :math:`kT \times kH \times kW` regions by a stochastic
    +    step size determined by the target output size.
    +    The number of output features is equal to the number of input planes.
    +
    +    Args:
    +        kernel_size: the size of the window to take a max over.
    +                     Can be a single number :math:`k` (for a square kernel of :math:`k \times k \times k`)
    +                     or a tuple `(kT, kH, kW)`
    +        output_size: the target output size of the form :math:`oT \times oH \times oW`.
    +                     Can be a tuple `(oT, oH, oW)` or a single number :math:`oH` for a cubic output
    +                      :math:`oH \times oH \times oH`
    +        output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
    +                      This has to be a number or tuple in the range (0, 1)
    +        return_indices: if ``True``, will return the indices along with the outputs.
    +                        Useful to pass to :func:`~torch.nn.functional.max_unpool3d`.
    +
    +    Examples::
    +        >>> input = torch.randn(20, 16, 50, 32, 16)
    +        >>> # pool of cubic window of size=3, and target output size 13x12x11
    +        >>> F.fractional_max_pool3d(input, 3, output_size=(13, 12, 11))
    +        >>> # pool of cubic window and target output size being half of input size
    +        >>> F.fractional_max_pool3d(input, 3, output_ratio=(0.5, 0.5, 0.5))
    +
    +    .. _Fractional MaxPooling:
    +        http://arxiv.org/abs/1412.6071
    +    """
    +    if output_size is None and output_ratio is None:
    +        raise ValueError("fractional_max_pool3d requires specifying either "
    +                         "an output_size or an output_ratio")
    +    if output_size is None:
    +        _output_ratio = _triple(torch.jit._unwrap_optional(output_ratio))
    +        output_size = [int(input.size(2) * _output_ratio[0]),
    +                       int(input.size(3) * _output_ratio[1]),
    +                       int(input.size(4) * _output_ratio[2])]
    +
    +    if _random_samples is None:
    +        _random_samples = torch.rand(input.size(0), input.size(1), 3, dtype=input.dtype, device=input.device)
    +    return torch._C._nn.fractional_max_pool3d(input, kernel_size, output_size, _random_samples)
    +
    +
    +def _fractional_max_pool3d(input, kernel_size, output_size=None,
    +                           output_ratio=None, return_indices=False,
    +                           _random_samples=None):
    +    # type: (Tensor, BroadcastingList3[int], Optional[BroadcastingList3[int]], Optional[BroadcastingList3[float]], bool, Optional[Tensor]) -> Tensor  # noqa
    +    return fractional_max_pool3d_with_indices(input, kernel_size, output_size,
    +                                              output_ratio, return_indices,
    +                                              _random_samples)[0]
    +
    +fractional_max_pool3d = boolean_dispatch(
    +    arg_name='return_indices',
    +    arg_index=4,
    +    default=False,
    +    if_true=fractional_max_pool3d_with_indices,
    +    if_false=_fractional_max_pool3d,
    +    module_name=__name__,
    +    func_name='fractional_max_pool3d')
    +
    +
    +def max_pool1d_with_indices(input, kernel_size, stride=None, padding=0,
    +                            dilation=1, ceil_mode=False, return_indices=False):
    +    # type: (Tensor, BroadcastingList1[int], Optional[BroadcastingList1[int]], BroadcastingList1[int], BroadcastingList1[int], bool, bool) -> Tuple[Tensor, Tensor]  # noqa
    +    r"""Applies a 1D max pooling over an input signal composed of several input
    +    planes.
    +
    +    See :class:`~torch.nn.MaxPool1d` for details.
    +    """
    +    if stride is None:
    +        stride = torch.jit.annotate(List[int], [])
    +    return torch.max_pool1d_with_indices(
    +        input, kernel_size, stride, padding, dilation, ceil_mode)
    +
    +
    +def _max_pool1d(input, kernel_size, stride=None, padding=0, dilation=1,
    +                ceil_mode=False, return_indices=False):
    +    # type: (Tensor, BroadcastingList1[int], Optional[BroadcastingList1[int]], BroadcastingList1[int], BroadcastingList1[int], bool, bool) -> Tensor  # noqa
    +    if stride is None:
    +        stride = torch.jit.annotate(List[int], [])
    +    return torch.max_pool1d(
    +        input, kernel_size, stride, padding, dilation, ceil_mode)
    +
    +max_pool1d = boolean_dispatch(
    +    arg_name='return_indices',
    +    arg_index=6,
    +    default=False,
    +    if_true=max_pool1d_with_indices,
    +    if_false=_max_pool1d,
    +    module_name=__name__,
    +    func_name='max_pool1d')
    +
    +
    +def max_pool2d_with_indices(input, kernel_size, stride=None, padding=0, dilation=1,
    +                            ceil_mode=False, return_indices=False):
    +    # type: (Tensor, BroadcastingList2[int], Optional[BroadcastingList2[int]], BroadcastingList2[int], BroadcastingList2[int], bool, bool) -> Tuple[Tensor, Tensor]  # noqa
    +    r"""Applies a 2D max pooling over an input signal composed of several input
    +    planes.
    +
    +    See :class:`~torch.nn.MaxPool2d` for details.
    +    """
    +    if stride is None:
    +        stride = torch.jit.annotate(List[int], [])
    +    return torch._C._nn.max_pool2d_with_indices(input, kernel_size, stride, padding, dilation, ceil_mode)
    +
    +
    +def _max_pool2d(input, kernel_size, stride=None, padding=0, dilation=1,
    +                ceil_mode=False, return_indices=False):
    +    # type: (Tensor, BroadcastingList2[int], Optional[BroadcastingList2[int]], BroadcastingList2[int], BroadcastingList2[int], bool, bool) -> Tensor  # noqa
    +    if stride is None:
    +        stride = torch.jit.annotate(List[int], [])
    +    return torch.max_pool2d(
    +        input, kernel_size, stride, padding, dilation, ceil_mode)
    +
    +max_pool2d = boolean_dispatch(
    +    arg_name='return_indices',
    +    arg_index=6,
    +    default=False,
    +    if_true=max_pool2d_with_indices,
    +    if_false=_max_pool2d,
    +    module_name=__name__,
    +    func_name='max_pool2d')
    +
    +
    +def max_pool3d_with_indices(input, kernel_size, stride=None, padding=0,
    +                            dilation=1, ceil_mode=False, return_indices=False):
    +    # type: (Tensor, BroadcastingList3[int], Optional[BroadcastingList3[int]], BroadcastingList3[int], BroadcastingList3[int], bool, bool) -> Tuple[Tensor, Tensor]  # noqa
    +    r"""Applies a 3D max pooling over an input signal composed of several input
    +    planes.
    +
    +    See :class:`~torch.nn.MaxPool3d` for details.
    +    """
    +    if stride is None:
    +        stride = torch.jit.annotate(List[int], [])
    +    return torch._C._nn.max_pool3d_with_indices(
    +        input, kernel_size, stride, padding, dilation, ceil_mode)
    +
    +
    +def _max_pool3d(input, kernel_size, stride=None, padding=0, dilation=1,
    +                ceil_mode=False, return_indices=False):
    +    # type: (Tensor, BroadcastingList3[int], Optional[BroadcastingList3[int]], BroadcastingList3[int], BroadcastingList3[int], bool, bool) -> Tensor  # noqa
    +    if stride is None:
    +        stride = torch.jit.annotate(List[int], [])
    +    return torch.max_pool3d(
    +        input, kernel_size, stride, padding, dilation, ceil_mode)
    +
    +max_pool3d = boolean_dispatch(
    +    arg_name='return_indices',
    +    arg_index=6,
    +    default=False,
    +    if_true=max_pool3d_with_indices,
    +    if_false=_max_pool3d,
    +    module_name=__name__,
    +    func_name='max_pool3d')
    +
    +
    +def _unpool_output_size(input, kernel_size, stride, padding, output_size):
    +    # type: (Tensor, List[int], List[int], List[int], Optional[List[int]]) -> List[int]
    +    input_size = input.size()
    +    default_size = torch.jit.annotate(List[int], [])
    +    for d in range(len(kernel_size)):
    +        default_size.append((input_size[d + 2] - 1) * stride[d] +
    +                            kernel_size[d] - 2 * padding[d])
    +    if output_size is None:
    +        ret = default_size
    +    else:
    +        if len(output_size) == len(kernel_size) + 2:
    +            output_size = output_size[2:]
    +        if len(output_size) != len(kernel_size):
    +            raise ValueError("output_size should be a sequence containing "
    +                             "{} or {} elements, but it has a length of '{}'"
    +                             .format(len(kernel_size), len(kernel_size) + 2,
    +                                     len(output_size)))
    +        for d in range(len(kernel_size)):
    +            min_size = default_size[d] - stride[d]
    +            max_size = default_size[d] + stride[d]
    +            if not (min_size < output_size[d] < max_size):
    +                raise ValueError(
    +                    'invalid output_size "{}" (dim {} must be between {} and {})'
    +                    .format(output_size, d, min_size, max_size))
    +
    +        ret = output_size
    +    return ret
    +
    +
    +
+def max_unpool1d(input, indices, kernel_size, stride=None, padding=0,
+                 output_size=None):
+    # type: (Tensor, Tensor, BroadcastingList1[int], Optional[BroadcastingList1[int]], BroadcastingList1[int], Optional[BroadcastingList1[int]]) -> Tensor  # noqa
+    r"""Computes a partial inverse of :class:`MaxPool1d`.
+
+    See :class:`~torch.nn.MaxUnpool1d` for details.
+    """
+    kernel_size = _single(kernel_size)
+    if stride is not None:
+        _stride = _single(stride)
+    else:
+        _stride = kernel_size
+    padding = _single(padding)
+    output_size = _unpool_output_size(input, kernel_size, _stride, padding,
+                                      output_size)
+    if isinstance(output_size, list):
+        output_size = output_size + [1]
+    else:
+        output_size = output_size + (1,)
+    return torch._C._nn.max_unpool2d(input.unsqueeze(3), indices.unsqueeze(3),
+                                     output_size).squeeze(3)
+
+
+def max_unpool2d(input, indices, kernel_size, stride=None, padding=0,
+                 output_size=None):
+    # type: (Tensor, Tensor, BroadcastingList2[int], Optional[BroadcastingList2[int]], BroadcastingList2[int], Optional[BroadcastingList2[int]]) -> Tensor  # noqa
+    r"""Computes a partial inverse of :class:`MaxPool2d`.
+
+    See :class:`~torch.nn.MaxUnpool2d` for details.
+    """
+    kernel_size = _pair(kernel_size)
+    if stride is not None:
+        _stride = _pair(stride)
+    else:
+        _stride = kernel_size
+    padding = _pair(padding)
+    output_size = _unpool_output_size(input, kernel_size, _stride, padding,
+                                      output_size)
+    return torch._C._nn.max_unpool2d(input, indices, output_size)
+
+
+def max_unpool3d(input, indices, kernel_size, stride=None, padding=0,
+                 output_size=None):
+    # type: (Tensor, Tensor, BroadcastingList3[int], Optional[BroadcastingList3[int]], BroadcastingList3[int], Optional[BroadcastingList3[int]]) -> Tensor  # noqa
+    r"""Computes a partial inverse of :class:`MaxPool3d`.
+
+    See :class:`~torch.nn.MaxUnpool3d` for details.
+    """
+    kernel_size = _triple(kernel_size)
+    if stride is not None:
+        _stride = _triple(stride)
+    else:
+        _stride = kernel_size
+    padding = _triple(padding)
+    output_size = _unpool_output_size(input, kernel_size, _stride, padding,
+                                      output_size)
+    return torch._C._nn.max_unpool3d(
+        input, indices, output_size, _stride, padding)
+
+
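+# A short round-trip sketch pairing max_pool2d with max_unpool2d
+# (illustrative; assumes ``import torch.nn.functional as F``):
+#
+#     >>> x = torch.arange(16.).reshape(1, 1, 4, 4)
+#     >>> out, idx = F.max_pool2d(x, 2, return_indices=True)
+#     >>> F.max_unpool2d(out, idx, 2).shape   # zeros everywhere except the maxima
+#     torch.Size([1, 1, 4, 4])
+
+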
+def lp_pool2d(input, norm_type, kernel_size, stride=None, ceil_mode=False):
+    # type: (Tensor, float, int, Optional[BroadcastingList2[int]], bool) -> Tensor
+    r"""Applies a 2D power-average pooling over an input signal composed of
+    several input planes. If the sum of all inputs to the power of `p` is
+    zero, the gradient is set to zero as well.
+
+    See :class:`~torch.nn.LPPool2d` for details.
+    """
+    kw, kh = utils._pair(kernel_size)
+    if stride is not None:
+        out = avg_pool2d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode)
+    else:
+        out = avg_pool2d(input.pow(norm_type), kernel_size, padding=0, ceil_mode=ceil_mode)
+
+    return (torch.sign(out) * relu(torch.abs(out))).mul(kw * kh).pow(1. / norm_type)
+
+
+def lp_pool1d(input, norm_type, kernel_size, stride=None, ceil_mode=False):
+    # type: (Tensor, float, int, Optional[BroadcastingList1[int]], bool) -> Tensor
+    r"""Applies a 1D power-average pooling over an input signal composed of
+    several input planes. If the sum of all inputs to the power of `p` is
+    zero, the gradient is set to zero as well.
+
+    See :class:`~torch.nn.LPPool1d` for details.
+    """
+    if stride is not None:
+        out = avg_pool1d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode)
+    else:
+        out = avg_pool1d(input.pow(norm_type), kernel_size, padding=0, ceil_mode=ceil_mode)
+
+    return (torch.sign(out) * relu(torch.abs(out))).mul(kernel_size).pow(1. / norm_type)
+
+
+def adaptive_max_pool1d_with_indices(input, output_size, return_indices=False):
+    # type: (Tensor, BroadcastingList1[int], bool) -> Tuple[Tensor, Tensor]
+    r"""Applies a 1D adaptive max pooling over an input signal composed of
+    several input planes.
+
+    See :class:`~torch.nn.AdaptiveMaxPool1d` for details and output shape.
+
+    Args:
+        output_size: the target output size (single integer)
+        return_indices: whether to return pooling indices. Default: ``False``
+    """
+    return torch.adaptive_max_pool1d(input, output_size)
+
+
+def _adaptive_max_pool1d(input, output_size, return_indices=False):
+    # type: (Tensor, BroadcastingList1[int], bool) -> Tensor
+    return adaptive_max_pool1d_with_indices(input, output_size)[0]
+
+adaptive_max_pool1d = boolean_dispatch(
+    arg_name='return_indices',
+    arg_index=2,
+    default=False,
+    if_true=adaptive_max_pool1d_with_indices,
+    if_false=_adaptive_max_pool1d,
+    module_name=__name__,
+    func_name='adaptive_max_pool1d')
+
+
+def adaptive_max_pool2d_with_indices(input, output_size, return_indices=False):
+    # type: (Tensor, BroadcastingList2[int], bool) -> Tuple[Tensor, Tensor]
+    r"""Applies a 2D adaptive max pooling over an input signal composed of
+    several input planes.
+
+    See :class:`~torch.nn.AdaptiveMaxPool2d` for details and output shape.
+
+    Args:
+        output_size: the target output size (single integer or
+            double-integer tuple)
+        return_indices: whether to return pooling indices. Default: ``False``
+    """
+    output_size = _list_with_default(output_size, input.size())
+    return torch._C._nn.adaptive_max_pool2d(input, output_size)
+
+
+def _adaptive_max_pool2d(input, output_size, return_indices=False):
+    # type: (Tensor, BroadcastingList2[int], bool) -> Tensor
+    return adaptive_max_pool2d_with_indices(input, output_size)[0]
+
+adaptive_max_pool2d = boolean_dispatch(
+    arg_name='return_indices',
+    arg_index=2,
+    default=False,
+    if_true=adaptive_max_pool2d_with_indices,
+    if_false=_adaptive_max_pool2d,
+    module_name=__name__,
+    func_name='adaptive_max_pool2d')
+
+
+def adaptive_max_pool3d_with_indices(input, output_size, return_indices=False):
+    # type: (Tensor, BroadcastingList3[int], bool) -> Tuple[Tensor, Tensor]
+    r"""Applies a 3D adaptive max pooling over an input signal composed of
+    several input planes.
+
+    See :class:`~torch.nn.AdaptiveMaxPool3d` for details and output shape.
+
+    Args:
+        output_size: the target output size (single integer or
+            triple-integer tuple)
+        return_indices: whether to return pooling indices. Default: ``False``
+    """
+    output_size = _list_with_default(output_size, input.size())
+    return torch._C._nn.adaptive_max_pool3d(input, output_size)
+
+
+def _adaptive_max_pool3d(input, output_size, return_indices=False):
+    # type: (Tensor, BroadcastingList3[int], bool) -> Tensor
+    return adaptive_max_pool3d_with_indices(input, output_size)[0]
+
+adaptive_max_pool3d = boolean_dispatch(
+    arg_name='return_indices',
+    arg_index=2,
+    default=False,
+    if_true=adaptive_max_pool3d_with_indices,
+    if_false=_adaptive_max_pool3d,
+    module_name=__name__,
+    func_name='adaptive_max_pool3d')
+
+
+adaptive_avg_pool1d = _add_docstr(torch.adaptive_avg_pool1d, r"""
+adaptive_avg_pool1d(input, output_size) -> Tensor
+
+Applies a 1D adaptive average pooling over an input signal composed of
+several input planes.
+
+See :class:`~torch.nn.AdaptiveAvgPool1d` for details and output shape.
+
+Args:
+    output_size: the target output size (single integer)
+""")
+
+
+def adaptive_avg_pool2d(input, output_size):
+    # type: (Tensor, BroadcastingList2[int]) -> Tensor
+    r"""
+    Applies a 2D adaptive average pooling over an input signal composed of
+    several input planes.
+
+    See :class:`~torch.nn.AdaptiveAvgPool2d` for details and output shape.
+
+    Args:
+        output_size: the target output size (single integer or
+            double-integer tuple)
+    """
+    _output_size = _list_with_default(output_size, input.size())
+    return torch._C._nn.adaptive_avg_pool2d(input, _output_size)
+
+
+def adaptive_avg_pool3d(input, output_size):
+    # type: (Tensor, BroadcastingList3[int]) -> Tensor
+    r"""
+    Applies a 3D adaptive average pooling over an input signal composed of
+    several input planes.
+
+    See :class:`~torch.nn.AdaptiveAvgPool3d` for details and output shape.
+
+    Args:
+        output_size: the target output size (single integer or
+            triple-integer tuple)
+    """
+    _output_size = _list_with_default(output_size, input.size())
+    return torch._C._nn.adaptive_avg_pool3d(input, _output_size)
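+
+
+# A quick shape sketch for the adaptive pools (illustrative): the output
+# spatial size is whatever you ask for, independent of the input size.
+#
+#     >>> x = torch.randn(1, 64, 10, 9)
+#     >>> F.adaptive_avg_pool2d(x, (5, 7)).shape
+#     torch.Size([1, 64, 5, 7])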
+
+
+# Activation functions
+def dropout(input, p=0.5, training=True, inplace=False):
+    # type: (Tensor, float, bool, bool) -> Tensor
+    r"""
+    During training, randomly zeroes some of the elements of the input
+    tensor with probability :attr:`p` using samples from a Bernoulli
+    distribution.
+
+    See :class:`~torch.nn.Dropout` for details.
+
+    Args:
+        p: probability of an element to be zeroed. Default: 0.5
+        training: apply dropout if ``True``. Default: ``True``
+        inplace: If set to ``True``, will do this operation in-place. Default: ``False``
+    """
+    if p < 0. or p > 1.:
+        raise ValueError("dropout probability has to be between 0 and 1, "
+                         "but got {}".format(p))
+    return (_VF.dropout_(input, p, training)
+            if inplace
+            else _VF.dropout(input, p, training))
+
+
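+# A small sketch of the training flag (illustrative): at eval time dropout is
+# the identity; at train time the survivors are scaled by 1/(1-p).
+#
+#     >>> x = torch.ones(5)
+#     >>> F.dropout(x, p=0.5, training=False)
+#     tensor([1., 1., 1., 1., 1.])
+#     >>> F.dropout(x, p=0.5, training=True)   # zeros and 2s, at random
+
+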
+def alpha_dropout(input, p=0.5, training=False, inplace=False):
+    # type: (Tensor, float, bool, bool) -> Tensor
+    r"""Applies alpha dropout to the input.
+
+    See :class:`~torch.nn.AlphaDropout` for details.
+    """
+    if p < 0. or p > 1.:
+        raise ValueError("dropout probability has to be between 0 and 1, "
+                         "but got {}".format(p))
+    return (_VF.alpha_dropout_(input, p, training)
+            if inplace
+            else _VF.alpha_dropout(input, p, training))
+
+
+def dropout2d(input, p=0.5, training=True, inplace=False):
+    # type: (Tensor, float, bool, bool) -> Tensor
+    r"""
+    Randomly zero out entire channels of the input tensor (a channel is a 2D
+    feature map, e.g., the :math:`j`-th channel of the :math:`i`-th sample in
+    the batched input is a 2D tensor :math:`\text{input}[i, j]`).
+    Each channel will be zeroed out independently on every forward call with
+    probability :attr:`p` using samples from a Bernoulli distribution.
+
+    See :class:`~torch.nn.Dropout2d` for details.
+
+    Args:
+        p: probability of a channel to be zeroed. Default: 0.5
+        training: apply dropout if ``True``. Default: ``True``
+        inplace: If set to ``True``, will do this operation in-place. Default: ``False``
+    """
+    if p < 0. or p > 1.:
+        raise ValueError("dropout probability has to be between 0 and 1, "
+                         "but got {}".format(p))
+    return (_VF.feature_dropout_(input, p, training)
+            if inplace
+            else _VF.feature_dropout(input, p, training))
+
+
+def dropout3d(input, p=0.5, training=True, inplace=False):
+    # type: (Tensor, float, bool, bool) -> Tensor
+    r"""
+    Randomly zero out entire channels of the input tensor (a channel is a 3D
+    feature map, e.g., the :math:`j`-th channel of the :math:`i`-th sample in
+    the batched input is a 3D tensor :math:`\text{input}[i, j]`).
+    Each channel will be zeroed out independently on every forward call with
+    probability :attr:`p` using samples from a Bernoulli distribution.
+
+    See :class:`~torch.nn.Dropout3d` for details.
+
+    Args:
+        p: probability of a channel to be zeroed. Default: 0.5
+        training: apply dropout if ``True``. Default: ``True``
+        inplace: If set to ``True``, will do this operation in-place. Default: ``False``
+    """
+    # This is 100% the same code as dropout2d. We duplicate this code so that
+    # stack traces are not confusing.
+    if p < 0. or p > 1.:
+        raise ValueError("dropout probability has to be between 0 and 1, "
+                         "but got {}".format(p))
+    return (_VF.feature_dropout_(input, p, training)
+            if inplace
+            else _VF.feature_dropout(input, p, training))
+
+
+def feature_alpha_dropout(input, p=0.5, training=False, inplace=False):
+    # type: (Tensor, float, bool, bool) -> Tensor
+    if p < 0. or p > 1.:
+        raise ValueError("dropout probability has to be between 0 and 1, "
+                         "but got {}".format(p))
+    return (_VF.feature_alpha_dropout_(input, p, training)
+            if inplace
+            else _VF.feature_alpha_dropout(input, p, training))
+
+
+def threshold(input, threshold, value, inplace=False):
+    # type: (Tensor, float, float, bool) -> Tensor
+    r"""Thresholds each element of the input Tensor.
+
+    See :class:`~torch.nn.Threshold` for more details.
+    """
+    if inplace:
+        result = _VF.threshold_(input, threshold, value)
+    else:
+        result = _VF.threshold(input, threshold, value)
+    return result
+
+
+threshold_ = _add_docstr(_VF.threshold_, r"""
+threshold_(input, threshold, value) -> Tensor
+
+In-place version of :func:`~threshold`.
+""")
+
+
+def relu(input, inplace=False):
+    # type: (Tensor, bool) -> Tensor
+    r"""relu(input, inplace=False) -> Tensor
+
+    Applies the rectified linear unit function element-wise. See
+    :class:`~torch.nn.ReLU` for more details.
+    """
+    if inplace:
+        result = torch.relu_(input)
+    else:
+        result = torch.relu(input)
+    return result
+
+
+relu_ = _add_docstr(torch.relu_, r"""
+relu_(input) -> Tensor
+
+In-place version of :func:`~relu`.
+""")
+
+
+def glu(input, dim=-1):
+    # type: (Tensor, int) -> Tensor
+    r"""
+    glu(input, dim=-1) -> Tensor
+
+    The gated linear unit. Computes:
+
+    .. math ::
+        \text{GLU}(a, b) = a \otimes \sigma(b)
+
+    where `input` is split in half along `dim` to form `a` and `b`, :math:`\sigma`
+    is the sigmoid function and :math:`\otimes` is the element-wise product between matrices.
+
+    See `Language Modeling with Gated Convolutional Networks <https://arxiv.org/abs/1612.08083>`_.
+
+    Args:
+        input (Tensor): input tensor
+        dim (int): dimension on which to split the input. Default: -1
+    """
+    if input.dim() == 0:
+        raise RuntimeError("glu does not support scalars because halving size must be even")
+    return torch._C._nn.glu(input, dim)
+
+
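+# A small equivalence sketch for glu (illustrative): the first half gates the
+# second half through a sigmoid.
+#
+#     >>> x = torch.randn(4, 6)
+#     >>> a, b = x.chunk(2, dim=-1)
+#     >>> torch.allclose(F.glu(x, dim=-1), a * torch.sigmoid(b))
+#     True
+
+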
+def hardtanh(input, min_val=-1., max_val=1., inplace=False):
+    # type: (Tensor, float, float, bool) -> Tensor
+    r"""
+    hardtanh(input, min_val=-1., max_val=1., inplace=False) -> Tensor
+
+    Applies the HardTanh function element-wise. See :class:`~torch.nn.Hardtanh` for more
+    details.
+    """
+    if inplace:
+        result = torch._C._nn.hardtanh_(input, min_val, max_val)
+    else:
+        result = torch._C._nn.hardtanh(input, min_val, max_val)
+    return result
+
+
+hardtanh_ = _add_docstr(torch._C._nn.hardtanh_, r"""
+hardtanh_(input, min_val=-1., max_val=1.) -> Tensor
+
+In-place version of :func:`~hardtanh`.
+""")
+
+
+def relu6(input, inplace=False):
+    # type: (Tensor, bool) -> Tensor
+    r"""relu6(input, inplace=False) -> Tensor
+
+    Applies the element-wise function :math:`\text{ReLU6}(x) = \min(\max(0,x), 6)`.
+
+    See :class:`~torch.nn.ReLU6` for more details.
+    """
+    return hardtanh(input, 0., 6., inplace)
+
+
+def elu(input, alpha=1., inplace=False):
+    # type: (Tensor, float, bool) -> Tensor
+    r"""Applies element-wise,
+    :math:`\text{ELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x) - 1))`.
+
+    See :class:`~torch.nn.ELU` for more details.
+    """
+    if inplace:
+        result = torch._C._nn.elu_(input, alpha)
+    else:
+        result = torch._C._nn.elu(input, alpha)
+    return result
+
+
+elu_ = _add_docstr(torch._C._nn.elu_, r"""
+elu_(input, alpha=1.) -> Tensor
+
+In-place version of :func:`~elu`.
+""")
+
+
+def selu(input, inplace=False):
+    # type: (Tensor, bool) -> Tensor
+    r"""selu(input, inplace=False) -> Tensor
+
+    Applies element-wise,
+    :math:`\text{SELU}(x) = scale * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1)))`,
+    with :math:`\alpha=1.6732632423543772848170429916717` and
+    :math:`scale=1.0507009873554804934193349852946`.
+
+    See :class:`~torch.nn.SELU` for more details.
+    """
+    if inplace:
+        result = torch.selu_(input)
+    else:
+        result = torch.selu(input)
+    return result
+
+
+selu_ = _add_docstr(torch.selu_, r"""
+selu_(input) -> Tensor
+
+In-place version of :func:`~selu`.
+""")
+
+
+def celu(input, alpha=1., inplace=False):
+    # type: (Tensor, float, bool) -> Tensor
+    r"""celu(input, alpha=1., inplace=False) -> Tensor
+
+    Applies element-wise,
+    :math:`\text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x/\alpha) - 1))`.
+
+    See :class:`~torch.nn.CELU` for more details.
+    """
+    if inplace:
+        result = torch.celu_(input, alpha)
+    else:
+        result = torch.celu(input, alpha)
+    return result
+
+
+celu_ = _add_docstr(torch.celu_, r"""
+celu_(input, alpha=1.) -> Tensor
+
+In-place version of :func:`~celu`.
+""")
+
+
+def leaky_relu(input, negative_slope=0.01, inplace=False):
+    # type: (Tensor, float, bool) -> Tensor
+    r"""
+    leaky_relu(input, negative_slope=0.01, inplace=False) -> Tensor
+
+    Applies element-wise,
+    :math:`\text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x)`
+
+    See :class:`~torch.nn.LeakyReLU` for more details.
+    """
+    if inplace:
+        result = torch._C._nn.leaky_relu_(input, negative_slope)
+    else:
+        result = torch._C._nn.leaky_relu(input, negative_slope)
+    return result
+
+
+leaky_relu_ = _add_docstr(torch._C._nn.leaky_relu_, r"""
+leaky_relu_(input, negative_slope=0.01) -> Tensor
+
+In-place version of :func:`~leaky_relu`.
+""")
+
+
+def prelu(input, weight):
+    # type: (Tensor, Tensor) -> Tensor
+    r"""prelu(input, weight) -> Tensor
+
+    Applies element-wise the function
+    :math:`\text{PReLU}(x) = \max(0,x) + \text{weight} * \min(0,x)` where weight is a
+    learnable parameter.
+
+    See :class:`~torch.nn.PReLU` for more details.
+    """
+    return torch.prelu(input, weight)
+
+
+def rrelu(input, lower=1. / 8, upper=1. / 3, training=False, inplace=False):
+    # type: (Tensor, float, float, bool, bool) -> Tensor
+    r"""rrelu(input, lower=1./8, upper=1./3, training=False, inplace=False) -> Tensor
+
+    Randomized leaky ReLU.
+
+    See :class:`~torch.nn.RReLU` for more details.
+    """
+    if inplace:
+        result = torch.rrelu_(input, lower, upper, training)
+    else:
+        result = torch.rrelu(input, lower, upper, training)
+    return result
+
+
+rrelu_ = _add_docstr(torch.rrelu_, r"""
+rrelu_(input, lower=1./8, upper=1./3, training=False) -> Tensor
+
+In-place version of :func:`~rrelu`.
+""")
+
+
+logsigmoid = _add_docstr(torch._C._nn.log_sigmoid, r"""
+logsigmoid(input) -> Tensor
+
+Applies element-wise :math:`\text{LogSigmoid}(x_i) = \log \left(\frac{1}{1 + \exp(-x_i)}\right)`
+
+See :class:`~torch.nn.LogSigmoid` for more details.
+""")
+
+
+def gelu(input):
+    r"""gelu(input) -> Tensor
+
+    Applies element-wise the function
+    :math:`\text{GeLU}(x) = x * \Phi(x)`
+
+    where :math:`\Phi(x)` is the Cumulative Distribution Function for Gaussian Distribution.
+
+    See `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_.
+    """
+    return torch._C._nn.gelu(input)
+
+
+def hardshrink(input, lambd=0.5):
+    # type: (Tensor, float) -> Tensor
+    r"""
+    hardshrink(input, lambd=0.5) -> Tensor
+
+    Applies the hard shrinkage function element-wise.
+
+    See :class:`~torch.nn.Hardshrink` for more details.
+    """
+    return torch.hardshrink(input, lambd)
+
+
+def tanhshrink(input):
+    r"""tanhshrink(input) -> Tensor
+
+    Applies element-wise, :math:`\text{Tanhshrink}(x) = x - \text{Tanh}(x)`
+
+    See :class:`~torch.nn.Tanhshrink` for more details.
+    """
+    return input - input.tanh()
+
+
+def softsign(input):
+    r"""softsign(input) -> Tensor
+
+    Applies element-wise, the function :math:`\text{SoftSign}(x) = \frac{x}{1 + |x|}`
+
+    See :class:`~torch.nn.Softsign` for more details.
+    """
+    return input / (input.abs() + 1)
+
+
+softplus = _add_docstr(torch._C._nn.softplus, r"""
+softplus(input, beta=1, threshold=20) -> Tensor
+""")
+
+
+def _get_softmax_dim(name, ndim, stacklevel):
+    # type: (str, int, int) -> int
+    warnings.warn("Implicit dimension choice for {} has been deprecated. "
+                  "Change the call to include dim=X as an argument.".format(name), stacklevel=stacklevel)
+    if ndim == 0 or ndim == 1 or ndim == 3:
+        ret = 0
+    else:
+        ret = 1
+    return ret
+
+
+def softmin(input, dim=None, _stacklevel=3, dtype=None):
+    # type: (Tensor, Optional[int], int, Optional[int]) -> Tensor
+    r"""Applies a softmin function.
+
+    Note that :math:`\text{Softmin}(x) = \text{Softmax}(-x)`. See softmax definition for mathematical formula.
+
+    See :class:`~torch.nn.Softmin` for more details.
+
+    Arguments:
+        input (Tensor): input
+        dim (int): A dimension along which softmin will be computed (so every slice
+            along dim will sum to 1).
+        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
+            If specified, the input tensor is cast to :attr:`dtype` before the operation
+            is performed. This is useful for preventing data type overflows. Default: None.
+    """
+    if dim is None:
+        dim = _get_softmax_dim('softmin', input.dim(), _stacklevel)
+    if dtype is None:
+        ret = (-input).softmax(dim)
+    else:
+        ret = (-input).softmax(dim, dtype=dtype)
+    return ret
+
+
+def softmax(input, dim=None, _stacklevel=3, dtype=None):
+    # type: (Tensor, Optional[int], int, Optional[int]) -> Tensor
+    r"""Applies a softmax function.
+
+    Softmax is defined as:
+
+    :math:`\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}`
+
+    It is applied to all slices along dim, and will re-scale them so that the elements
+    lie in the range `[0, 1]` and sum to 1.
+
+    See :class:`~torch.nn.Softmax` for more details.
+
+    Arguments:
+        input (Tensor): input
+        dim (int): A dimension along which softmax will be computed.
+        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
+            If specified, the input tensor is cast to :attr:`dtype` before the operation
+            is performed. This is useful for preventing data type overflows. Default: None.
+
+    .. note::
+        This function doesn't work directly with NLLLoss,
+        which expects the Log to be computed between the Softmax and itself.
+        Use log_softmax instead (it's faster and has better numerical properties).
+
+    """
+    if dim is None:
+        dim = _get_softmax_dim('softmax', input.dim(), _stacklevel)
+    if dtype is None:
+        ret = input.softmax(dim)
+    else:
+        ret = input.softmax(dim, dtype=dtype)
+    return ret
+
+
[docs]def gumbel_softmax(logits, tau=1, hard=False, eps=1e-10, dim=-1): + # type: (Tensor, float, bool, float, int) -> Tensor + r""" + Samples from the Gumbel-Softmax distribution (`Link 1`_, `Link 2`_) and optionally discretizes. + + Args: + logits: `[..., num_features]` unnormalized log probabilities + tau: non-negative scalar temperature + hard: if ``True``, the returned samples will be discretized as one-hot vectors, + but will be differentiated as if they were the soft samples in autograd + dim (int): A dimension along which softmax will be computed. Default: -1. + + Returns: + Sampled tensor of the same shape as `logits` from the Gumbel-Softmax distribution. + If ``hard=True``, the returned samples will be one-hot, otherwise they will + be probability distributions that sum to 1 across `dim`. + + .. note:: + This function is here for legacy reasons and may be removed from nn.functional in the future. + + .. note:: + The main trick for `hard` is to do `y_hard - y_soft.detach() + y_soft`. + + It achieves two things: + - makes the output value exactly one-hot + (since we add then subtract the y_soft value) + - makes the gradient equal to the y_soft gradient + (since we strip all other gradients) + + Examples:: + >>> logits = torch.randn(20, 32) + >>> # Sample soft categorical using reparametrization trick: + >>> F.gumbel_softmax(logits, tau=1, hard=False) + >>> # Sample hard categorical using "Straight-through" trick: + >>> F.gumbel_softmax(logits, tau=1, hard=True) + + .. _Link 1: + https://arxiv.org/abs/1611.00712 + .. _Link 2: + https://arxiv.org/abs/1611.01144 + """ + + if eps != 1e-10: + warnings.warn("`eps` parameter is deprecated and has no effect.") + + gumbels = -torch.empty_like(logits).exponential_().log() # ~Gumbel(0,1) + gumbels = (logits + gumbels) / tau # ~Gumbel(logits,tau) + y_soft = gumbels.softmax(dim) + + if hard: + # Straight through. + index = y_soft.max(dim, keepdim=True)[1] + y_hard = torch.zeros_like(logits).scatter_(dim, index, 1.0) + ret = y_hard - y_soft.detach() + y_soft + else: + # Reparametrization trick. + ret = y_soft + return ret
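Editor's note: a short sketch (not part of the original source, same import assumptions as above) of the straight-through behaviour described in the note: with ``hard=True`` the forward value is one-hot, yet gradients still reach ``logits`` through the soft sample:

    >>> logits = torch.randn(4, 5, requires_grad=True)
    >>> y = F.gumbel_softmax(logits, tau=0.5, hard=True)
    >>> y.sum(dim=-1)        # rows sum to 1 (one-hot up to rounding)
    >>> y.sum().backward()   # differentiable despite the discretization
    >>> logits.grad.shape    # -> torch.Size([4, 5])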
    + + +
[docs]def log_softmax(input, dim=None, _stacklevel=3, dtype=None): + # type: (Tensor, Optional[int], int, Optional[int]) -> Tensor + r"""Applies a softmax followed by a logarithm. + + While mathematically equivalent to log(softmax(x)), doing these two + operations separately is slower and numerically unstable. This function + uses an alternative formulation to compute the output and gradient correctly. + + See :class:`~torch.nn.LogSoftmax` for more details. + + Arguments: + input (Tensor): input + dim (int): A dimension along which log_softmax will be computed. + dtype (:class:`torch.dtype`, optional): the desired data type of the returned tensor. + If specified, the input tensor is cast to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + """ + if dim is None: + dim = _get_softmax_dim('log_softmax', input.dim(), _stacklevel) + if dtype is None: + ret = input.log_softmax(dim) + else: + ret = input.log_softmax(dim, dtype=dtype) + return ret
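Editor's note: a sketch (not part of the original source) of the numerical-stability claim above. With large logits, ``log(softmax(x))`` underflows to ``-inf`` while ``log_softmax`` stays finite:

    >>> x = torch.tensor([[200.0, 0.0]])
    >>> torch.log(F.softmax(x, dim=1))   # -> tensor([[0., -inf]]); exp(-200) underflows to 0
    >>> F.log_softmax(x, dim=1)          # -> tensor([[0., -200.]]); computed stably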
+ + +softshrink = _add_docstr(torch._C._nn.softshrink, r""" +softshrink(input, lambd=0.5) -> Tensor + +Applies the soft shrinkage function element-wise + +See :class:`~torch.nn.Softshrink` for more details. +""") + + +
    [docs]def tanh(input): + r"""tanh(input) -> Tensor + + Applies element-wise, + :math:`\text{Tanh}(x) = \tanh(x) = \frac{\exp(x) - \exp(-x)}{\exp(x) + \exp(-x)}` + + See :class:`~torch.nn.Tanh` for more details. + """ + warnings.warn("nn.functional.tanh is deprecated. Use torch.tanh instead.") + return input.tanh()
    + + +
    [docs]def sigmoid(input): + r"""sigmoid(input) -> Tensor + + Applies the element-wise function :math:`\text{Sigmoid}(x) = \frac{1}{1 + \exp(-x)}` + + See :class:`~torch.nn.Sigmoid` for more details. + """ + warnings.warn("nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.") + return input.sigmoid()
    + + +
    [docs]def linear(input, weight, bias=None): + # type: (Tensor, Tensor, Optional[Tensor]) -> Tensor + r""" + Applies a linear transformation to the incoming data: :math:`y = xA^T + b`. + + Shape: + + - Input: :math:`(N, *, in\_features)` where `*` means any number of + additional dimensions + - Weight: :math:`(out\_features, in\_features)` + - Bias: :math:`(out\_features)` + - Output: :math:`(N, *, out\_features)` + """ + if input.dim() == 2 and bias is not None: + # fused op is marginally faster + ret = torch.addmm(bias, input, weight.t()) + else: + output = input.matmul(weight.t()) + if bias is not None: + output += bias + ret = output + return ret
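Editor's note: a shape sketch for ``linear`` (not part of the original source); the result matches the plain matmul formulation up to floating-point rounding:

    >>> inp = torch.randn(3, 4)
    >>> weight = torch.randn(5, 4)       # (out_features, in_features)
    >>> bias = torch.randn(5)
    >>> out = F.linear(inp, weight, bias)
    >>> out.shape                        # -> torch.Size([3, 5])
    >>> torch.allclose(out, inp.matmul(weight.t()) + bias)   # -> True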
    + + +
[docs]def bilinear(input1, input2, weight, bias=None): + # type: (Tensor, Tensor, Tensor, Optional[Tensor]) -> Tensor + r""" + Applies a bilinear transformation to the incoming data: + :math:`y = x_1^T A x_2 + b`. + + See :class:`~torch.nn.Bilinear` for more details. + """ + return torch.bilinear(input1, input2, weight, bias)
    + + +def _no_grad_embedding_renorm_(weight, input, max_norm, norm_type): + # type: (Tensor, Tensor, float, float) -> Tensor + with torch.no_grad(): + torch.embedding_renorm_(weight, input, max_norm, norm_type) + + +
[docs]def embedding(input, weight, padding_idx=None, max_norm=None, norm_type=2., + scale_grad_by_freq=False, sparse=False): + # type: (Tensor, Tensor, Optional[int], Optional[float], float, bool, bool) -> Tensor + r"""A simple lookup table that looks up embeddings in a fixed dictionary, with entries of a fixed size. + + This module is often used to retrieve word embeddings using indices. + The input to the module is a list of indices, and the embedding matrix, + and the output is the corresponding word embeddings. + + See :class:`torch.nn.Embedding` for more details. + + Args: + input (LongTensor): Tensor containing indices into the embedding matrix + weight (Tensor): The embedding matrix with number of rows equal to the maximum possible index + 1, + and number of columns equal to the embedding size + padding_idx (int, optional): If given, pads the output with the embedding vector at :attr:`padding_idx` + (initialized to zeros) whenever it encounters the index. + max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm` + is renormalized to have norm :attr:`max_norm`. + Note: this will modify :attr:`weight` in-place. + norm_type (float, optional): The ``p`` of the ``p``-norm to compute for the :attr:`max_norm` option. Default ``2``. + scale_grad_by_freq (boolean, optional): If given, this will scale gradients by the inverse of the frequency of + the words in the mini-batch. Default ``False``. + sparse (bool, optional): If ``True``, the gradient w.r.t. :attr:`weight` will be a sparse tensor. See Notes under + :class:`torch.nn.Embedding` for more details regarding sparse gradients. + + Shape: + - Input: LongTensor of arbitrary shape containing the indices to extract + - Weight: Embedding matrix of floating point type with shape `(V, embedding_dim)`, + where V = maximum index + 1 and embedding_dim = the embedding size + - Output: `(*, embedding_dim)`, where `*` is the input shape + + Examples:: + + >>> # a batch of 2 samples of 4 indices each + >>> input = torch.tensor([[1,2,4,5],[4,3,2,9]]) + >>> # an embedding matrix containing 10 tensors of size 3 + >>> embedding_matrix = torch.rand(10, 3) + >>> F.embedding(input, embedding_matrix) + tensor([[[ 0.8490, 0.9625, 0.6753], + [ 0.9666, 0.7761, 0.6108], + [ 0.6246, 0.9751, 0.3618], + [ 0.4161, 0.2419, 0.7383]], + + [[ 0.6246, 0.9751, 0.3618], + [ 0.0237, 0.7794, 0.0528], + [ 0.9666, 0.7761, 0.6108], + [ 0.3385, 0.8612, 0.1867]]]) + + >>> # example with padding_idx + >>> weights = torch.rand(10, 3) + >>> weights[0, :].zero_() + >>> embedding_matrix = weights + >>> input = torch.tensor([[0,2,0,5]]) + >>> F.embedding(input, embedding_matrix, padding_idx=0) + tensor([[[ 0.0000, 0.0000, 0.0000], + [ 0.5609, 0.5384, 0.8720], + [ 0.0000, 0.0000, 0.0000], + [ 0.6262, 0.2438, 0.7471]]]) + """ + if padding_idx is not None: + if padding_idx > 0: + assert padding_idx < weight.size(0), 'Padding_idx must be within num_embeddings' + elif padding_idx < 0: + assert padding_idx >= -weight.size(0), 'Padding_idx must be within num_embeddings' + padding_idx = weight.size(0) + padding_idx + else: + padding_idx = -1 + if max_norm is not None: + # `embedding_renorm_` will call .contiguous() on input anyway, so we + # call it here and take advantage of the improved locality in the + # `embedding` call below too. 
+ input = input.contiguous() + # XXX: equivalent to + # with torch.no_grad(): + # torch.embedding_renorm_ + # remove once script supports set_grad_enabled + _no_grad_embedding_renorm_(weight, input, max_norm, norm_type) + return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
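Editor's note: a sketch (not part of the original source) of the in-place ``max_norm`` behaviour noted in the docstring: only the rows that are actually looked up get renormalized:

    >>> weight = torch.randn(10, 3) * 10        # rows with large norms
    >>> idx = torch.tensor([1, 2])
    >>> _ = F.embedding(idx, weight, max_norm=1.0)
    >>> weight[idx].norm(dim=1)                 # looked-up rows now have norm <= 1
    >>> weight[0].norm()                        # other rows keep their original norm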
    + + +
    [docs]def embedding_bag(input, weight, offsets=None, max_norm=None, norm_type=2, + scale_grad_by_freq=False, mode='mean', sparse=False, + per_sample_weights=None): + # type: (Tensor, Tensor, Optional[Tensor], Optional[float], float, bool, str, bool, Optional[Tensor]) -> Tensor + r"""Computes sums, means or maxes of `bags` of embeddings, without instantiating the + intermediate embeddings. + + See :class:`torch.nn.EmbeddingBag` for more details. + + .. include:: cuda_deterministic_backward.rst + + Args: + input (LongTensor): Tensor containing bags of indices into the embedding matrix + weight (Tensor): The embedding matrix with number of rows equal to the maximum possible index + 1, + and number of columns equal to the embedding size + offsets (LongTensor, optional): Only used when :attr:`input` is 1D. :attr:`offsets` determines + the starting index position of each bag (sequence) in :attr:`input`. + max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm` + is renormalized to have norm :attr:`max_norm`. + Note: this will modify :attr:`weight` in-place. + norm_type (float, optional): The ``p`` in the ``p``-norm to compute for the :attr:`max_norm` option. + Default ``2``. + scale_grad_by_freq (boolean, optional): if given, this will scale gradients by the inverse of frequency of + the words in the mini-batch. Default ``False``. + Note: this option is not supported when ``mode="max"``. + mode (string, optional): ``"sum"``, ``"mean"`` or ``"max"``. Specifies the way to reduce the bag. + Default: ``"mean"`` + sparse (bool, optional): if ``True``, gradient w.r.t. :attr:`weight` will be a sparse tensor. See Notes under + :class:`torch.nn.Embedding` for more details regarding sparse gradients. + Note: this option is not supported when ``mode="max"``. + per_sample_weights (Tensor, optional): a tensor of float / double weights, or None + to indicate all weights should be taken to be 1. If specified, :attr:`per_sample_weights` + must have exactly the same shape as input and is treated as having the same + :attr:`offsets`, if those are not None. + + + Shape: + + - :attr:`input` (LongTensor) and :attr:`offsets` (LongTensor, optional) + + - If :attr:`input` is 2D of shape `(B, N)`, + + it will be treated as ``B`` bags (sequences) each of fixed length ``N``, and + this will return ``B`` values aggregated in a way depending on the :attr:`mode`. + :attr:`offsets` is ignored and required to be ``None`` in this case. + + - If :attr:`input` is 1D of shape `(N)`, + + it will be treated as a concatenation of multiple bags (sequences). + :attr:`offsets` is required to be a 1D tensor containing the + starting index positions of each bag in :attr:`input`. Therefore, + for :attr:`offsets` of shape `(B)`, :attr:`input` will be viewed as + having ``B`` bags. Empty bags (i.e., having 0-length) will have + returned vectors filled by zeros. + + - :attr:`weight` (Tensor): the learnable weights of the module of + shape `(num_embeddings, embedding_dim)` + + - :attr:`per_sample_weights` (Tensor, optional). Has the same shape as + :attr:`input`. 
+ + - :attr:`output`: aggregated embedding values of shape `(B, embedding_dim)` + + Examples:: + + >>> # an Embedding module containing 10 tensors of size 3 + >>> embedding_matrix = torch.rand(10, 3) + >>> # a batch of 2 samples of 4 indices each + >>> input = torch.tensor([1,2,4,5,4,3,2,9]) + >>> offsets = torch.tensor([0,4]) + >>> F.embedding_bag(input, embedding_matrix, offsets) + tensor([[ 0.3397, 0.3552, 0.5545], + [ 0.5893, 0.4386, 0.5882]]) + """ + # Check for backward compatibility. + # Used to be embedding_bag(weight, input, ...) + # Now is embedding_bag(input, weight, ...) + if weight.dtype == torch.long and input.is_floating_point(): + warnings.warn("Argument order of nn.functional.embedding_bag was changed. " + "Usage `embedding_bag(weight, input, ...)` is deprecated, " + "and should now be `embedding_bag(input, weight, ...)`.") + weight, input = input, weight + + if per_sample_weights is not None and input.size() != per_sample_weights.size(): + raise ValueError("embedding_bag: If per_sample_weights ({}) is not None, " + "then it must have the same shape as the input ({})" + .format(per_sample_weights.shape, input.shape)) + + if input.dim() == 2: + if offsets is not None: + raise ValueError("if input is 2D, then offsets has to be None" + ", as input is treated as a mini-batch of" + " fixed length sequences. However, found " + "offsets of type {}".format(type(offsets))) + offsets = torch.arange(0, input.numel(), input.size(1), + dtype=torch.long, device=input.device) + + input = input.reshape(-1) + if per_sample_weights is not None: + per_sample_weights = per_sample_weights.reshape(-1) + elif input.dim() == 1: + if offsets is None: + raise ValueError("offsets has to be a 1D Tensor but got None") + offsets = torch.jit._unwrap_optional(offsets) + if offsets.dim() != 1: + raise ValueError("offsets has to be a 1D Tensor") + if int(offsets[0]) != 0: + raise ValueError("offsets[0] has to be 0, i.e., the first sequence " + "in the mini-batch has to start from position 0. " + "However, got {}".format(offsets[0].item())) + if int(offsets[-1]) > input.size(0): + raise ValueError("offsets[-1] can not be greater than input's length" + " ({}), but got offsets[-1] of {}" + .format(input.size(0), offsets[-1].item())) + else: + raise ValueError("input has to be 1D or 2D Tensor," + " but got Tensor of dimension {}".format(input.dim())) + offsets = torch.jit._unwrap_optional(offsets) # TODO: remove once script supports exception control flow + if mode == 'sum': + mode_enum = 0 + elif mode == 'mean': + mode_enum = 1 + elif mode == 'max': + mode_enum = 2 + + if scale_grad_by_freq: + raise ValueError("max mode does not support scaling the gradient by the frequency") + + if sparse: + raise ValueError("max mode does not support sparse weights") + + else: + mode_enum = -1 # TODO: remove once script supports exception control flow + raise ValueError("mode has to be one of sum, mean or max") + + if max_norm is not None: + # XXX: equivalent to + # with torch.no_grad(): + # torch.embedding_renorm_ + # remove once script supports set_grad_enabled + _no_grad_embedding_renorm_(weight, input, max_norm, norm_type) + + if per_sample_weights is not None and mode != 'sum': + raise NotImplementedError("embedding_bag: per_sample_weights was not None. " + "per_sample_weights is only supported for mode='sum' " + "(got mode='{}'). Please open a feature request on GitHub." + .format(mode)) + + ret, _, _, _ = torch.embedding_bag( + weight, + input, + offsets, + scale_grad_by_freq, + mode_enum, + sparse, + per_sample_weights) + return ret
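Editor's note: a sketch (not part of the original source) of the 2D/1D equivalence implemented above; a 2D input is flattened and paired with evenly spaced offsets:

    >>> w = torch.rand(10, 3)
    >>> inp2d = torch.tensor([[1, 2, 4, 5], [4, 3, 2, 9]])
    >>> out2d = F.embedding_bag(inp2d, w)        # offsets must be None for 2D input
    >>> out1d = F.embedding_bag(inp2d.reshape(-1), w, torch.tensor([0, 4]))
    >>> torch.allclose(out2d, out1d)             # -> True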
    + + +
    [docs]def batch_norm(input, running_mean, running_var, weight=None, bias=None, + training=False, momentum=0.1, eps=1e-5): + # type: (Tensor, Optional[Tensor], Optional[Tensor], Optional[Tensor], Optional[Tensor], bool, float, float) -> Tensor # noqa + r"""Applies Batch Normalization for each channel across a batch of data. + + See :class:`~torch.nn.BatchNorm1d`, :class:`~torch.nn.BatchNorm2d`, + :class:`~torch.nn.BatchNorm3d` for details. + """ + if training: + size = input.size() + # XXX: JIT script does not support the reduce from functools, and mul op is a + # builtin, which cannot be used as a value to a func yet, so rewrite this size + # check to a simple equivalent for loop + # + # TODO: make use of reduce like below when JIT is ready with the missing features: + # from operator import mul + # from functools import reduce + # + # if reduce(mul, size[2:], size[0]) == 1 + size_prods = size[0] + for i in range(len(size) - 2): + size_prods *= size[i + 2] + if size_prods == 1: + raise ValueError('Expected more than 1 value per channel when training, got input size {}'.format(size)) + + return torch.batch_norm( + input, weight, bias, running_mean, running_var, + training, momentum, eps, torch.backends.cudnn.enabled + )
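Editor's note: a usage sketch (not part of the original source) for the functional form; with ``training=True`` the running statistics are updated in place:

    >>> x = torch.randn(20, 5)
    >>> running_mean, running_var = torch.zeros(5), torch.ones(5)
    >>> y = F.batch_norm(x, running_mean, running_var, training=True, momentum=0.1)
    >>> y.mean(dim=0)       # approximately zero per channel
    >>> running_mean        # no longer all zeros: updated in place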
    + + +
    [docs]def instance_norm(input, running_mean=None, running_var=None, weight=None, + bias=None, use_input_stats=True, momentum=0.1, eps=1e-5): + # type: (Tensor, Optional[Tensor], Optional[Tensor], Optional[Tensor], Optional[Tensor], bool, float, float) -> Tensor # noqa + r"""Applies Instance Normalization for each channel in each data sample in a + batch. + + See :class:`~torch.nn.InstanceNorm1d`, :class:`~torch.nn.InstanceNorm2d`, + :class:`~torch.nn.InstanceNorm3d` for details. + """ + return torch.instance_norm( + input, weight, bias, running_mean, running_var, + use_input_stats, momentum, eps, torch.backends.cudnn.enabled + )
    + + +
[docs]def layer_norm(input, normalized_shape, weight=None, bias=None, eps=1e-5): + # type: (Tensor, List[int], Optional[Tensor], Optional[Tensor], float) -> Tensor + r"""Applies Layer Normalization over the last dimensions indicated by :attr:`normalized_shape`. + + See :class:`~torch.nn.LayerNorm` for details. + """ + return torch.layer_norm(input, normalized_shape, weight, bias, eps, + torch.backends.cudnn.enabled)
+ + +def group_norm(input, num_groups, weight=None, bias=None, eps=1e-5): + # type: (Tensor, int, Optional[Tensor], Optional[Tensor], float) -> Tensor + r"""Applies Group Normalization over a mini-batch of inputs, splitting the channels into :attr:`num_groups` groups. + + See :class:`~torch.nn.GroupNorm` for details. + """ + return torch.group_norm(input, num_groups, weight, bias, eps, + torch.backends.cudnn.enabled) + + +
[docs]def local_response_norm(input, size, alpha=1e-4, beta=0.75, k=1.): + # type: (Tensor, int, float, float, float) -> Tensor + r"""Applies local response normalization over an input signal composed of + several input planes, where channels occupy the second dimension. + Applies normalization across channels. + + See :class:`~torch.nn.LocalResponseNorm` for details. + """ + dim = input.dim() + if dim < 3: + raise ValueError('Expected 3D or higher dimensionality input (got {} dimensions)'.format(dim)) + div = input.mul(input).unsqueeze(1) + if dim == 3: + div = pad(div, (0, 0, size // 2, (size - 1) // 2)) + div = avg_pool2d(div, (size, 1), stride=1).squeeze(1) + else: + sizes = input.size() + div = div.view(sizes[0], 1, sizes[1], sizes[2], -1) + div = pad(div, (0, 0, 0, 0, size // 2, (size - 1) // 2)) + div = avg_pool3d(div, (size, 1, 1), stride=1).squeeze(1) + div = div.view(sizes) + div = div.mul(alpha).add(k).pow(beta) + return input / div
    + + +# loss + +
    [docs]def ctc_loss(log_probs, targets, input_lengths, target_lengths, blank=0, + reduction='mean', zero_infinity=False): + # type: (Tensor, Tensor, Tensor, Tensor, int, str, bool) -> Tensor + r"""The Connectionist Temporal Classification loss. + + See :class:`~torch.nn.CTCLoss` for details. + + .. include:: cudnn_deterministic.rst + .. include:: cuda_deterministic_backward.rst + + Args: + log_probs: :math:`(T, N, C)` where `C = number of characters in alphabet including blank`, + `T = input length`, and `N = batch size`. + The logarithmized probabilities of the outputs + (e.g. obtained with :func:`torch.nn.functional.log_softmax`). + targets: :math:`(N, S)` or `(sum(target_lengths))`. + Targets cannot be blank. In the second form, the targets are assumed to be concatenated. + input_lengths: :math:`(N)`. + Lengths of the inputs (must each be :math:`\leq T`) + target_lengths: :math:`(N)`. + Lengths of the targets + blank (int, optional): + Blank label. Default :math:`0`. + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the output losses will be divided by the target lengths and + then the mean over the batch is taken, ``'sum'``: the output will be + summed. Default: ``'mean'`` + zero_infinity (bool, optional): + Whether to zero infinite losses and the associated gradients. + Default: ``False`` + Infinite losses mainly occur when the inputs are too short + to be aligned to the targets. + + Example:: + + >>> log_probs = torch.randn(50, 16, 20).log_softmax(2).detach().requires_grad_() + >>> targets = torch.randint(1, 20, (16, 30), dtype=torch.long) + >>> input_lengths = torch.full((16,), 50, dtype=torch.long) + >>> target_lengths = torch.randint(10,30,(16,), dtype=torch.long) + >>> loss = F.ctc_loss(log_probs, targets, input_lengths, target_lengths) + >>> loss.backward() + """ + return torch.ctc_loss(log_probs, targets, input_lengths, target_lengths, blank, _Reduction.get_enum(reduction), + zero_infinity)
    + + +
[docs]def nll_loss(input, target, weight=None, size_average=None, ignore_index=-100, + reduce=None, reduction='mean'): + # type: (Tensor, Tensor, Optional[Tensor], Optional[bool], int, Optional[bool], str) -> Tensor + r"""The negative log likelihood loss. + + See :class:`~torch.nn.NLLLoss` for details. + + Args: + input: :math:`(N, C)` where `C = number of classes` or :math:`(N, C, H, W)` + in the case of 2D loss, or :math:`(N, C, d_1, d_2, ..., d_K)` where :math:`K \geq 1` + in the case of K-dimensional loss. + target: :math:`(N)` where each value is :math:`0 \leq \text{targets}[i] \leq C-1`, + or :math:`(N, d_1, d_2, ..., d_K)` where :math:`K \geq 1` for + K-dimensional loss. + weight (Tensor, optional): a manual rescaling weight given to each + class. If given, has to be a Tensor of size `C` + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there are multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + ignore_index (int, optional): Specifies a target value that is ignored + and does not contribute to the input gradient. When :attr:`size_average` is + ``True``, the loss is averaged over non-ignored targets. Default: -100 + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` + + Example:: + + >>> # input is of size N x C = 3 x 5 + >>> input = torch.randn(3, 5, requires_grad=True) + >>> # each element in target has to have 0 <= value < C + >>> target = torch.tensor([1, 0, 4]) + >>> output = F.nll_loss(F.log_softmax(input, 1), target) + >>> output.backward() + """ + if size_average is not None or reduce is not None: + reduction = _Reduction.legacy_get_string(size_average, reduce) + dim = input.dim() + if dim < 2: + raise ValueError('Expected 2 or more dimensions (got {})'.format(dim)) + + if input.size(0) != target.size(0): + raise ValueError('Expected input batch_size ({}) to match target batch_size ({}).' 
+ .format(input.size(0), target.size(0))) + if dim == 2: + ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index) + elif dim == 4: + ret = torch._C._nn.nll_loss2d(input, target, weight, _Reduction.get_enum(reduction), ignore_index) + else: + # dim == 3 or dim > 4 + n = input.size(0) + c = input.size(1) + out_size = (n,) + input.size()[2:] + if target.size()[1:] != input.size()[2:]: + raise ValueError('Expected target size {}, got {}'.format( + out_size, target.size())) + input = input.contiguous().view(n, c, 1, -1) + target = target.contiguous().view(n, 1, -1) + reduction_enum = _Reduction.get_enum(reduction) + if reduction != 'none': + ret = torch._C._nn.nll_loss2d( + input, target, weight, reduction_enum, ignore_index) + else: + out = torch._C._nn.nll_loss2d( + input, target, weight, reduction_enum, ignore_index) + ret = out.view(out_size) + return ret
    + + +
[docs]def poisson_nll_loss(input, target, log_input=True, full=False, size_average=None, eps=1e-8, + reduce=None, reduction='mean'): + # type: (Tensor, Tensor, bool, bool, Optional[bool], float, Optional[bool], str) -> Tensor + r"""Poisson negative log likelihood loss. + + See :class:`~torch.nn.PoissonNLLLoss` for details. + + Args: + input: expectation of underlying Poisson distribution. + target: random sample :math:`target \sim \text{Poisson}(input)`. + log_input: if ``True`` the loss is computed as + :math:`\exp(\text{input}) - \text{target} * \text{input}`, if ``False`` then loss is + :math:`\text{input} - \text{target} * \log(\text{input}+\text{eps})`. Default: ``True`` + full: whether to compute full loss, i.e. to add the Stirling + approximation term + :math:`\text{target} * \log(\text{target}) - \text{target} + 0.5 * \log(2 * \pi * \text{target})`. + Default: ``False`` + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there are multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + eps (float, optional): Small value to avoid evaluation of :math:`\log(0)` when + :attr:`log_input`=``False``. Default: 1e-8 + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` + + """ + if size_average is not None or reduce is not None: + reduction = _Reduction.legacy_get_string(size_average, reduce) + if reduction != 'none' and reduction != 'mean' and reduction != 'sum': + ret = input + raise ValueError(reduction + " is not valid") + + ret = torch.poisson_nll_loss(input, target, log_input, full, eps, _Reduction.get_enum(reduction)) + return ret
    + + +
[docs]def kl_div(input, target, size_average=None, reduce=None, reduction='mean'): + # type: (Tensor, Tensor, Optional[bool], Optional[bool], str) -> Tensor + r"""The `Kullback-Leibler divergence`_ Loss. + + See :class:`~torch.nn.KLDivLoss` for details. + + Args: + input: Tensor of arbitrary shape + target: Tensor of the same shape as input + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there are multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'batchmean'`` | ``'sum'`` | ``'mean'``. + ``'none'``: no reduction will be applied + ``'batchmean'``: the sum of the output will be divided by the batch size + ``'sum'``: the output will be summed + ``'mean'``: the output will be divided by the number of elements in the output + Default: ``'mean'`` + + .. note:: + :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, + and in the meantime, specifying either of those two args will override :attr:`reduction`. + + .. note:: + :attr:`reduction` = ``'mean'`` doesn't return the true KL divergence value. Please use + :attr:`reduction` = ``'batchmean'``, which aligns with the mathematical definition of KL divergence. + In the next major release, ``'mean'`` will be changed to behave the same as ``'batchmean'``. + """ + if size_average is not None or reduce is not None: + reduction_enum = _Reduction.legacy_get_enum(size_average, reduce) + else: + if reduction == 'mean': + warnings.warn("reduction: 'mean' divides the total loss by both the batch size and the support size. " + "'batchmean' divides only by the batch size, and aligns with the KL div math definition. " + "'mean' will be changed to behave the same as 'batchmean' in the next major release.") + + # special case for batchmean + if reduction == 'batchmean': + reduction_enum = _Reduction.get_enum('sum') + else: + reduction_enum = _Reduction.get_enum(reduction) + + reduced = torch.kl_div(input, target, reduction_enum) + + if reduction == 'batchmean' and input.dim() != 0: + reduced = reduced / input.size()[0] + + return reduced
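Editor's note: a sketch (not part of the original source) showing the recommended ``'batchmean'`` reduction; note that ``input`` is expected to contain log-probabilities and ``target`` probabilities:

    >>> log_p = F.log_softmax(torch.randn(4, 6), dim=1)   # input: log-probabilities
    >>> q = F.softmax(torch.randn(4, 6), dim=1)           # target: probabilities
    >>> F.kl_div(log_p, q, reduction='batchmean')         # pointwise sum divided by batch size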
    + + +
[docs]def cross_entropy(input, target, weight=None, size_average=None, ignore_index=-100, + reduce=None, reduction='mean'): + # type: (Tensor, Tensor, Optional[Tensor], Optional[bool], int, Optional[bool], str) -> Tensor + r"""This criterion combines `log_softmax` and `nll_loss` in a single + function. + + See :class:`~torch.nn.CrossEntropyLoss` for details. + + Args: + input (Tensor) : :math:`(N, C)` where `C = number of classes` or :math:`(N, C, H, W)` + in the case of 2D loss, or :math:`(N, C, d_1, d_2, ..., d_K)` where :math:`K \geq 1` + in the case of K-dimensional loss. + target (Tensor) : :math:`(N)` where each value is :math:`0 \leq \text{targets}[i] \leq C-1`, + or :math:`(N, d_1, d_2, ..., d_K)` where :math:`K \geq 1` for + K-dimensional loss. + weight (Tensor, optional): a manual rescaling weight given to each + class. If given, has to be a Tensor of size `C` + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there are multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + ignore_index (int, optional): Specifies a target value that is ignored + and does not contribute to the input gradient. When :attr:`size_average` is + ``True``, the loss is averaged over non-ignored targets. Default: -100 + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` + + Examples:: + + >>> input = torch.randn(3, 5, requires_grad=True) + >>> target = torch.randint(5, (3,), dtype=torch.int64) + >>> loss = F.cross_entropy(input, target) + >>> loss.backward() + """ + if size_average is not None or reduce is not None: + reduction = _Reduction.legacy_get_string(size_average, reduce) + return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
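Editor's note: a sketch (not part of the original source) verifying the combination stated above, which is exactly what the last line of the function computes:

    >>> input = torch.randn(3, 5)
    >>> target = torch.tensor([1, 0, 4])
    >>> a = F.cross_entropy(input, target)
    >>> b = F.nll_loss(F.log_softmax(input, 1), target)
    >>> torch.allclose(a, b)    # -> True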
    + + +
[docs]def binary_cross_entropy(input, target, weight=None, size_average=None, + reduce=None, reduction='mean'): + # type: (Tensor, Tensor, Optional[Tensor], Optional[bool], Optional[bool], str) -> Tensor + r"""Function that measures the Binary Cross Entropy + between the target and the output. + + See :class:`~torch.nn.BCELoss` for details. + + Args: + input: Tensor of arbitrary shape + target: Tensor of the same shape as input + weight (Tensor, optional): a manual rescaling weight. + If provided, it is repeated to match the input tensor shape. + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there are multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` + + Examples:: + + >>> input = torch.randn((3, 2), requires_grad=True) + >>> target = torch.rand((3, 2), requires_grad=False) + >>> loss = F.binary_cross_entropy(torch.sigmoid(input), target) + >>> loss.backward() + """ + if size_average is not None or reduce is not None: + reduction_enum = _Reduction.legacy_get_enum(size_average, reduce) + else: + reduction_enum = _Reduction.get_enum(reduction) + if target.size() != input.size(): + warnings.warn("Using a target size ({}) that is different to the input size ({}) is deprecated. " + "Please ensure they have the same size.".format(target.size(), input.size()), + stacklevel=2) + if input.numel() != target.numel(): + raise ValueError("Target and input must have the same number of elements. target nelement ({}) " + "!= input nelement ({})".format(target.numel(), input.numel())) + + if weight is not None: + new_size = _infer_size(target.size(), weight.size()) + weight = weight.expand(new_size) + + return torch._C._nn.binary_cross_entropy( + input, target, weight, reduction_enum)
    + + +
[docs]def binary_cross_entropy_with_logits(input, target, weight=None, size_average=None, + reduce=None, reduction='mean', pos_weight=None): + # type: (Tensor, Tensor, Optional[Tensor], Optional[bool], Optional[bool], str, Optional[Tensor]) -> Tensor + r"""Function that measures Binary Cross Entropy between target and output + logits. + + See :class:`~torch.nn.BCEWithLogitsLoss` for details. + + Args: + input: Tensor of arbitrary shape + target: Tensor of the same shape as input + weight (Tensor, optional): a manual rescaling weight. + If provided, it is repeated to match the input tensor shape. + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there are multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` + pos_weight (Tensor, optional): a weight of positive examples. + Must be a vector with length equal to the number of classes. + + Examples:: + + >>> input = torch.randn(3, requires_grad=True) + >>> target = torch.empty(3).random_(2) + >>> loss = F.binary_cross_entropy_with_logits(input, target) + >>> loss.backward() + """ + if size_average is not None or reduce is not None: + reduction_enum = _Reduction.legacy_get_enum(size_average, reduce) + else: + reduction_enum = _Reduction.get_enum(reduction) + + if not (target.size() == input.size()): + raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size())) + + return torch.binary_cross_entropy_with_logits(input, target, weight, pos_weight, reduction_enum)
    + + +def _pointwise_loss(lambd, lambd_optimized, input, target, reduction='mean'): + if target.requires_grad: + d = lambd(input, target) + if reduction == 'none': + return d + return torch.mean(d) if reduction == 'mean' else torch.sum(d) + else: + expanded_input, expanded_target = torch.broadcast_tensors(input, target) + return lambd_optimized(expanded_input, expanded_target, _Reduction.get_enum(reduction)) + + +def _smooth_l1_loss(input, target): + # type: (Tensor, Tensor) -> Tensor + t = torch.abs(input - target) + return torch.where(t < 1, 0.5 * t ** 2, t - 0.5) + + +
    [docs]def smooth_l1_loss(input, target, size_average=None, reduce=None, reduction='mean'): + # type: (Tensor, Tensor, Optional[bool], Optional[bool], str) -> Tensor + r"""Function that uses a squared term if the absolute + element-wise error falls below 1 and an L1 term otherwise. + + See :class:`~torch.nn.SmoothL1Loss` for details. + """ + if not (target.size() == input.size()): + warnings.warn("Using a target size ({}) that is different to the input size ({}). " + "This will likely lead to incorrect results due to broadcasting. " + "Please ensure they have the same size.".format(target.size(), input.size()), + stacklevel=2) + if size_average is not None or reduce is not None: + reduction = _Reduction.legacy_get_string(size_average, reduce) + if target.requires_grad: + ret = _smooth_l1_loss(input, target) + if reduction != 'none': + ret = torch.mean(ret) if reduction == 'mean' else torch.sum(ret) + else: + expanded_input, expanded_target = torch.broadcast_tensors(input, target) + ret = torch._C._nn.smooth_l1_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction)) + return ret
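Editor's note: a numeric sketch (not part of the original source) of the piecewise definition in ``_smooth_l1_loss``: quadratic below an absolute error of 1, linear above:

    >>> pred = torch.tensor([0.5, 2.0])
    >>> F.smooth_l1_loss(pred, torch.zeros(2), reduction='none')
    >>> # -> tensor([0.1250, 1.5000]); 0.5 * 0.5**2 = 0.125 and 2.0 - 0.5 = 1.5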
    + + +
    [docs]def l1_loss(input, target, size_average=None, reduce=None, reduction='mean'): + # type: (Tensor, Tensor, Optional[bool], Optional[bool], str) -> Tensor + r"""l1_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor + + Function that takes the mean element-wise absolute value difference. + + See :class:`~torch.nn.L1Loss` for details. + """ + if not (target.size() == input.size()): + warnings.warn("Using a target size ({}) that is different to the input size ({}). " + "This will likely lead to incorrect results due to broadcasting. " + "Please ensure they have the same size.".format(target.size(), input.size()), + stacklevel=2) + if size_average is not None or reduce is not None: + reduction = _Reduction.legacy_get_string(size_average, reduce) + if target.requires_grad: + ret = torch.abs(input - target) + if reduction != 'none': + ret = torch.mean(ret) if reduction == 'mean' else torch.sum(ret) + else: + expanded_input, expanded_target = torch.broadcast_tensors(input, target) + ret = torch._C._nn.l1_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction)) + return ret
    + + +
    [docs]def mse_loss(input, target, size_average=None, reduce=None, reduction='mean'): + # type: (Tensor, Tensor, Optional[bool], Optional[bool], str) -> Tensor + r"""mse_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor + + Measures the element-wise mean squared error. + + See :class:`~torch.nn.MSELoss` for details. + """ + if not (target.size() == input.size()): + warnings.warn("Using a target size ({}) that is different to the input size ({}). " + "This will likely lead to incorrect results due to broadcasting. " + "Please ensure they have the same size.".format(target.size(), input.size()), + stacklevel=2) + if size_average is not None or reduce is not None: + reduction = _Reduction.legacy_get_string(size_average, reduce) + if target.requires_grad: + ret = (input - target) ** 2 + if reduction != 'none': + ret = torch.mean(ret) if reduction == 'mean' else torch.sum(ret) + else: + expanded_input, expanded_target = torch.broadcast_tensors(input, target) + ret = torch._C._nn.mse_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction)) + return ret
    + + +
    [docs]def margin_ranking_loss(input1, input2, target, margin=0, size_average=None, + reduce=None, reduction='mean'): + # type: (Tensor, Tensor, Tensor, float, Optional[bool], Optional[bool], str) -> Tensor + r"""margin_ranking_loss(input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean') -> Tensor + + See :class:`~torch.nn.MarginRankingLoss` for details. + """ # noqa + if size_average is not None or reduce is not None: + reduction_enum = _Reduction.legacy_get_enum(size_average, reduce) + else: + reduction_enum = _Reduction.get_enum(reduction) + if input1.dim() == 0 or input2.dim() == 0 or target.dim() == 0: + raise RuntimeError(("margin_ranking_loss does not support scalars, got sizes: " + "input1: {}, input2: {}, target: {} ".format(input1.size(), input2.size(), target.size()))) + return torch.margin_ranking_loss(input1, input2, target, margin, reduction_enum)
    + + +
    [docs]def hinge_embedding_loss(input, target, margin=1.0, size_average=None, + reduce=None, reduction='mean'): + # type: (Tensor, Tensor, float, Optional[bool], Optional[bool], str) -> Tensor + r"""hinge_embedding_loss(input, target, margin=1.0, size_average=None, reduce=None, reduction='mean') -> Tensor + + See :class:`~torch.nn.HingeEmbeddingLoss` for details. + """ # noqa + if size_average is not None or reduce is not None: + reduction_enum = _Reduction.legacy_get_enum(size_average, reduce) + else: + reduction_enum = _Reduction.get_enum(reduction) + return torch.hinge_embedding_loss(input, target, margin, reduction_enum)
    + + +
    [docs]def multilabel_margin_loss(input, target, size_average=None, reduce=None, reduction='mean'): + # type: (Tensor, Tensor, Optional[bool], Optional[bool], str) -> Tensor + r"""multilabel_margin_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor + + See :class:`~torch.nn.MultiLabelMarginLoss` for details. + """ + if size_average is not None or reduce is not None: + reduction_enum = _Reduction.legacy_get_enum(size_average, reduce) + else: + reduction_enum = _Reduction.get_enum(reduction) + return torch._C._nn.multilabel_margin_loss(input, target, reduction_enum)
    + + +
    [docs]def soft_margin_loss(input, target, size_average=None, reduce=None, reduction='mean'): + # type: (Tensor, Tensor, Optional[bool], Optional[bool], str) -> Tensor + r"""soft_margin_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor + + See :class:`~torch.nn.SoftMarginLoss` for details. + """ + if size_average is not None or reduce is not None: + reduction_enum = _Reduction.legacy_get_enum(size_average, reduce) + else: + reduction_enum = _Reduction.get_enum(reduction) + return torch._C._nn.soft_margin_loss(input, target, reduction_enum)
    + + +
[docs]def multilabel_soft_margin_loss(input, target, weight=None, size_average=None, + reduce=None, reduction='mean'): + # type: (Tensor, Tensor, Optional[Tensor], Optional[bool], Optional[bool], str) -> Tensor + r"""multilabel_soft_margin_loss(input, target, weight=None, size_average=None, reduce=None, reduction='mean') -> Tensor + + See :class:`~torch.nn.MultiLabelSoftMarginLoss` for details. + """ + if size_average is not None or reduce is not None: + reduction = _Reduction.legacy_get_string(size_average, reduce) + + loss = -(target * logsigmoid(input) + (1 - target) * logsigmoid(-input)) + + if weight is not None: + loss = loss * weight + + loss = loss.sum(dim=1) / input.size(1) # only return N loss values + + if reduction == 'none': + ret = loss + elif reduction == 'mean': + ret = loss.mean() + elif reduction == 'sum': + ret = loss.sum() + else: + ret = input + raise ValueError(reduction + " is not valid") + return ret
    + + +
    [docs]def cosine_embedding_loss(input1, input2, target, margin=0, size_average=None, + reduce=None, reduction='mean'): + # type: (Tensor, Tensor, Tensor, float, Optional[bool], Optional[bool], str) -> Tensor + r"""cosine_embedding_loss(input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean') -> Tensor + + See :class:`~torch.nn.CosineEmbeddingLoss` for details. + """ # noqa + if size_average is not None or reduce is not None: + reduction_enum = _Reduction.legacy_get_enum(size_average, reduce) + else: + reduction_enum = _Reduction.get_enum(reduction) + return torch.cosine_embedding_loss(input1, input2, target, margin, reduction_enum)
    + + +
    [docs]def multi_margin_loss(input, target, p=1, margin=1., weight=None, size_average=None, + reduce=None, reduction='mean'): + # type: (Tensor, Tensor, int, float, Optional[Tensor], Optional[bool], Optional[bool], str) -> Tensor + r"""multi_margin_loss(input, target, p=1, margin=1, weight=None, size_average=None, + reduce=None, reduction='mean') -> Tensor + + See :class:`~torch.nn.MultiMarginLoss` for details. + """ + if size_average is not None or reduce is not None: + reduction_enum = _Reduction.legacy_get_enum(size_average, reduce) + else: + reduction_enum = _Reduction.get_enum(reduction) + if p != 1 and p != 2: + raise ValueError('only p == 1 and p == 2 supported') + if weight is not None: + if weight.dim() != 1: + raise ValueError('weight must be one-dimensional') + + return torch._C._nn.multi_margin_loss(input, target, p, margin, weight, reduction_enum)
    + + +pixel_shuffle = _add_docstr(torch.pixel_shuffle, r""" +Rearranges elements in a tensor of shape :math:`(*, C \times r^2, H, W)` to a +tensor of shape :math:`(*, C, H \times r, W \times r)`. + +See :class:`~torch.nn.PixelShuffle` for details. + +Args: + input (Tensor): the input tensor + upscale_factor (int): factor to increase spatial resolution by + +Examples:: + + >>> input = torch.randn(1, 9, 4, 4) + >>> output = torch.nn.functional.pixel_shuffle(input, 3) + >>> print(output.size()) + torch.Size([1, 1, 12, 12]) +""") + + +
[docs]def upsample(input, size=None, scale_factor=None, mode='nearest', align_corners=None): + r"""Upsamples the input to either the given :attr:`size` or the given + :attr:`scale_factor` + + .. warning:: + This function is deprecated in favor of :func:`torch.nn.functional.interpolate`. + This is equivalent to ``nn.functional.interpolate(...)``. + + .. include:: cuda_deterministic_backward.rst + + The algorithm used for upsampling is determined by :attr:`mode`. + + Currently temporal, spatial and volumetric upsampling are supported, i.e. + expected inputs are 3-D, 4-D or 5-D in shape. + + The input dimensions are interpreted in the form: + `mini-batch x channels x [optional depth] x [optional height] x width`. + + The modes available for upsampling are: `nearest`, `linear` (3D-only), + `bilinear`, `bicubic` (4D-only), `trilinear` (5D-only) + + Args: + input (Tensor): the input tensor + size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]): + output spatial size. + scale_factor (float or Tuple[float]): multiplier for spatial size. Has to match + input size if it is a tuple. + mode (string): algorithm used for upsampling: + ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` | + ``'trilinear'``. Default: ``'nearest'`` + align_corners (bool, optional): Geometrically, we consider the pixels of the + input and output as squares rather than points. + If set to ``False``, the input and output tensors are aligned by the + center points of their corner pixels. If set to ``True``, the input and + output tensors are aligned by the corner points of their corner + pixels, and the interpolation uses edge value padding for out-of-boundary values. + This only has effect when :attr:`mode` is ``'linear'``, + ``'bilinear'``, ``'bicubic'`` or ``'trilinear'``. + Default: ``False`` + + .. note:: + With ``mode='bicubic'``, it's possible to cause overshoot, in other words it can produce + negative values or values greater than 255 for images. + Explicitly call ``result.clamp(min=0, max=255)`` if you want to reduce the overshoot + when displaying the image. + + .. warning:: + With ``align_corners = True``, the linearly interpolating modes + (`linear`, `bilinear`, and `trilinear`) don't proportionally align the + output and input pixels, and thus the output values can depend on the + input size. This was the default behavior for these modes up to version + 0.3.1. Since then, the default behavior is ``align_corners = False``. + See :class:`~torch.nn.Upsample` for concrete examples on how this + affects the outputs. + + """ + warnings.warn("nn.functional.upsample is deprecated. Use nn.functional.interpolate instead.") + return interpolate(input, size, scale_factor, mode, align_corners)
    + + +
    [docs]def interpolate(input, size=None, scale_factor=None, mode='nearest', align_corners=None): + r"""Down/up samples the input to either the given :attr:`size` or the given + :attr:`scale_factor` + + The algorithm used for interpolation is determined by :attr:`mode`. + + Currently temporal, spatial and volumetric sampling are supported, i.e. + expected inputs are 3-D, 4-D or 5-D in shape. + + The input dimensions are interpreted in the form: + `mini-batch x channels x [optional depth] x [optional height] x width`. + + The modes available for resizing are: `nearest`, `linear` (3D-only), + `bilinear`, `bicubic` (4D-only), `trilinear` (5D-only), `area` + + Args: + input (Tensor): the input tensor + size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]): + output spatial size. + scale_factor (float or Tuple[float]): multiplier for spatial size. Has to match input size if it is a tuple. + mode (str): algorithm used for upsampling: + ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` | + ``'trilinear'`` | ``'area'``. Default: ``'nearest'`` + align_corners (bool, optional): Geometrically, we consider the pixels of the + input and output as squares rather than points. + If set to ``False``, the input and output tensors are aligned by the + center points of their corner pixels. If set to ``True``, the input and + output tensors are aligned by the corner points of their corner + pixels, and the interpolation uses edge value padding for out-of-boundary values. + This only has effect when :attr:`mode` is ``'linear'``, + ``'bilinear'``, ``'bicubic'``, or ``'trilinear'``. + Default: ``False`` + + .. note:: + With ``mode='bicubic'``, it's possible to cause overshoot, in other words it can produce + negative values or values greater than 255 for images. + Explicitly call ``result.clamp(min=0, max=255)`` if you want to reduce the overshoot + when displaying the image. + + .. warning:: + With ``align_corners = True``, the linearly interpolating modes + (`linear`, `bilinear`, and `trilinear`) don't proportionally align the + output and input pixels, and thus the output values can depend on the + input size. This was the default behavior for these modes up to version + 0.3.1. Since then, the default behavior is ``align_corners = False``. + See :class:`~torch.nn.Upsample` for concrete examples on how this + affects the outputs. + + .. include:: cuda_deterministic_backward.rst + """ + from .modules.utils import _ntuple + + def _check_size_scale_factor(dim): + if size is None and scale_factor is None: + raise ValueError('either size or scale_factor should be defined') + if size is not None and scale_factor is not None: + raise ValueError('only one of size or scale_factor should be defined') + if scale_factor is not None and isinstance(scale_factor, tuple)\ + and len(scale_factor) != dim: + raise ValueError('scale_factor shape must match input shape. 
' + 'Input is {}D, scale_factor size is {}'.format(dim, len(scale_factor))) + + def _output_size(dim): + _check_size_scale_factor(dim) + if size is not None: + return size + scale_factors = _ntuple(dim)(scale_factor) + # math.floor might return float in py2.7 + + # make scale_factor a tensor in tracing so constant doesn't get baked in + if torch._C._get_tracing_state(): + return [(torch.floor((input.size(i + 2) * torch.tensor(float(scale_factors[i]))).float())) for i in range(dim)] + else: + return [int(math.floor(float(input.size(i + 2)) * scale_factors[i])) for i in range(dim)] + + if mode in ('nearest', 'area'): + if align_corners is not None: + raise ValueError("align_corners option can only be set with the " + "interpolating modes: linear | bilinear | bicubic | trilinear") + else: + if align_corners is None: + warnings.warn("Default upsampling behavior when mode={} is changed " + "to align_corners=False since 0.4.0. Please specify " + "align_corners=True if the old behavior is desired. " + "See the documentation of nn.Upsample for details.".format(mode)) + align_corners = False + + if input.dim() == 3 and mode == 'nearest': + return torch._C._nn.upsample_nearest1d(input, _output_size(1)) + elif input.dim() == 4 and mode == 'nearest': + return torch._C._nn.upsample_nearest2d(input, _output_size(2)) + elif input.dim() == 5 and mode == 'nearest': + return torch._C._nn.upsample_nearest3d(input, _output_size(3)) + elif input.dim() == 3 and mode == 'area': + return adaptive_avg_pool1d(input, _output_size(1)) + elif input.dim() == 4 and mode == 'area': + return adaptive_avg_pool2d(input, _output_size(2)) + elif input.dim() == 5 and mode == 'area': + return adaptive_avg_pool3d(input, _output_size(3)) + elif input.dim() == 3 and mode == 'linear': + return torch._C._nn.upsample_linear1d(input, _output_size(1), align_corners) + elif input.dim() == 3 and mode == 'bilinear': + raise NotImplementedError("Got 3D input, but bilinear mode needs 4D input") + elif input.dim() == 3 and mode == 'trilinear': + raise NotImplementedError("Got 3D input, but trilinear mode needs 5D input") + elif input.dim() == 4 and mode == 'linear': + raise NotImplementedError("Got 4D input, but linear mode needs 3D input") + elif input.dim() == 4 and mode == 'bilinear': + return torch._C._nn.upsample_bilinear2d(input, _output_size(2), align_corners) + elif input.dim() == 4 and mode == 'trilinear': + raise NotImplementedError("Got 4D input, but trilinear mode needs 5D input") + elif input.dim() == 5 and mode == 'linear': + raise NotImplementedError("Got 5D input, but linear mode needs 3D input") + elif input.dim() == 5 and mode == 'bilinear': + raise NotImplementedError("Got 5D input, but bilinear mode needs 4D input") + elif input.dim() == 5 and mode == 'trilinear': + return torch._C._nn.upsample_trilinear3d(input, _output_size(3), align_corners) + elif input.dim() == 4 and mode == 'bicubic': + return torch._C._nn.upsample_bicubic2d(input, _output_size(2), align_corners) + else: + raise NotImplementedError("Input Error: Only 3D, 4D and 5D input Tensors supported" + " (got {}D) for the modes: nearest | linear | bilinear | bicubic | trilinear" + " (got {})".format(input.dim(), mode))
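Editor's note: a shape sketch (not part of the original source) for the two mutually exclusive ways of specifying the output size of ``interpolate``:

    >>> x = torch.randn(1, 3, 8, 8)
    >>> F.interpolate(x, scale_factor=2, mode='nearest').shape
    torch.Size([1, 3, 16, 16])
    >>> F.interpolate(x, size=(4, 4), mode='bilinear', align_corners=False).shape
    torch.Size([1, 3, 4, 4])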
    + + +
[docs]def upsample_nearest(input, size=None, scale_factor=None): + r"""Upsamples the input, using nearest neighbours' pixel values. + + .. warning:: + This function is deprecated in favor of :func:`torch.nn.functional.interpolate`. + This is equivalent to ``nn.functional.interpolate(..., mode='nearest')``. + + Currently spatial and volumetric upsampling are supported (i.e. expected + inputs are 4 or 5 dimensional). + + Args: + input (Tensor): input + size (int or Tuple[int, int] or Tuple[int, int, int]): output spatial + size. + scale_factor (int): multiplier for spatial size. Has to be an integer. + + .. include:: cuda_deterministic_backward.rst + """ + # DeprecationWarning is ignored by default + warnings.warn("nn.functional.upsample_nearest is deprecated. Use nn.functional.interpolate instead.") + return interpolate(input, size, scale_factor, mode='nearest')
    + + +
[docs]def upsample_bilinear(input, size=None, scale_factor=None): + r"""Upsamples the input, using bilinear upsampling. + + .. warning:: + This function is deprecated in favor of :func:`torch.nn.functional.interpolate`. + This is equivalent to + ``nn.functional.interpolate(..., mode='bilinear', align_corners=True)``. + + Expected inputs are spatial (4 dimensional). Use `upsample_trilinear` for + volumetric (5 dimensional) inputs. + + Args: + input (Tensor): input + size (int or Tuple[int, int]): output spatial size. + scale_factor (int or Tuple[int, int]): multiplier for spatial size + + .. include:: cuda_deterministic_backward.rst + """ + # DeprecationWarning is ignored by default + warnings.warn("nn.functional.upsample_bilinear is deprecated. Use nn.functional.interpolate instead.") + return interpolate(input, size, scale_factor, mode='bilinear', align_corners=True)
    + + +GRID_SAMPLE_INTERPOLATION_MODES = { + 'bilinear': 0, + 'nearest': 1, +} + +GRID_SAMPLE_PADDING_MODES = { + 'zeros': 0, + 'border': 1, + 'reflection': 2, +} + + +
[docs]def grid_sample(input, grid, mode='bilinear', padding_mode='zeros'): + # type: (Tensor, Tensor, str, str) -> Tensor + r"""Given an :attr:`input` and a flow-field :attr:`grid`, computes the + ``output`` using :attr:`input` values and pixel locations from :attr:`grid`. + + Currently, only spatial (4-D) and volumetric (5-D) :attr:`input` are + supported. + + In the spatial (4-D) case, for :attr:`input` with shape + :math:`(N, C, H_\text{in}, W_\text{in})` and :attr:`grid` with shape + :math:`(N, H_\text{out}, W_\text{out}, 2)`, the output will have shape + :math:`(N, C, H_\text{out}, W_\text{out})`. + + For each output location ``output[n, :, h, w]``, the size-2 vector + ``grid[n, h, w]`` specifies :attr:`input` pixel locations ``x`` and ``y``, + which are used to interpolate the output value ``output[n, :, h, w]``. + In the case of 5-D inputs, ``grid[n, d, h, w]`` specifies the + ``x``, ``y``, ``z`` pixel locations for interpolating + ``output[n, :, d, h, w]``. The :attr:`mode` argument specifies the ``nearest`` or + ``bilinear`` interpolation method used to sample the input pixels. + + :attr:`grid` specifies the sampling pixel locations normalized by the + :attr:`input` spatial dimensions. Therefore, it should have most values in + the range of ``[-1, 1]``. For example, values ``x = -1, y = -1`` correspond to the + left-top pixel of :attr:`input`, and values ``x = 1, y = 1`` correspond to the + right-bottom pixel of :attr:`input`. + + If :attr:`grid` has values outside the range of ``[-1, 1]``, the corresponding + outputs are handled as defined by :attr:`padding_mode`. Options are + + * ``padding_mode="zeros"``: use ``0`` for out-of-bound grid locations, + * ``padding_mode="border"``: use border values for out-of-bound grid locations, + * ``padding_mode="reflection"``: use values at locations reflected by + the border for out-of-bound grid locations. A location far away + from the border keeps being reflected until it becomes in-bound, + e.g., (normalized) pixel location ``x = -3.5`` reflects by border ``-1`` + and becomes ``x' = 1.5``, then reflects by border ``1`` and becomes + ``x'' = -0.5``. + + .. Note:: This function is often used in building `Spatial Transformer Networks`_ . + .. include:: cuda_deterministic_backward.rst + + Args: + input (Tensor): input of shape :math:`(N, C, H_\text{in}, W_\text{in})` (4-D case) + or :math:`(N, C, D_\text{in}, H_\text{in}, W_\text{in})` (5-D case) + grid (Tensor): flow-field of shape :math:`(N, H_\text{out}, W_\text{out}, 2)` (4-D case) + or :math:`(N, D_\text{out}, H_\text{out}, W_\text{out}, 3)` (5-D case) + mode (str): interpolation mode to calculate output values + ``'bilinear'`` | ``'nearest'``. Default: ``'bilinear'`` + padding_mode (str): padding mode for outside grid values + ``'zeros'`` | ``'border'`` | ``'reflection'``. Default: ``'zeros'`` + + Returns: + output (Tensor): output Tensor + + .. 
_`Spatial Transformer Networks`: + https://arxiv.org/abs/1506.02025 + """ + if mode != 'bilinear' and mode != 'nearest': + raise ValueError("nn.functional.grid_sample(): expected mode to be " + "'bilinear' or 'nearest', but got: '{}'".format(mode)) + if padding_mode != 'zeros' and padding_mode != 'border' and padding_mode != 'reflection': + raise ValueError("nn.functional.grid_sample(): expected padding_mode " + "to be 'zeros', 'border', or 'reflection', " + "but got: '{}'".format(padding_mode)) + + if mode == 'bilinear': + mode_enum = 0 + else: + mode_enum = 1 + + if padding_mode == 'zeros': + padding_mode_enum = 0 + elif padding_mode == 'border': + padding_mode_enum = 1 + else: + padding_mode_enum = 2 + + return torch.grid_sampler(input, grid, mode_enum, padding_mode_enum)
    + + +
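As a sanity check on the semantics described above, sampling with the identity flow field produced by :func:`affine_grid` should reproduce the input up to floating-point error (editorial sketch, not part of the original listing; values are illustrative):

    >>> x = torch.arange(16, dtype=torch.float32).view(1, 1, 4, 4)
    >>> theta = torch.tensor([[[1., 0., 0.], [0., 1., 0.]]])  # identity affine matrix
    >>> grid = F.affine_grid(theta, x.size())
    >>> out = F.grid_sample(x, grid, mode='bilinear', padding_mode='zeros')
    >>> torch.allclose(out, x, atol=1e-5)
    True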
    [docs]def affine_grid(theta, size): + # type: (Tensor, List[int]) -> Tensor + r"""Generates a 2d flow field, given a batch of affine matrices :attr:`theta`. + Generally used in conjunction with :func:`grid_sample` to + implement Spatial Transformer Networks. + + Args: + theta (Tensor): input batch of affine matrices (:math:`N \times 2 \times 3`) + size (torch.Size): the target output image size (:math:`N \times C \times H \times W`). + Example: torch.Size((32, 3, 24, 24)) + + Returns: + output (Tensor): output Tensor of size (:math:`N \times H \times W \times 2`) + """ + return vision.affine_grid_generator(theta, size)
    + + +
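A quick shape sketch (editorial addition): for a batch of one affine matrix and a target size of ``(1, 3, 24, 24)``, the generated flow field carries one normalized ``(x, y)`` location per output pixel:

    >>> theta = torch.tensor([[[1., 0., 0.], [0., 1., 0.]]])
    >>> F.affine_grid(theta, torch.Size((1, 3, 24, 24))).shape
    torch.Size([1, 24, 24, 2])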
[docs]def pad(input, pad, mode='constant', value=0): + # type: (Tensor, List[int], str, float) -> Tensor + r"""Pads a tensor. + + Padding size: + The padding size by which to pad some dimensions of :attr:`input` + is described starting from the last dimension and moving forward. + :math:`\left\lfloor\frac{\text{len(pad)}}{2}\right\rfloor` dimensions + of ``input`` will be padded. + For example, to pad only the last dimension of the input tensor, then + :attr:`pad` has the form + :math:`(\text{padding\_left}, \text{padding\_right})`; + to pad the last 2 dimensions of the input tensor, then use + :math:`(\text{padding\_left}, \text{padding\_right},` + :math:`\text{padding\_top}, \text{padding\_bottom})`; + to pad the last 3 dimensions, use + :math:`(\text{padding\_left}, \text{padding\_right},` + :math:`\text{padding\_top}, \text{padding\_bottom},` + :math:`\text{padding\_front}, \text{padding\_back})`. + + Padding mode: + See :class:`torch.nn.ConstantPad2d`, :class:`torch.nn.ReflectionPad2d`, and + :class:`torch.nn.ReplicationPad2d` for concrete examples on how each of the + padding modes works. Constant padding is implemented for arbitrary dimensions. + Replicate padding is implemented for padding the last 3 dimensions of a 5D input + tensor, the last 2 dimensions of a 4D input tensor, or the last dimension of a + 3D input tensor. Reflect padding is only implemented for padding the last 2 + dimensions of a 4D input tensor, or the last dimension of a 3D input tensor. + + .. include:: cuda_deterministic_backward.rst + + Args: + input (Tensor): N-dimensional tensor + pad (tuple): m-element tuple, where + :math:`\frac{m}{2} \leq` input dimensions and :math:`m` is even. + mode: ``'constant'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. + Default: ``'constant'`` + value: fill value for ``'constant'`` padding. 
Default: ``0`` + + Examples:: + + >>> t4d = torch.empty(3, 3, 4, 2) + >>> p1d = (1, 1) # pad last dim by 1 on each side + >>> out = F.pad(t4d, p1d, "constant", 0) # effectively zero padding + >>> print(out.data.size()) + torch.Size([3, 3, 4, 4]) + >>> p2d = (1, 1, 2, 2) # pad last dim by (1, 1) and 2nd to last by (2, 2) + >>> out = F.pad(t4d, p2d, "constant", 0) + >>> print(out.data.size()) + torch.Size([3, 3, 8, 4]) + >>> t4d = torch.empty(3, 3, 4, 2) + >>> p3d = (0, 1, 2, 1, 3, 3) # pad by (0, 1), (2, 1), and (3, 3) + >>> out = F.pad(t4d, p3d, "constant", 0) + >>> print(out.data.size()) + torch.Size([3, 9, 7, 3]) + + """ + assert len(pad) % 2 == 0, 'Padding length must be divisible by 2' + assert len(pad) // 2 <= input.dim(), 'Padding length too large' + if mode == 'constant': + ret = _VF.constant_pad_nd(input, pad, value) + else: + assert value == 0, 'Padding mode "{}" doesn\'t take in value argument'.format(mode) + if input.dim() == 3: + assert len(pad) == 2, '3D tensors expect 2 values for padding' + if mode == 'reflect': + ret = torch._C._nn.reflection_pad1d(input, pad) + elif mode == 'replicate': + ret = torch._C._nn.replication_pad1d(input, pad) + elif mode == 'circular': + ret = _pad_circular(input, pad) + else: + ret = input # TODO: remove this when jit raise supports control flow + raise NotImplementedError + + elif input.dim() == 4: + assert len(pad) == 4, '4D tensors expect 4 values for padding' + if mode == 'reflect': + ret = torch._C._nn.reflection_pad2d(input, pad) + elif mode == 'replicate': + ret = torch._C._nn.replication_pad2d(input, pad) + elif mode == 'circular': + ret = _pad_circular(input, pad) + else: + ret = input # TODO: remove this when jit raise supports control flow + raise NotImplementedError + + elif input.dim() == 5: + assert len(pad) == 6, '5D tensors expect 6 values for padding' + if mode == 'reflect': + ret = input # TODO: remove this when jit raise supports control flow + raise NotImplementedError + elif mode == 'replicate': + ret = torch._C._nn.replication_pad3d(input, pad) + elif mode == 'circular': + ret = _pad_circular(input, pad) + else: + ret = input # TODO: remove this when jit raise supports control flow + raise NotImplementedError + else: + ret = input # TODO: remove this when jit raise supports control flow + raise NotImplementedError("Only 3D, 4D, 5D padding with non-constant padding is supported for now") + + return ret
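The docstring examples above cover ``'constant'`` mode only; in ``'circular'`` mode the tensor wraps around, as implemented by ``_pad_circular`` further below (editorial sketch, not part of the original listing):

    >>> t = torch.arange(4, dtype=torch.float32).view(1, 1, 4)
    >>> F.pad(t, (1, 1), mode='circular')
    tensor([[[3., 0., 1., 2., 3., 0.]]])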
    + +# distance + + +
    [docs]def pairwise_distance(x1, x2, p=2., eps=1e-6, keepdim=False): + # type: (Tensor, Tensor, float, float, bool) -> Tensor + r""" + See :class:`torch.nn.PairwiseDistance` for details + """ + return torch.pairwise_distance(x1, x2, p, eps, keepdim)
+ + +pdist = _add_docstr(torch.pdist, r""" +pdist(input, p=2) -> Tensor + +Computes the p-norm distance between every pair of row vectors in the input. +This is identical to the upper triangular portion, excluding the diagonal, of +`torch.norm(input[:, None] - input, dim=2, p=p)`. This function will be faster +if the rows are contiguous. + +If input has shape :math:`N \times M` then the output will have shape +:math:`\frac{1}{2} N (N - 1)`. + +This function is equivalent to `scipy.spatial.distance.pdist(input, +'minkowski', p=p)` if :math:`p \in (0, \infty)`. When :math:`p = 0` it is +equivalent to `scipy.spatial.distance.pdist(input, 'hamming') * M`. +When :math:`p = \infty`, the closest scipy function is +`scipy.spatial.distance.pdist(xn, lambda x, y: np.abs(x - y).max())`. + +Args: + input: input tensor of shape :math:`N \times M`. + p: p value for the p-norm distance to calculate between each vector pair + :math:`\in [0, \infty]`. +""") + + +cosine_similarity = _add_docstr(torch.cosine_similarity, r""" +cosine_similarity(x1, x2, dim=1, eps=1e-8) -> Tensor + +Returns cosine similarity between x1 and x2, computed along dim. + +.. math :: + \text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2 \cdot \Vert x_2 \Vert _2, \epsilon)} + +Args: + x1 (Tensor): First input. + x2 (Tensor): Second input (of size matching x1). + dim (int, optional): Dimension of vectors. Default: 1 + eps (float, optional): Small value to avoid division by zero. + Default: 1e-8 + +Shape: + - Input: :math:`(\ast_1, D, \ast_2)` where D is at position `dim`. + - Output: :math:`(\ast_1, \ast_2)` where 1 is at position `dim`. + +Example:: + + >>> input1 = torch.randn(100, 128) + >>> input2 = torch.randn(100, 128) + >>> output = F.cosine_similarity(input1, input2) + >>> print(output) +""") + + +one_hot = _add_docstr(torch._C._nn.one_hot, r""" +one_hot(tensor, num_classes=-1) -> LongTensor + +Takes LongTensor with index values of shape ``(*)`` and returns a tensor +of shape ``(*, num_classes)`` that has zeros everywhere except where the +index of the last dimension matches the corresponding value of the input tensor, +in which case it will be 1. + +See also `One-hot on Wikipedia`_ . + +.. _One-hot on Wikipedia: + https://en.wikipedia.org/wiki/One-hot + +Arguments: + tensor (LongTensor): class values of any shape. + num_classes (int): Total number of classes. If set to -1, the number + of classes will be inferred as one greater than the largest class + value in the input tensor. + +Returns: + LongTensor that has one more dimension with 1 values at the + index of the last dimension indicated by the input, and 0 everywhere + else. + +Examples: + >>> F.one_hot(torch.arange(0, 5) % 3) + tensor([[1, 0, 0], + [0, 1, 0], + [0, 0, 1], + [1, 0, 0], + [0, 1, 0]]) + >>> F.one_hot(torch.arange(0, 5) % 3, num_classes=5) + tensor([[1, 0, 0, 0, 0], + [0, 1, 0, 0, 0], + [0, 0, 1, 0, 0], + [1, 0, 0, 0, 0], + [0, 1, 0, 0, 0]]) + >>> F.one_hot(torch.arange(0, 6).view(3,2) % 3) + tensor([[[1, 0, 0], + [0, 1, 0]], + [[0, 0, 1], + [1, 0, 0]], + [[0, 1, 0], + [0, 0, 1]]]) +""") + + +
    [docs]def triplet_margin_loss(anchor, positive, negative, margin=1.0, p=2, eps=1e-6, swap=False, size_average=None, + reduce=None, reduction="mean"): + # type: (Tensor, Tensor, Tensor, float, float, float, bool, Optional[bool], Optional[bool], str) -> Tensor + r""" + See :class:`~torch.nn.TripletMarginLoss` for details + """ + if size_average is not None or reduce is not None: + reduction_enum = _Reduction.legacy_get_enum(size_average, reduce) + else: + reduction_enum = _Reduction.get_enum(reduction) + return torch.triplet_margin_loss(anchor, positive, negative, margin, p, eps, + swap, reduction_enum)
    + + +
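A minimal usage sketch (editorial addition; the batch and embedding sizes are arbitrary):

    >>> anchor = torch.randn(8, 128, requires_grad=True)
    >>> positive = torch.randn(8, 128, requires_grad=True)
    >>> negative = torch.randn(8, 128, requires_grad=True)
    >>> loss = F.triplet_margin_loss(anchor, positive, negative, margin=1.0)
    >>> loss.backward()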
    [docs]def normalize(input, p=2, dim=1, eps=1e-12, out=None): + # type: (Tensor, float, int, float, Optional[Tensor]) -> Tensor + r"""Performs :math:`L_p` normalization of inputs over specified dimension. + + For a tensor :attr:`input` of sizes :math:`(n_0, ..., n_{dim}, ..., n_k)`, each + :math:`n_{dim}` -element vector :math:`v` along dimension :attr:`dim` is transformed as + + .. math:: + v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}. + + With the default arguments it uses the Euclidean norm over vectors along dimension :math:`1` for normalization. + + Args: + input: input tensor of any shape + p (float): the exponent value in the norm formulation. Default: 2 + dim (int): the dimension to reduce. Default: 1 + eps (float): small value to avoid division by zero. Default: 1e-12 + out (Tensor, optional): the output tensor. If :attr:`out` is used, this + operation won't be differentiable. + """ + if out is None: + denom = input.norm(p, dim, True).clamp_min(eps).expand_as(input) + ret = input / denom + else: + denom = input.norm(p, dim, True).clamp_min(eps).expand_as(input) + ret = torch.div(input, denom, out=out) + return ret
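A short sketch of the effect (editorial addition): after L2 normalization along ``dim=1``, every row has approximately unit norm:

    >>> x = torch.randn(4, 10)
    >>> y = F.normalize(x, p=2, dim=1)
    >>> y.norm(p=2, dim=1)
    tensor([1.0000, 1.0000, 1.0000, 1.0000])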
    + + +def assert_int_or_pair(arg, arg_name, message): + assert isinstance(arg, int) or len(arg) == 2, message.format(arg_name) + + +
[docs]def unfold(input, kernel_size, dilation=1, padding=0, stride=1): + # type: (Tensor, BroadcastingList2[int], BroadcastingList2[int], BroadcastingList2[int], BroadcastingList2[int]) -> Tensor # noqa + r"""Extracts sliding local blocks from a batched input tensor. + + .. warning:: + Currently, only 4-D input tensors (batched image-like tensors) are + supported. + + .. warning:: + + More than one element of the unfolded tensor may refer to a single + memory location. As a result, in-place operations (especially ones that + are vectorized) may result in incorrect behavior. If you need to write + to the tensor, please clone it first. + + + See :class:`torch.nn.Unfold` for details + """ + + if input.dim() == 4: + msg = '{} must be int or 2-tuple for 4D input' + assert_int_or_pair(kernel_size, 'kernel_size', msg) + assert_int_or_pair(dilation, 'dilation', msg) + assert_int_or_pair(padding, 'padding', msg) + assert_int_or_pair(stride, 'stride', msg) + + ret = torch._C._nn.im2col(input, _pair(kernel_size), + _pair(dilation), _pair(padding), _pair(stride)) + else: + raise NotImplementedError("Input Error: Only 4D input Tensors are supported (got {}D)".format(input.dim())) + ret = input # TODO: remove when jit supports exception control flow + return ret
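A shape sketch (editorial addition): each column of the result is one flattened ``C * kH * kW`` block, and the last dimension counts block positions, here ``(10 - 4 + 1) * (12 - 5 + 1) = 56``:

    >>> x = torch.randn(1, 3, 10, 12)
    >>> F.unfold(x, kernel_size=(4, 5)).shape
    torch.Size([1, 60, 56])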
    + + +
    [docs]def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1): + # type: (Tensor, BroadcastingList2[int], BroadcastingList2[int], BroadcastingList2[int], BroadcastingList2[int], BroadcastingList2[int]) -> Tensor # noqa + r"""Combines an array of sliding local blocks into a large containing + tensor. + + .. warning:: + Currently, only 4-D output tensors (batched image-like tensors) are + supported. + + See :class:`torch.nn.Fold` for details + """ + if input.dim() == 3: + msg = '{} must be int or 2-tuple for 3D input' + assert_int_or_pair(output_size, 'output_size', msg) + assert_int_or_pair(kernel_size, 'kernel_size', msg) + assert_int_or_pair(dilation, 'dilation', msg) + assert_int_or_pair(padding, 'padding', msg) + assert_int_or_pair(stride, 'stride', msg) + + ret = torch._C._nn.col2im(input, _pair(output_size), _pair(kernel_size), + _pair(dilation), _pair(padding), _pair(stride)) + else: + raise NotImplementedError("Input Error: Only 3D input Tensors are supported (got {}D)".format(input.dim())) + ret = input # TODO: remove when jit supports exception control flow + return ret
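When blocks do not overlap (stride equal to kernel size), :func:`fold` exactly inverts :func:`unfold`; where blocks overlap, the overlapping values are summed instead. An editorial sketch of the non-overlapping round trip:

    >>> x = torch.randn(1, 1, 4, 4)
    >>> blocks = F.unfold(x, kernel_size=(2, 2), stride=2)
    >>> y = F.fold(blocks, output_size=(4, 4), kernel_size=(2, 2), stride=2)
    >>> torch.equal(x, y)
    True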
+ + +def _pad_circular(input, padding): + # type: (Tensor, List[int]) -> Tensor + r""" + Arguments + :param input: tensor of shape :math:`(N, C_{\text{in}}, H[, W[, D]])` + :param padding: (tuple): m-element tuple, where ``m // 2`` is the number of + trailing dimensions to pad + Returns + :return: tensor of shape :math:`(N, C_{\text{in}}, [D + 2 * padding[0], + H + 2 * padding[1]], W + 2 * padding[2])` + """ + + input = torch.cat([input, input[:, :, 0:padding[-1]]], dim=2) + input = torch.cat([input[:, :, -(padding[-1] + padding[-2]):-padding[-1]], input], dim=2) + + if len(padding) > 2: + input = torch.cat([input, input[:, :, :, 0:padding[-3]]], dim=3) + input = torch.cat([input[:, :, :, -(padding[-3] + padding[-4]):-padding[-3]], input], dim=3) + + if len(padding) > 4: + input = torch.cat([input, input[:, :, :, :, 0:padding[-5]]], dim=4) + input = torch.cat([input[:, :, :, :, -(padding[-5] + padding[-6]):-padding[-5]], input], dim=4) + + return input + + +def multi_head_attention_forward(query, # type: Tensor + key, # type: Tensor + value, # type: Tensor + embed_dim_to_check, # type: int + num_heads, # type: int + in_proj_weight, # type: Tensor + in_proj_bias, # type: Tensor + bias_k, # type: Optional[Tensor] + bias_v, # type: Optional[Tensor] + add_zero_attn, # type: bool + dropout_p, # type: float + out_proj_weight, # type: Tensor + out_proj_bias, # type: Tensor + training=True, # type: bool + key_padding_mask=None, # type: Optional[Tensor] + need_weights=True, # type: bool + attn_mask=None, # type: Optional[Tensor] + use_separate_proj_weight=False, # type: bool + q_proj_weight=None, # type: Optional[Tensor] + k_proj_weight=None, # type: Optional[Tensor] + v_proj_weight=None, # type: Optional[Tensor] + static_k=None, # type: Optional[Tensor] + static_v=None # type: Optional[Tensor] + ): + # type: (...) -> Tuple[Tensor, Optional[Tensor]] + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + embed_dim_to_check: total dimension of the model. + num_heads: parallel attention heads. + in_proj_weight, in_proj_bias: input projection weight and bias. + bias_k, bias_v: bias of the key and value sequences to be added at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + dropout_p: probability of an element to be zeroed. + out_proj_weight, out_proj_bias: the output projection weight and bias. + training: apply dropout if ``True``. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. This is a binary mask. When the value is True, + the corresponding value on the attention layer will be filled with -inf. + need_weights: output attn_output_weights. + attn_mask: mask that prevents attention to certain positions. This is an additive mask + (i.e. the values will be added to the attention layer). + use_separate_proj_weight: the function accepts the proj. weights for query, key, + and value in different forms. If false, in_proj_weight will be used, which is + a combination of q_proj_weight, k_proj_weight, v_proj_weight. + q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias. + static_k, static_v: static key and value used for attention operators. + + + Shape: + Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. 
+ - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)`, ByteTensor, where N is the batch size, S is the source sequence length. + - attn_mask: :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + + Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + + qkv_same = torch.equal(query, key) and torch.equal(key, value) + kv_same = torch.equal(key, value) + + tgt_len, bsz, embed_dim = query.size() + assert embed_dim == embed_dim_to_check + assert list(query.size()) == [tgt_len, bsz, embed_dim] + assert key.size() == value.size() + + head_dim = embed_dim // num_heads + assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads" + scaling = float(head_dim) ** -0.5 + + if use_separate_proj_weight is not True: + if qkv_same: + # self-attention + q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1) + + elif kv_same: + # encoder-decoder attention + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + if key is None: + assert value is None + k = None + v = None + else: + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + k, v = linear(key, _w, _b).chunk(2, dim=-1) + + else: + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = embed_dim * 2 + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + k = linear(key, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim * 2 + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + v = linear(value, _w, _b) + else: + q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight) + len1, len2 = q_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == query.size(-1) + + k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight) + len1, len2 = k_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == key.size(-1) + + v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight) + len1, len2 = v_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == value.size(-1) + + if in_proj_bias is not None: + q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim]) + k = linear(key, 
k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)]) + v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):]) + else: + q = linear(query, q_proj_weight_non_opt, in_proj_bias) + k = linear(key, k_proj_weight_non_opt, in_proj_bias) + v = linear(value, v_proj_weight_non_opt, in_proj_bias) + q = q * scaling + + if bias_k is not None and bias_v is not None: + if static_k is None and static_v is None: + k = torch.cat([k, bias_k.repeat(1, bsz, 1)]) + v = torch.cat([v, bias_v.repeat(1, bsz, 1)]) + if attn_mask is not None: + attn_mask = torch.cat([attn_mask, + torch.zeros((attn_mask.size(0), 1), + dtype=attn_mask.dtype, + device=attn_mask.device)], dim=1) + if key_padding_mask is not None: + key_padding_mask = torch.cat( + [key_padding_mask, torch.zeros((key_padding_mask.size(0), 1), + dtype=key_padding_mask.dtype, + device=key_padding_mask.device)], dim=1) + else: + assert static_k is None, "bias cannot be added to static key." + assert static_v is None, "bias cannot be added to static value." + else: + assert bias_k is None + assert bias_v is None + + q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1) + if k is not None: + k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + if v is not None: + v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + + if static_k is not None: + assert static_k.size(0) == bsz * num_heads + assert static_k.size(2) == head_dim + k = static_k + + if static_v is not None: + assert static_v.size(0) == bsz * num_heads + assert static_v.size(2) == head_dim + v = static_v + + src_len = k.size(1) + + if key_padding_mask is not None: + assert key_padding_mask.size(0) == bsz + assert key_padding_mask.size(1) == src_len + + if add_zero_attn: + src_len += 1 + k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1) + v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1) + if attn_mask is not None: + attn_mask = torch.cat([attn_mask, torch.zeros((attn_mask.size(0), 1), + dtype=attn_mask.dtype, + device=attn_mask.device)], dim=1) + if key_padding_mask is not None: + key_padding_mask = torch.cat( + [key_padding_mask, torch.zeros((key_padding_mask.size(0), 1), + dtype=key_padding_mask.dtype, + device=key_padding_mask.device)], dim=1) + + attn_output_weights = torch.bmm(q, k.transpose(1, 2)) + assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len] + + if attn_mask is not None: + attn_mask = attn_mask.unsqueeze(0) + attn_output_weights += attn_mask + + if key_padding_mask is not None: + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + attn_output_weights = attn_output_weights.masked_fill( + key_padding_mask.unsqueeze(1).unsqueeze(2), + float('-inf'), + ) + attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len) + + attn_output_weights = softmax( + attn_output_weights, dim=-1) + attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training) + + attn_output = torch.bmm(attn_output_weights, v) + assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] + attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) + attn_output = linear(attn_output, out_proj_weight, out_proj_bias) + + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return attn_output, 
attn_output_weights.sum(dim=1) / num_heads + else: + return attn_output, None +
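A shape-only sketch of calling this functional directly (editorial addition; the random projection weights exist purely to exercise the shapes, and the head count must divide the embedding dimension):

    >>> L, S, N, E, h = 5, 7, 2, 16, 4
    >>> query, key, value = torch.randn(L, N, E), torch.randn(S, N, E), torch.randn(S, N, E)
    >>> out, weights = F.multi_head_attention_forward(
    ...     query, key, value, E, h,
    ...     torch.randn(3 * E, E), torch.zeros(3 * E),  # in_proj_weight, in_proj_bias
    ...     None, None, False, 0.0,                     # bias_k, bias_v, add_zero_attn, dropout_p
    ...     torch.randn(E, E), torch.zeros(E))          # out_proj_weight, out_proj_bias
    >>> out.shape, weights.shape
    (torch.Size([5, 2, 16]), torch.Size([2, 5, 7]))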
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/init.html b/docs/stable/_modules/torch/nn/init.html
new file mode 100644
index 000000000000..5b0c979c2eff
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/init.html
@@ -0,0 +1,974 @@
+ torch.nn.init — PyTorch master documentation
    Source code for torch.nn.init

    +from __future__ import division
    +
    +import math
    +import warnings
    +
    +import torch
    +
    +# These no_grad_* functions are necessary as wrappers around the parts of these
    +# functions that use `with torch.no_grad()`. The JIT doesn't support context
    +# managers, so these need to be implemented as builtins. Using these wrappers
    +# lets us keep those builtins small and re-usable.
    +def _no_grad_uniform_(tensor, a, b):
    +    with torch.no_grad():
    +        return tensor.uniform_(a, b)
    +
    +
    +def _no_grad_normal_(tensor, mean, std):
    +    with torch.no_grad():
    +        return tensor.normal_(mean, std)
    +
    +
    +def _no_grad_fill_(tensor, val):
    +    with torch.no_grad():
    +        return tensor.fill_(val)
    +
    +
    +def _no_grad_zero_(tensor):
    +    with torch.no_grad():
    +        return tensor.zero_()
    +
    +
    +
[docs]def calculate_gain(nonlinearity, param=None): + r"""Return the recommended gain value for the given nonlinearity function. + The values are as follows: + + ================= ==================================================== + nonlinearity gain + ================= ==================================================== + Linear / Identity :math:`1` + Conv{1,2,3}D :math:`1` + Sigmoid :math:`1` + Tanh :math:`\frac{5}{3}` + ReLU :math:`\sqrt{2}` + Leaky ReLU :math:`\sqrt{\frac{2}{1 + \text{negative\_slope}^2}}` + ================= ==================================================== + + Args: + nonlinearity: the non-linear function (`nn.functional` name) + param: optional parameter for the non-linear function + + Examples: + >>> gain = nn.init.calculate_gain('leaky_relu', 0.2) # leaky_relu with negative_slope=0.2 + """ + linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d'] + if nonlinearity in linear_fns or nonlinearity == 'sigmoid': + return 1 + elif nonlinearity == 'tanh': + return 5.0 / 3 + elif nonlinearity == 'relu': + return math.sqrt(2.0) + elif nonlinearity == 'leaky_relu': + if param is None: + negative_slope = 0.01 + elif (not isinstance(param, bool) and isinstance(param, int)) or isinstance(param, float): + # True/False are instances of int, hence the bool check above + negative_slope = param + else: + raise ValueError("negative_slope {} not a valid number".format(param)) + return math.sqrt(2.0 / (1 + negative_slope ** 2)) + else: + raise ValueError("Unsupported nonlinearity {}".format(nonlinearity))
    + + +
    [docs]def uniform_(tensor, a=0., b=1.): + # type: (Tensor, float, float) -> Tensor + r"""Fills the input Tensor with values drawn from the uniform + distribution :math:`\mathcal{U}(a, b)`. + + Args: + tensor: an n-dimensional `torch.Tensor` + a: the lower bound of the uniform distribution + b: the upper bound of the uniform distribution + + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.uniform_(w) + """ + return _no_grad_uniform_(tensor, a, b)
    + + +
    [docs]def normal_(tensor, mean=0., std=1.): + # type: (Tensor, float, float) -> Tensor + r"""Fills the input Tensor with values drawn from the normal + distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`. + + Args: + tensor: an n-dimensional `torch.Tensor` + mean: the mean of the normal distribution + std: the standard deviation of the normal distribution + + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.normal_(w) + """ + return _no_grad_normal_(tensor, mean, std)
    + + +
    [docs]def constant_(tensor, val): + # type: (Tensor, float) -> Tensor + r"""Fills the input Tensor with the value :math:`\text{val}`. + + Args: + tensor: an n-dimensional `torch.Tensor` + val: the value to fill the tensor with + + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.constant_(w, 0.3) + """ + return _no_grad_fill_(tensor, val)
    + + +
    [docs]def ones_(tensor): + # type: (Tensor) -> Tensor + r"""Fills the input Tensor with the scalar value `1`. + + Args: + tensor: an n-dimensional `torch.Tensor` + + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.ones_(w) + """ + return _no_grad_fill_(tensor, 1.)
    + + +
    [docs]def zeros_(tensor): + # type: (Tensor) -> Tensor + r"""Fills the input Tensor with the scalar value `0`. + + Args: + tensor: an n-dimensional `torch.Tensor` + + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.zeros_(w) + """ + return _no_grad_zero_(tensor)
    + + +
    [docs]def eye_(tensor): + r"""Fills the 2-dimensional input `Tensor` with the identity + matrix. Preserves the identity of the inputs in `Linear` layers, where as + many inputs are preserved as possible. + + Args: + tensor: a 2-dimensional `torch.Tensor` + + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.eye_(w) + """ + if tensor.ndimension() != 2: + raise ValueError("Only tensors with 2 dimensions are supported") + + with torch.no_grad(): + torch.eye(*tensor.shape, out=tensor, requires_grad=tensor.requires_grad) + return tensor
    + + +
    [docs]def dirac_(tensor): + r"""Fills the {3, 4, 5}-dimensional input `Tensor` with the Dirac + delta function. Preserves the identity of the inputs in `Convolutional` + layers, where as many input channels are preserved as possible. + + Args: + tensor: a {3, 4, 5}-dimensional `torch.Tensor` + + Examples: + >>> w = torch.empty(3, 16, 5, 5) + >>> nn.init.dirac_(w) + """ + dimensions = tensor.ndimension() + if dimensions not in [3, 4, 5]: + raise ValueError("Only tensors with 3, 4, or 5 dimensions are supported") + + sizes = tensor.size() + min_dim = min(sizes[0], sizes[1]) + with torch.no_grad(): + tensor.zero_() + + for d in range(min_dim): + if dimensions == 3: # Temporal convolution + tensor[d, d, tensor.size(2) // 2] = 1 + elif dimensions == 4: # Spatial convolution + tensor[d, d, tensor.size(2) // 2, tensor.size(3) // 2] = 1 + else: # Volumetric convolution + tensor[d, d, tensor.size(2) // 2, tensor.size(3) // 2, tensor.size(4) // 2] = 1 + return tensor
    + + +def _calculate_fan_in_and_fan_out(tensor): + dimensions = tensor.dim() + if dimensions < 2: + raise ValueError("Fan in and fan out can not be computed for tensor with fewer than 2 dimensions") + + if dimensions == 2: # Linear + fan_in = tensor.size(1) + fan_out = tensor.size(0) + else: + num_input_fmaps = tensor.size(1) + num_output_fmaps = tensor.size(0) + receptive_field_size = 1 + if tensor.dim() > 2: + receptive_field_size = tensor[0][0].numel() + fan_in = num_input_fmaps * receptive_field_size + fan_out = num_output_fmaps * receptive_field_size + + return fan_in, fan_out + + +
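A sketch of the fan computation for a convolutional weight (editorial addition): with layout ``(out_channels, in_channels, kH, kW)``, ``fan_in = in_channels * kH * kW`` and ``fan_out = out_channels * kH * kW``:

    >>> w = torch.empty(8, 3, 5, 5)
    >>> _calculate_fan_in_and_fan_out(w)
    (75, 200)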
    [docs]def xavier_uniform_(tensor, gain=1.): + # type: (Tensor, float) -> Tensor + r"""Fills the input `Tensor` with values according to the method + described in `Understanding the difficulty of training deep feedforward + neural networks` - Glorot, X. & Bengio, Y. (2010), using a uniform + distribution. The resulting tensor will have values sampled from + :math:`\mathcal{U}(-a, a)` where + + .. math:: + a = \text{gain} \times \sqrt{\frac{6}{\text{fan\_in} + \text{fan\_out}}} + + Also known as Glorot initialization. + + Args: + tensor: an n-dimensional `torch.Tensor` + gain: an optional scaling factor + + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain('relu')) + """ + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) + std = gain * math.sqrt(2.0 / float(fan_in + fan_out)) + a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation + + return _no_grad_uniform_(tensor, -a, a)
    + + +
    [docs]def xavier_normal_(tensor, gain=1.): + # type: (Tensor, float) -> Tensor + r"""Fills the input `Tensor` with values according to the method + described in `Understanding the difficulty of training deep feedforward + neural networks` - Glorot, X. & Bengio, Y. (2010), using a normal + distribution. The resulting tensor will have values sampled from + :math:`\mathcal{N}(0, \text{std}^2)` where + + .. math:: + \text{std} = \text{gain} \times \sqrt{\frac{2}{\text{fan\_in} + \text{fan\_out}}} + + Also known as Glorot initialization. + + Args: + tensor: an n-dimensional `torch.Tensor` + gain: an optional scaling factor + + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.xavier_normal_(w) + """ + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) + std = gain * math.sqrt(2.0 / float(fan_in + fan_out)) + + return _no_grad_normal_(tensor, 0., std)
    + + +def _calculate_correct_fan(tensor, mode): + mode = mode.lower() + valid_modes = ['fan_in', 'fan_out'] + if mode not in valid_modes: + raise ValueError("Mode {} not supported, please use one of {}".format(mode, valid_modes)) + + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) + return fan_in if mode == 'fan_in' else fan_out + + +
    [docs]def kaiming_uniform_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'): + r"""Fills the input `Tensor` with values according to the method + described in `Delving deep into rectifiers: Surpassing human-level + performance on ImageNet classification` - He, K. et al. (2015), using a + uniform distribution. The resulting tensor will have values sampled from + :math:`\mathcal{U}(-\text{bound}, \text{bound})` where + + .. math:: + \text{bound} = \sqrt{\frac{6}{(1 + a^2) \times \text{fan\_in}}} + + Also known as He initialization. + + Args: + tensor: an n-dimensional `torch.Tensor` + a: the negative slope of the rectifier used after this layer (0 for ReLU + by default) + mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'`` + preserves the magnitude of the variance of the weights in the + forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the + backwards pass. + nonlinearity: the non-linear function (`nn.functional` name), + recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default). + + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.kaiming_uniform_(w, mode='fan_in', nonlinearity='relu') + """ + fan = _calculate_correct_fan(tensor, mode) + gain = calculate_gain(nonlinearity, a) + std = gain / math.sqrt(fan) + bound = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation + with torch.no_grad(): + return tensor.uniform_(-bound, bound)
    + + +
    [docs]def kaiming_normal_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'): + r"""Fills the input `Tensor` with values according to the method + described in `Delving deep into rectifiers: Surpassing human-level + performance on ImageNet classification` - He, K. et al. (2015), using a + normal distribution. The resulting tensor will have values sampled from + :math:`\mathcal{N}(0, \text{std}^2)` where + + .. math:: + \text{std} = \sqrt{\frac{2}{(1 + a^2) \times \text{fan\_in}}} + + Also known as He initialization. + + Args: + tensor: an n-dimensional `torch.Tensor` + a: the negative slope of the rectifier used after this layer (0 for ReLU + by default) + mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'`` + preserves the magnitude of the variance of the weights in the + forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the + backwards pass. + nonlinearity: the non-linear function (`nn.functional` name), + recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default). + + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.kaiming_normal_(w, mode='fan_out', nonlinearity='relu') + """ + fan = _calculate_correct_fan(tensor, mode) + gain = calculate_gain(nonlinearity, a) + std = gain / math.sqrt(fan) + with torch.no_grad(): + return tensor.normal_(0, std)
    + + +
    [docs]def orthogonal_(tensor, gain=1): + r"""Fills the input `Tensor` with a (semi) orthogonal matrix, as + described in `Exact solutions to the nonlinear dynamics of learning in deep + linear neural networks` - Saxe, A. et al. (2013). The input tensor must have + at least 2 dimensions, and for tensors with more than 2 dimensions the + trailing dimensions are flattened. + + Args: + tensor: an n-dimensional `torch.Tensor`, where :math:`n \geq 2` + gain: optional scaling factor + + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.orthogonal_(w) + """ + if tensor.ndimension() < 2: + raise ValueError("Only tensors with 2 or more dimensions are supported") + + rows = tensor.size(0) + cols = tensor.numel() // rows + flattened = tensor.new(rows, cols).normal_(0, 1) + + if rows < cols: + flattened.t_() + + # Compute the qr factorization + q, r = torch.qr(flattened) + # Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf + d = torch.diag(r, 0) + ph = d.sign() + q *= ph + + if rows < cols: + q.t_() + + with torch.no_grad(): + tensor.view_as(q).copy_(q) + tensor.mul_(gain) + return tensor
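A sketch of the advertised property (editorial addition): for ``rows <= cols`` the rows come out orthonormal, so ``w w^T`` is the identity up to floating-point error:

    >>> w = torch.empty(3, 5)
    >>> _ = nn.init.orthogonal_(w)
    >>> torch.allclose(torch.mm(w, w.t()), torch.eye(3), atol=1e-6)
    True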
    + + +
[docs]def sparse_(tensor, sparsity, std=0.01): + r"""Fills the 2D input `Tensor` as a sparse matrix, where the + non-zero elements will be drawn from the normal distribution + :math:`\mathcal{N}(0, 0.01)`, as described in `Deep learning via + Hessian-free optimization` - Martens, J. (2010). + + Args: + tensor: a 2-dimensional `torch.Tensor` + sparsity: The fraction of elements in each column to be set to zero + std: the standard deviation of the normal distribution used to generate + the non-zero values + + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.sparse_(w, sparsity=0.1) + """ + if tensor.ndimension() != 2: + raise ValueError("Only tensors with 2 dimensions are supported") + + rows, cols = tensor.shape + num_zeros = int(math.ceil(sparsity * rows)) + + with torch.no_grad(): + tensor.normal_(0, std) + for col_idx in range(cols): + row_indices = torch.randperm(rows) + zero_indices = row_indices[:num_zeros] + tensor[zero_indices, col_idx] = 0 + return tensor
    + + +# for backward compatibility +def _make_deprecate(meth): + new_name = meth.__name__ + old_name = new_name[:-1] + + def deprecated_init(*args, **kwargs): + warnings.warn("nn.init.{} is now deprecated in favor of nn.init.{}." + .format(old_name, new_name), stacklevel=2) + return meth(*args, **kwargs) + + deprecated_init.__doc__ = r""" + {old_name}(...) + + .. warning:: + This method is now deprecated in favor of :func:`torch.nn.init.{new_name}`. + + See :func:`~torch.nn.init.{new_name}` for details.""".format( + old_name=old_name, new_name=new_name) + deprecated_init.__name__ = old_name + return deprecated_init + + +uniform = _make_deprecate(uniform_) +normal = _make_deprecate(normal_) +constant = _make_deprecate(constant_) +eye = _make_deprecate(eye_) +dirac = _make_deprecate(dirac_) +xavier_uniform = _make_deprecate(xavier_uniform_) +xavier_normal = _make_deprecate(xavier_normal_) +kaiming_uniform = _make_deprecate(kaiming_uniform_) +kaiming_normal = _make_deprecate(kaiming_normal_) +orthogonal = _make_deprecate(orthogonal_) +sparse = _make_deprecate(sparse_) +
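The effect of the wrapper above (editorial sketch): the old, non-underscored names still work, but emit a deprecation warning and forward to the in-place variants:

    >>> w = torch.empty(3, 5)
    >>> _ = nn.init.uniform(w)  # warns: nn.init.uniform is now deprecated in favor of nn.init.uniform_.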
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/modules/activation.html b/docs/stable/_modules/torch/nn/modules/activation.html
new file mode 100644
index 000000000000..6c151caca9c4
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/modules/activation.html
@@ -0,0 +1,1566 @@
+ torch.nn.modules.activation — PyTorch master documentation

    Source code for torch.nn.modules.activation

    +import warnings
    +import torch
    +from . import Linear
    +from torch.nn.init import xavier_uniform_
    +from torch.nn.init import constant_
    +from torch.nn.init import xavier_normal_
    +from torch.nn.parameter import Parameter
    +from .module import Module
    +from .. import functional as F
    +
    +
    +
    [docs]class Threshold(Module): + r"""Thresholds each element of the input Tensor. + + Threshold is defined as: + + .. math:: + y = + \begin{cases} + x, &\text{ if } x > \text{threshold} \\ + \text{value}, &\text{ otherwise } + \end{cases} + + Args: + threshold: The value to threshold at + value: The value to replace with + inplace: can optionally do the operation in-place. Default: ``False`` + + Shape: + - Input: :math:`(N, *)` where `*` means, any number of additional + dimensions + - Output: :math:`(N, *)`, same shape as the input + + Examples:: + + >>> m = nn.Threshold(0.1, 20) + >>> input = torch.randn(2) + >>> output = m(input) + """ + __constants__ = ['threshold', 'value', 'inplace'] + + def __init__(self, threshold, value, inplace=False): + super(Threshold, self).__init__() + self.threshold = threshold + self.value = value + self.inplace = inplace + # TODO: check in THNN (if inplace == True, then assert value <= threshold) + + def forward(self, input): + return F.threshold(input, self.threshold, self.value, self.inplace) + + def extra_repr(self): + inplace_str = ', inplace=True' if self.inplace else '' + return 'threshold={}, value={}{}'.format( + self.threshold, self.value, inplace_str + )
    + + +
    [docs]class ReLU(Module): + r"""Applies the rectified linear unit function element-wise: + + :math:`\text{ReLU}(x)= \max(0, x)` + + Args: + inplace: can optionally do the operation in-place. Default: ``False`` + + Shape: + - Input: :math:`(N, *)` where `*` means, any number of additional + dimensions + - Output: :math:`(N, *)`, same shape as the input + + .. image:: scripts/activation_images/ReLU.png + + Examples:: + + >>> m = nn.ReLU() + >>> input = torch.randn(2) + >>> output = m(input) + + + An implementation of CReLU - https://arxiv.org/abs/1603.05201 + + >>> m = nn.ReLU() + >>> input = torch.randn(2).unsqueeze(0) + >>> output = torch.cat((m(input),m(-input))) + """ + __constants__ = ['inplace'] + + def __init__(self, inplace=False): + super(ReLU, self).__init__() + self.inplace = inplace + + def forward(self, input): + return F.relu(input, inplace=self.inplace) + + def extra_repr(self): + inplace_str = 'inplace=True' if self.inplace else '' + return inplace_str
    + + +
[docs]class RReLU(Module): + r"""Applies the randomized leaky rectified linear unit function, element-wise, + as described in the paper: + + `Empirical Evaluation of Rectified Activations in Convolutional Network`_. + + The function is defined as: + + .. math:: + \text{RReLU}(x) = + \begin{cases} + x & \text{if } x \geq 0 \\ + ax & \text{ otherwise } + \end{cases} + + where :math:`a` is randomly sampled from uniform distribution + :math:`\mathcal{U}(\text{lower}, \text{upper})`. + + See: https://arxiv.org/pdf/1505.00853.pdf + + Args: + lower: lower bound of the uniform distribution. Default: :math:`\frac{1}{8}` + upper: upper bound of the uniform distribution. Default: :math:`\frac{1}{3}` + inplace: can optionally do the operation in-place. Default: ``False`` + + Shape: + - Input: :math:`(N, *)` where `*` means, any number of additional + dimensions + - Output: :math:`(N, *)`, same shape as the input + + Examples:: + + >>> m = nn.RReLU(0.1, 0.3) + >>> input = torch.randn(2) + >>> output = m(input) + + .. _`Empirical Evaluation of Rectified Activations in Convolutional Network`: + https://arxiv.org/abs/1505.00853 + """ + __constants__ = ['lower', 'upper', 'inplace'] + + def __init__(self, lower=1. / 8, upper=1. / 3, inplace=False): + super(RReLU, self).__init__() + self.lower = lower + self.upper = upper + self.inplace = inplace + + def forward(self, input): + return F.rrelu(input, self.lower, self.upper, self.training, self.inplace) + + def extra_repr(self): + inplace_str = ', inplace=True' if self.inplace else '' + return 'lower={}, upper={}{}'.format(self.lower, self.upper, inplace_str)
    + + +
    [docs]class Hardtanh(Module): + r"""Applies the HardTanh function element-wise + + HardTanh is defined as: + + .. math:: + \text{HardTanh}(x) = \begin{cases} + 1 & \text{ if } x > 1 \\ + -1 & \text{ if } x < -1 \\ + x & \text{ otherwise } \\ + \end{cases} + + The range of the linear region :math:`[-1, 1]` can be adjusted using + :attr:`min_val` and :attr:`max_val`. + + Args: + min_val: minimum value of the linear region range. Default: -1 + max_val: maximum value of the linear region range. Default: 1 + inplace: can optionally do the operation in-place. Default: ``False`` + + Keyword arguments :attr:`min_value` and :attr:`max_value` + have been deprecated in favor of :attr:`min_val` and :attr:`max_val`. + + Shape: + - Input: :math:`(N, *)` where `*` means, any number of additional + dimensions + - Output: :math:`(N, *)`, same shape as the input + + .. image:: scripts/activation_images/Hardtanh.png + + Examples:: + + >>> m = nn.Hardtanh(-2, 2) + >>> input = torch.randn(2) + >>> output = m(input) + """ + __constants__ = ['min_val', 'max_val', 'inplace'] + + def __init__(self, min_val=-1., max_val=1., inplace=False, min_value=None, max_value=None): + super(Hardtanh, self).__init__() + if min_value is not None: + warnings.warn("keyword argument min_value is deprecated and renamed to min_val") + min_val = min_value + if max_value is not None: + warnings.warn("keyword argument max_value is deprecated and renamed to max_val") + max_val = max_value + + self.min_val = min_val + self.max_val = max_val + self.inplace = inplace + assert self.max_val > self.min_val + + def forward(self, input): + return F.hardtanh(input, self.min_val, self.max_val, self.inplace) + + def extra_repr(self): + inplace_str = ', inplace=True' if self.inplace else '' + return 'min_val={}, max_val={}{}'.format( + self.min_val, self.max_val, inplace_str + )
    + + +
    [docs]class ReLU6(Hardtanh): + r"""Applies the element-wise function: + + .. math:: + \text{ReLU6}(x) = \min(\max(0,x), 6) + + Args: + inplace: can optionally do the operation in-place. Default: ``False`` + + Shape: + - Input: :math:`(N, *)` where `*` means, any number of additional + dimensions + - Output: :math:`(N, *)`, same shape as the input + + .. image:: scripts/activation_images/ReLU6.png + + Examples:: + + >>> m = nn.ReLU6() + >>> input = torch.randn(2) + >>> output = m(input) + """ + + def __init__(self, inplace=False): + super(ReLU6, self).__init__(0., 6., inplace) + + def extra_repr(self): + inplace_str = 'inplace=True' if self.inplace else '' + return inplace_str
    + + +
    [docs]class Sigmoid(Module): + r"""Applies the element-wise function: + + .. math:: + \text{Sigmoid}(x) = \frac{1}{1 + \exp(-x)} + + + Shape: + - Input: :math:`(N, *)` where `*` means, any number of additional + dimensions + - Output: :math:`(N, *)`, same shape as the input + + .. image:: scripts/activation_images/Sigmoid.png + + Examples:: + + >>> m = nn.Sigmoid() + >>> input = torch.randn(2) + >>> output = m(input) + """ + + def forward(self, input): + return torch.sigmoid(input)
    + + +
    [docs]class Tanh(Module): + r"""Applies the element-wise function: + + .. math:: + \text{Tanh}(x) = \tanh(x) = \frac{e^x - e^{-x}} {e^x + e^{-x}} + + Shape: + - Input: :math:`(N, *)` where `*` means, any number of additional + dimensions + - Output: :math:`(N, *)`, same shape as the input + + .. image:: scripts/activation_images/Tanh.png + + Examples:: + + >>> m = nn.Tanh() + >>> input = torch.randn(2) + >>> output = m(input) + """ + + def forward(self, input): + return torch.tanh(input)
    + + +
    [docs]class ELU(Module): + r"""Applies the element-wise function: + + .. math:: + \text{ELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x) - 1)) + + Args: + alpha: the :math:`\alpha` value for the ELU formulation. Default: 1.0 + inplace: can optionally do the operation in-place. Default: ``False`` + + Shape: + - Input: :math:`(N, *)` where `*` means, any number of additional + dimensions + - Output: :math:`(N, *)`, same shape as the input + + .. image:: scripts/activation_images/ELU.png + + Examples:: + + >>> m = nn.ELU() + >>> input = torch.randn(2) + >>> output = m(input) + """ + __constants__ = ['alpha', 'inplace'] + + def __init__(self, alpha=1., inplace=False): + super(ELU, self).__init__() + self.alpha = alpha + self.inplace = inplace + + def forward(self, input): + return F.elu(input, self.alpha, self.inplace) + + def extra_repr(self): + inplace_str = ', inplace=True' if self.inplace else '' + return 'alpha={}{}'.format(self.alpha, inplace_str)
    + + +
    [docs]class CELU(Module): + r"""Applies the element-wise function: + + .. math:: + \text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x/\alpha) - 1)) + + More details can be found in the paper `Continuously Differentiable Exponential Linear Units`_ . + + Args: + alpha: the :math:`\alpha` value for the CELU formulation. Default: 1.0 + inplace: can optionally do the operation in-place. Default: ``False`` + + Shape: + - Input: :math:`(N, *)` where `*` means, any number of additional + dimensions + - Output: :math:`(N, *)`, same shape as the input + + .. image:: scripts/activation_images/CELU.png + + Examples:: + + >>> m = nn.CELU() + >>> input = torch.randn(2) + >>> output = m(input) + + .. _`Continuously Differentiable Exponential Linear Units`: + https://arxiv.org/abs/1704.07483 + """ + __constants__ = ['alpha', 'inplace'] + + def __init__(self, alpha=1., inplace=False): + super(CELU, self).__init__() + self.alpha = alpha + self.inplace = inplace + + def forward(self, input): + return F.celu(input, self.alpha, self.inplace) + + def extra_repr(self): + inplace_str = ', inplace=True' if self.inplace else '' + return 'alpha={}{}'.format(self.alpha, inplace_str)
    + + +
    [docs]class SELU(Module): + r"""Applied element-wise, as: + + .. math:: + \text{SELU}(x) = \text{scale} * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1))) + + with :math:`\alpha = 1.6732632423543772848170429916717` and + :math:`\text{scale} = 1.0507009873554804934193349852946`. + + More details can be found in the paper `Self-Normalizing Neural Networks`_ . + + Args: + inplace (bool, optional): can optionally do the operation in-place. Default: ``False`` + + Shape: + - Input: :math:`(N, *)` where `*` means, any number of additional + dimensions + - Output: :math:`(N, *)`, same shape as the input + + .. image:: scripts/activation_images/SELU.png + + Examples:: + + >>> m = nn.SELU() + >>> input = torch.randn(2) + >>> output = m(input) + + .. _Self-Normalizing Neural Networks: https://arxiv.org/abs/1706.02515 + """ + __constants__ = ['inplace'] + + def __init__(self, inplace=False): + super(SELU, self).__init__() + self.inplace = inplace + + def forward(self, input): + return F.selu(input, self.inplace) + + def extra_repr(self): + inplace_str = 'inplace=True' if self.inplace else '' + return inplace_str
    + + +class GLU(Module): + r"""Applies the gated linear unit function + :math:`{GLU}(a, b)= a \otimes \sigma(b)` where :math:`a` is the first half + of the input matrices and :math:`b` is the second half. + + Args: + dim (int): the dimension on which to split the input. Default: -1 + + Shape: + - Input: :math:`(\ast_1, N, \ast_2)` where `*` means, any number of additional + dimensions + - Output: :math:`(\ast_1, M, \ast_2)` where :math:`M=N/2` + + Examples:: + + >>> m = nn.GLU() + >>> input = torch.randn(4, 2) + >>> output = m(input) + """ + __constants__ = ['dim'] + + def __init__(self, dim=-1): + super(GLU, self).__init__() + self.dim = dim + + def forward(self, input): + return F.glu(input, self.dim) + + def extra_repr(self): + return 'dim={}'.format(self.dim) + + +
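A shape sketch (editorial addition): GLU halves the chosen dimension, multiplying the first half element-wise by the sigmoid of the second:

    >>> m = nn.GLU(dim=-1)
    >>> m(torch.randn(4, 6)).shape
    torch.Size([4, 3])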
    [docs]class Hardshrink(Module): + r"""Applies the hard shrinkage function element-wise: + + .. math:: + \text{HardShrink}(x) = + \begin{cases} + x, & \text{ if } x > \lambda \\ + x, & \text{ if } x < -\lambda \\ + 0, & \text{ otherwise } + \end{cases} + + Args: + lambd: the :math:`\lambda` value for the Hardshrink formulation. Default: 0.5 + + Shape: + - Input: :math:`(N, *)` where `*` means, any number of additional + dimensions + - Output: :math:`(N, *)`, same shape as the input + + .. image:: scripts/activation_images/Hardshrink.png + + Examples:: + + >>> m = nn.Hardshrink() + >>> input = torch.randn(2) + >>> output = m(input) + """ + __constants__ = ['lambd'] + + def __init__(self, lambd=0.5): + super(Hardshrink, self).__init__() + self.lambd = lambd + + def forward(self, input): + return F.hardshrink(input, self.lambd) + + def extra_repr(self): + return '{}'.format(self.lambd)
    + + +
[docs]class LeakyReLU(Module): + r"""Applies the element-wise function: + + .. math:: + \text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x) + + + or + + .. math:: + \text{LeakyReLU}(x) = + \begin{cases} + x, & \text{ if } x \geq 0 \\ + \text{negative\_slope} \times x, & \text{ otherwise } + \end{cases} + + Args: + negative_slope: Controls the angle of the negative slope. Default: 1e-2 + inplace: can optionally do the operation in-place. Default: ``False`` + + Shape: + - Input: :math:`(N, *)` where `*` means any number of additional + dimensions + - Output: :math:`(N, *)`, same shape as the input + + .. image:: scripts/activation_images/LeakyReLU.png + + Examples:: + + >>> m = nn.LeakyReLU(0.1) + >>> input = torch.randn(2) + >>> output = m(input) + """ + __constants__ = ['inplace', 'negative_slope'] + + def __init__(self, negative_slope=1e-2, inplace=False): + super(LeakyReLU, self).__init__() + self.negative_slope = negative_slope + self.inplace = inplace + + def forward(self, input): + return F.leaky_relu(input, self.negative_slope, self.inplace) + + def extra_repr(self): + inplace_str = ', inplace=True' if self.inplace else '' + return 'negative_slope={}{}'.format(self.negative_slope, inplace_str)
    + + +
    [docs]class LogSigmoid(Module): + r"""Applies the element-wise function: + + .. math:: + \text{LogSigmoid}(x) = \log\left(\frac{ 1 }{ 1 + \exp(-x)}\right) + + Shape: + - Input: :math:`(N, *)` where `*` means, any number of additional + dimensions + - Output: :math:`(N, *)`, same shape as the input + + .. image:: scripts/activation_images/LogSigmoid.png + + Examples:: + + >>> m = nn.LogSigmoid() + >>> input = torch.randn(2) + >>> output = m(input) + """ + + def forward(self, input): + return F.logsigmoid(input)
    + + +
    [docs]class Softplus(Module): + r"""Applies the element-wise function: + + .. math:: + \text{Softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x)) + + SoftPlus is a smooth approximation to the ReLU function and can be used + to constrain the output of a machine to always be positive. + + For numerical stability the implementation reverts to the linear function + for inputs above a certain value. + + Args: + beta: the :math:`\beta` value for the Softplus formulation. Default: 1 + threshold: values above this revert to a linear function. Default: 20 + + Shape: + - Input: :math:`(N, *)` where `*` means, any number of additional + dimensions + - Output: :math:`(N, *)`, same shape as the input + + .. image:: scripts/activation_images/Softplus.png + + Examples:: + + >>> m = nn.Softplus() + >>> input = torch.randn(2) + >>> output = m(input) + """ + __constants__ = ['beta', 'threshold'] + + def __init__(self, beta=1, threshold=20): + super(Softplus, self).__init__() + self.beta = beta + self.threshold = threshold + + def forward(self, input): + return F.softplus(input, self.beta, self.threshold) + + def extra_repr(self): + return 'beta={}, threshold={}'.format(self.beta, self.threshold)
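The numerical-stability note above is directly observable: once :math:`\beta x` exceeds :attr:`threshold`, the module returns the input unchanged (values below are chosen to straddle the default threshold of 20)::

    import torch
    import torch.nn as nn

    m = nn.Softplus()                     # beta=1, threshold=20
    x = torch.tensor([1.0, 30.0])
    out = m(x)
    # Beyond the threshold the implementation is exactly linear.
    assert out[1] == 30.0
    # Below the threshold it is the smooth log(1 + exp(x)).
    assert torch.allclose(out[0], torch.log1p(torch.exp(torch.tensor(1.0))))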
    + + +
    [docs]class Softshrink(Module): + r"""Applies the soft shrinkage function elementwise: + + .. math:: + \text{SoftShrinkage}(x) = + \begin{cases} + x - \lambda, & \text{ if } x > \lambda \\ + x + \lambda, & \text{ if } x < -\lambda \\ + 0, & \text{ otherwise } + \end{cases} + + Args: + lambd: the :math:`\lambda` value for the Softshrink formulation. Default: 0.5 + + Shape: + - Input: :math:`(N, *)` where `*` means, any number of additional + dimensions + - Output: :math:`(N, *)`, same shape as the input + + .. image:: scripts/activation_images/Softshrink.png + + Examples:: + + >>> m = nn.Softshrink() + >>> input = torch.randn(2) + >>> output = m(input) + """ + __constants__ = ['lambd'] + + def __init__(self, lambd=0.5): + super(Softshrink, self).__init__() + self.lambd = lambd + + def forward(self, input): + return F.softshrink(input, self.lambd) + + def extra_repr(self): + return str(self.lambd)
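Hardshrink and Softshrink differ only in what happens outside the dead zone :math:`[-\lambda, \lambda]`: Hardshrink passes values through unchanged, while Softshrink also pulls them toward zero by :math:`\lambda`. A small comparison with hand-picked inputs::

    import torch
    import torch.nn as nn

    x = torch.tensor([-2.0, -0.3, 0.0, 0.3, 2.0])
    print(nn.Hardshrink(lambd=0.5)(x))  # tensor([-2.0, 0., 0., 0., 2.0])
    print(nn.Softshrink(lambd=0.5)(x))  # tensor([-1.5, 0., 0., 0., 1.5])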
    + + +
[docs]class MultiheadAttention(Module): + r"""Allows the model to jointly attend to information + from different representation subspaces. + See reference: Attention Is All You Need + + .. math:: + \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O + \text{where } head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V) + + Args: + embed_dim: total dimension of the model. + num_heads: parallel attention heads. + dropout: a Dropout layer on attn_output_weights. Default: 0.0. + bias: add bias as module parameter. Default: True. + add_bias_kv: add bias to the key and value sequences at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + kdim: total number of features in key. Default: None. + vdim: total number of features in value. Default: None. + + Note: if kdim and vdim are None, they will be set to embed_dim such that + query, key, and value have the same number of features. + + Examples:: + + >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads) + >>> attn_output, attn_output_weights = multihead_attn(query, key, value) + """ + + def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None): + super(MultiheadAttention, self).__init__() + self.embed_dim = embed_dim + self.kdim = kdim if kdim is not None else embed_dim + self.vdim = vdim if vdim is not None else embed_dim + self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim + + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" + + self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim)) + + if self._qkv_same_embed_dim is False: + self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)) + self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim)) + self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim)) + + if bias: + self.in_proj_bias = Parameter(torch.empty(3 * embed_dim)) + else: + self.register_parameter('in_proj_bias', None) + self.out_proj = Linear(embed_dim, embed_dim, bias=bias) + + if add_bias_kv: + self.bias_k = Parameter(torch.empty(1, 1, embed_dim)) + self.bias_v = Parameter(torch.empty(1, 1, embed_dim)) + else: + self.bias_k = self.bias_v = None + + self.add_zero_attn = add_zero_attn + + self._reset_parameters() + + def _reset_parameters(self): + if self._qkv_same_embed_dim: + xavier_uniform_(self.in_proj_weight) + else: + xavier_uniform_(self.q_proj_weight) + xavier_uniform_(self.k_proj_weight) + xavier_uniform_(self.v_proj_weight) + + if self.in_proj_bias is not None: + constant_(self.in_proj_bias, 0.) + constant_(self.out_proj.bias, 0.) + if self.bias_k is not None: + xavier_normal_(self.bias_k) + if self.bias_v is not None: + xavier_normal_(self.bias_v) + +
[docs] def forward(self, query, key, value, key_padding_mask=None, + need_weights=True, attn_mask=None): + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + key_padding_mask: if provided, the specified padding elements in the key will + be ignored by the attention. This is a binary mask. When the value is True, + the corresponding value on the attention layer will be filled with -inf. + need_weights: if ``True``, also return attn_output_weights. + attn_mask: mask that prevents attention to certain positions. This is an additive mask + (i.e. the values will be added to the attention layer). + + Shape: + - Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)`, ByteTensor, where N is the batch size, S is the source sequence length. + - attn_mask: :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + + - Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + if hasattr(self, '_qkv_same_embed_dim') and self._qkv_same_embed_dim is False: + return F.multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight) + else: + if not hasattr(self, '_qkv_same_embed_dim'): + warnings.warn('A new version of MultiheadAttention module has been ' + 'implemented. Please re-train your model with the new module', + UserWarning) + + return F.multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask)
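A minimal shape check matching the docstring above; the sizes are arbitrary, and note that the inputs are sequence-first, :math:`(L, N, E)`::

    import torch
    import torch.nn as nn

    L, S, N, E = 7, 9, 4, 16     # target len, source len, batch, embed dim
    multihead_attn = nn.MultiheadAttention(embed_dim=E, num_heads=4)
    query = torch.randn(L, N, E)
    key = torch.randn(S, N, E)
    value = torch.randn(S, N, E)
    attn_output, attn_weights = multihead_attn(query, key, value)
    assert attn_output.shape == (L, N, E)
    assert attn_weights.shape == (N, L, S)   # averaged over the heads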
    + + +
[docs]class PReLU(Module): + r"""Applies the element-wise function: + + .. math:: + \text{PReLU}(x) = \max(0,x) + a * \min(0,x) + + or + + .. math:: + \text{PReLU}(x) = + \begin{cases} + x, & \text{ if } x \geq 0 \\ + ax, & \text{ otherwise } + \end{cases} + + Here :math:`a` is a learnable parameter. When called without arguments, `nn.PReLU()` uses a single + parameter :math:`a` across all input channels. If called with `nn.PReLU(nChannels)`, + a separate :math:`a` is used for each input channel. + + + .. note:: + For good performance, weight decay should not be used when learning :math:`a`. + + .. note:: + Channel dim is the 2nd dim of input. When input has dims < 2, then there is + no channel dim and the number of channels = 1. + + Args: + num_parameters (int): number of :math:`a` to learn. + Although it takes an int as input, only two values are legitimate: + 1, or the number of channels of the input. Default: 1 + init (float): the initial value of :math:`a`. Default: 0.25 + + Shape: + - Input: :math:`(N, *)` where `*` means any number of additional + dimensions + - Output: :math:`(N, *)`, same shape as the input + + Attributes: + weight (Tensor): the learnable weights of shape (:attr:`num_parameters`). + + .. image:: scripts/activation_images/PReLU.png + + Examples:: + + >>> m = nn.PReLU() + >>> input = torch.randn(2) + >>> output = m(input) + """ + __constants__ = ['num_parameters'] + + def __init__(self, num_parameters=1, init=0.25): + self.num_parameters = num_parameters + super(PReLU, self).__init__() + self.weight = Parameter(torch.Tensor(num_parameters).fill_(init)) + + def forward(self, input): + return F.prelu(input, self.weight) + + def extra_repr(self): + return 'num_parameters={}'.format(self.num_parameters)
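To illustrate the channel-wise variant mentioned above: with ``nn.PReLU(nChannels)`` the module holds one learnable slope per channel (the channel count and input sizes below are arbitrary)::

    import torch
    import torch.nn as nn

    m = nn.PReLU(num_parameters=3)     # one slope per channel
    assert m.weight.shape == (3,)      # each initialised to 0.25
    x = torch.randn(8, 3, 32, 32)      # channel dim is dim 1
    out = m(x)
    assert out.shape == x.shape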
    + + +
    [docs]class Softsign(Module): + r"""Applies the element-wise function: + + .. math:: + \text{SoftSign}(x) = \frac{x}{ 1 + |x|} + + Shape: + - Input: :math:`(N, *)` where `*` means, any number of additional + dimensions + - Output: :math:`(N, *)`, same shape as the input + + .. image:: scripts/activation_images/Softsign.png + + Examples:: + + >>> m = nn.Softsign() + >>> input = torch.randn(2) + >>> output = m(input) + """ + + def forward(self, input): + return F.softsign(input)
    + + +
    [docs]class Tanhshrink(Module): + r"""Applies the element-wise function: + + .. math:: + \text{Tanhshrink}(x) = x - \text{Tanh}(x) + + Shape: + - Input: :math:`(N, *)` where `*` means, any number of additional + dimensions + - Output: :math:`(N, *)`, same shape as the input + + .. image:: scripts/activation_images/Tanhshrink.png + + Examples:: + + >>> m = nn.Tanhshrink() + >>> input = torch.randn(2) + >>> output = m(input) + """ + + def forward(self, input): + return F.tanhshrink(input)
    + + +
[docs]class Softmin(Module): + r"""Applies the Softmin function to an n-dimensional input Tensor, + rescaling it so that the elements of the n-dimensional output Tensor + lie in the range `[0, 1]` and sum to 1. + + Softmin is defined as: + + .. math:: + \text{Softmin}(x_{i}) = \frac{\exp(-x_i)}{\sum_j \exp(-x_j)} + + Shape: + - Input: :math:`(*)` where `*` means any number of additional + dimensions + - Output: :math:`(*)`, same shape as the input + + Arguments: + dim (int): A dimension along which Softmin will be computed (so every slice + along dim will sum to 1). + + Returns: + a Tensor of the same dimension and shape as the input, with + values in the range [0, 1] + + Examples:: + + >>> m = nn.Softmin(dim=1) + >>> input = torch.randn(2, 3) + >>> output = m(input) + """ + __constants__ = ['dim'] + + def __init__(self, dim=None): + super(Softmin, self).__init__() + self.dim = dim + + def forward(self, input): + return F.softmin(input, self.dim, _stacklevel=5)
    + + +
[docs]class Softmax(Module): + r"""Applies the Softmax function to an n-dimensional input Tensor, + rescaling it so that the elements of the n-dimensional output Tensor + lie in the range [0,1] and sum to 1. + + Softmax is defined as: + + .. math:: + \text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)} + + Shape: + - Input: :math:`(*)` where `*` means any number of additional + dimensions + - Output: :math:`(*)`, same shape as the input + + Returns: + a Tensor of the same dimension and shape as the input with + values in the range [0, 1] + + Arguments: + dim (int): A dimension along which Softmax will be computed (so every slice + along dim will sum to 1). + + .. note:: + This module doesn't work directly with NLLLoss, + which expects log-probabilities as its input. + Use `LogSoftmax` instead (it's faster and has better numerical properties). + + Examples:: + + >>> m = nn.Softmax(dim=1) + >>> input = torch.randn(2, 3) + >>> output = m(input) + """ + __constants__ = ['dim'] + + def __init__(self, dim=None): + super(Softmax, self).__init__() + self.dim = dim + + def __setstate__(self, state): + self.__dict__.update(state) + if not hasattr(self, 'dim'): + self.dim = None + + def forward(self, input): + return F.softmax(input, self.dim, _stacklevel=5) + + def extra_repr(self): + return 'dim={dim}'.format(dim=self.dim)
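Following the note above, one way to see why `LogSoftmax` is the right companion for `NLLLoss`: the pair is, up to numerics, the same as `CrossEntropyLoss` applied to raw scores (shapes arbitrary)::

    import torch
    import torch.nn as nn

    logits = torch.randn(4, 10)
    target = torch.randint(0, 10, (4,))
    log_probs = nn.LogSoftmax(dim=1)(logits)
    loss_a = nn.NLLLoss()(log_probs, target)
    loss_b = nn.CrossEntropyLoss()(logits, target)
    assert torch.allclose(loss_a, loss_b)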
    + + +
    [docs]class Softmax2d(Module): + r"""Applies SoftMax over features to each spatial location. + + When given an image of ``Channels x Height x Width``, it will + apply `Softmax` to each location :math:`(Channels, h_i, w_j)` + + Shape: + - Input: :math:`(N, C, H, W)` + - Output: :math:`(N, C, H, W)` (same shape as input) + + Returns: + a Tensor of the same dimension and shape as the input with + values in the range [0, 1] + + Examples:: + + >>> m = nn.Softmax2d() + >>> # you softmax over the 2nd dimension + >>> input = torch.randn(2, 3, 12, 13) + >>> output = m(input) + """ + + def forward(self, input): + assert input.dim() == 4, 'Softmax2d requires a 4D tensor as input' + return F.softmax(input, 1, _stacklevel=5)
    + + +
    [docs]class LogSoftmax(Module): + r"""Applies the :math:`\log(\text{Softmax}(x))` function to an n-dimensional + input Tensor. The LogSoftmax formulation can be simplified as: + + .. math:: + \text{LogSoftmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right) + + Shape: + - Input: :math:`(*)` where `*` means, any number of additional + dimensions + - Output: :math:`(*)`, same shape as the input + + Arguments: + dim (int): A dimension along which LogSoftmax will be computed. + + Returns: + a Tensor of the same dimension and shape as the input with + values in the range [-inf, 0) + + Examples:: + + >>> m = nn.LogSoftmax() + >>> input = torch.randn(2, 3) + >>> output = m(input) + """ + __constants__ = ['dim'] + + def __init__(self, dim=None): + super(LogSoftmax, self).__init__() + self.dim = dim + + def __setstate__(self, state): + self.__dict__.update(state) + if not hasattr(self, 'dim'): + self.dim = None + + def forward(self, input): + return F.log_softmax(input, self.dim, _stacklevel=5)
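One reason to prefer this fused module over composing `Softmax` with `log`: for extreme scores the naive composition underflows to ``-inf`` while ``log_softmax`` stays finite (values chosen to force the underflow)::

    import torch
    import torch.nn as nn

    x = torch.tensor([[0.0, 1000.0]])
    naive = torch.log(nn.Softmax(dim=1)(x))   # tensor([[-inf, 0.]])
    fused = nn.LogSoftmax(dim=1)(x)           # tensor([[-1000., 0.]])
    print(naive, fused)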
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/modules/adaptive.html b/docs/stable/_modules/torch/nn/modules/adaptive.html
new file mode 100644
index 000000000000..ddf2a88fc7db
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/modules/adaptive.html
@@ -0,0 +1,789 @@
+ torch.nn.modules.adaptive — PyTorch master documentation

    Source code for torch.nn.modules.adaptive

    +# -*- coding: utf-8 -*-
    +
    +from collections import namedtuple
    +
    +import torch
    +
    +from . import Sequential, ModuleList, Linear
    +from .module import Module
    +from ..functional import log_softmax
    +
    +
    +_ASMoutput = namedtuple('ASMoutput', ['output', 'loss'])
    +
    +
    +
[docs]class AdaptiveLogSoftmaxWithLoss(Module): + r"""Efficient softmax approximation as described in + `Efficient softmax approximation for GPUs`_ by Edouard Grave, Armand Joulin, + Moustapha Cissé, David Grangier, and Hervé Jégou. + + Adaptive softmax is an approximate strategy for training models with large + output spaces. It is most effective when the label distribution is highly + imbalanced, for example in natural language modelling, where the word + frequency distribution approximately follows `Zipf's law`_. + + Adaptive softmax partitions the labels into several clusters, according to + their frequency. These clusters may each contain a different number of + targets. + Additionally, clusters containing less frequent labels assign + lower-dimensional embeddings to those labels, which speeds up the computation. + For each minibatch, only clusters for which at least one target is + present are evaluated. + + The idea is that the clusters which are accessed frequently + (like the first one, containing most frequent labels), should also be cheap + to compute -- that is, contain a small number of assigned labels. + + We highly recommend taking a look at the original paper for more details. + + * :attr:`cutoffs` should be an ordered Sequence of integers sorted + in the increasing order. + It controls the number of clusters and the partitioning of targets into + clusters. For example, setting ``cutoffs = [10, 100, 1000]`` + means that the first `10` targets will be assigned + to the 'head' of the adaptive softmax, targets `11, 12, ..., 100` will be + assigned to the first cluster, and targets `101, 102, ..., 1000` will be + assigned to the second cluster, while targets + `1001, 1002, ..., n_classes - 1` will be assigned + to the last, third cluster. + + * :attr:`div_value` is used to compute the size of each additional cluster, + which is given as + :math:`\left\lfloor\frac{in\_features}{div\_value^{idx}}\right\rfloor`, + where :math:`idx` is the cluster index (with clusters + for less frequent words having larger indices, + and indices starting from :math:`1`). + + * :attr:`head_bias` if set to True, adds a bias term to the 'head' of the + adaptive softmax. See paper for details. Set to False in the official + implementation. + + .. warning:: + Labels passed as inputs to this module should be sorted according to + their frequency. This means that the most frequent label should be + represented by the index `0`, and the least frequent + label should be represented by the index `n_classes - 1`. + + .. note:: + This module returns a ``NamedTuple`` with ``output`` + and ``loss`` fields. See further documentation for details. + + .. note:: + To compute log-probabilities for all classes, the ``log_prob`` + method can be used. + + Args: + in_features (int): Number of features in the input tensor + n_classes (int): Number of classes in the dataset + cutoffs (Sequence): Cutoffs used to assign targets to their buckets + div_value (float, optional): value used as an exponent to compute sizes + of the clusters. Default: 4.0 + head_bias (bool, optional): If ``True``, adds a bias term to the 'head' of the + adaptive softmax. Default: ``False`` + + Returns: + ``NamedTuple`` with ``output`` and ``loss`` fields: + * **output** is a Tensor of size ``N`` containing computed target + log probabilities for each example + * **loss** is a Scalar representing the computed negative + log likelihood loss + + Shape: + - input: :math:`(N, in\_features)` + - target: :math:`(N)` where each value satisfies :math:`0 <= target[i] < n\_classes` + - output1: :math:`(N)` + - output2: ``Scalar`` + + + .. _Efficient softmax approximation for GPUs: + https://arxiv.org/abs/1609.04309 + + .. _Zipf's law: + https://en.wikipedia.org/wiki/Zipf%27s_law + """ + + def __init__(self, in_features, n_classes, cutoffs, div_value=4., head_bias=False): + super(AdaptiveLogSoftmaxWithLoss, self).__init__() + + cutoffs = list(cutoffs) + + if (cutoffs != sorted(cutoffs)) \ + or (min(cutoffs) <= 0) \ + or (max(cutoffs) > (n_classes - 1)) \ + or (len(set(cutoffs)) != len(cutoffs)) \ + or any([int(c) != c for c in cutoffs]): + + raise ValueError("cutoffs should be a sequence of unique, positive " + "integers sorted in an increasing order, where " + "each value is between 1 and n_classes-1") + + self.in_features = in_features + self.n_classes = n_classes + self.cutoffs = cutoffs + [n_classes] + self.div_value = div_value + self.head_bias = head_bias + + self.shortlist_size = self.cutoffs[0] + self.n_clusters = len(self.cutoffs) - 1 + self.head_size = self.shortlist_size + self.n_clusters + + self.head = Linear(self.in_features, self.head_size, bias=self.head_bias) + self.tail = ModuleList() + + for i in range(self.n_clusters): + + hsz = int(self.in_features // (self.div_value ** (i + 1))) + osz = self.cutoffs[i + 1] - self.cutoffs[i] + + projection = Sequential( + Linear(self.in_features, hsz, bias=False), + Linear(hsz, osz, bias=False) + ) + + self.tail.append(projection) + + def reset_parameters(self): + self.head.reset_parameters() + for i2h, h2o in self.tail: + i2h.reset_parameters() + h2o.reset_parameters() + + def forward(self, input, target): + if input.size(0) != target.size(0): + raise RuntimeError('Input and target should have the same size ' + 'in the batch dimension.') + + used_rows = 0 + batch_size = target.size(0) + + output = input.new_zeros(batch_size) + gather_inds = target.new_empty(batch_size) + + cutoff_values = [0] + self.cutoffs + for i in range(len(cutoff_values) - 1): + + low_idx = cutoff_values[i] + high_idx = cutoff_values[i + 1] + + target_mask = (target >= low_idx) & (target < high_idx) + row_indices = target_mask.nonzero().squeeze() + + if row_indices.numel() == 0: + continue + + if i == 0: + gather_inds.index_copy_(0, row_indices, target[target_mask]) + + else: + relative_target = target[target_mask] - low_idx + input_subset = input.index_select(0, row_indices) + + cluster_output = self.tail[i - 1](input_subset) + cluster_index = self.shortlist_size + i - 1 + + gather_inds.index_fill_(0, row_indices, cluster_index) + + cluster_logprob = log_softmax(cluster_output, dim=1) + local_logprob = cluster_logprob.gather(1, relative_target.unsqueeze(1)) + output.index_copy_(0, row_indices, local_logprob.squeeze(1)) + + used_rows += row_indices.numel() + + if used_rows != batch_size: + raise RuntimeError("Target values should be in [0, {}], " + "but values in range [{}, {}] " + "were found. 
".format(self.n_classes - 1, + target.min().item(), + target.max().item())) + + head_output = self.head(input) + head_logprob = log_softmax(head_output, dim=1) + output += head_logprob.gather(1, gather_inds.unsqueeze(1)).squeeze() + loss = (-output).mean() + + return _ASMoutput(output, loss) + + def _get_full_log_prob(self, input, head_output): + """ Given input tensor, and output of `self.head`, + compute the log of the full distribution """ + + out = input.new_empty((head_output.size(0), self.n_classes)) + head_logprob = log_softmax(head_output, dim=1) + + out[:, :self.shortlist_size] = head_logprob[:, :self.shortlist_size] + + for i, (start_idx, stop_idx) in enumerate(zip(self.cutoffs, self.cutoffs[1:])): + cluster_output = self.tail[i](input) + cluster_logprob = log_softmax(cluster_output, dim=1) + output_logprob = cluster_logprob + head_logprob[:, self.shortlist_size + i].unsqueeze(1) + + out[:, start_idx:stop_idx] = output_logprob + + return out + +
[docs] def log_prob(self, input): + r""" Computes log probabilities for all :math:`n\_classes` + + Args: + input (Tensor): a minibatch of examples + + Returns: + log-probabilities for each class :math:`c` + in range :math:`0 <= c < n\_classes`, where :math:`n\_classes` is a + parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor. + + Shape: + - Input: :math:`(N, in\_features)` + - Output: :math:`(N, n\_classes)` + + """ + + head_output = self.head(input) + return self._get_full_log_prob(input, head_output)
    + +
[docs] def predict(self, input): + r""" This is equivalent to `self.log_prob(input).argmax(dim=1)`, + but is more efficient in some cases. + + Args: + input (Tensor): a minibatch of examples + + Returns: + output (Tensor): a class with the highest probability for each example + + Shape: + - Input: :math:`(N, in\_features)` + - Output: :math:`(N)` + """ + + head_output = self.head(input) + output = torch.argmax(head_output, dim=1) + not_in_shortlist = (output >= self.shortlist_size) + all_in_shortlist = not (not_in_shortlist.any()) + + if all_in_shortlist: + return output + + elif not_in_shortlist.all(): + log_prob = self._get_full_log_prob(input, head_output) + return torch.argmax(log_prob, dim=1) + + else: + log_prob = self._get_full_log_prob(input[not_in_shortlist], + head_output[not_in_shortlist]) + output[not_in_shortlist] = torch.argmax(log_prob, dim=1) + return output
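A minimal end-to-end sketch of the module; all sizes and cutoffs below are made up for illustration::

    import torch
    import torch.nn as nn

    N, in_features, n_classes = 32, 64, 1000
    asm = nn.AdaptiveLogSoftmaxWithLoss(in_features, n_classes,
                                        cutoffs=[10, 100, 500])
    hidden = torch.randn(N, in_features)
    target = torch.randint(0, n_classes, (N,))
    out = asm(hidden, target)         # NamedTuple with (output, loss)
    out.loss.backward()               # loss is the mean NLL over the batch
    log_probs = asm.log_prob(hidden)  # (N, n_classes) full distribution
    preds = asm.predict(hidden)       # (N,) argmax over all classes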
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/modules/batchnorm.html b/docs/stable/_modules/torch/nn/modules/batchnorm.html
new file mode 100644
index 000000000000..6b9a2101e8fa
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/modules/batchnorm.html
@@ -0,0 +1,1019 @@
+ torch.nn.modules.batchnorm — PyTorch master documentation

    Source code for torch.nn.modules.batchnorm

    +from __future__ import division
    +
    +import torch
    +from ._functions import SyncBatchNorm as sync_batch_norm
    +from .module import Module
    +from torch.nn.parameter import Parameter
    +from .. import functional as F
    +from .. import init
    +
    +
    +# TODO: check contiguous in THNN
    +# TODO: use separate backend functions?
    +class _BatchNorm(Module):
    +    _version = 2
    +    __constants__ = ['track_running_stats', 'momentum', 'eps', 'weight', 'bias',
    +                     'running_mean', 'running_var', 'num_batches_tracked',
    +                     'num_features', 'affine']
    +
    +    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
    +                 track_running_stats=True):
    +        super(_BatchNorm, self).__init__()
    +        self.num_features = num_features
    +        self.eps = eps
    +        self.momentum = momentum
    +        self.affine = affine
    +        self.track_running_stats = track_running_stats
    +        if self.affine:
    +            self.weight = Parameter(torch.Tensor(num_features))
    +            self.bias = Parameter(torch.Tensor(num_features))
    +        else:
    +            self.register_parameter('weight', None)
    +            self.register_parameter('bias', None)
    +        if self.track_running_stats:
    +            self.register_buffer('running_mean', torch.zeros(num_features))
    +            self.register_buffer('running_var', torch.ones(num_features))
    +            self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
    +        else:
    +            self.register_parameter('running_mean', None)
    +            self.register_parameter('running_var', None)
    +            self.register_parameter('num_batches_tracked', None)
    +        self.reset_parameters()
    +
    +    def reset_running_stats(self):
    +        if self.track_running_stats:
    +            self.running_mean.zero_()
    +            self.running_var.fill_(1)
    +            self.num_batches_tracked.zero_()
    +
    +    def reset_parameters(self):
    +        self.reset_running_stats()
    +        if self.affine:
    +            init.ones_(self.weight)
    +            init.zeros_(self.bias)
    +
    +    def _check_input_dim(self, input):
    +        raise NotImplementedError
    +
    +    def forward(self, input):
    +        self._check_input_dim(input)
    +
+        # exponential_average_factor is set to self.momentum (when it is
+        # available) only so that it gets updated in the ONNX graph when
+        # this node is exported to ONNX.
    +        if self.momentum is None:
    +            exponential_average_factor = 0.0
    +        else:
    +            exponential_average_factor = self.momentum
    +
    +        if self.training and self.track_running_stats:
    +            # TODO: if statement only here to tell the jit to skip emitting this when it is None
    +            if self.num_batches_tracked is not None:
    +                self.num_batches_tracked += 1
    +                if self.momentum is None:  # use cumulative moving average
    +                    exponential_average_factor = 1.0 / float(self.num_batches_tracked)
    +                else:  # use exponential moving average
    +                    exponential_average_factor = self.momentum
    +
    +        return F.batch_norm(
    +            input, self.running_mean, self.running_var, self.weight, self.bias,
    +            self.training or not self.track_running_stats,
    +            exponential_average_factor, self.eps)
    +
    +    def extra_repr(self):
    +        return '{num_features}, eps={eps}, momentum={momentum}, affine={affine}, ' \
    +               'track_running_stats={track_running_stats}'.format(**self.__dict__)
    +
    +    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
    +                              missing_keys, unexpected_keys, error_msgs):
    +        version = local_metadata.get('version', None)
    +
    +        if (version is None or version < 2) and self.track_running_stats:
    +            # at version 2: added num_batches_tracked buffer
    +            #               this should have a default value of 0
    +            num_batches_tracked_key = prefix + 'num_batches_tracked'
    +            if num_batches_tracked_key not in state_dict:
    +                state_dict[num_batches_tracked_key] = torch.tensor(0, dtype=torch.long)
    +
    +        super(_BatchNorm, self)._load_from_state_dict(
    +            state_dict, prefix, local_metadata, strict,
    +            missing_keys, unexpected_keys, error_msgs)
    +
    +
    +
    [docs]class BatchNorm1d(_BatchNorm): + r"""Applies Batch Normalization over a 2D or 3D input (a mini-batch of 1D + inputs with optional additional channel dimension) as described in the paper + `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`_ . + + .. math:: + + y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta + + The mean and standard-deviation are calculated per-dimension over + the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors + of size `C` (where `C` is the input size). By default, the elements of :math:`\gamma` are set + to 1 and the elements of :math:`\beta` are set to 0. + + Also by default, during training this layer keeps running estimates of its + computed mean and variance, which are then used for normalization during + evaluation. The running estimates are kept with a default :attr:`momentum` + of 0.1. + + If :attr:`track_running_stats` is set to ``False``, this layer then does not + keep running estimates, and batch statistics are instead used during + evaluation time as well. + + .. note:: + This :attr:`momentum` argument is different from one used in optimizer + classes and the conventional notion of momentum. Mathematically, the + update rule for running statistics here is + :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`, + where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the + new observed value. + + Because the Batch Normalization is done over the `C` dimension, computing statistics + on `(N, L)` slices, it's common terminology to call this Temporal Batch Normalization. + + Args: + num_features: :math:`C` from an expected input of size + :math:`(N, C, L)` or :math:`L` from input of size :math:`(N, L)` + eps: a value added to the denominator for numerical stability. + Default: 1e-5 + momentum: the value used for the running_mean and running_var + computation. Can be set to ``None`` for cumulative moving average + (i.e. simple average). Default: 0.1 + affine: a boolean value that when set to ``True``, this module has + learnable affine parameters. Default: ``True`` + track_running_stats: a boolean value that when set to ``True``, this + module tracks the running mean and variance, and when set to ``False``, + this module does not track such statistics and always uses batch + statistics in both training and eval modes. Default: ``True`` + + Shape: + - Input: :math:`(N, C)` or :math:`(N, C, L)` + - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input) + + Examples:: + + >>> # With Learnable Parameters + >>> m = nn.BatchNorm1d(100) + >>> # Without Learnable Parameters + >>> m = nn.BatchNorm1d(100, affine=False) + >>> input = torch.randn(20, 100) + >>> output = m(input) + + .. _`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`: + https://arxiv.org/abs/1502.03167 + """ + + def _check_input_dim(self, input): + if input.dim() != 2 and input.dim() != 3: + raise ValueError('expected 2D or 3D input (got {}D input)' + .format(input.dim()))
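The momentum note above can be checked directly: starting from the freshly initialised buffers (zero mean, unit variance), one training step blends in the batch statistics with weight :attr:`momentum`. Sizes below are arbitrary; note that the running variance is updated with the *unbiased* batch variance::

    import torch
    import torch.nn as nn

    m = nn.BatchNorm1d(100, momentum=0.1)
    x = torch.randn(20, 100)
    m.train()
    m(x)
    expected_mean = 0.9 * torch.zeros(100) + 0.1 * x.mean(0)
    expected_var = 0.9 * torch.ones(100) + 0.1 * x.var(0, unbiased=True)
    assert torch.allclose(m.running_mean, expected_mean, atol=1e-6)
    assert torch.allclose(m.running_var, expected_var, atol=1e-6)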
    + + +
    [docs]class BatchNorm2d(_BatchNorm): + r"""Applies Batch Normalization over a 4D input (a mini-batch of 2D inputs + with additional channel dimension) as described in the paper + `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`_ . + + .. math:: + + y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta + + The mean and standard-deviation are calculated per-dimension over + the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors + of size `C` (where `C` is the input size). By default, the elements of :math:`\gamma` are set + to 1 and the elements of :math:`\beta` are set to 0. + + Also by default, during training this layer keeps running estimates of its + computed mean and variance, which are then used for normalization during + evaluation. The running estimates are kept with a default :attr:`momentum` + of 0.1. + + If :attr:`track_running_stats` is set to ``False``, this layer then does not + keep running estimates, and batch statistics are instead used during + evaluation time as well. + + .. note:: + This :attr:`momentum` argument is different from one used in optimizer + classes and the conventional notion of momentum. Mathematically, the + update rule for running statistics here is + :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`, + where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the + new observed value. + + Because the Batch Normalization is done over the `C` dimension, computing statistics + on `(N, H, W)` slices, it's common terminology to call this Spatial Batch Normalization. + + Args: + num_features: :math:`C` from an expected input of size + :math:`(N, C, H, W)` + eps: a value added to the denominator for numerical stability. + Default: 1e-5 + momentum: the value used for the running_mean and running_var + computation. Can be set to ``None`` for cumulative moving average + (i.e. simple average). Default: 0.1 + affine: a boolean value that when set to ``True``, this module has + learnable affine parameters. Default: ``True`` + track_running_stats: a boolean value that when set to ``True``, this + module tracks the running mean and variance, and when set to ``False``, + this module does not track such statistics and always uses batch + statistics in both training and eval modes. Default: ``True`` + + Shape: + - Input: :math:`(N, C, H, W)` + - Output: :math:`(N, C, H, W)` (same shape as input) + + Examples:: + + >>> # With Learnable Parameters + >>> m = nn.BatchNorm2d(100) + >>> # Without Learnable Parameters + >>> m = nn.BatchNorm2d(100, affine=False) + >>> input = torch.randn(20, 100, 35, 45) + >>> output = m(input) + + .. _`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`: + https://arxiv.org/abs/1502.03167 + """ + + def _check_input_dim(self, input): + if input.dim() != 4: + raise ValueError('expected 4D input (got {}D input)' + .format(input.dim()))
    + + +
    [docs]class BatchNorm3d(_BatchNorm): + r"""Applies Batch Normalization over a 5D input (a mini-batch of 3D inputs + with additional channel dimension) as described in the paper + `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`_ . + + .. math:: + + y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta + + The mean and standard-deviation are calculated per-dimension over + the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors + of size `C` (where `C` is the input size). By default, the elements of :math:`\gamma` are set + to 1 and the elements of :math:`\beta` are set to 0. + + Also by default, during training this layer keeps running estimates of its + computed mean and variance, which are then used for normalization during + evaluation. The running estimates are kept with a default :attr:`momentum` + of 0.1. + + If :attr:`track_running_stats` is set to ``False``, this layer then does not + keep running estimates, and batch statistics are instead used during + evaluation time as well. + + .. note:: + This :attr:`momentum` argument is different from one used in optimizer + classes and the conventional notion of momentum. Mathematically, the + update rule for running statistics here is + :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`, + where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the + new observed value. + + Because the Batch Normalization is done over the `C` dimension, computing statistics + on `(N, D, H, W)` slices, it's common terminology to call this Volumetric Batch Normalization + or Spatio-temporal Batch Normalization. + + Args: + num_features: :math:`C` from an expected input of size + :math:`(N, C, D, H, W)` + eps: a value added to the denominator for numerical stability. + Default: 1e-5 + momentum: the value used for the running_mean and running_var + computation. Can be set to ``None`` for cumulative moving average + (i.e. simple average). Default: 0.1 + affine: a boolean value that when set to ``True``, this module has + learnable affine parameters. Default: ``True`` + track_running_stats: a boolean value that when set to ``True``, this + module tracks the running mean and variance, and when set to ``False``, + this module does not track such statistics and always uses batch + statistics in both training and eval modes. Default: ``True`` + + Shape: + - Input: :math:`(N, C, D, H, W)` + - Output: :math:`(N, C, D, H, W)` (same shape as input) + + Examples:: + + >>> # With Learnable Parameters + >>> m = nn.BatchNorm3d(100) + >>> # Without Learnable Parameters + >>> m = nn.BatchNorm3d(100, affine=False) + >>> input = torch.randn(20, 100, 35, 45, 10) + >>> output = m(input) + + .. _`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`: + https://arxiv.org/abs/1502.03167 + """ + + def _check_input_dim(self, input): + if input.dim() != 5: + raise ValueError('expected 5D input (got {}D input)' + .format(input.dim()))
    + + +
[docs]class SyncBatchNorm(_BatchNorm): + r"""Applies Batch Normalization over an N-dimensional input (a mini-batch of [N-2]D inputs + with additional channel dimension) as described in the paper + `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`_ . + + .. math:: + + y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta + + The mean and standard-deviation are calculated per-dimension over all + mini-batches of the same process group. :math:`\gamma` and :math:`\beta` + are learnable parameter vectors of size `C` (where `C` is the input size). + By default, the elements of :math:`\gamma` are sampled from + :math:`\mathcal{U}(0, 1)` and the elements of :math:`\beta` are set to 0. + + Also by default, during training this layer keeps running estimates of its + computed mean and variance, which are then used for normalization during + evaluation. The running estimates are kept with a default :attr:`momentum` + of 0.1. + + If :attr:`track_running_stats` is set to ``False``, this layer then does not + keep running estimates, and batch statistics are instead used during + evaluation time as well. + + .. note:: + This :attr:`momentum` argument is different from the one used in optimizer + classes and the conventional notion of momentum. Mathematically, the + update rule for running statistics here is + :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`, + where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the + new observed value. + + Because the Batch Normalization is done over the `C` dimension, computing statistics + on `(N, +)` slices, it's common terminology to call this Volumetric Batch Normalization + or Spatio-temporal Batch Normalization. + + Currently SyncBatchNorm only supports DistributedDataParallel with a single GPU per process. Use + torch.nn.SyncBatchNorm.convert_sync_batchnorm() to convert BatchNorm layers to SyncBatchNorm before wrapping + the network with DDP. + + Args: + num_features: :math:`C` from an expected input of size + :math:`(N, C, +)` + eps: a value added to the denominator for numerical stability. + Default: 1e-5 + momentum: the value used for the running_mean and running_var + computation. Can be set to ``None`` for cumulative moving average + (i.e. simple average). Default: 0.1 + affine: a boolean value that when set to ``True``, this module has + learnable affine parameters. Default: ``True`` + track_running_stats: a boolean value that when set to ``True``, this + module tracks the running mean and variance, and when set to ``False``, + this module does not track such statistics and always uses batch + statistics in both training and eval modes. Default: ``True`` + process_group: synchronization of stats happen within each process group + individually. Default behavior is synchronization across the whole + world + + Shape: + - Input: :math:`(N, C, +)` + - Output: :math:`(N, C, +)` (same shape as input) + + Examples:: + + >>> # With Learnable Parameters + >>> m = nn.SyncBatchNorm(100) + >>> # creating process group (optional) + >>> # process_ids is a list of int identifying rank ids. 
+ >>> process_group = torch.distributed.new_group(process_ids) + >>> # Without Learnable Parameters + >>> m = nn.SyncBatchNorm(100, affine=False, process_group=process_group) + >>> input = torch.randn(20, 100, 35, 45, 10) + >>> output = m(input) + + >>> # network is nn.BatchNorm layer + >>> sync_bn_network = nn.SyncBatchNorm.convert_sync_batchnorm(network, process_group) + >>> # only single gpu per process is currently supported + >>> ddp_sync_bn_network = torch.nn.parallel.DistributedDataParallel( + >>> sync_bn_network, + >>> device_ids=[args.local_rank], + >>> output_device=args.local_rank) + + .. _`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`: + https://arxiv.org/abs/1502.03167 + """ + + def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, + track_running_stats=True, process_group=None): + super(SyncBatchNorm, self).__init__(num_features, eps, momentum, affine, track_running_stats) + self.process_group = process_group + # gpu_size is set through DistributedDataParallel initialization. This is to ensure that SyncBatchNorm is used + # under supported condition (single GPU per process) + self.ddp_gpu_size = None + + def _check_input_dim(self, input): + if input.dim() <= 2: + raise ValueError('expected at least 3D input (got {}D input)' + .format(input.dim())) + + def _specify_ddp_gpu_num(self, gpu_size): + if gpu_size > 1: + raise ValueError('SyncBatchNorm is only supported for DDP with single GPU per process') + self.ddp_gpu_size = gpu_size + + def forward(self, input): + # currently only GPU input is supported + if not input.is_cuda: + raise ValueError('expected input tensor to be on GPU') + + if not self.ddp_gpu_size: + raise AttributeError('SyncBatchNorm is only supported within torch.nn.parallel.DistributedDataParallel') + + self._check_input_dim(input) + + exponential_average_factor = 0.0 + + if self.training and self.track_running_stats: + self.num_batches_tracked += 1 + if self.momentum is None:  # use cumulative moving average + exponential_average_factor = 1.0 / self.num_batches_tracked.item() + else:  # use exponential moving average + exponential_average_factor = self.momentum + + world_size = 1 + process_group = torch.distributed.group.WORLD + if self.process_group: + process_group = self.process_group + world_size = torch.distributed.get_world_size(process_group) + + # fallback to framework BN when synchronization is not necessary + if world_size == 1 or (not self.training and self.track_running_stats): + return F.batch_norm( + input, self.running_mean, self.running_var, self.weight, self.bias, + self.training or not self.track_running_stats, + exponential_average_factor, self.eps) + else: + return sync_batch_norm.apply( + input, self.weight, self.bias, self.running_mean, self.running_var, + self.eps, exponential_average_factor, process_group, world_size) + +
[docs] @classmethod + def convert_sync_batchnorm(cls, module, process_group=None): + r"""Helper function to convert all `torch.nn.BatchNorm*D` layers in the model + to `torch.nn.SyncBatchNorm` layers. + + Args: + module (nn.Module): containing module + process_group (optional): process group to scope synchronization, + default is the whole world + + Returns: + The original module with the converted `torch.nn.SyncBatchNorm` layers + + Example:: + + >>> # Network with nn.BatchNorm layer + >>> module = torch.nn.Sequential( + >>> torch.nn.Linear(20, 100), + >>> torch.nn.BatchNorm1d(100) + >>> ).cuda() + >>> # creating process group (optional) + >>> # process_ids is a list of int identifying rank ids. + >>> process_group = torch.distributed.new_group(process_ids) + >>> sync_bn_module = torch.nn.SyncBatchNorm.convert_sync_batchnorm(module, process_group) + + """ + module_output = module + if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): + module_output = torch.nn.SyncBatchNorm(module.num_features, + module.eps, module.momentum, + module.affine, + module.track_running_stats, + process_group) + if module.affine: + module_output.weight.data = module.weight.data.clone().detach() + module_output.bias.data = module.bias.data.clone().detach() + # keep requires_grad unchanged + module_output.weight.requires_grad = module.weight.requires_grad + module_output.bias.requires_grad = module.bias.requires_grad + module_output.running_mean = module.running_mean + module_output.running_var = module.running_var + module_output.num_batches_tracked = module.num_batches_tracked + for name, child in module.named_children(): + module_output.add_module(name, cls.convert_sync_batchnorm(child, process_group)) + del module + return module_output
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/modules/container.html b/docs/stable/_modules/torch/nn/modules/container.html
new file mode 100644
index 000000000000..110d18d20001
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/modules/container.html
@@ -0,0 +1,1084 @@
+ torch.nn.modules.container — PyTorch master documentation

    Source code for torch.nn.modules.container

    +import warnings
    +from collections import OrderedDict
    +from torch._six import container_abcs
    +from itertools import islice
    +import operator
    +
    +import torch
    +from .module import Module
    +
    +
    +class Container(Module):
    +
    +    def __init__(self, **kwargs):
    +        super(Container, self).__init__()
    +        # DeprecationWarning is ignored by default <sigh>
+        warnings.warn("nn.Container is deprecated. All of its functionality "
    +                      "is now implemented in nn.Module. Subclass that instead.")
    +        for key, value in kwargs.items():
    +            self.add_module(key, value)
    +
    +
    +
    [docs]class Sequential(Module): + r"""A sequential container. + Modules will be added to it in the order they are passed in the constructor. + Alternatively, an ordered dict of modules can also be passed in. + + To make it easier to understand, here is a small example:: + + # Example of using Sequential + model = nn.Sequential( + nn.Conv2d(1,20,5), + nn.ReLU(), + nn.Conv2d(20,64,5), + nn.ReLU() + ) + + # Example of using Sequential with OrderedDict + model = nn.Sequential(OrderedDict([ + ('conv1', nn.Conv2d(1,20,5)), + ('relu1', nn.ReLU()), + ('conv2', nn.Conv2d(20,64,5)), + ('relu2', nn.ReLU()) + ])) + """ + + def __init__(self, *args): + super(Sequential, self).__init__() + if len(args) == 1 and isinstance(args[0], OrderedDict): + for key, module in args[0].items(): + self.add_module(key, module) + else: + for idx, module in enumerate(args): + self.add_module(str(idx), module) + + def _get_item_by_idx(self, iterator, idx): + """Get the idx-th item of the iterator""" + size = len(self) + idx = operator.index(idx) + if not -size <= idx < size: + raise IndexError('index {} is out of range'.format(idx)) + idx %= size + return next(islice(iterator, idx, None)) + + def __getitem__(self, idx): + if isinstance(idx, slice): + return self.__class__(OrderedDict(list(self._modules.items())[idx])) + else: + return self._get_item_by_idx(self._modules.values(), idx) + + def __setitem__(self, idx, module): + key = self._get_item_by_idx(self._modules.keys(), idx) + return setattr(self, key, module) + + def __delitem__(self, idx): + if isinstance(idx, slice): + for key in list(self._modules.keys())[idx]: + delattr(self, key) + else: + key = self._get_item_by_idx(self._modules.keys(), idx) + delattr(self, key) + + def __len__(self): + return len(self._modules) + + def __dir__(self): + keys = super(Sequential, self).__dir__() + keys = [key for key in keys if not key.isdigit()] + return keys + + def forward(self, input): + for module in self._modules.values(): + input = module(input) + return input
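As the ``__getitem__`` implementation above shows, integer indexing returns the underlying submodule while slicing returns a new ``Sequential`` (layer sizes here are arbitrary)::

    import torch.nn as nn

    model = nn.Sequential(nn.Conv2d(1, 20, 5), nn.ReLU(),
                          nn.Conv2d(20, 64, 5), nn.ReLU())
    assert isinstance(model[0], nn.Conv2d)        # a single submodule
    assert isinstance(model[1:3], nn.Sequential)  # a new container
    assert len(model[1:3]) == 2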
    + + +
+class ModuleList(Module):
+    r"""Holds submodules in a list.
+
+    :class:`~torch.nn.ModuleList` can be indexed like a regular Python list, but
+    modules it contains are properly registered, and will be visible by all
+    :class:`~torch.nn.Module` methods.
+
+    Arguments:
+        modules (iterable, optional): an iterable of modules to add
+
+    Example::
+
+        class MyModule(nn.Module):
+            def __init__(self):
+                super(MyModule, self).__init__()
+                self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(10)])
+
+            def forward(self, x):
+                # ModuleList can act as an iterable, or be indexed using ints
+                for i, l in enumerate(self.linears):
+                    x = self.linears[i // 2](x) + l(x)
+                return x
+    """
+
+    def __init__(self, modules=None):
+        super(ModuleList, self).__init__()
+        if modules is not None:
+            self += modules
+
+    def _get_abs_string_index(self, idx):
+        """Get the absolute index for the list of modules."""
+        idx = operator.index(idx)
+        if not (-len(self) <= idx < len(self)):
+            raise IndexError('index {} is out of range'.format(idx))
+        if idx < 0:
+            idx += len(self)
+        return str(idx)
+
+    def __getitem__(self, idx):
+        if isinstance(idx, slice):
+            return self.__class__(list(self._modules.values())[idx])
+        else:
+            return self._modules[self._get_abs_string_index(idx)]
+
+    def __setitem__(self, idx, module):
+        idx = self._get_abs_string_index(idx)
+        return setattr(self, idx, module)
+
+    def __delitem__(self, idx):
+        if isinstance(idx, slice):
+            for k in range(len(self._modules))[idx]:
+                delattr(self, str(k))
+        else:
+            delattr(self, self._get_abs_string_index(idx))
+        # To preserve numbering, self._modules is reconstructed with the
+        # remaining modules re-keyed as '0', '1', ... after deletion.
+        str_indices = [str(i) for i in range(len(self._modules))]
+        self._modules = OrderedDict(list(zip(str_indices, self._modules.values())))
+
+    def __len__(self):
+        return len(self._modules)
+
+    def __iter__(self):
+        return iter(self._modules.values())
+
+    def __iadd__(self, modules):
+        return self.extend(modules)
+
+    def __dir__(self):
+        keys = super(ModuleList, self).__dir__()
+        keys = [key for key in keys if not key.isdigit()]
+        return keys
+
+    def insert(self, index, module):
+        r"""Insert a given module before a given index in the list.
+
+        Arguments:
+            index (int): index to insert before.
+            module (nn.Module): module to insert
+        """
+        for i in range(len(self._modules), index, -1):
+            self._modules[str(i)] = self._modules[str(i - 1)]
+        self._modules[str(index)] = module
+
+    def append(self, module):
+        r"""Appends a given module to the end of the list.
+
+        Arguments:
+            module (nn.Module): module to append
+        """
+        self.add_module(str(len(self)), module)
+        return self
+
+    def extend(self, modules):
+        r"""Appends modules from a Python iterable to the end of the list.
+
+        Arguments:
+            modules (iterable): iterable of modules to append
+        """
+        if not isinstance(modules, container_abcs.Iterable):
+            raise TypeError("ModuleList.extend should be called with an "
+                            "iterable, but got " + type(modules).__name__)
+        offset = len(self)
+        for i, module in enumerate(modules):
+            self.add_module(str(offset + i), module)
+        return self
    + + +
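A small sketch of the mutation API above (hypothetical sizes): ``append`` and ``extend`` register under the next free numeric keys, while ``insert`` shifts existing keys up by one::

    import torch.nn as nn

    layers = nn.ModuleList([nn.Linear(4, 4) for _ in range(3)])
    layers.append(nn.ReLU())              # registered as submodule '3'
    layers.extend([nn.Linear(4, 2)])      # registered as submodule '4'
    layers.insert(0, nn.Linear(8, 4))     # contents of '0'..'4' shift to '1'..'5'
    print(len(layers))                    # 6
    print([name for name, _ in layers.named_children()])
    # ['0', '1', '2', '3', '4', '5']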
+class ModuleDict(Module):
+    r"""Holds submodules in a dictionary.
+
+    :class:`~torch.nn.ModuleDict` can be indexed like a regular Python dictionary,
+    but modules it contains are properly registered, and will be visible by all
+    :class:`~torch.nn.Module` methods.
+
+    :class:`~torch.nn.ModuleDict` is an **ordered** dictionary that respects
+
+    * the order of insertion, and
+
+    * in :meth:`~torch.nn.ModuleDict.update`, the order of the merged ``OrderedDict``
+      or another :class:`~torch.nn.ModuleDict` (the argument to :meth:`~torch.nn.ModuleDict.update`).
+
+    Note that :meth:`~torch.nn.ModuleDict.update` with other unordered mapping
+    types (e.g., Python's plain ``dict``) does not preserve the order of the
+    merged mapping.
+
+    Arguments:
+        modules (iterable, optional): a mapping (dictionary) of (string: module)
+            or an iterable of key-value pairs of type (string, module)
+
+    Example::
+
+        class MyModule(nn.Module):
+            def __init__(self):
+                super(MyModule, self).__init__()
+                self.choices = nn.ModuleDict({
+                        'conv': nn.Conv2d(10, 10, 3),
+                        'pool': nn.MaxPool2d(3)
+                })
+                self.activations = nn.ModuleDict([
+                        ['lrelu', nn.LeakyReLU()],
+                        ['prelu', nn.PReLU()]
+                ])
+
+            def forward(self, x, choice, act):
+                x = self.choices[choice](x)
+                x = self.activations[act](x)
+                return x
+    """
+
+    def __init__(self, modules=None):
+        super(ModuleDict, self).__init__()
+        if modules is not None:
+            self.update(modules)
+
+    def __getitem__(self, key):
+        return self._modules[key]
+
+    def __setitem__(self, key, module):
+        self.add_module(key, module)
+
+    def __delitem__(self, key):
+        del self._modules[key]
+
+    def __len__(self):
+        return len(self._modules)
+
+    def __iter__(self):
+        return iter(self._modules)
+
+    def __contains__(self, key):
+        return key in self._modules
+
+    def clear(self):
+        """Remove all items from the ModuleDict."""
+        self._modules.clear()
+
+    def pop(self, key):
+        r"""Remove key from the ModuleDict and return its module.
+
+        Arguments:
+            key (string): key to pop from the ModuleDict
+        """
+        v = self[key]
+        del self[key]
+        return v
+
+    def keys(self):
+        r"""Return an iterable of the ModuleDict keys."""
+        return self._modules.keys()
+
+    def items(self):
+        r"""Return an iterable of the ModuleDict key/value pairs."""
+        return self._modules.items()
+
+    def values(self):
+        r"""Return an iterable of the ModuleDict values."""
+        return self._modules.values()
+
+    def update(self, modules):
+        r"""Update the :class:`~torch.nn.ModuleDict` with the key-value pairs from a
+        mapping or an iterable, overwriting existing keys.
+
+        .. note::
+            If :attr:`modules` is an ``OrderedDict``, a :class:`~torch.nn.ModuleDict`, or
+            an iterable of key-value pairs, the order of new elements in it is preserved.
+
+        Arguments:
+            modules (iterable): a mapping (dictionary) from string to :class:`~torch.nn.Module`,
+                or an iterable of key-value pairs of type (string, :class:`~torch.nn.Module`)
+        """
+        if not isinstance(modules, container_abcs.Iterable):
+            raise TypeError("ModuleDict.update should be called with an "
+                            "iterable of key/value pairs, but got " +
+                            type(modules).__name__)
+
+        if isinstance(modules, container_abcs.Mapping):
+            if isinstance(modules, (OrderedDict, ModuleDict)):
+                for key, module in modules.items():
+                    self[key] = module
+            else:
+                # plain dicts have no reliable order, so merge in sorted key order
+                for key, module in sorted(modules.items()):
+                    self[key] = module
+        else:
+            for j, m in enumerate(modules):
+                if not isinstance(m, container_abcs.Iterable):
+                    raise TypeError("ModuleDict update sequence element "
+                                    "#" + str(j) + " should be Iterable; is " +
+                                    type(m).__name__)
+                if not len(m) == 2:
+                    raise ValueError("ModuleDict update sequence element "
+                                     "#" + str(j) + " has length " + str(len(m)) +
+                                     "; 2 is required")
+                self[m[0]] = m[1]
    + + +
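A short sketch of the ordering rules in ``update`` above (hypothetical heads): plain ``dict`` arguments are merged in sorted key order, while an ``OrderedDict`` keeps its own order::

    import torch.nn as nn
    from collections import OrderedDict

    heads = nn.ModuleDict({'a': nn.Linear(4, 1)})
    heads.update({'c': nn.Linear(4, 1), 'b': nn.Linear(4, 1)})
    print(list(heads.keys()))   # ['a', 'b', 'c'] -- plain dict, sorted merge
    heads.update(OrderedDict([('z', nn.Linear(4, 1)), ('y', nn.Linear(4, 1))]))
    print(list(heads.keys()))   # ['a', 'b', 'c', 'z', 'y'] -- order preserved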
+class ParameterList(Module):
+    r"""Holds parameters in a list.
+
+    :class:`~torch.nn.ParameterList` can be indexed like a regular Python
+    list, but parameters it contains are properly registered, and will be
+    visible by all :class:`~torch.nn.Module` methods.
+
+    Arguments:
+        parameters (iterable, optional): an iterable of :class:`~torch.nn.Parameter` to add
+
+    Example::
+
+        class MyModule(nn.Module):
+            def __init__(self):
+                super(MyModule, self).__init__()
+                self.params = nn.ParameterList([nn.Parameter(torch.randn(10, 10)) for i in range(10)])
+
+            def forward(self, x):
+                # ParameterList can act as an iterable, or be indexed using ints
+                for i, p in enumerate(self.params):
+                    x = self.params[i // 2].mm(x) + p.mm(x)
+                return x
+    """
+
+    def __init__(self, parameters=None):
+        super(ParameterList, self).__init__()
+        if parameters is not None:
+            self += parameters
+
+    def _get_abs_string_index(self, idx):
+        """Get the absolute index for the list of parameters."""
+        idx = operator.index(idx)
+        if not (-len(self) <= idx < len(self)):
+            raise IndexError('index {} is out of range'.format(idx))
+        if idx < 0:
+            idx += len(self)
+        return str(idx)
+
+    def __getitem__(self, idx):
+        if isinstance(idx, slice):
+            return self.__class__(list(self._parameters.values())[idx])
+        else:
+            return self._parameters[self._get_abs_string_index(idx)]
+
+    def __setitem__(self, idx, param):
+        idx = self._get_abs_string_index(idx)
+        return self.register_parameter(idx, param)
+
+    def __len__(self):
+        return len(self._parameters)
+
+    def __iter__(self):
+        return iter(self._parameters.values())
+
+    def __iadd__(self, parameters):
+        return self.extend(parameters)
+
+    def __dir__(self):
+        keys = super(ParameterList, self).__dir__()
+        keys = [key for key in keys if not key.isdigit()]
+        return keys
+
+    def append(self, parameter):
+        """Appends a given parameter to the end of the list.
+
+        Arguments:
+            parameter (nn.Parameter): parameter to append
+        """
+        self.register_parameter(str(len(self)), parameter)
+        return self
+
+    def extend(self, parameters):
+        """Appends parameters from a Python iterable to the end of the list.
+
+        Arguments:
+            parameters (iterable): iterable of parameters to append
+        """
+        if not isinstance(parameters, container_abcs.Iterable):
+            raise TypeError("ParameterList.extend should be called with an "
+                            "iterable, but got " + type(parameters).__name__)
+        offset = len(self)
+        for i, param in enumerate(parameters):
+            self.register_parameter(str(offset + i), param)
+        return self
+
+    def extra_repr(self):
+        child_lines = []
+        for k, p in self._parameters.items():
+            size_str = 'x'.join(str(size) for size in p.size())
+            device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())
+            parastr = 'Parameter containing: [{} of size {}{}]'.format(
+                torch.typename(p.data), size_str, device_str)
+            child_lines.append('  (' + str(k) + '): ' + parastr)
+        tmpstr = '\n'.join(child_lines)
+        return tmpstr
    + + +
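A minimal sketch (hypothetical module name and shapes) showing that parameters held in a ``ParameterList`` are visible to the usual ``Module`` machinery::

    import torch
    import torch.nn as nn

    class Scaler(nn.Module):          # hypothetical module name
        def __init__(self, n):
            super(Scaler, self).__init__()
            self.scales = nn.ParameterList(
                [nn.Parameter(torch.ones(1)) for _ in range(n)])

        def forward(self, x):
            for s in self.scales:     # iterates the registered parameters
                x = x * s
            return x

    m = Scaler(3)
    print(len(list(m.parameters())))  # 3 -- picked up by Module.parameters()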
+class ParameterDict(Module):
+    r"""Holds parameters in a dictionary.
+
+    ParameterDict can be indexed like a regular Python dictionary, but parameters it
+    contains are properly registered, and will be visible by all Module methods.
+
+    :class:`~torch.nn.ParameterDict` is an **ordered** dictionary that respects
+
+    * the order of insertion, and
+
+    * in :meth:`~torch.nn.ParameterDict.update`, the order of the merged ``OrderedDict``
+      or another :class:`~torch.nn.ParameterDict` (the argument to
+      :meth:`~torch.nn.ParameterDict.update`).
+
+    Note that :meth:`~torch.nn.ParameterDict.update` with other unordered mapping
+    types (e.g., Python's plain ``dict``) does not preserve the order of the
+    merged mapping.
+
+    Arguments:
+        parameters (iterable, optional): a mapping (dictionary) of
+            (string : :class:`~torch.nn.Parameter`) or an iterable of key-value pairs
+            of type (string, :class:`~torch.nn.Parameter`)
+
+    Example::
+
+        class MyModule(nn.Module):
+            def __init__(self):
+                super(MyModule, self).__init__()
+                self.params = nn.ParameterDict({
+                        'left': nn.Parameter(torch.randn(5, 10)),
+                        'right': nn.Parameter(torch.randn(5, 10))
+                })
+
+            def forward(self, x, choice):
+                x = self.params[choice].mm(x)
+                return x
+    """
+
+    def __init__(self, parameters=None):
+        super(ParameterDict, self).__init__()
+        if parameters is not None:
+            self.update(parameters)
+
+    def __getitem__(self, key):
+        return self._parameters[key]
+
+    def __setitem__(self, key, parameter):
+        self.register_parameter(key, parameter)
+
+    def __delitem__(self, key):
+        del self._parameters[key]
+
+    def __len__(self):
+        return len(self._parameters)
+
+    def __iter__(self):
+        return iter(self._parameters.keys())
+
+    def __contains__(self, key):
+        return key in self._parameters
+
+    def clear(self):
+        """Remove all items from the ParameterDict."""
+        self._parameters.clear()
+
+    def pop(self, key):
+        r"""Remove key from the ParameterDict and return its parameter.
+
+        Arguments:
+            key (string): key to pop from the ParameterDict
+        """
+        v = self[key]
+        del self[key]
+        return v
+
+    def keys(self):
+        r"""Return an iterable of the ParameterDict keys."""
+        return self._parameters.keys()
+
+    def items(self):
+        r"""Return an iterable of the ParameterDict key/value pairs."""
+        return self._parameters.items()
+
+    def values(self):
+        r"""Return an iterable of the ParameterDict values."""
+        return self._parameters.values()
+
+    def update(self, parameters):
+        r"""Update the :class:`~torch.nn.ParameterDict` with the key-value pairs from a
+        mapping or an iterable, overwriting existing keys.
+
+        .. note::
+            If :attr:`parameters` is an ``OrderedDict``, a :class:`~torch.nn.ParameterDict`, or
+            an iterable of key-value pairs, the order of new elements in it is preserved.
+
+        Arguments:
+            parameters (iterable): a mapping (dictionary) from string to
+                :class:`~torch.nn.Parameter`, or an iterable of
+                key-value pairs of type (string, :class:`~torch.nn.Parameter`)
+        """
+        if not isinstance(parameters, container_abcs.Iterable):
+            raise TypeError("ParameterDict.update should be called with an "
+                            "iterable of key/value pairs, but got " +
+                            type(parameters).__name__)
+
+        if isinstance(parameters, container_abcs.Mapping):
+            if isinstance(parameters, (OrderedDict, ParameterDict)):
+                for key, parameter in parameters.items():
+                    self[key] = parameter
+            else:
+                # plain dicts have no reliable order, so merge in sorted key order
+                for key, parameter in sorted(parameters.items()):
+                    self[key] = parameter
+        else:
+            for j, p in enumerate(parameters):
+                if not isinstance(p, container_abcs.Iterable):
+                    raise TypeError("ParameterDict update sequence element "
+                                    "#" + str(j) + " should be Iterable; is " +
+                                    type(p).__name__)
+                if not len(p) == 2:
+                    raise ValueError("ParameterDict update sequence element "
+                                     "#" + str(j) + " has length " + str(len(p)) +
+                                     "; 2 is required")
+                self[p[0]] = p[1]
+
+    def extra_repr(self):
+        child_lines = []
+        for k, p in self._parameters.items():
+            size_str = 'x'.join(str(size) for size in p.size())
+            device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())
+            parastr = 'Parameter containing: [{} of size {}{}]'.format(
+                torch.typename(p.data), size_str, device_str)
+            child_lines.append('  (' + k + '): ' + parastr)
+        tmpstr = '\n'.join(child_lines)
+        return tmpstr
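A short sketch of the dictionary-style API above (hypothetical names)::

    import torch
    import torch.nn as nn

    pd = nn.ParameterDict({'w': nn.Parameter(torch.randn(3, 3))})
    pd['b'] = nn.Parameter(torch.zeros(3))   # goes through register_parameter
    w = pd.pop('w')                          # removed from the dict and returned
    print(list(pd.keys()))                   # ['b']
    print(w.shape)                           # torch.Size([3, 3])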
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/modules/conv.html b/docs/stable/_modules/torch/nn/modules/conv.html
new file mode 100644
index 000000000000..fda0f5014263
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/modules/conv.html
@@ -0,0 +1,1449 @@
    Source code for torch.nn.modules.conv

    +# coding=utf-8
    +import math
    +import torch
    +from torch.nn.parameter import Parameter
    +from .. import functional as F
    +from .. import init
    +from .module import Module
    +from .utils import _single, _pair, _triple
    +from ..._jit_internal import List
    +
    +
    +class _ConvNd(Module):
    +
    +    __constants__ = ['stride', 'padding', 'dilation', 'groups', 'bias',
    +                     'padding_mode', 'output_padding', 'in_channels',
    +                     'out_channels', 'kernel_size']
    +
    +    def __init__(self, in_channels, out_channels, kernel_size, stride,
    +                 padding, dilation, transposed, output_padding,
    +                 groups, bias, padding_mode):
    +        super(_ConvNd, self).__init__()
    +        if in_channels % groups != 0:
    +            raise ValueError('in_channels must be divisible by groups')
    +        if out_channels % groups != 0:
    +            raise ValueError('out_channels must be divisible by groups')
    +        self.in_channels = in_channels
    +        self.out_channels = out_channels
    +        self.kernel_size = kernel_size
    +        self.stride = stride
    +        self.padding = padding
    +        self.dilation = dilation
    +        self.transposed = transposed
    +        self.output_padding = output_padding
    +        self.groups = groups
    +        self.padding_mode = padding_mode
    +        if transposed:
    +            self.weight = Parameter(torch.Tensor(
    +                in_channels, out_channels // groups, *kernel_size))
    +        else:
    +            self.weight = Parameter(torch.Tensor(
    +                out_channels, in_channels // groups, *kernel_size))
    +        if bias:
    +            self.bias = Parameter(torch.Tensor(out_channels))
    +        else:
    +            self.register_parameter('bias', None)
    +        self.reset_parameters()
    +
+    def reset_parameters(self):
+        # Kaiming-uniform with a=sqrt(5) is equivalent to the historical
+        # uniform(-1/sqrt(fan_in), 1/sqrt(fan_in)) weight initialization;
+        # the bias bound is computed from the weight's fan-in.
+        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
+        if self.bias is not None:
+            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
+            bound = 1 / math.sqrt(fan_in)
+            init.uniform_(self.bias, -bound, bound)
    +
    +    def extra_repr(self):
    +        s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
    +             ', stride={stride}')
    +        if self.padding != (0,) * len(self.padding):
    +            s += ', padding={padding}'
    +        if self.dilation != (1,) * len(self.dilation):
    +            s += ', dilation={dilation}'
    +        if self.output_padding != (0,) * len(self.output_padding):
    +            s += ', output_padding={output_padding}'
    +        if self.groups != 1:
    +            s += ', groups={groups}'
    +        if self.bias is None:
    +            s += ', bias=False'
    +        return s.format(**self.__dict__)
    +
    +    def __setstate__(self, state):
    +        super(_ConvNd, self).__setstate__(state)
    +        if not hasattr(self, 'padding_mode'):
    +            self.padding_mode = 'zeros'
    +
    +
    +
+class Conv1d(_ConvNd):
+    r"""Applies a 1D convolution over an input signal composed of several input
+    planes.
+
+    In the simplest case, the output value of the layer with input size
+    :math:`(N, C_{\text{in}}, L)` and output :math:`(N, C_{\text{out}}, L_{\text{out}})` can be
+    precisely described as:
+
+    .. math::
+        \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
+        \sum_{k = 0}^{C_{in} - 1} \text{weight}(C_{\text{out}_j}, k)
+        \star \text{input}(N_i, k)
+
+    where :math:`\star` is the valid `cross-correlation`_ operator,
+    :math:`N` is a batch size, :math:`C` denotes a number of channels, and
+    :math:`L` is the length of the signal sequence.
+
+    * :attr:`stride` controls the stride for the cross-correlation, a single
+      number or a one-element tuple.
+
+    * :attr:`padding` controls the amount of implicit zero-padding on both sides
+      for :attr:`padding` number of points.
+
+    * :attr:`dilation` controls the spacing between the kernel points; also
+      known as the à trous algorithm. It is harder to describe, but this `link`_
+      has a nice visualization of what :attr:`dilation` does.
+
+    * :attr:`groups` controls the connections between inputs and outputs.
+      :attr:`in_channels` and :attr:`out_channels` must both be divisible by
+      :attr:`groups`. For example,
+
+        * At groups=1, all inputs are convolved to all outputs.
+        * At groups=2, the operation becomes equivalent to having two conv
+          layers side by side, each seeing half the input channels
+          and producing half the output channels, with both subsequently
+          concatenated.
+        * At groups= :attr:`in_channels`, each input channel is convolved with
+          its own set of filters, of size
+          :math:`\left\lfloor\frac{out\_channels}{in\_channels}\right\rfloor`.
+
+    .. note::
+
+        Depending on the size of your kernel, several (of the last)
+        columns of the input might be lost, because it is a valid
+        `cross-correlation`_, and not a full `cross-correlation`_.
+        It is up to the user to add proper padding.
+
+    .. note::
+
+        When `groups == in_channels` and `out_channels == K * in_channels`,
+        where `K` is a positive integer, this operation is also known in the
+        literature as a depthwise convolution.
+
+        In other words, for an input of size :math:`(N, C_{in}, L_{in})`,
+        a depthwise convolution with a depthwise multiplier `K` can be constructed by arguments
+        :math:`(C_\text{in}=C_{in}, C_\text{out}=C_{in} \times K, ..., \text{groups}=C_{in})`.
+
+    .. include:: cudnn_deterministic.rst
+
+    Args:
+        in_channels (int): Number of channels in the input image
+        out_channels (int): Number of channels produced by the convolution
+        kernel_size (int or tuple): Size of the convolving kernel
+        stride (int or tuple, optional): Stride of the convolution. Default: 1
+        padding (int or tuple, optional): Zero-padding added to both sides of
+            the input. Default: 0
+        padding_mode (string, optional): Accepted values ``zeros`` and ``circular``.
+            Default: ``zeros``
+        dilation (int or tuple, optional): Spacing between kernel
+            elements. Default: 1
+        groups (int, optional): Number of blocked connections from input
+            channels to output channels. Default: 1
+        bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
+
+    Shape:
+        - Input: :math:`(N, C_{in}, L_{in})`
+        - Output: :math:`(N, C_{out}, L_{out})` where
+
+          .. math::
+              L_{out} = \left\lfloor\frac{L_{in} + 2 \times \text{padding} - \text{dilation}
+                        \times (\text{kernel\_size} - 1) - 1}{\text{stride}} + 1\right\rfloor
+
+    Attributes:
+        weight (Tensor): the learnable weights of the module of shape
+            :math:`(\text{out\_channels}, \frac{\text{in\_channels}}{\text{groups}}, \text{kernel\_size})`.
+            The values of these weights are sampled from
+            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
+            :math:`k = \frac{1}{C_\text{in} * \text{kernel\_size}}`
+        bias (Tensor): the learnable bias of the module of shape
+            (out_channels). If :attr:`bias` is ``True``, then the values of these weights are
+            sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
+            :math:`k = \frac{1}{C_\text{in} * \text{kernel\_size}}`
+
+    Examples::
+
+        >>> m = nn.Conv1d(16, 33, 3, stride=2)
+        >>> input = torch.randn(20, 16, 50)
+        >>> output = m(input)
+
+    .. _cross-correlation:
+        https://en.wikipedia.org/wiki/Cross-correlation
+
+    .. _link:
+        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
+    """
+
+    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
+                 padding=0, dilation=1, groups=1,
+                 bias=True, padding_mode='zeros'):
+        kernel_size = _single(kernel_size)
+        stride = _single(stride)
+        padding = _single(padding)
+        dilation = _single(dilation)
+        super(Conv1d, self).__init__(
+            in_channels, out_channels, kernel_size, stride, padding, dilation,
+            False, _single(0), groups, bias, padding_mode)
+
+    def forward(self, input):
+        if self.padding_mode == 'circular':
+            expanded_padding = ((self.padding[0] + 1) // 2, self.padding[0] // 2)
+            return F.conv1d(F.pad(input, expanded_padding, mode='circular'),
+                            self.weight, self.bias, self.stride,
+                            _single(0), self.dilation, self.groups)
+        return F.conv1d(input, self.weight, self.bias, self.stride,
+                        self.padding, self.dilation, self.groups)
    + + +
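To make the ``L_out`` formula above concrete, a small numeric check (hypothetical sizes)::

    import torch
    import torch.nn as nn

    m = nn.Conv1d(16, 33, kernel_size=3, stride=2, padding=1, dilation=2)
    x = torch.randn(20, 16, 50)
    # L_out = floor((50 + 2*1 - 2*(3 - 1) - 1) / 2 + 1) = floor(24.5) = 24
    print(m(x).shape)   # torch.Size([20, 33, 24])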
+class Conv2d(_ConvNd):
+    r"""Applies a 2D convolution over an input signal composed of several input
+    planes.
+
+    In the simplest case, the output value of the layer with input size
+    :math:`(N, C_{\text{in}}, H, W)` and output :math:`(N, C_{\text{out}}, H_{\text{out}}, W_{\text{out}})`
+    can be precisely described as:
+
+    .. math::
+        \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
+        \sum_{k = 0}^{C_{\text{in}} - 1} \text{weight}(C_{\text{out}_j}, k) \star \text{input}(N_i, k)
+
+    where :math:`\star` is the valid 2D `cross-correlation`_ operator,
+    :math:`N` is a batch size, :math:`C` denotes a number of channels,
+    :math:`H` is the height of the input planes in pixels, and :math:`W` is
+    the width in pixels.
+
+    * :attr:`stride` controls the stride for the cross-correlation, a single
+      number or a tuple.
+
+    * :attr:`padding` controls the amount of implicit zero-padding on both
+      sides for :attr:`padding` number of points for each dimension.
+
+    * :attr:`dilation` controls the spacing between the kernel points; also
+      known as the à trous algorithm. It is harder to describe, but this `link`_
+      has a nice visualization of what :attr:`dilation` does.
+
+    * :attr:`groups` controls the connections between inputs and outputs.
+      :attr:`in_channels` and :attr:`out_channels` must both be divisible by
+      :attr:`groups`. For example,
+
+        * At groups=1, all inputs are convolved to all outputs.
+        * At groups=2, the operation becomes equivalent to having two conv
+          layers side by side, each seeing half the input channels
+          and producing half the output channels, with both subsequently
+          concatenated.
+        * At groups= :attr:`in_channels`, each input channel is convolved with
+          its own set of filters, of size:
+          :math:`\left\lfloor\frac{out\_channels}{in\_channels}\right\rfloor`.
+
+    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
+
+        - a single ``int`` -- in which case the same value is used for the height and width dimension
+        - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
+          and the second `int` for the width dimension
+
+    .. note::
+
+        Depending on the size of your kernel, several (of the last)
+        columns of the input might be lost, because it is a valid `cross-correlation`_,
+        and not a full `cross-correlation`_.
+        It is up to the user to add proper padding.
+
+    .. note::
+
+        When `groups == in_channels` and `out_channels == K * in_channels`,
+        where `K` is a positive integer, this operation is also known in the
+        literature as a depthwise convolution.
+
+        In other words, for an input of size :math:`(N, C_{in}, H_{in}, W_{in})`,
+        a depthwise convolution with a depthwise multiplier `K` can be constructed by arguments
+        :math:`(in\_channels=C_{in}, out\_channels=C_{in} \times K, ..., groups=C_{in})`.
+
+    .. include:: cudnn_deterministic.rst
+
+    Args:
+        in_channels (int): Number of channels in the input image
+        out_channels (int): Number of channels produced by the convolution
+        kernel_size (int or tuple): Size of the convolving kernel
+        stride (int or tuple, optional): Stride of the convolution. Default: 1
+        padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
+        padding_mode (string, optional): Accepted values ``zeros`` and ``circular``.
+            Default: ``zeros``
+        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
+        groups (int, optional): Number of blocked connections from input channels to output channels.
+            Default: 1
+        bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
+
+    Shape:
+        - Input: :math:`(N, C_{in}, H_{in}, W_{in})`
+        - Output: :math:`(N, C_{out}, H_{out}, W_{out})` where
+
+          .. math::
+              H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[0] - \text{dilation}[0]
+                        \times (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor
+
+          .. math::
+              W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[1] - \text{dilation}[1]
+                        \times (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor
+
+    Attributes:
+        weight (Tensor): the learnable weights of the module of shape
+            :math:`(\text{out\_channels}, \frac{\text{in\_channels}}{\text{groups}},`
+            :math:`\text{kernel\_size[0]}, \text{kernel\_size[1]})`.
+            The values of these weights are sampled from
+            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
+            :math:`k = \frac{1}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
+        bias (Tensor): the learnable bias of the module of shape (out_channels). If :attr:`bias` is ``True``,
+            then the values of these weights are
+            sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
+            :math:`k = \frac{1}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
+
+    Examples::
+
+        >>> # With square kernels and equal stride
+        >>> m = nn.Conv2d(16, 33, 3, stride=2)
+        >>> # non-square kernels and unequal stride and with padding
+        >>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
+        >>> # non-square kernels and unequal stride and with padding and dilation
+        >>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
+        >>> input = torch.randn(20, 16, 50, 100)
+        >>> output = m(input)
+
+    .. _cross-correlation:
+        https://en.wikipedia.org/wiki/Cross-correlation
+
+    .. _link:
+        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
+    """
+
+    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
+                 padding=0, dilation=1, groups=1,
+                 bias=True, padding_mode='zeros'):
+        kernel_size = _pair(kernel_size)
+        stride = _pair(stride)
+        padding = _pair(padding)
+        dilation = _pair(dilation)
+        super(Conv2d, self).__init__(
+            in_channels, out_channels, kernel_size, stride, padding, dilation,
+            False, _pair(0), groups, bias, padding_mode)
+
+    def conv2d_forward(self, input, weight):
+        if self.padding_mode == 'circular':
+            expanded_padding = ((self.padding[1] + 1) // 2, self.padding[1] // 2,
+                                (self.padding[0] + 1) // 2, self.padding[0] // 2)
+            return F.conv2d(F.pad(input, expanded_padding, mode='circular'),
+                            weight, self.bias, self.stride,
+                            _pair(0), self.dilation, self.groups)
+        return F.conv2d(input, weight, self.bias, self.stride,
+                        self.padding, self.dilation, self.groups)
+
+    def forward(self, input):
+        return self.conv2d_forward(input, self.weight)
    + +
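The same shape arithmetic, checked numerically with the non-square example from the docstring above::

    import torch
    import torch.nn as nn

    m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
    x = torch.randn(20, 16, 50, 100)
    # H_out = floor((50 + 2*4 - 3*(3 - 1) - 1) / 2 + 1) = floor(26.5) = 26
    # W_out = floor((100 + 2*2 - 1*(5 - 1) - 1) / 1 + 1) = 100
    print(m(x).shape)   # torch.Size([20, 33, 26, 100])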
+class Conv3d(_ConvNd):
+    r"""Applies a 3D convolution over an input signal composed of several input
+    planes.
+
+    In the simplest case, the output value of the layer with input size :math:`(N, C_{in}, D, H, W)`
+    and output :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` can be precisely described as:
+
+    .. math::
+        out(N_i, C_{out_j}) = bias(C_{out_j}) +
+                              \sum_{k = 0}^{C_{in} - 1} weight(C_{out_j}, k) \star input(N_i, k)
+
+    where :math:`\star` is the valid 3D `cross-correlation`_ operator.
+
+    * :attr:`stride` controls the stride for the cross-correlation.
+
+    * :attr:`padding` controls the amount of implicit zero-padding on both
+      sides for :attr:`padding` number of points for each dimension.
+
+    * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
+      It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
+
+    * :attr:`groups` controls the connections between inputs and outputs.
+      :attr:`in_channels` and :attr:`out_channels` must both be divisible by
+      :attr:`groups`. For example,
+
+        * At groups=1, all inputs are convolved to all outputs.
+        * At groups=2, the operation becomes equivalent to having two conv
+          layers side by side, each seeing half the input channels
+          and producing half the output channels, with both subsequently
+          concatenated.
+        * At groups= :attr:`in_channels`, each input channel is convolved with
+          its own set of filters, of size
+          :math:`\left\lfloor\frac{out\_channels}{in\_channels}\right\rfloor`.
+
+    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
+
+        - a single ``int`` -- in which case the same value is used for the depth, height and width dimension
+        - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
+          the second `int` for the height dimension and the third `int` for the width dimension
+
+    .. note::
+
+        Depending on the size of your kernel, several (of the last)
+        columns of the input might be lost, because it is a valid `cross-correlation`_,
+        and not a full `cross-correlation`_.
+        It is up to the user to add proper padding.
+
+    .. note::
+
+        When `groups == in_channels` and `out_channels == K * in_channels`,
+        where `K` is a positive integer, this operation is also known in the
+        literature as a depthwise convolution.
+
+        In other words, for an input of size :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`,
+        a depthwise convolution with a depthwise multiplier `K` can be constructed by arguments
+        :math:`(in\_channels=C_{in}, out\_channels=C_{in} \times K, ..., groups=C_{in})`.
+
+    .. include:: cudnn_deterministic.rst
+
+    Args:
+        in_channels (int): Number of channels in the input image
+        out_channels (int): Number of channels produced by the convolution
+        kernel_size (int or tuple): Size of the convolving kernel
+        stride (int or tuple, optional): Stride of the convolution. Default: 1
+        padding (int or tuple, optional): Zero-padding added to all three sides of the input. Default: 0
+        padding_mode (string, optional): Accepted values ``zeros`` and ``circular``.
+            Default: ``zeros``
+        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
+        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
+        bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
+
+    Shape:
+        - Input: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
+        - Output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` where
+
+          .. math::
+              D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] - \text{dilation}[0]
+                        \times (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor
+
+          .. math::
+              H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] - \text{dilation}[1]
+                        \times (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor
+
+          .. math::
+              W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] - \text{dilation}[2]
+                        \times (\text{kernel\_size}[2] - 1) - 1}{\text{stride}[2]} + 1\right\rfloor
+
+    Attributes:
+        weight (Tensor): the learnable weights of the module of shape
+            :math:`(\text{out\_channels}, \frac{\text{in\_channels}}{\text{groups}},`
+            :math:`\text{kernel\_size[0]}, \text{kernel\_size[1]}, \text{kernel\_size[2]})`.
+            The values of these weights are sampled from
+            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
+            :math:`k = \frac{1}{C_\text{in} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
+        bias (Tensor): the learnable bias of the module of shape (out_channels). If :attr:`bias` is ``True``,
+            then the values of these weights are
+            sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
+            :math:`k = \frac{1}{C_\text{in} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
+
+    Examples::
+
+        >>> # With square kernels and equal stride
+        >>> m = nn.Conv3d(16, 33, 3, stride=2)
+        >>> # non-square kernels and unequal stride and with padding
+        >>> m = nn.Conv3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0))
+        >>> input = torch.randn(20, 16, 10, 50, 100)
+        >>> output = m(input)
+
+    .. _cross-correlation:
+        https://en.wikipedia.org/wiki/Cross-correlation
+
+    .. _link:
+        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
+    """
+
+    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
+                 padding=0, dilation=1, groups=1,
+                 bias=True, padding_mode='zeros'):
+        kernel_size = _triple(kernel_size)
+        stride = _triple(stride)
+        padding = _triple(padding)
+        dilation = _triple(dilation)
+        super(Conv3d, self).__init__(
+            in_channels, out_channels, kernel_size, stride, padding, dilation,
+            False, _triple(0), groups, bias, padding_mode)
+
+    def forward(self, input):
+        if self.padding_mode == 'circular':
+            expanded_padding = ((self.padding[2] + 1) // 2, self.padding[2] // 2,
+                                (self.padding[1] + 1) // 2, self.padding[1] // 2,
+                                (self.padding[0] + 1) // 2, self.padding[0] // 2)
+            return F.conv3d(F.pad(input, expanded_padding, mode='circular'),
+                            self.weight, self.bias, self.stride, _triple(0),
+                            self.dilation, self.groups)
+        return F.conv3d(input, self.weight, self.bias, self.stride,
+                        self.padding, self.dilation, self.groups)
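A small sketch (hypothetical channel counts) of the depthwise case described in the note above, where ``groups == in_channels``::

    import torch
    import torch.nn as nn

    m = nn.Conv3d(8, 16, kernel_size=3, padding=1, groups=8)  # K = 2
    x = torch.randn(2, 8, 4, 10, 10)
    print(m(x).shape)       # torch.Size([2, 16, 4, 10, 10])
    print(m.weight.shape)   # torch.Size([16, 1, 3, 3, 3]) -- in_channels/groups == 1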
+
+
+class _ConvTransposeMixin(object):
+
+    def forward(self, input, output_size=None):
+        # type: (Tensor, Optional[List[int]]) -> Tensor
+        output_padding = self._output_padding(input, output_size, self.stride, self.padding, self.kernel_size)
+        func = self._backend.ConvNd(
+            self.stride, self.padding, self.dilation, self.transposed,
+            output_padding, self.groups)
+        if self.bias is None:
+            return func(input, self.weight)
+        else:
+            return func(input, self.weight, self.bias)
+
+    def _output_padding(self, input, output_size, stride, padding, kernel_size):
+        # type: (Tensor, Optional[List[int]], List[int], List[int], List[int]) -> List[int]
+        if output_size is None:
+            ret = _single(self.output_padding)  # converting to list if it was not already
+        else:
+            k = input.dim() - 2
+            if len(output_size) == k + 2:
+                output_size = output_size[2:]
+            if len(output_size) != k:
+                raise ValueError(
+                    "output_size must have {} or {} elements (got {})"
+                    .format(k, k + 2, len(output_size)))
+
+            min_sizes = torch.jit.annotate(List[int], [])
+            max_sizes = torch.jit.annotate(List[int], [])
+            for d in range(k):
+                dim_size = ((input.size(d + 2) - 1) * stride[d] -
+                            2 * padding[d] + kernel_size[d])
+                min_sizes.append(dim_size)
+                max_sizes.append(min_sizes[d] + stride[d] - 1)
+
+            for i in range(len(output_size)):
+                size = output_size[i]
+                min_size = min_sizes[i]
+                max_size = max_sizes[i]
+                if size < min_size or size > max_size:
+                    raise ValueError((
+                        "requested an output size of {}, but valid sizes range "
+                        "from {} to {} (for an input of {})").format(
+                            output_size, min_sizes, max_sizes, input.size()[2:]))
+
+            res = torch.jit.annotate(List[int], [])
+            for d in range(k):
+                res.append(output_size[d] - min_sizes[d])
+
+            ret = res
+        return ret
+
+
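To make the ``_output_padding`` range check above concrete, a one-dimensional sketch of the same arithmetic (hypothetical numbers)::

    def valid_output_range(in_size, stride, padding, kernel_size):
        # mirrors the min/max size computation in _output_padding above
        min_size = (in_size - 1) * stride - 2 * padding + kernel_size
        max_size = min_size + stride - 1
        return min_size, max_size

    # an input of size 6 with stride 2, padding 1, kernel 3 may legally
    # produce outputs of size 11 or 12; output_padding = output_size - 11
    print(valid_output_range(6, 2, 1, 3))   # (11, 12)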
+class ConvTranspose1d(_ConvTransposeMixin, _ConvNd):
+    r"""Applies a 1D transposed convolution operator over an input image
+    composed of several input planes.
+
+    This module can be seen as the gradient of Conv1d with respect to its input.
+    It is also known as a fractionally-strided convolution or
+    a deconvolution (although it is not an actual deconvolution operation).
+
+    * :attr:`stride` controls the stride for the cross-correlation.
+
+    * :attr:`padding` controls the amount of implicit zero-padding on both
+      sides for ``dilation * (kernel_size - 1) - padding`` number of points. See note
+      below for details.
+
+    * :attr:`output_padding` controls the additional size added to one side
+      of the output shape. See note below for details.
+
+    * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
+      It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
+
+    * :attr:`groups` controls the connections between inputs and outputs.
+      :attr:`in_channels` and :attr:`out_channels` must both be divisible by
+      :attr:`groups`. For example,
+
+        * At groups=1, all inputs are convolved to all outputs.
+        * At groups=2, the operation becomes equivalent to having two conv
+          layers side by side, each seeing half the input channels
+          and producing half the output channels, with both subsequently
+          concatenated.
+        * At groups= :attr:`in_channels`, each input channel is convolved with
+          its own set of filters (of size
+          :math:`\left\lfloor\frac{out\_channels}{in\_channels}\right\rfloor`).
+
+    .. note::
+
+        Depending on the size of your kernel, several (of the last)
+        columns of the input might be lost, because it is a valid `cross-correlation`_,
+        and not a full `cross-correlation`_.
+        It is up to the user to add proper padding.
+
+    .. note::
+        The :attr:`padding` argument effectively adds ``dilation * (kernel_size - 1) - padding``
+        amount of zero padding to both sides of the input. This is set so that
+        when a :class:`~torch.nn.Conv1d` and a :class:`~torch.nn.ConvTranspose1d`
+        are initialized with the same parameters, they are inverses of each other in
+        regard to the input and output shapes. However, when ``stride > 1``,
+        :class:`~torch.nn.Conv1d` maps multiple input shapes to the same output
+        shape. :attr:`output_padding` is provided to resolve this ambiguity by
+        effectively increasing the calculated output shape on one side. Note
+        that :attr:`output_padding` is only used to find the output shape, but does
+        not actually add zero-padding to the output.
+
+    .. include:: cudnn_deterministic.rst
+
+    Args:
+        in_channels (int): Number of channels in the input image
+        out_channels (int): Number of channels produced by the convolution
+        kernel_size (int or tuple): Size of the convolving kernel
+        stride (int or tuple, optional): Stride of the convolution. Default: 1
+        padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
+            will be added to both sides of the input. Default: 0
+        output_padding (int or tuple, optional): Additional size added to one side
+            of the output shape. Default: 0
+        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
+        bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
+        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
+
+    Shape:
+        - Input: :math:`(N, C_{in}, L_{in})`
+        - Output: :math:`(N, C_{out}, L_{out})` where
+
+          .. math::
+              L_{out} = (L_{in} - 1) \times \text{stride} - 2 \times \text{padding} + \text{dilation}
+                        \times (\text{kernel\_size} - 1) + \text{output\_padding} + 1
+
+    Attributes:
+        weight (Tensor): the learnable weights of the module of shape
+            :math:`(\text{in\_channels}, \frac{\text{out\_channels}}{\text{groups}},`
+            :math:`\text{kernel\_size})`.
+            The values of these weights are sampled from
+            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
+            :math:`k = \frac{1}{C_\text{in} * \text{kernel\_size}}`
+        bias (Tensor): the learnable bias of the module of shape (out_channels).
+            If :attr:`bias` is ``True``, then the values of these weights are
+            sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
+            :math:`k = \frac{1}{C_\text{in} * \text{kernel\_size}}`
+
+    .. _cross-correlation:
+        https://en.wikipedia.org/wiki/Cross-correlation
+
+    .. _link:
+        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
+    """
+
+    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
+                 padding=0, output_padding=0, groups=1, bias=True,
+                 dilation=1, padding_mode='zeros'):
+        kernel_size = _single(kernel_size)
+        stride = _single(stride)
+        padding = _single(padding)
+        dilation = _single(dilation)
+        output_padding = _single(output_padding)
+        super(ConvTranspose1d, self).__init__(
+            in_channels, out_channels, kernel_size, stride, padding, dilation,
+            True, output_padding, groups, bias, padding_mode)
+
+    def forward(self, input, output_size=None):
+        # type: (Tensor, Optional[List[int]]) -> Tensor
+        if self.padding_mode != 'zeros':
+            raise ValueError('Only `zeros` padding mode is supported for ConvTranspose1d')
+
+        output_padding = self._output_padding(input, output_size, self.stride, self.padding, self.kernel_size)
+        return F.conv_transpose1d(
+            input, self.weight, self.bias, self.stride, self.padding,
+            output_padding, self.groups, self.dilation)
    + + +
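A quick numeric check of the ``L_out`` formula above (hypothetical sizes)::

    import torch
    import torch.nn as nn

    m = nn.ConvTranspose1d(16, 33, kernel_size=3, stride=2, padding=1)
    x = torch.randn(20, 16, 50)
    # L_out = (50 - 1)*2 - 2*1 + 1*(3 - 1) + 0 + 1 = 99
    print(m(x).shape)   # torch.Size([20, 33, 99])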
+class ConvTranspose2d(_ConvTransposeMixin, _ConvNd):
+    r"""Applies a 2D transposed convolution operator over an input image
+    composed of several input planes.
+
+    This module can be seen as the gradient of Conv2d with respect to its input.
+    It is also known as a fractionally-strided convolution or
+    a deconvolution (although it is not an actual deconvolution operation).
+
+    * :attr:`stride` controls the stride for the cross-correlation.
+
+    * :attr:`padding` controls the amount of implicit zero-padding on both
+      sides for ``dilation * (kernel_size - 1) - padding`` number of points. See note
+      below for details.
+
+    * :attr:`output_padding` controls the additional size added to one side
+      of the output shape. See note below for details.
+
+    * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
+      It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
+
+    * :attr:`groups` controls the connections between inputs and outputs.
+      :attr:`in_channels` and :attr:`out_channels` must both be divisible by
+      :attr:`groups`. For example,
+
+        * At groups=1, all inputs are convolved to all outputs.
+        * At groups=2, the operation becomes equivalent to having two conv
+          layers side by side, each seeing half the input channels
+          and producing half the output channels, with both subsequently
+          concatenated.
+        * At groups= :attr:`in_channels`, each input channel is convolved with
+          its own set of filters (of size
+          :math:`\left\lfloor\frac{out\_channels}{in\_channels}\right\rfloor`).
+
+    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`output_padding`
+    can either be:
+
+        - a single ``int`` -- in which case the same value is used for the height and width dimensions
+        - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
+          and the second `int` for the width dimension
+
+    .. note::
+
+        Depending on the size of your kernel, several (of the last)
+        columns of the input might be lost, because it is a valid `cross-correlation`_,
+        and not a full `cross-correlation`_.
+        It is up to the user to add proper padding.
+
+    .. note::
+        The :attr:`padding` argument effectively adds ``dilation * (kernel_size - 1) - padding``
+        amount of zero padding to both sides of the input. This is set so that
+        when a :class:`~torch.nn.Conv2d` and a :class:`~torch.nn.ConvTranspose2d`
+        are initialized with the same parameters, they are inverses of each other in
+        regard to the input and output shapes. However, when ``stride > 1``,
+        :class:`~torch.nn.Conv2d` maps multiple input shapes to the same output
+        shape. :attr:`output_padding` is provided to resolve this ambiguity by
+        effectively increasing the calculated output shape on one side. Note
+        that :attr:`output_padding` is only used to find the output shape, but does
+        not actually add zero-padding to the output.
+
+    .. include:: cudnn_deterministic.rst
+
+    Args:
+        in_channels (int): Number of channels in the input image
+        out_channels (int): Number of channels produced by the convolution
+        kernel_size (int or tuple): Size of the convolving kernel
+        stride (int or tuple, optional): Stride of the convolution. Default: 1
+        padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
+            will be added to both sides of each dimension in the input. Default: 0
+        output_padding (int or tuple, optional): Additional size added to one side
+            of each dimension in the output shape. Default: 0
+        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
+        bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
+        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
+
+    Shape:
+        - Input: :math:`(N, C_{in}, H_{in}, W_{in})`
+        - Output: :math:`(N, C_{out}, H_{out}, W_{out})` where
+
+          .. math::
+              H_{out} = (H_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{dilation}[0]
+                        \times (\text{kernel\_size}[0] - 1) + \text{output\_padding}[0] + 1
+
+          .. math::
+              W_{out} = (W_{in} - 1) \times \text{stride}[1] - 2 \times \text{padding}[1] + \text{dilation}[1]
+                        \times (\text{kernel\_size}[1] - 1) + \text{output\_padding}[1] + 1
+
+    Attributes:
+        weight (Tensor): the learnable weights of the module of shape
+            :math:`(\text{in\_channels}, \frac{\text{out\_channels}}{\text{groups}},`
+            :math:`\text{kernel\_size[0]}, \text{kernel\_size[1]})`.
+            The values of these weights are sampled from
+            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
+            :math:`k = \frac{1}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
+        bias (Tensor): the learnable bias of the module of shape (out_channels).
+            If :attr:`bias` is ``True``, then the values of these weights are
+            sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
+            :math:`k = \frac{1}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
+
+    Examples::
+
+        >>> # With square kernels and equal stride
+        >>> m = nn.ConvTranspose2d(16, 33, 3, stride=2)
+        >>> # non-square kernels and unequal stride and with padding
+        >>> m = nn.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
+        >>> input = torch.randn(20, 16, 50, 100)
+        >>> output = m(input)
+        >>> # exact output size can be also specified as an argument
+        >>> input = torch.randn(1, 16, 12, 12)
+        >>> downsample = nn.Conv2d(16, 16, 3, stride=2, padding=1)
+        >>> upsample = nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1)
+        >>> h = downsample(input)
+        >>> h.size()
+        torch.Size([1, 16, 6, 6])
+        >>> output = upsample(h, output_size=input.size())
+        >>> output.size()
+        torch.Size([1, 16, 12, 12])
+
+    .. _cross-correlation:
+        https://en.wikipedia.org/wiki/Cross-correlation
+
+    .. _link:
+        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
+    """
+
+    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
+                 padding=0, output_padding=0, groups=1, bias=True,
+                 dilation=1, padding_mode='zeros'):
+        kernel_size = _pair(kernel_size)
+        stride = _pair(stride)
+        padding = _pair(padding)
+        dilation = _pair(dilation)
+        output_padding = _pair(output_padding)
+        super(ConvTranspose2d, self).__init__(
+            in_channels, out_channels, kernel_size, stride, padding, dilation,
+            True, output_padding, groups, bias, padding_mode)
+
+    def forward(self, input, output_size=None):
+        # type: (Tensor, Optional[List[int]]) -> Tensor
+        if self.padding_mode != 'zeros':
+            raise ValueError('Only `zeros` padding mode is supported for ConvTranspose2d')
+
+        output_padding = self._output_padding(input, output_size, self.stride, self.padding, self.kernel_size)
+
+        return F.conv_transpose2d(
+            input, self.weight, self.bias, self.stride, self.padding,
+            output_padding, self.groups, self.dilation)
    + + +
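The shape ambiguity described in the note above, made concrete (hypothetical sizes): with ``stride=2`` both 11x11 and 12x12 inputs downsample to 6x6, so ``output_size`` selects which shape to recover::

    import torch
    import torch.nn as nn

    down = nn.Conv2d(16, 16, 3, stride=2, padding=1)
    up = nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1)
    print(down(torch.randn(1, 16, 11, 11)).shape)  # torch.Size([1, 16, 6, 6])
    print(down(torch.randn(1, 16, 12, 12)).shape)  # torch.Size([1, 16, 6, 6])
    h = torch.randn(1, 16, 6, 6)
    print(up(h).shape)                             # torch.Size([1, 16, 11, 11])
    print(up(h, output_size=[12, 12]).shape)       # torch.Size([1, 16, 12, 12])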
+class ConvTranspose3d(_ConvTransposeMixin, _ConvNd):
+    r"""Applies a 3D transposed convolution operator over an input image composed of several input
+    planes.
+
+    The transposed convolution operator multiplies each input value element-wise by a learnable kernel,
+    and sums over the outputs from all input feature planes.
+
+    This module can be seen as the gradient of Conv3d with respect to its input.
+    It is also known as a fractionally-strided convolution or
+    a deconvolution (although it is not an actual deconvolution operation).
+
+    * :attr:`stride` controls the stride for the cross-correlation.
+
+    * :attr:`padding` controls the amount of implicit zero-padding on both
+      sides for ``dilation * (kernel_size - 1) - padding`` number of points. See note
+      below for details.
+
+    * :attr:`output_padding` controls the additional size added to one side
+      of the output shape. See note below for details.
+
+    * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
+      It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
+
+    * :attr:`groups` controls the connections between inputs and outputs.
+      :attr:`in_channels` and :attr:`out_channels` must both be divisible by
+      :attr:`groups`. For example,
+
+        * At groups=1, all inputs are convolved to all outputs.
+        * At groups=2, the operation becomes equivalent to having two conv
+          layers side by side, each seeing half the input channels
+          and producing half the output channels, with both subsequently
+          concatenated.
+        * At groups= :attr:`in_channels`, each input channel is convolved with
+          its own set of filters (of size
+          :math:`\left\lfloor\frac{out\_channels}{in\_channels}\right\rfloor`).
+
+    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`output_padding`
+    can either be:
+
+        - a single ``int`` -- in which case the same value is used for the depth, height and width dimensions
+        - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
+          the second `int` for the height dimension and the third `int` for the width dimension
+
+    .. note::
+
+        Depending on the size of your kernel, several (of the last)
+        columns of the input might be lost, because it is a valid `cross-correlation`_,
+        and not a full `cross-correlation`_.
+        It is up to the user to add proper padding.
+
+    .. note::
+        The :attr:`padding` argument effectively adds ``dilation * (kernel_size - 1) - padding``
+        amount of zero padding to both sides of the input. This is set so that
+        when a :class:`~torch.nn.Conv3d` and a :class:`~torch.nn.ConvTranspose3d`
+        are initialized with the same parameters, they are inverses of each other in
+        regard to the input and output shapes. However, when ``stride > 1``,
+        :class:`~torch.nn.Conv3d` maps multiple input shapes to the same output
+        shape. :attr:`output_padding` is provided to resolve this ambiguity by
+        effectively increasing the calculated output shape on one side. Note
+        that :attr:`output_padding` is only used to find the output shape, but does
+        not actually add zero-padding to the output.
+
+    .. include:: cudnn_deterministic.rst
+
+    Args:
+        in_channels (int): Number of channels in the input image
+        out_channels (int): Number of channels produced by the convolution
+        kernel_size (int or tuple): Size of the convolving kernel
+        stride (int or tuple, optional): Stride of the convolution. Default: 1
+        padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
+            will be added to both sides of each dimension in the input. Default: 0
+        output_padding (int or tuple, optional): Additional size added to one side
+            of each dimension in the output shape. Default: 0
+        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
+        bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
+        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
+
+    Shape:
+        - Input: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
+        - Output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` where
+
+          .. math::
+              D_{out} = (D_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{dilation}[0]
+                        \times (\text{kernel\_size}[0] - 1) + \text{output\_padding}[0] + 1
+
+          .. math::
+              H_{out} = (H_{in} - 1) \times \text{stride}[1] - 2 \times \text{padding}[1] + \text{dilation}[1]
+                        \times (\text{kernel\_size}[1] - 1) + \text{output\_padding}[1] + 1
+
+          .. math::
+              W_{out} = (W_{in} - 1) \times \text{stride}[2] - 2 \times \text{padding}[2] + \text{dilation}[2]
+                        \times (\text{kernel\_size}[2] - 1) + \text{output\_padding}[2] + 1
+
+    Attributes:
+        weight (Tensor): the learnable weights of the module of shape
+            :math:`(\text{in\_channels}, \frac{\text{out\_channels}}{\text{groups}},`
+            :math:`\text{kernel\_size[0]}, \text{kernel\_size[1]}, \text{kernel\_size[2]})`.
+            The values of these weights are sampled from
+            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
+            :math:`k = \frac{1}{C_\text{in} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
+        bias (Tensor): the learnable bias of the module of shape (out_channels).
+            If :attr:`bias` is ``True``, then the values of these weights are
+            sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
+            :math:`k = \frac{1}{C_\text{in} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
+
+    Examples::
+
+        >>> # With square kernels and equal stride
+        >>> m = nn.ConvTranspose3d(16, 33, 3, stride=2)
+        >>> # non-square kernels and unequal stride and with padding
+        >>> m = nn.ConvTranspose3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(0, 4, 2))
+        >>> input = torch.randn(20, 16, 10, 50, 100)
+        >>> output = m(input)
+
+    .. _cross-correlation:
+        https://en.wikipedia.org/wiki/Cross-correlation
+
+    .. _link:
+        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
+    """
+
+    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
+                 padding=0, output_padding=0, groups=1, bias=True,
+                 dilation=1, padding_mode='zeros'):
+        kernel_size = _triple(kernel_size)
+        stride = _triple(stride)
+        padding = _triple(padding)
+        dilation = _triple(dilation)
+        output_padding = _triple(output_padding)
+        super(ConvTranspose3d, self).__init__(
+            in_channels, out_channels, kernel_size, stride, padding, dilation,
+            True, output_padding, groups, bias, padding_mode)
+
+    def forward(self, input, output_size=None):
+        # type: (Tensor, Optional[List[int]]) -> Tensor
+        if self.padding_mode != 'zeros':
+            raise ValueError('Only `zeros` padding mode is supported for ConvTranspose3d')
+
+        output_padding = self._output_padding(input, output_size, self.stride, self.padding, self.kernel_size)
+
+        return F.conv_transpose3d(
+            input, self.weight, self.bias, self.stride, self.padding,
+            output_padding, self.groups, self.dilation)
+
+
+# TODO: Conv2dLocal
+# TODO: Conv2dMap
+# TODO: ConvTranspose2dMap
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/modules/distance.html b/docs/stable/_modules/torch/nn/modules/distance.html
new file mode 100644
index 000000000000..46a5993161ce
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/modules/distance.html
@@ -0,0 +1,582 @@
    Source code for torch.nn.modules.distance

    +from .module import Module
    +from .. import functional as F
    +
    +
    +
    [docs]class PairwiseDistance(Module): + r""" + Computes the batchwise pairwise distance between vectors :math:`v_1`, :math:`v_2` using the p-norm: + + .. math :: + \Vert x \Vert _p = \left( \sum_{i=1}^n \vert x_i \vert ^ p \right) ^ {1/p}. + + Args: + p (real): the norm degree. Default: 2 + eps (float, optional): Small value to avoid division by zero. + Default: 1e-6 + keepdim (bool, optional): Determines whether or not to keep the vector dimension. + Default: False + Shape: + - Input1: :math:`(N, D)` where `D = vector dimension` + - Input2: :math:`(N, D)`, same shape as the Input1 + - Output: :math:`(N)`. If :attr:`keepdim` is ``True``, then :math:`(N, 1)`. + Examples:: + >>> pdist = nn.PairwiseDistance(p=2) + >>> input1 = torch.randn(100, 128) + >>> input2 = torch.randn(100, 128) + >>> output = pdist(input1, input2) + """ + __constants__ = ['norm', 'eps', 'keepdim'] + + def __init__(self, p=2., eps=1e-6, keepdim=False): + super(PairwiseDistance, self).__init__() + self.norm = p + self.eps = eps + self.keepdim = keepdim + + def forward(self, x1, x2): + return F.pairwise_distance(x1, x2, self.norm, self.eps, self.keepdim)
    + + +
    [docs]class CosineSimilarity(Module): + r"""Returns cosine similarity between :math:`x_1` and :math:`x_2`, computed along dim. + + .. math :: + \text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2 \cdot \Vert x_2 \Vert _2, \epsilon)}. + + Args: + dim (int, optional): Dimension where cosine similarity is computed. Default: 1 + eps (float, optional): Small value to avoid division by zero. + Default: 1e-8 + Shape: + - Input1: :math:`(\ast_1, D, \ast_2)` where D is at position `dim` + - Input2: :math:`(\ast_1, D, \ast_2)`, same shape as the Input1 + - Output: :math:`(\ast_1, \ast_2)` + Examples:: + >>> input1 = torch.randn(100, 128) + >>> input2 = torch.randn(100, 128) + >>> cos = nn.CosineSimilarity(dim=1, eps=1e-6) + >>> output = cos(input1, input2) + """ + __constants__ = ['dim', 'eps'] + + def __init__(self, dim=1, eps=1e-8): + super(CosineSimilarity, self).__init__() + self.dim = dim + self.eps = eps + + def forward(self, x1, x2): + return F.cosine_similarity(x1, x2, self.dim, self.eps)
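+# Illustrative sketch: the cosine-similarity formula above computed by hand
+# and compared against the module (tensor names are arbitrary)::
+#
+#     >>> x1, x2 = torch.randn(100, 128), torch.randn(100, 128)
+#     >>> cos = nn.CosineSimilarity(dim=1)
+#     >>> manual = (x1 * x2).sum(dim=1) / (x1.norm(dim=1) * x2.norm(dim=1)).clamp(min=cos.eps)
+#     >>> torch.allclose(cos(x1, x2), manual, atol=1e-6)
+#     True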
    +
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/modules/dropout.html b/docs/stable/_modules/torch/nn/modules/dropout.html
new file mode 100644
index 000000000000..35bb363284d6
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/modules/dropout.html
@@ -0,0 +1,700 @@
+torch.nn.modules.dropout — PyTorch master documentation

    Source code for torch.nn.modules.dropout

    +from .module import Module
    +from .. import functional as F
    +
    +
    +class _DropoutNd(Module):
    +    __constants__ = ['p', 'inplace']
    +
    +    def __init__(self, p=0.5, inplace=False):
    +        super(_DropoutNd, self).__init__()
    +        if p < 0 or p > 1:
    +            raise ValueError("dropout probability has to be between 0 and 1, "
    +                             "but got {}".format(p))
    +        self.p = p
    +        self.inplace = inplace
    +
    +    def extra_repr(self):
    +        return 'p={}, inplace={}'.format(self.p, self.inplace)
    +
    +
    +
    [docs]class Dropout(_DropoutNd): + r"""During training, randomly zeroes some of the elements of the input + tensor with probability :attr:`p` using samples from a Bernoulli + distribution. Each channel will be zeroed out independently on every forward + call. + + This has proven to be an effective technique for regularization and + preventing the co-adaptation of neurons as described in the paper + `Improving neural networks by preventing co-adaptation of feature + detectors`_ . + + Furthermore, the outputs are scaled by a factor of :math:`\frac{1}{1-p}` during + training. This means that during evaluation the module simply computes an + identity function. + + Args: + p: probability of an element to be zeroed. Default: 0.5 + inplace: If set to ``True``, will do this operation in-place. Default: ``False`` + + Shape: + - Input: :math:`(*)`. Input can be of any shape + - Output: :math:`(*)`. Output is of the same shape as input + + Examples:: + + >>> m = nn.Dropout(p=0.2) + >>> input = torch.randn(20, 16) + >>> output = m(input) + + .. _Improving neural networks by preventing co-adaptation of feature + detectors: https://arxiv.org/abs/1207.0580 + """ + + def forward(self, input): + return F.dropout(input, self.p, self.training, self.inplace)
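+# Illustrative sketch of the :math:`\frac{1}{1-p}` scaling described above:
+# surviving elements are scaled up in training mode, and the module is the
+# identity in eval mode::
+#
+#     >>> m = nn.Dropout(p=0.5)
+#     >>> x = torch.ones(10)
+#     >>> y = m(x)                        # training mode is the default
+#     >>> set(y.tolist()) <= {0.0, 2.0}   # kept entries scaled by 1/(1-0.5) = 2
+#     True
+#     >>> torch.equal(m.eval()(x), x)     # identity during evaluation
+#     True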
    + + +
+class Dropout2d(_DropoutNd):
+    r"""Randomly zero out entire channels (a channel is a 2D feature map,
+    e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
+    batched input is a 2D tensor :math:`\text{input}[i, j]`).
+    Each channel will be zeroed out independently on every forward call with
+    probability :attr:`p` using samples from a Bernoulli distribution.
+
+    Usually the input comes from :class:`nn.Conv2d` modules.
+
+    As described in the paper
+    `Efficient Object Localization Using Convolutional Networks`_ ,
+    if adjacent pixels within feature maps are strongly correlated
+    (as is normally the case in early convolution layers) then i.i.d. dropout
+    will not regularize the activations and will otherwise just result
+    in an effective learning rate decrease.
+
+    In this case, :func:`nn.Dropout2d` will help promote independence between
+    feature maps and should be used instead.
+
+    Args:
+        p (float, optional): probability of an element to be zeroed.
+        inplace (bool, optional): If set to ``True``, will do this operation
+            in-place
+
+    Shape:
+        - Input: :math:`(N, C, H, W)`
+        - Output: :math:`(N, C, H, W)` (same shape as input)
+
+    Examples::
+
+        >>> m = nn.Dropout2d(p=0.2)
+        >>> input = torch.randn(20, 16, 32, 32)
+        >>> output = m(input)
+
+    .. _Efficient Object Localization Using Convolutional Networks:
+       http://arxiv.org/abs/1411.4280
+    """
+
+    def forward(self, input):
+        return F.dropout2d(input, self.p, self.training, self.inplace)
    + + +
    [docs]class Dropout3d(_DropoutNd): + r"""Randomly zero out entire channels (a channel is a 3D feature map, + e.g., the :math:`j`-th channel of the :math:`i`-th sample in the + batched input is a 3D tensor :math:`\text{input}[i, j]`). + Each channel will be zeroed out independently on every forward call with + probability :attr:`p` using samples from a Bernoulli distribution. + + Usually the input comes from :class:`nn.Conv3d` modules. + + As described in the paper + `Efficient Object Localization Using Convolutional Networks`_ , + if adjacent pixels within feature maps are strongly correlated + (as is normally the case in early convolution layers) then i.i.d. dropout + will not regularize the activations and will otherwise just result + in an effective learning rate decrease. + + In this case, :func:`nn.Dropout3d` will help promote independence between + feature maps and should be used instead. + + Args: + p (float, optional): probability of an element to be zeroed. + inplace (bool, optional): If set to ``True``, will do this operation + in-place + + Shape: + - Input: :math:`(N, C, D, H, W)` + - Output: :math:`(N, C, D, H, W)` (same shape as input) + + Examples:: + + >>> m = nn.Dropout3d(p=0.2) + >>> input = torch.randn(20, 16, 4, 32, 32) + >>> output = m(input) + + .. _Efficient Object Localization Using Convolutional Networks: + http://arxiv.org/abs/1411.4280 + """ + + def forward(self, input): + return F.dropout3d(input, self.p, self.training, self.inplace)
    + + +
+class AlphaDropout(_DropoutNd):
+    r"""Applies Alpha Dropout over the input.
+
+    Alpha Dropout is a type of Dropout that maintains the self-normalizing
+    property.
+    For an input with zero mean and unit standard deviation, the output of
+    Alpha Dropout maintains the original mean and standard deviation of the
+    input.
+    Alpha Dropout goes hand-in-hand with the SELU activation function, which
+    ensures that the outputs have zero mean and unit standard deviation.
+
+    During training, it randomly masks some of the elements of the input
+    tensor with probability *p* using samples from a Bernoulli distribution.
+    The elements to be masked are randomized on every forward call, and scaled
+    and shifted to maintain zero mean and unit standard deviation.
+
+    During evaluation the module simply computes an identity function.
+
+    More details can be found in the paper `Self-Normalizing Neural Networks`_ .
+
+    Args:
+        p (float): probability of an element to be dropped. Default: 0.5
+        inplace (bool, optional): If set to ``True``, will do this operation
+            in-place
+
+    Shape:
+        - Input: :math:`(*)`. Input can be of any shape
+        - Output: :math:`(*)`. Output is of the same shape as input
+
+    Examples::
+
+        >>> m = nn.AlphaDropout(p=0.2)
+        >>> input = torch.randn(20, 16)
+        >>> output = m(input)
+
+    .. _Self-Normalizing Neural Networks: https://arxiv.org/abs/1706.02515
+    """
+
+    def forward(self, input):
+        return F.alpha_dropout(input, self.p, self.training)
+
+
+class FeatureAlphaDropout(_DropoutNd):
+    # Undocumented channel-wise variant of AlphaDropout: it randomly masks out
+    # entire feature maps (channels) rather than individual elements, while
+    # keeping the scale-and-shift behavior that preserves self-normalizing
+    # statistics (compare Dropout2d/Dropout3d vs. Dropout).
+
+    def forward(self, input):
+        return F.feature_alpha_dropout(input, self.p, self.training)
+
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/modules/fold.html b/docs/stable/_modules/torch/nn/modules/fold.html
new file mode 100644
index 000000000000..515223e78d01
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/modules/fold.html
@@ -0,0 +1,736 @@
+torch.nn.modules.fold — PyTorch master documentation

    Source code for torch.nn.modules.fold

    +# coding=utf-8
    +from .module import Module
    +from .. import functional as F
    +
    +
    +
    [docs]class Fold(Module): + r"""Combines an array of sliding local blocks into a large containing + tensor. + + Consider a batched :attr:`input` tensor containing sliding local blocks, + e.g., patches of images, of shape :math:`(N, C \times \prod(\text{kernel\_size}), L)`, + where :math:`N` is batch dimension, :math:`C \times \prod(\text{kernel\_size})` + is the number of values within a block (a block has :math:`\prod(\text{kernel\_size})` + spatial locations each containing a :math:`C`-channeled vector), and + :math:`L` is the total number of blocks. (This is exactly the + same specification as the output shape of :class:`~torch.nn.Unfold`.) This + operation combines these local blocks into the large :attr:`output` tensor + of shape :math:`(N, C, \text{output\_size}[0], \text{output\_size}[1], \dots)` + by summing the overlapping values. Similar to :class:`~torch.nn.Unfold`, the + arguments must satisfy + + .. math:: + L = \prod_d \left\lfloor\frac{\text{output\_size}[d] + 2 \times \text{padding}[d] % + - \text{dilation}[d] \times (\text{kernel\_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor, + + where :math:`d` is over all spatial dimensions. + + * :attr:`output_size` describes the spatial shape of the large containing + tensor of the sliding local blocks. It is useful to resolve the ambiguity + when multiple input shapes map to same number of sliding blocks, e.g., + with ``stride > 0``. + + The :attr:`padding`, :attr:`stride` and :attr:`dilation` arguments specify + how the sliding blocks are retrieved. + + * :attr:`stride` controls the stride for the sliding blocks. + + * :attr:`padding` controls the amount of implicit zero-paddings on both + sides for :attr:`padding` number of points for each dimension before + reshaping. + + * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm. + It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does. + + Args: + output_size (int or tuple): the shape of the spatial dimensions of the + output (i.e., ``output.sizes()[2:]``) + kernel_size (int or tuple): the size of the sliding blocks + stride (int or tuple): the stride of the sliding blocks in the input + spatial dimensions. Default: 1 + padding (int or tuple, optional): implicit zero padding to be added on + both sides of input. Default: 0 + dilation (int or tuple, optional): a parameter that controls the + stride of elements within the + neighborhood. Default: 1 + + * If :attr:`output_size`, :attr:`kernel_size`, :attr:`dilation`, + :attr:`padding` or :attr:`stride` is an int or a tuple of length 1 then + their values will be replicated across all spatial dimensions. + + * For the case of two output spatial dimensions this operation is sometimes + called ``col2im``. + + .. note:: + :class:`~torch.nn.Fold` calculates each combined value in the resulting + large tensor by summing all values from all containing blocks. + :class:`~torch.nn.Unfold` extracts the values in the local blocks by + copying from the large tensor. So, if the blocks overlap, they are not + inverses of each other. + + .. warning:: + Currently, only 4-D output tensors (batched image-like tensors) are + supported. 
+ + Shape: + - Input: :math:`(N, C \times \prod(\text{kernel\_size}), L)` + - Output: :math:`(N, C, \text{output\_size}[0], \text{output\_size}[1], \dots)` as described above + + Examples:: + + >>> fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 2)) + >>> input = torch.randn(1, 3 * 2 * 2, 12) + >>> output = fold(input) + >>> output.size() + torch.Size([1, 3, 4, 5]) + + .. _link: + https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md + + """ + __constants__ = ['output_size', 'kernel_size', 'dilation', 'padding', + 'stride'] + + def __init__(self, output_size, kernel_size, dilation=1, padding=0, stride=1): + super(Fold, self).__init__() + self.output_size = output_size + self.kernel_size = kernel_size + self.dilation = dilation + self.padding = padding + self.stride = stride + + def forward(self, input): + return F.fold(input, self.output_size, self.kernel_size, self.dilation, + self.padding, self.stride) + + def extra_repr(self): + return 'output_size={output_size}, kernel_size={kernel_size}, ' \ + 'dilation={dilation}, padding={padding}, stride={stride}'.format( + **self.__dict__ + )
    + + +
+class Unfold(Module):
+    r"""Extracts sliding local blocks from a batched input tensor.
+
+    Consider a batched :attr:`input` tensor of shape :math:`(N, C, *)`,
+    where :math:`N` is the batch dimension, :math:`C` is the channel dimension,
+    and :math:`*` represents arbitrary spatial dimensions. This operation flattens
+    each sliding :attr:`kernel_size`-sized block within the spatial dimensions
+    of :attr:`input` into a column (i.e., last dimension) of a 3-D :attr:`output`
+    tensor of shape :math:`(N, C \times \prod(\text{kernel\_size}), L)`, where
+    :math:`C \times \prod(\text{kernel\_size})` is the total number of values
+    within each block (a block has :math:`\prod(\text{kernel\_size})` spatial
+    locations each containing a :math:`C`-channeled vector), and :math:`L` is
+    the total number of such blocks:
+
+    .. math::
+        L = \prod_d \left\lfloor\frac{\text{spatial\_size}[d] + 2 \times \text{padding}[d] %
            - \text{dilation}[d] \times (\text{kernel\_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor,
+
+    where :math:`\text{spatial\_size}` is formed by the spatial dimensions
+    of :attr:`input` (:math:`*` above), and :math:`d` is over all spatial
+    dimensions.
+
+    Therefore, indexing :attr:`output` at the last dimension (column dimension)
+    gives all values within a certain block.
+
+    The :attr:`padding`, :attr:`stride` and :attr:`dilation` arguments specify
+    how the sliding blocks are retrieved.
+
+    * :attr:`stride` controls the stride for the sliding blocks.
+
+    * :attr:`padding` controls the amount of implicit zero-paddings on both
+      sides for :attr:`padding` number of points for each dimension before
+      reshaping.
+
+    * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
+      It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
+
+    Args:
+        kernel_size (int or tuple): the size of the sliding blocks
+        stride (int or tuple, optional): the stride of the sliding blocks in the input
+            spatial dimensions. Default: 1
+        padding (int or tuple, optional): implicit zero padding to be added on
+            both sides of input. Default: 0
+        dilation (int or tuple, optional): a parameter that controls the
+            stride of elements within the neighborhood. Default: 1
+
+    * If :attr:`kernel_size`, :attr:`dilation`, :attr:`padding` or
+      :attr:`stride` is an int or a tuple of length 1, their values will be
+      replicated across all spatial dimensions.
+
+    * For the case of two input spatial dimensions this operation is sometimes
+      called ``im2col``.
+
+    .. note::
+        :class:`~torch.nn.Fold` calculates each combined value in the resulting
+        large tensor by summing all values from all containing blocks.
+        :class:`~torch.nn.Unfold` extracts the values in the local blocks by
+        copying from the large tensor. So, if the blocks overlap, they are not
+        inverses of each other.
+
+    .. warning::
+        Currently, only 4-D input tensors (batched image-like tensors) are
+        supported.
+
+    Shape:
+        - Input: :math:`(N, C, *)`
+        - Output: :math:`(N, C \times \prod(\text{kernel\_size}), L)` as described above
+
+    Examples::
+
+        >>> unfold = nn.Unfold(kernel_size=(2, 3))
+        >>> input = torch.randn(2, 5, 3, 4)
+        >>> output = unfold(input)
+        >>> # each patch contains 30 values (2x3=6 vectors, each of 5 channels)
+        >>> # 4 blocks (2x3 kernels) in total in the 3x4 input
+        >>> output.size()
+        torch.Size([2, 30, 4])
+
+        >>> # Convolution is equivalent to Unfold + Matrix Multiplication + Fold (or view to output shape)
+        >>> inp = torch.randn(1, 3, 10, 12)
+        >>> w = torch.randn(2, 3, 4, 5)
+        >>> inp_unf = torch.nn.functional.unfold(inp, (4, 5))
+        >>> out_unf = inp_unf.transpose(1, 2).matmul(w.view(w.size(0), -1).t()).transpose(1, 2)
+        >>> out = torch.nn.functional.fold(out_unf, (7, 8), (1, 1))
+        >>> # or equivalently (and avoiding a copy),
+        >>> # out = out_unf.view(1, 2, 7, 8)
+        >>> (torch.nn.functional.conv2d(inp, w) - out).abs().max()
+        tensor(1.9073e-06)
+
+    .. _link:
+        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
+
+    """
+    __constants__ = ['kernel_size', 'dilation', 'padding', 'stride']
+
+    def __init__(self, kernel_size, dilation=1, padding=0, stride=1):
+        super(Unfold, self).__init__()
+        self.kernel_size = kernel_size
+        self.dilation = dilation
+        self.padding = padding
+        self.stride = stride
+
+    def forward(self, input):
+        return F.unfold(input, self.kernel_size, self.dilation,
+                        self.padding, self.stride)
+
+    def extra_repr(self):
+        return 'kernel_size={kernel_size}, dilation={dilation}, padding={padding},' \
+            ' stride={stride}'.format(**self.__dict__)
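+# Illustrative sketch of the note above: with overlapping blocks,
+# fold(unfold(x)) sums each input position once per block covering it, so the
+# two ops are not inverses (here every position of a 3x3 ones tensor is
+# covered by one, two, or four of the 2x2 blocks)::
+#
+#     >>> x = torch.ones(1, 1, 3, 3)
+#     >>> blocks = F.unfold(x, kernel_size=2)   # 4 overlapping 2x2 blocks
+#     >>> F.fold(blocks, output_size=(3, 3), kernel_size=2)
+#     tensor([[[[1., 2., 1.],
+#               [2., 4., 2.],
+#               [1., 2., 1.]]]])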
    +
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/modules/instancenorm.html b/docs/stable/_modules/torch/nn/modules/instancenorm.html
new file mode 100644
index 000000000000..b7f9f6b25f85
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/modules/instancenorm.html
@@ -0,0 +1,792 @@
+torch.nn.modules.instancenorm — PyTorch master documentation

    Source code for torch.nn.modules.instancenorm

    +from .batchnorm import _BatchNorm
    +from .. import functional as F
    +
    +
    +class _InstanceNorm(_BatchNorm):
    +    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=False,
    +                 track_running_stats=False):
    +        super(_InstanceNorm, self).__init__(
    +            num_features, eps, momentum, affine, track_running_stats)
    +
    +    def _check_input_dim(self, input):
    +        raise NotImplementedError
    +
    +    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
    +                              missing_keys, unexpected_keys, error_msgs):
    +        version = local_metadata.get('version', None)
    +        # at version 1: removed running_mean and running_var when
    +        # track_running_stats=False (default)
    +        if version is None and not self.track_running_stats:
    +            running_stats_keys = []
    +            for name in ('running_mean', 'running_var'):
    +                key = prefix + name
    +                if key in state_dict:
    +                    running_stats_keys.append(key)
    +            if len(running_stats_keys) > 0:
    +                error_msgs.append(
    +                    'Unexpected running stats buffer(s) {names} for {klass} '
    +                    'with track_running_stats=False. If state_dict is a '
    +                    'checkpoint saved before 0.4.0, this may be expected '
    +                    'because {klass} does not track running stats by default '
    +                    'since 0.4.0. Please remove these keys from state_dict. If '
    +                    'the running stats are actually needed, instead set '
    +                    'track_running_stats=True in {klass} to enable them. See '
    +                    'the documentation of {klass} for details.'
    +                    .format(names=" and ".join('"{}"'.format(k) for k in running_stats_keys),
    +                            klass=self.__class__.__name__))
    +                for key in running_stats_keys:
    +                    state_dict.pop(key)
    +
    +        super(_InstanceNorm, self)._load_from_state_dict(
    +            state_dict, prefix, local_metadata, strict,
    +            missing_keys, unexpected_keys, error_msgs)
    +
    +    def forward(self, input):
    +        self._check_input_dim(input)
    +
    +        return F.instance_norm(
    +            input, self.running_mean, self.running_var, self.weight, self.bias,
    +            self.training or not self.track_running_stats, self.momentum, self.eps)
    +
    +
    +
+class InstanceNorm1d(_InstanceNorm):
+    r"""Applies Instance Normalization over a 3D input (a mini-batch of 1D
+    inputs with optional additional channel dimension) as described in the paper
+    `Instance Normalization: The Missing Ingredient for Fast Stylization`_ .
+
+    .. math::
+
+        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
+
+    The mean and standard-deviation are calculated per-dimension separately
+    for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
+    of size `C` (where `C` is the input size) if :attr:`affine` is ``True``.
+
+    By default, this layer uses instance statistics computed from input data in
+    both training and evaluation modes.
+
+    If :attr:`track_running_stats` is set to ``True``, during training this
+    layer keeps running estimates of its computed mean and variance, which are
+    then used for normalization during evaluation. The running estimates are
+    kept with a default :attr:`momentum` of 0.1.
+
+    .. note::
+        This :attr:`momentum` argument is different from one used in optimizer
+        classes and the conventional notion of momentum. Mathematically, the
+        update rule for running statistics here is
+        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
+        where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
+        new observed value.
+
+    .. note::
+        :class:`InstanceNorm1d` and :class:`LayerNorm` are very similar, but
+        have some subtle differences. :class:`InstanceNorm1d` is applied
+        on each channel of channeled data like multidimensional time series, but
+        :class:`LayerNorm` is usually applied on an entire sample and often in NLP
+        tasks. Additionally, :class:`LayerNorm` applies an elementwise affine
+        transform, while :class:`InstanceNorm1d` usually does not apply an affine
+        transform.
+
+    Args:
+        num_features: :math:`C` from an expected input of size
+            :math:`(N, C, L)` or :math:`L` from input of size :math:`(N, L)`
+        eps: a value added to the denominator for numerical stability. Default: 1e-5
+        momentum: the value used for the running_mean and running_var computation. Default: 0.1
+        affine: a boolean value that when set to ``True``, this module has
+            learnable affine parameters, initialized the same way as done for batch normalization.
+            Default: ``False``.
+        track_running_stats: a boolean value that when set to ``True``, this
+            module tracks the running mean and variance, and when set to ``False``,
+            this module does not track such statistics and always uses batch
+            statistics in both training and eval modes. Default: ``False``
+
+    Shape:
+        - Input: :math:`(N, C, L)`
+        - Output: :math:`(N, C, L)` (same shape as input)
+
+    Examples::
+
+        >>> # Without Learnable Parameters
+        >>> m = nn.InstanceNorm1d(100)
+        >>> # With Learnable Parameters
+        >>> m = nn.InstanceNorm1d(100, affine=True)
+        >>> input = torch.randn(20, 100, 40)
+        >>> output = m(input)
+
+    .. _`Instance Normalization: The Missing Ingredient for Fast Stylization`:
+        https://arxiv.org/abs/1607.08022
+    """
+
+    def _check_input_dim(self, input):
+        if input.dim() == 2:
+            raise ValueError(
+                'InstanceNorm1d returns 0-filled tensor to 2D tensor. '
+                'This is because InstanceNorm1d reshapes inputs to '
+                '(1, N * C, ...) from (N, C, ...) and this makes '
+                'variances 0.'
+            )
+        if input.dim() != 3:
+            raise ValueError('expected 3D input (got {}D input)'
+                             .format(input.dim()))
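+# Illustrative sketch: with the default settings, every (instance, channel)
+# slice is normalized with its own (biased-variance) statistics, matching the
+# formula above::
+#
+#     >>> m = nn.InstanceNorm1d(3)
+#     >>> x = torch.randn(2, 3, 5)
+#     >>> mean = x.mean(dim=2, keepdim=True)
+#     >>> var = x.var(dim=2, unbiased=False, keepdim=True)
+#     >>> torch.allclose(m(x), (x - mean) / torch.sqrt(var + m.eps), atol=1e-5)
+#     True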
    + + +
+class InstanceNorm2d(_InstanceNorm):
+    r"""Applies Instance Normalization over a 4D input (a mini-batch of 2D inputs
+    with additional channel dimension) as described in the paper
+    `Instance Normalization: The Missing Ingredient for Fast Stylization`_ .
+
+    .. math::
+
+        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
+
+    The mean and standard-deviation are calculated per-dimension separately
+    for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
+    of size `C` (where `C` is the input size) if :attr:`affine` is ``True``.
+
+    By default, this layer uses instance statistics computed from input data in
+    both training and evaluation modes.
+
+    If :attr:`track_running_stats` is set to ``True``, during training this
+    layer keeps running estimates of its computed mean and variance, which are
+    then used for normalization during evaluation. The running estimates are
+    kept with a default :attr:`momentum` of 0.1.
+
+    .. note::
+        This :attr:`momentum` argument is different from one used in optimizer
+        classes and the conventional notion of momentum. Mathematically, the
+        update rule for running statistics here is
+        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
+        where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
+        new observed value.
+
+    .. note::
+        :class:`InstanceNorm2d` and :class:`LayerNorm` are very similar, but
+        have some subtle differences. :class:`InstanceNorm2d` is applied
+        on each channel of channeled data like RGB images, but
+        :class:`LayerNorm` is usually applied on an entire sample and often in NLP
+        tasks. Additionally, :class:`LayerNorm` applies an elementwise affine
+        transform, while :class:`InstanceNorm2d` usually does not apply an affine
+        transform.
+
+    Args:
+        num_features: :math:`C` from an expected input of size
+            :math:`(N, C, H, W)`
+        eps: a value added to the denominator for numerical stability. Default: 1e-5
+        momentum: the value used for the running_mean and running_var computation. Default: 0.1
+        affine: a boolean value that when set to ``True``, this module has
+            learnable affine parameters, initialized the same way as done for batch normalization.
+            Default: ``False``.
+        track_running_stats: a boolean value that when set to ``True``, this
+            module tracks the running mean and variance, and when set to ``False``,
+            this module does not track such statistics and always uses batch
+            statistics in both training and eval modes. Default: ``False``
+
+    Shape:
+        - Input: :math:`(N, C, H, W)`
+        - Output: :math:`(N, C, H, W)` (same shape as input)
+
+    Examples::
+
+        >>> # Without Learnable Parameters
+        >>> m = nn.InstanceNorm2d(100)
+        >>> # With Learnable Parameters
+        >>> m = nn.InstanceNorm2d(100, affine=True)
+        >>> input = torch.randn(20, 100, 35, 45)
+        >>> output = m(input)
+
+    .. _`Instance Normalization: The Missing Ingredient for Fast Stylization`:
+        https://arxiv.org/abs/1607.08022
+    """
+
+    def _check_input_dim(self, input):
+        if input.dim() != 4:
+            raise ValueError('expected 4D input (got {}D input)'
+                             .format(input.dim()))
    + + +
+class InstanceNorm3d(_InstanceNorm):
+    r"""Applies Instance Normalization over a 5D input (a mini-batch of 3D inputs
+    with additional channel dimension) as described in the paper
+    `Instance Normalization: The Missing Ingredient for Fast Stylization`_ .
+
+    .. math::
+
+        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
+
+    The mean and standard-deviation are calculated per-dimension separately
+    for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
+    of size `C` (where `C` is the input size) if :attr:`affine` is ``True``.
+
+    By default, this layer uses instance statistics computed from input data in
+    both training and evaluation modes.
+
+    If :attr:`track_running_stats` is set to ``True``, during training this
+    layer keeps running estimates of its computed mean and variance, which are
+    then used for normalization during evaluation. The running estimates are
+    kept with a default :attr:`momentum` of 0.1.
+
+    .. note::
+        This :attr:`momentum` argument is different from one used in optimizer
+        classes and the conventional notion of momentum. Mathematically, the
+        update rule for running statistics here is
+        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
+        where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
+        new observed value.
+
+    .. note::
+        :class:`InstanceNorm3d` and :class:`LayerNorm` are very similar, but
+        have some subtle differences. :class:`InstanceNorm3d` is applied
+        on each channel of channeled data like 3D models with RGB color, but
+        :class:`LayerNorm` is usually applied on an entire sample and often in NLP
+        tasks. Additionally, :class:`LayerNorm` applies an elementwise affine
+        transform, while :class:`InstanceNorm3d` usually does not apply an affine
+        transform.
+
+    Args:
+        num_features: :math:`C` from an expected input of size
+            :math:`(N, C, D, H, W)`
+        eps: a value added to the denominator for numerical stability. Default: 1e-5
+        momentum: the value used for the running_mean and running_var computation. Default: 0.1
+        affine: a boolean value that when set to ``True``, this module has
+            learnable affine parameters, initialized the same way as done for batch normalization.
+            Default: ``False``.
+        track_running_stats: a boolean value that when set to ``True``, this
+            module tracks the running mean and variance, and when set to ``False``,
+            this module does not track such statistics and always uses batch
+            statistics in both training and eval modes. Default: ``False``
+
+    Shape:
+        - Input: :math:`(N, C, D, H, W)`
+        - Output: :math:`(N, C, D, H, W)` (same shape as input)
+
+    Examples::
+
+        >>> # Without Learnable Parameters
+        >>> m = nn.InstanceNorm3d(100)
+        >>> # With Learnable Parameters
+        >>> m = nn.InstanceNorm3d(100, affine=True)
+        >>> input = torch.randn(20, 100, 35, 45, 10)
+        >>> output = m(input)
+
+    .. _`Instance Normalization: The Missing Ingredient for Fast Stylization`:
+        https://arxiv.org/abs/1607.08022
+    """
+
+    def _check_input_dim(self, input):
+        if input.dim() != 5:
+            raise ValueError('expected 5D input (got {}D input)'
+                             .format(input.dim()))
    +
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/modules/linear.html b/docs/stable/_modules/torch/nn/modules/linear.html
new file mode 100644
index 000000000000..dd285cf14ce0
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/modules/linear.html
@@ -0,0 +1,676 @@
+torch.nn.modules.linear — PyTorch master documentation

    Source code for torch.nn.modules.linear

    +import math
    +
    +import torch
    +from torch.nn.parameter import Parameter
    +from .. import functional as F
    +from .. import init
    +from .module import Module
    +
    +
    +
    [docs]class Identity(Module): + r"""A placeholder identity operator that is argument-insensitive. + + Args: + args: any argument (unused) + kwargs: any keyword argument (unused) + + Examples:: + + >>> m = nn.Identity(54, unused_argument1=0.1, unused_argument2=False) + >>> input = torch.randn(128, 20) + >>> output = m(input) + >>> print(output.size()) + torch.Size([128, 20]) + + """ + def __init__(self, *args, **kwargs): + super(Identity, self).__init__() + + def forward(self, input): + return input
    + + +
    [docs]class Linear(Module): + r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b` + + Args: + in_features: size of each input sample + out_features: size of each output sample + bias: If set to ``False``, the layer will not learn an additive bias. + Default: ``True`` + + Shape: + - Input: :math:`(N, *, H_{in})` where :math:`*` means any number of + additional dimensions and :math:`H_{in} = \text{in\_features}` + - Output: :math:`(N, *, H_{out})` where all but the last dimension + are the same shape as the input and :math:`H_{out} = \text{out\_features}`. + + Attributes: + weight: the learnable weights of the module of shape + :math:`(\text{out\_features}, \text{in\_features})`. The values are + initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where + :math:`k = \frac{1}{\text{in\_features}}` + bias: the learnable bias of the module of shape :math:`(\text{out\_features})`. + If :attr:`bias` is ``True``, the values are initialized from + :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where + :math:`k = \frac{1}{\text{in\_features}}` + + Examples:: + + >>> m = nn.Linear(20, 30) + >>> input = torch.randn(128, 20) + >>> output = m(input) + >>> print(output.size()) + torch.Size([128, 30]) + """ + __constants__ = ['bias', 'in_features', 'out_features'] + + def __init__(self, in_features, out_features, bias=True): + super(Linear, self).__init__() + self.in_features = in_features + self.out_features = out_features + self.weight = Parameter(torch.Tensor(out_features, in_features)) + if bias: + self.bias = Parameter(torch.Tensor(out_features)) + else: + self.register_parameter('bias', None) + self.reset_parameters() + + def reset_parameters(self): + init.kaiming_uniform_(self.weight, a=math.sqrt(5)) + if self.bias is not None: + fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight) + bound = 1 / math.sqrt(fan_in) + init.uniform_(self.bias, -bound, bound) + + def forward(self, input): + return F.linear(input, self.weight, self.bias) + + def extra_repr(self): + return 'in_features={}, out_features={}, bias={}'.format( + self.in_features, self.out_features, self.bias is not None + )
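+# Illustrative sketch: the transformation :math:`y = xA^T + b` written out by
+# hand and checked against the module::
+#
+#     >>> m = nn.Linear(20, 30)
+#     >>> x = torch.randn(128, 20)
+#     >>> torch.allclose(m(x), x.matmul(m.weight.t()) + m.bias, atol=1e-6)
+#     True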
    + + +
    [docs]class Bilinear(Module): + r"""Applies a bilinear transformation to the incoming data: + :math:`y = x_1 A x_2 + b` + + Args: + in1_features: size of each first input sample + in2_features: size of each second input sample + out_features: size of each output sample + bias: If set to False, the layer will not learn an additive bias. + Default: ``True`` + + Shape: + - Input1: :math:`(N, *, H_{in1})` where :math:`H_{in1}=\text{in1\_features}` and + :math:`*` means any number of additional dimensions. All but the last dimension + of the inputs should be the same. + - Input2: :math:`(N, *, H_{in2})` where :math:`H_{in2}=\text{in2\_features}`. + - Output: :math:`(N, *, H_{out})` where :math:`H_{out}=\text{out\_features}` + and all but the last dimension are the same shape as the input. + + Attributes: + weight: the learnable weights of the module of shape + :math:`(\text{out\_features}, \text{in1\_features}, \text{in2\_features})`. + The values are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where + :math:`k = \frac{1}{\text{in1\_features}}` + bias: the learnable bias of the module of shape :math:`(\text{out\_features})`. + If :attr:`bias` is ``True``, the values are initialized from + :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where + :math:`k = \frac{1}{\text{in1\_features}}` + + Examples:: + + >>> m = nn.Bilinear(20, 30, 40) + >>> input1 = torch.randn(128, 20) + >>> input2 = torch.randn(128, 30) + >>> output = m(input1, input2) + >>> print(output.size()) + torch.Size([128, 40]) + """ + __constants__ = ['in1_features', 'in2_features', 'out_features', 'bias'] + + def __init__(self, in1_features, in2_features, out_features, bias=True): + super(Bilinear, self).__init__() + self.in1_features = in1_features + self.in2_features = in2_features + self.out_features = out_features + self.weight = Parameter(torch.Tensor(out_features, in1_features, in2_features)) + + if bias: + self.bias = Parameter(torch.Tensor(out_features)) + else: + self.register_parameter('bias', None) + self.reset_parameters() + + def reset_parameters(self): + bound = 1 / math.sqrt(self.weight.size(1)) + init.uniform_(self.weight, -bound, bound) + if self.bias is not None: + init.uniform_(self.bias, -bound, bound) + + def forward(self, input1, input2): + return F.bilinear(input1, input2, self.weight, self.bias) + + def extra_repr(self): + return 'in1_features={}, in2_features={}, out_features={}, bias={}'.format( + self.in1_features, self.in2_features, self.out_features, self.bias is not None + )
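+# Illustrative sketch: :math:`y = x_1 A x_2 + b` as an explicit contraction
+# over the weight of shape (out_features, in1_features, in2_features)::
+#
+#     >>> m = nn.Bilinear(20, 30, 40)
+#     >>> x1, x2 = torch.randn(128, 20), torch.randn(128, 30)
+#     >>> manual = torch.einsum('bi,oij,bj->bo', x1, m.weight, x2) + m.bias
+#     >>> torch.allclose(m(x1, x2), manual, atol=1e-4)
+#     True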
    + +# TODO: PartialLinear - maybe in sparse? +
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/modules/loss.html b/docs/stable/_modules/torch/nn/modules/loss.html
new file mode 100644
index 000000000000..e01080179285
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/modules/loss.html
@@ -0,0 +1,1813 @@
+torch.nn.modules.loss — PyTorch master documentation

    Source code for torch.nn.modules.loss

    +import warnings
    +
    +from .module import Module
    +from .. import functional as F
    +from .. import _reduction as _Reduction
    +
    +
    +class _Loss(Module):
    +    def __init__(self, size_average=None, reduce=None, reduction='mean'):
    +        super(_Loss, self).__init__()
    +        if size_average is not None or reduce is not None:
    +            self.reduction = _Reduction.legacy_get_string(size_average, reduce)
    +        else:
    +            self.reduction = reduction
    +
    +
    +class _WeightedLoss(_Loss):
    +    def __init__(self, weight=None, size_average=None, reduce=None, reduction='mean'):
    +        super(_WeightedLoss, self).__init__(size_average, reduce, reduction)
    +        self.register_buffer('weight', weight)
    +
    +
    +
    [docs]class L1Loss(_Loss): + r"""Creates a criterion that measures the mean absolute error (MAE) between each element in + the input :math:`x` and target :math:`y`. + + The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as: + + .. math:: + \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad + l_n = \left| x_n - y_n \right|, + + where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'`` + (default ``'mean'``), then: + + .. math:: + \ell(x, y) = + \begin{cases} + \operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\ + \operatorname{sum}(L), & \text{if reduction} = \text{'sum'.} + \end{cases} + + :math:`x` and :math:`y` are tensors of arbitrary shapes with a total + of :math:`n` elements each. + + The sum operation still operates over all the elements, and divides by :math:`n`. + + The division by :math:`n` can be avoided if one sets ``reduction = 'sum'``. + + Args: + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there are multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` + + Shape: + - Input: :math:`(N, *)` where :math:`*` means, any number of additional + dimensions + - Target: :math:`(N, *)`, same shape as the input + - Output: scalar. If :attr:`reduction` is ``'none'``, then + :math:`(N, *)`, same shape as the input + + Examples:: + + >>> loss = nn.L1Loss() + >>> input = torch.randn(3, 5, requires_grad=True) + >>> target = torch.randn(3, 5) + >>> output = loss(input, target) + >>> output.backward() + """ + __constants__ = ['reduction'] + + def __init__(self, size_average=None, reduce=None, reduction='mean'): + super(L1Loss, self).__init__(size_average, reduce, reduction) + + def forward(self, input, target): + return F.l1_loss(input, target, reduction=self.reduction)
    + + +
    [docs]class NLLLoss(_WeightedLoss): + r"""The negative log likelihood loss. It is useful to train a classification + problem with `C` classes. + + If provided, the optional argument :attr:`weight` should be a 1D Tensor assigning + weight to each of the classes. This is particularly useful when you have an + unbalanced training set. + + The `input` given through a forward call is expected to contain + log-probabilities of each class. `input` has to be a Tensor of size either + :math:`(minibatch, C)` or :math:`(minibatch, C, d_1, d_2, ..., d_K)` + with :math:`K \geq 1` for the `K`-dimensional case (described later). + + Obtaining log-probabilities in a neural network is easily achieved by + adding a `LogSoftmax` layer in the last layer of your network. + You may use `CrossEntropyLoss` instead, if you prefer not to add an extra + layer. + + The `target` that this loss expects should be a class index in the range :math:`[0, C-1]` + where `C = number of classes`; if `ignore_index` is specified, this loss also accepts + this class index (this index may not necessarily be in the class range). + + The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as: + + .. math:: + \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad + l_n = - w_{y_n} x_{n,y_n}, \quad + w_{c} = \text{weight}[c] \cdot \mathbb{1}\{c \not= \text{ignore\_index}\}, + + where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'`` + (default ``'mean'``), then + + .. math:: + \ell(x, y) = \begin{cases} + \sum_{n=1}^N \frac{1}{\sum_{n=1}^N w_{y_n}} l_n, & + \text{if reduction} = \text{'mean';}\\ + \sum_{n=1}^N l_n, & + \text{if reduction} = \text{'sum'.} + \end{cases} + + Can also be used for higher dimension inputs, such as 2D images, by providing + an input of size :math:`(minibatch, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1`, + where :math:`K` is the number of dimensions, and a target of appropriate shape + (see below). In the case of images, it computes NLL loss per-pixel. + + Args: + weight (Tensor, optional): a manual rescaling weight given to each + class. If given, it has to be a Tensor of size `C`. Otherwise, it is + treated as if having all ones. + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there are multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + ignore_index (int, optional): Specifies a target value that is ignored + and does not contribute to the input gradient. When + :attr:`size_average` is ``True``, the loss is averaged over + non-ignored targets. + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. 
Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` + + Shape: + - Input: :math:`(N, C)` where `C = number of classes`, or + :math:`(N, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1` + in the case of `K`-dimensional loss. + - Target: :math:`(N)` where each value is :math:`0 \leq \text{targets}[i] \leq C-1`, or + :math:`(N, d_1, d_2, ..., d_K)` with :math:`K \geq 1` in the case of + K-dimensional loss. + - Output: scalar. + If :attr:`reduction` is ``'none'``, then the same size as the target: :math:`(N)`, or + :math:`(N, d_1, d_2, ..., d_K)` with :math:`K \geq 1` in the case + of K-dimensional loss. + + Examples:: + + >>> m = nn.LogSoftmax(dim=1) + >>> loss = nn.NLLLoss() + >>> # input is of size N x C = 3 x 5 + >>> input = torch.randn(3, 5, requires_grad=True) + >>> # each element in target has to have 0 <= value < C + >>> target = torch.tensor([1, 0, 4]) + >>> output = loss(m(input), target) + >>> output.backward() + >>> + >>> + >>> # 2D loss example (used, for example, with image inputs) + >>> N, C = 5, 4 + >>> loss = nn.NLLLoss() + >>> # input is of size N x C x height x width + >>> data = torch.randn(N, 16, 10, 10) + >>> conv = nn.Conv2d(16, C, (3, 3)) + >>> m = nn.LogSoftmax(dim=1) + >>> # each element in target has to have 0 <= value < C + >>> target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C) + >>> output = loss(m(conv(data)), target) + >>> output.backward() + """ + __constants__ = ['ignore_index', 'weight', 'reduction'] + + def __init__(self, weight=None, size_average=None, ignore_index=-100, + reduce=None, reduction='mean'): + super(NLLLoss, self).__init__(weight, size_average, reduce, reduction) + self.ignore_index = ignore_index + + def forward(self, input, target): + return F.nll_loss(input, target, weight=self.weight, ignore_index=self.ignore_index, reduction=self.reduction)
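+# Illustrative sketch: with no class weights and the default 'mean' reduction,
+# the formula above reduces to the mean of the picked-out negative
+# log-probabilities::
+#
+#     >>> log_probs = nn.LogSoftmax(dim=1)(torch.randn(3, 5))
+#     >>> target = torch.tensor([1, 0, 4])
+#     >>> manual = -log_probs[torch.arange(3), target].mean()
+#     >>> torch.allclose(nn.NLLLoss()(log_probs, target), manual)
+#     True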
    + + +class NLLLoss2d(NLLLoss): + def __init__(self, weight=None, size_average=None, ignore_index=-100, + reduce=None, reduction='mean'): + warnings.warn("NLLLoss2d has been deprecated. " + "Please use NLLLoss instead as a drop-in replacement and see " + "https://pytorch.org/docs/master/nn.html#torch.nn.NLLLoss for more details.") + super(NLLLoss2d, self).__init__(weight, size_average, ignore_index, reduce, reduction) + + +
    [docs]class PoissonNLLLoss(_Loss): + r"""Negative log likelihood loss with Poisson distribution of target. + + The loss can be described as: + + .. math:: + \text{target} \sim \mathrm{Poisson}(\text{input}) + + \text{loss}(\text{input}, \text{target}) = \text{input} - \text{target} * \log(\text{input}) + + \log(\text{target!}) + + The last term can be omitted or approximated with Stirling formula. The + approximation is used for target values more than 1. For targets less or + equal to 1 zeros are added to the loss. + + Args: + log_input (bool, optional): if ``True`` the loss is computed as + :math:`\exp(\text{input}) - \text{target}*\text{input}`, if ``False`` the loss is + :math:`\text{input} - \text{target}*\log(\text{input}+\text{eps})`. + full (bool, optional): whether to compute full loss, i. e. to add the + Stirling approximation term + + .. math:: + \text{target}*\log(\text{target}) - \text{target} + 0.5 * \log(2\pi\text{target}). + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there are multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + eps (float, optional): Small value to avoid evaluation of :math:`\log(0)` when + :attr:`log_input = False`. Default: 1e-8 + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` + + Examples:: + + >>> loss = nn.PoissonNLLLoss() + >>> log_input = torch.randn(5, 2, requires_grad=True) + >>> target = torch.randn(5, 2) + >>> output = loss(log_input, target) + >>> output.backward() + + Shape: + - Input: :math:`(N, *)` where :math:`*` means, any number of additional + dimensions + - Target: :math:`(N, *)`, same shape as the input + - Output: scalar by default. If :attr:`reduction` is ``'none'``, then :math:`(N, *)`, + the same shape as the input + """ + __constants__ = ['log_input', 'full', 'eps', 'reduction'] + + def __init__(self, log_input=True, full=False, size_average=None, + eps=1e-8, reduce=None, reduction='mean'): + super(PoissonNLLLoss, self).__init__(size_average, reduce, reduction) + self.log_input = log_input + self.full = full + self.eps = eps + + def forward(self, log_input, target): + return F.poisson_nll_loss(log_input, target, log_input=self.log_input, full=self.full, + eps=self.eps, reduction=self.reduction)
    + + +
    [docs]class KLDivLoss(_Loss): + r"""The `Kullback-Leibler divergence`_ Loss + + KL divergence is a useful distance measure for continuous distributions + and is often useful when performing direct regression over the space of + (discretely sampled) continuous output distributions. + + As with :class:`~torch.nn.NLLLoss`, the `input` given is expected to contain + *log-probabilities* and is not restricted to a 2D Tensor. + The targets are given as *probabilities* (i.e. without taking the logarithm). + + This criterion expects a `target` `Tensor` of the same size as the + `input` `Tensor`. + + The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as: + + .. math:: + l(x,y) = L = \{ l_1,\dots,l_N \}, \quad + l_n = y_n \cdot \left( \log y_n - x_n \right) + + where the index :math:`N` spans all dimensions of ``input`` and :math:`L` has the same + shape as ``input``. If :attr:`reduction` is not ``'none'`` (default ``'mean'``), then: + + .. math:: + \ell(x, y) = \begin{cases} + \operatorname{mean}(L), & \text{if reduction} = \text{'mean';} \\ + \operatorname{sum}(L), & \text{if reduction} = \text{'sum'.} + \end{cases} + + In default :attr:`reduction` mode ``'mean'``, the losses are averaged for each minibatch over observations + **as well as** over dimensions. ``'batchmean'`` mode gives the correct KL divergence where losses + are averaged over batch dimension only. ``'mean'`` mode's behavior will be changed to the same as + ``'batchmean'`` in the next major release. + + .. _Kullback-Leibler divergence: + https://en.wikipedia.org/wiki/Kullback-Leibler_divergence + + Args: + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there are multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'batchmean'`` | ``'sum'`` | ``'mean'``. + ``'none'``: no reduction will be applied. + ``'batchmean'``: the sum of the output will be divided by batchsize. + ``'sum'``: the output will be summed. + ``'mean'``: the output will be divided by the number of elements in the output. + Default: ``'mean'`` + + .. note:: + :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, + and in the meantime, specifying either of those two args will override :attr:`reduction`. + + .. note:: + :attr:`reduction` = ``'mean'`` doesn't return the true kl divergence value, please use + :attr:`reduction` = ``'batchmean'`` which aligns with KL math definition. + In the next major release, ``'mean'`` will be changed to be the same as ``'batchmean'``. + + Shape: + - Input: :math:`(N, *)` where :math:`*` means, any number of additional + dimensions + - Target: :math:`(N, *)`, same shape as the input + - Output: scalar by default. 
If :attr:`reduction` is ``'none'``, then :math:`(N, *)`, + the same shape as the input + + """ + __constants__ = ['reduction'] + + def __init__(self, size_average=None, reduce=None, reduction='mean'): + super(KLDivLoss, self).__init__(size_average, reduce, reduction) + + def forward(self, input, target): + return F.kl_div(input, target, reduction=self.reduction)
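+ Example (a minimal sketch; assumes ``import torch`` and ``import torch.nn as nn``; the input carries log-probabilities, the target plain probabilities)::
+
+     >>> loss = nn.KLDivLoss(reduction='batchmean')
+     >>> # log-probabilities over 5 classes for a batch of 3
+     >>> input = torch.log_softmax(torch.randn(3, 5, requires_grad=True), dim=1)
+     >>> # probabilities (each row sums to 1)
+     >>> target = torch.softmax(torch.randn(3, 5), dim=1)
+     >>> output = loss(input, target)
+     >>> output.backward()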
    + + +
    [docs]class MSELoss(_Loss): + r"""Creates a criterion that measures the mean squared error (squared L2 norm) between + each element in the input :math:`x` and target :math:`y`. + + The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as: + + .. math:: + \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad + l_n = \left( x_n - y_n \right)^2, + + where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'`` + (default ``'mean'``), then: + + .. math:: + \ell(x, y) = + \begin{cases} + \operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\ + \operatorname{sum}(L), & \text{if reduction} = \text{'sum'.} + \end{cases} + + :math:`x` and :math:`y` are tensors of arbitrary shapes with a total + of :math:`n` elements each. + + The sum operation still operates over all the elements, and divides by :math:`n`. + + The division by :math:`n` can be avoided if one sets ``reduction = 'sum'``. + + Args: + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there are multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` + + Shape: + - Input: :math:`(N, *)` where :math:`*` means, any number of additional + dimensions + - Target: :math:`(N, *)`, same shape as the input + + Examples:: + + >>> loss = nn.MSELoss() + >>> input = torch.randn(3, 5, requires_grad=True) + >>> target = torch.randn(3, 5) + >>> output = loss(input, target) + >>> output.backward() + """ + __constants__ = ['reduction'] + + def __init__(self, size_average=None, reduce=None, reduction='mean'): + super(MSELoss, self).__init__(size_average, reduce, reduction) + + def forward(self, input, target): + return F.mse_loss(input, target, reduction=self.reduction)
    + + +
[docs]class BCELoss(_WeightedLoss): + r"""Creates a criterion that measures the Binary Cross Entropy + between the target and the output: + + The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as: + + .. math:: + \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad + l_n = - w_n \left[ y_n \cdot \log x_n + (1 - y_n) \cdot \log (1 - x_n) \right], + + where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'`` + (default ``'mean'``), then + + .. math:: + \ell(x, y) = \begin{cases} + \operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\ + \operatorname{sum}(L), & \text{if reduction} = \text{'sum'.} + \end{cases} + + This is used for measuring the error of a reconstruction in, for example, + an auto-encoder. Note that the targets :math:`y` should be numbers + between 0 and 1. + + Args: + weight (Tensor, optional): a manual rescaling weight given to the loss + of each batch element. If given, has to be a Tensor of size `nbatch`. + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there are multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` + + Shape: + - Input: :math:`(N, *)` where :math:`*` means, any number of additional + dimensions + - Target: :math:`(N, *)`, same shape as the input + - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(N, *)`, same + shape as input. + + Examples:: + + >>> m = nn.Sigmoid() + >>> loss = nn.BCELoss() + >>> input = torch.randn(3, requires_grad=True) + >>> target = torch.empty(3).random_(2) + >>> output = loss(m(input), target) + >>> output.backward() + """ + __constants__ = ['reduction', 'weight'] + + def __init__(self, weight=None, size_average=None, reduce=None, reduction='mean'): + super(BCELoss, self).__init__(weight, size_average, reduce, reduction) + + def forward(self, input, target): + return F.binary_cross_entropy(input, target, weight=self.weight, reduction=self.reduction)
    + + +
[docs]class BCEWithLogitsLoss(_Loss): + r"""This loss combines a `Sigmoid` layer and the `BCELoss` in one single + class. This version is more numerically stable than using a plain `Sigmoid` + followed by a `BCELoss` as, by combining the operations into one layer, + we take advantage of the log-sum-exp trick for numerical stability. + + The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as: + + .. math:: + \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad + l_n = - w_n \left[ y_n \cdot \log \sigma(x_n) + + (1 - y_n) \cdot \log (1 - \sigma(x_n)) \right], + + where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'`` + (default ``'mean'``), then + + .. math:: + \ell(x, y) = \begin{cases} + \operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\ + \operatorname{sum}(L), & \text{if reduction} = \text{'sum'.} + \end{cases} + + This is used for measuring the error of a reconstruction in, for example, + an auto-encoder. Note that the targets `t[i]` should be numbers + between 0 and 1. + + It's possible to trade off recall and precision by adding weights to positive examples. + In the case of multi-label classification the loss can be described as: + + .. math:: + \ell_c(x, y) = L_c = \{l_{1,c},\dots,l_{N,c}\}^\top, \quad + l_{n,c} = - w_{n,c} \left[ p_c y_{n,c} \cdot \log \sigma(x_{n,c}) + + (1 - y_{n,c}) \cdot \log (1 - \sigma(x_{n,c})) \right], + + where :math:`c` is the class number (:math:`c > 1` for multi-label binary classification, + :math:`c = 1` for single-label binary classification), + :math:`n` is the number of the sample in the batch and + :math:`p_c` is the weight of the positive answer for the class :math:`c`. + + :math:`p_c > 1` increases the recall, :math:`p_c < 1` increases the precision. + + For example, if a dataset contains 100 positive and 300 negative examples of a single class, + then `pos_weight` for the class should be equal to :math:`\frac{300}{100}=3`. + The loss would act as if the dataset contains :math:`3\times 100=300` positive examples. + + Examples:: + + >>> target = torch.ones([10, 64], dtype=torch.float32)  # 64 classes, batch size = 10 + >>> output = torch.full([10, 64], 0.999)  # A prediction (logit) + >>> pos_weight = torch.ones([64])  # All weights are equal to 1 + >>> criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight) + >>> criterion(output, target)  # -log(sigmoid(0.999)) + tensor(0.3135) + + Args: + weight (Tensor, optional): a manual rescaling weight given to the loss + of each batch element. If given, has to be a Tensor of size `nbatch`. + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there are multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``.
``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` + pos_weight (Tensor, optional): a weight of positive examples. + Must be a vector with length equal to the number of classes. + + Shape: + - Input: :math:`(N, *)` where :math:`*` means, any number of additional dimensions + - Target: :math:`(N, *)`, same shape as the input + - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(N, *)`, same + shape as input. + + Examples:: + + >>> loss = nn.BCEWithLogitsLoss() + >>> input = torch.randn(3, requires_grad=True) + >>> target = torch.empty(3).random_(2) + >>> output = loss(input, target) + >>> output.backward() + """ + __constants__ = ['weight', 'pos_weight', 'reduction'] + + def __init__(self, weight=None, size_average=None, reduce=None, reduction='mean', pos_weight=None): + super(BCEWithLogitsLoss, self).__init__(size_average, reduce, reduction) + self.register_buffer('weight', weight) + self.register_buffer('pos_weight', pos_weight) + + def forward(self, input, target): + return F.binary_cross_entropy_with_logits(input, target, + self.weight, + pos_weight=self.pos_weight, + reduction=self.reduction)
    + + +
[docs]class HingeEmbeddingLoss(_Loss): + r"""Measures the loss given an input tensor :math:`x` and a label tensor :math:`y` + (containing 1 or -1). + This is usually used for measuring whether two inputs are similar or + dissimilar, e.g. using the L1 pairwise distance as :math:`x`, and is typically + used for learning nonlinear embeddings or semi-supervised learning. + + The loss function for the :math:`n`-th sample in the mini-batch is + + .. math:: + l_n = \begin{cases} + x_n, & \text{if}\; y_n = 1,\\ + \max \{0, \Delta - x_n\}, & \text{if}\; y_n = -1, + \end{cases} + + and the total loss function is + + .. math:: + \ell(x, y) = \begin{cases} + \operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\ + \operatorname{sum}(L), & \text{if reduction} = \text{'sum'.} + \end{cases} + + where :math:`L = \{l_1,\dots,l_N\}^\top`. + + Args: + margin (float, optional): Has a default value of `1`. + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there are multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` + + Shape: + - Input: :math:`(*)` where :math:`*` means, any number of dimensions. The sum operation + operates over all the elements. + - Target: :math:`(*)`, same shape as the input + - Output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the input + """ + __constants__ = ['margin', 'reduction'] + + def __init__(self, margin=1.0, size_average=None, reduce=None, reduction='mean'): + super(HingeEmbeddingLoss, self).__init__(size_average, reduce, reduction) + self.margin = margin + + def forward(self, input, target): + return F.hinge_embedding_loss(input, target, margin=self.margin, reduction=self.reduction)
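+ Example (a minimal sketch; assumes ``import torch`` and ``import torch.nn as nn``)::
+
+     >>> loss = nn.HingeEmbeddingLoss(margin=1.0)
+     >>> # input is typically a distance, e.g. an L1 pairwise distance
+     >>> input = torch.randn(4, requires_grad=True)
+     >>> # labels are 1 (similar) or -1 (dissimilar)
+     >>> target = torch.tensor([1., -1., 1., -1.])
+     >>> output = loss(input, target)
+     >>> output.backward()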
    + + +
[docs]class MultiLabelMarginLoss(_Loss): + r"""Creates a criterion that optimizes a multi-class multi-classification + hinge loss (margin-based loss) between input :math:`x` (a 2D mini-batch `Tensor`) + and output :math:`y` (which is a 2D `Tensor` of target class indices). + For each sample in the mini-batch: + + .. math:: + \text{loss}(x, y) = \sum_{ij}\frac{\max(0, 1 - (x[y[j]] - x[i]))}{\text{x.size}(0)} + + where :math:`i \in \left\{0, \; \cdots , \; \text{x.size}(0) - 1\right\}`, \ + :math:`j \in \left\{0, \; \cdots , \; \text{y.size}(0) - 1\right\}`, \ + :math:`0 \leq y[j] \leq \text{x.size}(0)-1`, \ + and :math:`i \neq y[j]` for all :math:`i` and :math:`j`. + + :math:`y` and :math:`x` must have the same size. + + The criterion only considers a contiguous block of non-negative targets that + starts at the front. + + This allows for different samples to have variable amounts of target classes. + + Args: + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there are multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` + + Shape: + - Input: :math:`(C)` or :math:`(N, C)` where `N` is the batch size and `C` + is the number of classes. + - Target: :math:`(C)` or :math:`(N, C)`, label targets padded by -1 ensuring same shape as the input. + - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(N)`. + + Examples:: + + >>> loss = nn.MultiLabelMarginLoss() + >>> x = torch.FloatTensor([[0.1, 0.2, 0.4, 0.8]]) + >>> # for target y, only consider labels 3 and 0, not after label -1 + >>> y = torch.LongTensor([[3, 0, -1, 1]]) + >>> loss(x, y) + >>> # 0.25 * ((1-(0.1-0.2)) + (1-(0.1-0.4)) + (1-(0.8-0.2)) + (1-(0.8-0.4))) + tensor(0.8500) + + """ + __constants__ = ['reduction'] + + def __init__(self, size_average=None, reduce=None, reduction='mean'): + super(MultiLabelMarginLoss, self).__init__(size_average, reduce, reduction) + + def forward(self, input, target): + return F.multilabel_margin_loss(input, target, reduction=self.reduction)
    + + +
[docs]class SmoothL1Loss(_Loss): + r"""Creates a criterion that uses a squared term if the absolute + element-wise error falls below 1 and an L1 term otherwise. + It is less sensitive to outliers than the `MSELoss` and in some cases + prevents exploding gradients (e.g. see `Fast R-CNN` paper by Ross Girshick). + Also known as the Huber loss: + + .. math:: + \text{loss}(x, y) = \frac{1}{n} \sum_{i} z_{i} + + where :math:`z_{i}` is given by: + + .. math:: + z_{i} = + \begin{cases} + 0.5 (x_i - y_i)^2, & \text{if } |x_i - y_i| < 1 \\ + |x_i - y_i| - 0.5, & \text{otherwise } + \end{cases} + + :math:`x` and :math:`y` are tensors of arbitrary shapes with a total of :math:`n` elements each; + the sum operation still operates over all the elements, and divides by :math:`n`. + + The division by :math:`n` can be avoided if one sets ``reduction = 'sum'``. + + Args: + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there are multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` + + Shape: + - Input: :math:`(N, *)` where :math:`*` means, any number of additional + dimensions + - Target: :math:`(N, *)`, same shape as the input + - Output: scalar. If :attr:`reduction` is ``'none'``, then + :math:`(N, *)`, same shape as the input + + """ + __constants__ = ['reduction'] + + def __init__(self, size_average=None, reduce=None, reduction='mean'): + super(SmoothL1Loss, self).__init__(size_average, reduce, reduction) + + def forward(self, input, target): + return F.smooth_l1_loss(input, target, reduction=self.reduction)
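+ Example (a minimal sketch; assumes ``import torch`` and ``import torch.nn as nn``)::
+
+     >>> loss = nn.SmoothL1Loss()
+     >>> input = torch.randn(3, 5, requires_grad=True)
+     >>> target = torch.randn(3, 5)
+     >>> output = loss(input, target)
+     >>> output.backward()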
    + + +
    [docs]class SoftMarginLoss(_Loss): + r"""Creates a criterion that optimizes a two-class classification + logistic loss between input tensor :math:`x` and target tensor :math:`y` + (containing 1 or -1). + + .. math:: + \text{loss}(x, y) = \sum_i \frac{\log(1 + \exp(-y[i]*x[i]))}{\text{x.nelement}()} + + Args: + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there are multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` + + Shape: + - Input: :math:`(*)` where :math:`*` means, any number of additional + dimensions + - Target: :math:`(*)`, same shape as the input + - Output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the input + + """ + __constants__ = ['reduction'] + + def __init__(self, size_average=None, reduce=None, reduction='mean'): + super(SoftMarginLoss, self).__init__(size_average, reduce, reduction) + + def forward(self, input, target): + return F.soft_margin_loss(input, target, reduction=self.reduction)
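+ Example (a minimal sketch; assumes ``import torch`` and ``import torch.nn as nn``)::
+
+     >>> loss = nn.SoftMarginLoss()
+     >>> input = torch.randn(3, requires_grad=True)
+     >>> target = torch.tensor([1., -1., 1.])  # labels are 1 or -1
+     >>> output = loss(input, target)
+     >>> output.backward()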
    + + +
    [docs]class CrossEntropyLoss(_WeightedLoss): + r"""This criterion combines :func:`nn.LogSoftmax` and :func:`nn.NLLLoss` in one single class. + + It is useful when training a classification problem with `C` classes. + If provided, the optional argument :attr:`weight` should be a 1D `Tensor` + assigning weight to each of the classes. + This is particularly useful when you have an unbalanced training set. + + The `input` is expected to contain raw, unnormalized scores for each class. + + `input` has to be a Tensor of size either :math:`(minibatch, C)` or + :math:`(minibatch, C, d_1, d_2, ..., d_K)` + with :math:`K \geq 1` for the `K`-dimensional case (described later). + + This criterion expects a class index in the range :math:`[0, C-1]` as the + `target` for each value of a 1D tensor of size `minibatch`; if `ignore_index` + is specified, this criterion also accepts this class index (this index may not + necessarily be in the class range). + + The loss can be described as: + + .. math:: + \text{loss}(x, class) = -\log\left(\frac{\exp(x[class])}{\sum_j \exp(x[j])}\right) + = -x[class] + \log\left(\sum_j \exp(x[j])\right) + + or in the case of the :attr:`weight` argument being specified: + + .. math:: + \text{loss}(x, class) = weight[class] \left(-x[class] + \log\left(\sum_j \exp(x[j])\right)\right) + + The losses are averaged across observations for each minibatch. + + Can also be used for higher dimension inputs, such as 2D images, by providing + an input of size :math:`(minibatch, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1`, + where :math:`K` is the number of dimensions, and a target of appropriate shape + (see below). + + + Args: + weight (Tensor, optional): a manual rescaling weight given to each class. + If given, has to be a Tensor of size `C` + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there are multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + ignore_index (int, optional): Specifies a target value that is ignored + and does not contribute to the input gradient. When :attr:`size_average` is + ``True``, the loss is averaged over non-ignored targets. + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` + + Shape: + - Input: :math:`(N, C)` where `C = number of classes`, or + :math:`(N, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1` + in the case of `K`-dimensional loss. + - Target: :math:`(N)` where each value is :math:`0 \leq \text{targets}[i] \leq C-1`, or + :math:`(N, d_1, d_2, ..., d_K)` with :math:`K \geq 1` in the case of + K-dimensional loss. + - Output: scalar. 
+ If :attr:`reduction` is ``'none'``, then the same size as the target: + :math:`(N)`, or + :math:`(N, d_1, d_2, ..., d_K)` with :math:`K \geq 1` in the case + of K-dimensional loss. + + Examples:: + + >>> loss = nn.CrossEntropyLoss() + >>> input = torch.randn(3, 5, requires_grad=True) + >>> target = torch.empty(3, dtype=torch.long).random_(5) + >>> output = loss(input, target) + >>> output.backward() + """ + __constants__ = ['weight', 'ignore_index', 'reduction'] + + def __init__(self, weight=None, size_average=None, ignore_index=-100, + reduce=None, reduction='mean'): + super(CrossEntropyLoss, self).__init__(weight, size_average, reduce, reduction) + self.ignore_index = ignore_index + + def forward(self, input, target): + return F.cross_entropy(input, target, weight=self.weight, + ignore_index=self.ignore_index, reduction=self.reduction)
    + + +
[docs]class MultiLabelSoftMarginLoss(_WeightedLoss): + r"""Creates a criterion that optimizes a multi-label one-versus-all + loss based on max-entropy, between input :math:`x` and target :math:`y` of size + :math:`(N, C)`. + For each sample in the minibatch: + + .. math:: + loss(x, y) = - \frac{1}{C} * \sum_i y[i] * \log((1 + \exp(-x[i]))^{-1}) + + (1-y[i]) * \log\left(\frac{\exp(-x[i])}{(1 + \exp(-x[i]))}\right) + + where :math:`i \in \left\{0, \; \cdots , \; \text{x.nElement}() - 1\right\}`, + :math:`y[i] \in \left\{0, \; 1\right\}`. + + Args: + weight (Tensor, optional): a manual rescaling weight given to each + class. If given, it has to be a Tensor of size `C`. Otherwise, it is + treated as if having all ones. + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there are multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` + + Shape: + - Input: :math:`(N, C)` where `N` is the batch size and `C` is the number of classes. + - Target: :math:`(N, C)`, binary label targets (each entry is 0 or 1), same shape as the input. + - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(N)`. + """ + __constants__ = ['weight', 'reduction'] + + def __init__(self, weight=None, size_average=None, reduce=None, reduction='mean'): + super(MultiLabelSoftMarginLoss, self).__init__(weight, size_average, reduce, reduction) + + def forward(self, input, target): + return F.multilabel_soft_margin_loss(input, target, weight=self.weight, reduction=self.reduction)
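+ Example (a minimal sketch; assumes ``import torch`` and ``import torch.nn as nn``)::
+
+     >>> loss = nn.MultiLabelSoftMarginLoss()
+     >>> input = torch.randn(2, 4, requires_grad=True)
+     >>> # each sample may carry several active labels; targets are 0 or 1
+     >>> target = torch.empty(2, 4).random_(2)
+     >>> output = loss(input, target)
+     >>> output.backward()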
    + + +
    [docs]class CosineEmbeddingLoss(_Loss): + r"""Creates a criterion that measures the loss given input tensors + :math:`x_1`, :math:`x_2` and a `Tensor` label :math:`y` with values 1 or -1. + This is used for measuring whether two inputs are similar or dissimilar, + using the cosine distance, and is typically used for learning nonlinear + embeddings or semi-supervised learning. + + The loss function for each sample is: + + .. math:: + \text{loss}(x, y) = + \begin{cases} + 1 - \cos(x_1, x_2), & \text{if } y = 1 \\ + \max(0, \cos(x_1, x_2) - \text{margin}), & \text{if } y = -1 + \end{cases} + + Args: + margin (float, optional): Should be a number from :math:`-1` to :math:`1`, + :math:`0` to :math:`0.5` is suggested. If :attr:`margin` is missing, the + default value is :math:`0`. + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there are multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` + """ + __constants__ = ['margin', 'reduction'] + + def __init__(self, margin=0., size_average=None, reduce=None, reduction='mean'): + super(CosineEmbeddingLoss, self).__init__(size_average, reduce, reduction) + self.margin = margin + + def forward(self, input1, input2, target): + return F.cosine_embedding_loss(input1, input2, target, margin=self.margin, reduction=self.reduction)
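+ Example (a minimal sketch; assumes ``import torch`` and ``import torch.nn as nn``)::
+
+     >>> loss = nn.CosineEmbeddingLoss(margin=0.5)
+     >>> input1 = torch.randn(3, 128, requires_grad=True)
+     >>> input2 = torch.randn(3, 128, requires_grad=True)
+     >>> target = torch.tensor([1., -1., 1.])  # 1: similar pair, -1: dissimilar pair
+     >>> output = loss(input1, input2, target)
+     >>> output.backward()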
    + + +
[docs]class MarginRankingLoss(_Loss): + r"""Creates a criterion that measures the loss given + inputs :math:`x1`, :math:`x2`, two 1D mini-batch `Tensors`, + and a label 1D mini-batch tensor :math:`y` (containing 1 or -1). + + If :math:`y = 1` then it is assumed the first input should be ranked higher + (have a larger value) than the second input, and vice-versa for :math:`y = -1`. + + The loss function for each sample in the mini-batch is: + + .. math:: + \text{loss}(x, y) = \max(0, -y * (x1 - x2) + \text{margin}) + + Args: + margin (float, optional): Has a default value of :math:`0`. + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there are multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` + + Shape: + - Input: :math:`(N, D)` where `N` is the batch size and `D` is the size of a sample. + - Target: :math:`(N)` + - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(N)`. + """ + __constants__ = ['margin', 'reduction'] + + def __init__(self, margin=0., size_average=None, reduce=None, reduction='mean'): + super(MarginRankingLoss, self).__init__(size_average, reduce, reduction) + self.margin = margin + + def forward(self, input1, input2, target): + return F.margin_ranking_loss(input1, input2, target, margin=self.margin, reduction=self.reduction)
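+ Example (a minimal sketch; assumes ``import torch`` and ``import torch.nn as nn``)::
+
+     >>> loss = nn.MarginRankingLoss(margin=0.1)
+     >>> input1 = torch.randn(5, requires_grad=True)
+     >>> input2 = torch.randn(5, requires_grad=True)
+     >>> target = torch.ones(5)  # input1 should rank higher than input2
+     >>> output = loss(input1, input2, target)
+     >>> output.backward()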
    + + +
[docs]class MultiMarginLoss(_WeightedLoss): + r"""Creates a criterion that optimizes a multi-class classification hinge + loss (margin-based loss) between input :math:`x` (a 2D mini-batch `Tensor`) and + output :math:`y` (which is a 1D tensor of target class indices, + :math:`0 \leq y \leq \text{x.size}(1)-1`): + + For each mini-batch sample, the loss in terms of the 1D input :math:`x` and scalar + output :math:`y` is: + + .. math:: + \text{loss}(x, y) = \frac{\sum_i \max(0, \text{margin} - x[y] + x[i])^p}{\text{x.size}(0)} + + where :math:`i \in \left\{0, \; \cdots , \; \text{x.size}(0) - 1\right\}` + and :math:`i \neq y`. + + Optionally, you can give non-equal weighting on the classes by passing + a 1D :attr:`weight` tensor into the constructor. + + The loss function then becomes: + + .. math:: + \text{loss}(x, y) = \frac{\sum_i \max(0, w[y] * (\text{margin} - x[y] + x[i]))^p}{\text{x.size}(0)} + + Args: + p (int, optional): Has a default value of :math:`1`. :math:`1` and :math:`2` + are the only supported values. + margin (float, optional): Has a default value of :math:`1`. + weight (Tensor, optional): a manual rescaling weight given to each + class. If given, it has to be a Tensor of size `C`. Otherwise, it is + treated as if having all ones. + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there are multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` + """ + __constants__ = ['p', 'margin', 'weight', 'reduction'] + + def __init__(self, p=1, margin=1., weight=None, size_average=None, + reduce=None, reduction='mean'): + super(MultiMarginLoss, self).__init__(weight, size_average, reduce, reduction) + if p != 1 and p != 2: + raise ValueError("only p == 1 and p == 2 supported") + assert weight is None or weight.dim() == 1 + self.p = p + self.margin = margin + + def forward(self, input, target): + return F.multi_margin_loss(input, target, p=self.p, margin=self.margin, + weight=self.weight, reduction=self.reduction)
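+ Example (a worked sketch; the expected value follows from the formula above with ``p=1`` and ``margin=1``; assumes ``import torch`` and ``import torch.nn as nn``)::
+
+     >>> loss = nn.MultiMarginLoss()
+     >>> x = torch.tensor([[0.1, 0.2, 0.4, 0.8]])
+     >>> y = torch.tensor([3])  # the correct class is index 3
+     >>> # 0.25 * ((1-(0.8-0.1)) + (1-(0.8-0.2)) + (1-(0.8-0.4)))
+     >>> loss(x, y)
+     tensor(0.3250)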
    + + +
[docs]class TripletMarginLoss(_Loss): + r"""Creates a criterion that measures the triplet loss given input + tensors :math:`x1`, :math:`x2`, :math:`x3` and a margin with a value greater than :math:`0`. + This is used for measuring a relative similarity between samples. A triplet + is composed of `a`, `p` and `n` (i.e., `anchor`, `positive examples` and `negative + examples` respectively). The shapes of all input tensors should be + :math:`(N, D)`. + + The distance swap is described in detail in the paper `Learning shallow + convolutional feature descriptors with triplet losses`_ by + V. Balntas, E. Riba et al. + + The loss function for each sample in the mini-batch is: + + .. math:: + L(a, p, n) = \max \{d(a_i, p_i) - d(a_i, n_i) + {\rm margin}, 0\} + + + where + + .. math:: + d(x_i, y_i) = \left\lVert {\bf x}_i - {\bf y}_i \right\rVert_p + + Args: + margin (float, optional): Default: :math:`1`. + p (int, optional): The norm degree for pairwise distance. Default: :math:`2`. + swap (bool, optional): The distance swap is described in detail in the paper + `Learning shallow convolutional feature descriptors with triplet losses` by + V. Balntas, E. Riba et al. Default: ``False``. + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there are multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` + + Shape: + - Input: :math:`(N, D)` where :math:`D` is the vector dimension. + - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(N)`. + + >>> triplet_loss = nn.TripletMarginLoss(margin=1.0, p=2) + >>> anchor = torch.randn(100, 128, requires_grad=True) + >>> positive = torch.randn(100, 128, requires_grad=True) + >>> negative = torch.randn(100, 128, requires_grad=True) + >>> output = triplet_loss(anchor, positive, negative) + >>> output.backward() + + .. _Learning shallow convolutional feature descriptors with triplet losses: + http://www.bmva.org/bmvc/2016/papers/paper119/index.html + """ + __constants__ = ['margin', 'p', 'eps', 'swap', 'reduction'] + + def __init__(self, margin=1.0, p=2., eps=1e-6, swap=False, size_average=None, + reduce=None, reduction='mean'): + super(TripletMarginLoss, self).__init__(size_average, reduce, reduction) + self.margin = margin + self.p = p + self.eps = eps + self.swap = swap + + def forward(self, anchor, positive, negative): + return F.triplet_margin_loss(anchor, positive, negative, margin=self.margin, p=self.p, + eps=self.eps, swap=self.swap, reduction=self.reduction)
    + + +
[docs]class CTCLoss(_Loss): + r"""The Connectionist Temporal Classification loss. + + Calculates the loss between a continuous (unsegmented) time series and a target sequence. CTCLoss sums over the + probability of possible alignments of input to target, producing a loss value which is differentiable + with respect to each input node. The alignment of input to target is assumed to be "many-to-one", which + limits the length of the target sequence such that it must be :math:`\leq` the input length. + + Args: + blank (int, optional): blank label. Default :math:`0`. + reduction (string, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the output losses will be divided by the target lengths and + then the mean over the batch is taken. Default: ``'mean'`` + zero_infinity (bool, optional): + Whether to zero infinite losses and the associated gradients. + Default: ``False`` + Infinite losses mainly occur when the inputs are too short + to be aligned to the targets. + + Shape: + - Log_probs: Tensor of size :math:`(T, N, C)`, + where :math:`T = \text{input length}`, + :math:`N = \text{batch size}`, and + :math:`C = \text{number of classes (including blank)}`. + The logarithmized probabilities of the outputs (e.g. obtained with + :func:`torch.nn.functional.log_softmax`). + - Targets: Tensor of size :math:`(N, S)` or + :math:`(\operatorname{sum}(\text{target\_lengths}))`, + where :math:`N = \text{batch size}` and + :math:`S = \text{max target length, if shape is } (N, S)`. + It represents the target sequences. Each element in the target + sequence is a class index, and the target index cannot be blank (default=0). + In the :math:`(N, S)` form, targets are padded to the + length of the longest sequence, and stacked. + In the :math:`(\operatorname{sum}(\text{target\_lengths}))` form, + the targets are assumed to be un-padded and + concatenated within 1 dimension. + - Input_lengths: Tuple or tensor of size :math:`(N)`, + where :math:`N = \text{batch size}`. It represents the lengths of the + inputs (must each be :math:`\leq T`). The lengths are specified + for each sequence to achieve masking under the assumption that sequences + are padded to equal lengths. + - Target_lengths: Tuple or tensor of size :math:`(N)`, + where :math:`N = \text{batch size}`. It represents the lengths of the targets. + Lengths are specified for each sequence to achieve masking under the + assumption that sequences are padded to equal lengths. If target shape is + :math:`(N,S)`, target_lengths are effectively the stop index + :math:`s_n` for each target sequence, such that ``target_n = targets[n,0:s_n]`` for + each target in a batch. Lengths must each be :math:`\leq S`. + If the targets are given as a 1d tensor that is the concatenation of individual + targets, the target_lengths must add up to the total length of the tensor. + - Output: scalar. If :attr:`reduction` is ``'none'``, then + :math:`(N)`, where :math:`N = \text{batch size}`.
+ + Example:: + + >>> T = 50      # Input sequence length + >>> C = 20      # Number of classes (including blank) + >>> N = 16      # Batch size + >>> S = 30      # Target sequence length of longest target in batch + >>> S_min = 10  # Minimum target length, for demonstration purposes + >>> + >>> # Initialize random batch of input vectors, for *size = (T,N,C) + >>> input = torch.randn(T, N, C).log_softmax(2).detach().requires_grad_() + >>> + >>> # Initialize random batch of targets (0 = blank, 1:C = classes) + >>> target = torch.randint(low=1, high=C, size=(N, S), dtype=torch.long) + >>> + >>> input_lengths = torch.full(size=(N,), fill_value=T, dtype=torch.long) + >>> target_lengths = torch.randint(low=S_min, high=S, size=(N,), dtype=torch.long) + >>> ctc_loss = nn.CTCLoss() + >>> loss = ctc_loss(input, target, input_lengths, target_lengths) + >>> loss.backward() + + Reference: + A. Graves et al.: Connectionist Temporal Classification: + Labelling Unsegmented Sequence Data with Recurrent Neural Networks: + https://www.cs.toronto.edu/~graves/icml_2006.pdf + + .. Note:: + In order to use CuDNN, the following must be satisfied: :attr:`targets` must be + in concatenated format, all :attr:`input_lengths` must be `T`, :math:`blank=0`, + :attr:`target_lengths` :math:`\leq 256`, and the integer arguments must be of + dtype :attr:`torch.int32`. + + The regular implementation uses the (more common in PyTorch) `torch.long` dtype. + + + .. include:: cudnn_deterministic.rst + + """ + __constants__ = ['blank', 'reduction'] + + def __init__(self, blank=0, reduction='mean', zero_infinity=False): + super(CTCLoss, self).__init__(reduction=reduction) + self.blank = blank + self.zero_infinity = zero_infinity + + def forward(self, log_probs, targets, input_lengths, target_lengths): + return F.ctc_loss(log_probs, targets, input_lengths, target_lengths, self.blank, self.reduction, + self.zero_infinity)
    + +# TODO: L1HingeEmbeddingCriterion +# TODO: MSECriterion weight +# TODO: ClassSimplexCriterion +
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/modules/module.html b/docs/stable/_modules/torch/nn/modules/module.html
new file mode 100644
index 000000000000..3ac4ceeb571b
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/modules/module.html
@@ -0,0 +1,1673 @@
+ torch.nn.modules.module — PyTorch master documentation
    Source code for torch.nn.modules.module

    +from collections import OrderedDict, namedtuple
    +import functools
    +import itertools
    +
    +import torch
    +from ..backends.thnn import backend as thnn_backend
    +from ..parameter import Parameter
    +import torch.utils.hooks as hooks
    +
    +
    +class _IncompatibleKeys(namedtuple('IncompatibleKeys', ['missing_keys', 'unexpected_keys'])):
    +    def __repr__(self):
    +        if not self.missing_keys and not self.unexpected_keys:
    +            return '<All keys matched successfully>'
    +        return super(_IncompatibleKeys, self).__repr__()
    +
    +    __str__ = __repr__
    +
    +
    +def _addindent(s_, numSpaces):
    +    s = s_.split('\n')
    +    # don't do anything for single-line stuff
    +    if len(s) == 1:
    +        return s_
    +    first = s.pop(0)
    +    s = [(numSpaces * ' ') + line for line in s]
    +    s = '\n'.join(s)
    +    s = first + '\n' + s
    +    return s
    +
    +
    +
[docs]class Module(object): + r"""Base class for all neural network modules. + + Your models should also subclass this class. + + Modules can also contain other Modules, allowing them to be nested in + a tree structure. You can assign the submodules as regular attributes:: + + import torch.nn as nn + import torch.nn.functional as F + + class Model(nn.Module): + def __init__(self): + super(Model, self).__init__() + self.conv1 = nn.Conv2d(1, 20, 5) + self.conv2 = nn.Conv2d(20, 20, 5) + + def forward(self, x): + x = F.relu(self.conv1(x)) + return F.relu(self.conv2(x)) + + Submodules assigned in this way will be registered, and will have their + parameters converted too when you call :meth:`to`, etc. + """ + + dump_patches = False + + r"""This allows better BC support for :meth:`load_state_dict`. In + :meth:`state_dict`, the version number will be saved in the attribute + `_metadata` of the returned state dict, and thus pickled. `_metadata` is a + dictionary with keys that follow the naming convention of state dict. See + ``_load_from_state_dict`` on how to use this information in loading. + + If new parameters/buffers are added/removed from a module, this number shall + be bumped, and the module's `_load_from_state_dict` method can compare the + version number and do appropriate changes if the state dict is from before + the change.""" + _version = 1 + + def __init__(self): + torch._C._log_api_usage_once("python.nn_module") + self._backend = thnn_backend + self._parameters = OrderedDict() + self._buffers = OrderedDict() + self._backward_hooks = OrderedDict() + self._forward_hooks = OrderedDict() + self._forward_pre_hooks = OrderedDict() + self._state_dict_hooks = OrderedDict() + self._load_state_dict_pre_hooks = OrderedDict() + self._modules = OrderedDict() + self.training = True +
[docs] def forward(self, *input): + r"""Defines the computation performed at every call. + + Should be overridden by all subclasses. + + .. note:: + Although the recipe for the forward pass needs to be defined within + this function, one should call the :class:`Module` instance afterwards + instead of this since the former takes care of running the + registered hooks while the latter silently ignores them. + """ + raise NotImplementedError
    + +
[docs] def register_buffer(self, name, tensor): + r"""Adds a persistent buffer to the module. + + This is typically used to register a buffer that should not be + considered a model parameter. For example, BatchNorm's ``running_mean`` + is not a parameter, but is part of the persistent state. + + Buffers can be accessed as attributes using the given names. + + Args: + name (string): name of the buffer. The buffer can be accessed + from this module using the given name + tensor (Tensor): buffer to be registered. + + Example:: + + >>> self.register_buffer('running_mean', torch.zeros(num_features)) + + """ + if '_buffers' not in self.__dict__: + raise AttributeError( + "cannot assign buffer before Module.__init__() call") + elif not isinstance(name, torch._six.string_classes): + raise TypeError("buffer name should be a string. " + "Got {}".format(torch.typename(name))) + elif '.' in name: + raise KeyError("buffer name can't contain \".\"") + elif name == '': + raise KeyError("buffer name can't be empty string \"\"") + elif hasattr(self, name) and name not in self._buffers: + raise KeyError("attribute '{}' already exists".format(name)) + elif tensor is not None and not isinstance(tensor, torch.Tensor): + raise TypeError("cannot assign '{}' object to buffer '{}' " + "(torch Tensor or None required)" + .format(torch.typename(tensor), name)) + else: + self._buffers[name] = tensor
    + +
[docs] def register_parameter(self, name, param): + r"""Adds a parameter to the module. + + The parameter can be accessed as an attribute using the given name. + + Args: + name (string): name of the parameter. The parameter can be accessed + from this module using the given name + param (Parameter): parameter to be added to the module. + """ + if '_parameters' not in self.__dict__: + raise AttributeError( + "cannot assign parameter before Module.__init__() call") + + elif not isinstance(name, torch._six.string_classes): + raise TypeError("parameter name should be a string. " + "Got {}".format(torch.typename(name))) + elif '.' in name: + raise KeyError("parameter name can't contain \".\"") + elif name == '': + raise KeyError("parameter name can't be empty string \"\"") + elif hasattr(self, name) and name not in self._parameters: + raise KeyError("attribute '{}' already exists".format(name)) + + if param is None: + self._parameters[name] = None + elif not isinstance(param, Parameter): + raise TypeError("cannot assign '{}' object to parameter '{}' " + "(torch.nn.Parameter or None required)" + .format(torch.typename(param), name)) + elif param.grad_fn: + raise ValueError( + "Cannot assign non-leaf Tensor to parameter '{0}'. Model " + "parameters must be created explicitly. To express '{0}' " + "as a function of another Tensor, compute the value in " + "the forward() method.".format(name)) + else: + self._parameters[name] = param
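+ Example (a minimal sketch mirroring the :meth:`register_buffer` example; typically called inside a module's ``__init__``)::
+
+     >>> self.register_parameter('scale', nn.Parameter(torch.ones(5)))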
    + +
    [docs] def add_module(self, name, module): + r"""Adds a child module to the current module. + + The module can be accessed as an attribute using the given name. + + Args: + name (string): name of the child module. The child module can be + accessed from this module using the given name + module (Module): child module to be added to the module. + """ + if not isinstance(module, Module) and module is not None: + raise TypeError("{} is not a Module subclass".format( + torch.typename(module))) + elif not isinstance(name, torch._six.string_classes): + raise TypeError("module name should be a string. Got {}".format( + torch.typename(name))) + elif hasattr(self, name) and name not in self._modules: + raise KeyError("attribute '{}' already exists".format(name)) + elif '.' in name: + raise KeyError("module name can't contain \".\"") + elif name == '': + raise KeyError("module name can't be empty string \"\"") + self._modules[name] = module
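+ Example (a minimal sketch; typically called inside a module's ``__init__``, where it is equivalent to plain attribute assignment)::
+
+     >>> self.add_module('conv1', nn.Conv2d(1, 20, 5))
+     >>> # equivalent to: self.conv1 = nn.Conv2d(1, 20, 5)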
    + + def _apply(self, fn): + for module in self.children(): + module._apply(fn) + + def compute_should_use_set_data(tensor, tensor_applied): + if torch._has_compatible_shallow_copy_type(tensor, tensor_applied): + # If the new tensor has compatible tensor type as the existing tensor, + # the current behavior is to change the tensor in-place using `.data =`, + # and the future behavior is to overwrite the existing tensor. However, + # changing the current behavior is a BC-breaking change, and we want it + # to happen in future releases. So for now we introduce the + # `torch.__future__.get_overwrite_module_params_on_conversion()` + # global flag to let the user control whether they want the future + # behavior of overwriting the existing tensor or not. + return not torch.__future__.get_overwrite_module_params_on_conversion() + else: + return False + + for key, param in self._parameters.items(): + if param is not None: + # Tensors stored in modules are graph leaves, and we don't want to + # track autograd history of `param_applied`, so we have to use + # `with torch.no_grad():` + with torch.no_grad(): + param_applied = fn(param) + should_use_set_data = compute_should_use_set_data(param, param_applied) + if should_use_set_data: + param.data = param_applied + else: + assert isinstance(param, Parameter) + assert param.is_leaf + self._parameters[key] = Parameter(param_applied, param.requires_grad) + + if param.grad is not None: + with torch.no_grad(): + grad_applied = fn(param.grad) + should_use_set_data = compute_should_use_set_data(param.grad, grad_applied) + if should_use_set_data: + param.grad.data = grad_applied + else: + assert param.grad.is_leaf + self._parameters[key].grad = grad_applied.requires_grad_(param.grad.requires_grad) + + for key, buf in self._buffers.items(): + if buf is not None: + self._buffers[key] = fn(buf) + + return self + +
    [docs] def apply(self, fn): + r"""Applies ``fn`` recursively to every submodule (as returned by ``.children()``) + as well as self. Typical use includes initializing the parameters of a model + (see also :ref:`torch-nn-init`). + + Args: + fn (:class:`Module` -> None): function to be applied to each submodule + + Returns: + Module: self + + Example:: + + >>> def init_weights(m): + >>> print(m) + >>> if type(m) == nn.Linear: + >>> m.weight.data.fill_(1.0) + >>> print(m.weight) + >>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2)) + >>> net.apply(init_weights) + Linear(in_features=2, out_features=2, bias=True) + Parameter containing: + tensor([[ 1., 1.], + [ 1., 1.]]) + Linear(in_features=2, out_features=2, bias=True) + Parameter containing: + tensor([[ 1., 1.], + [ 1., 1.]]) + Sequential( + (0): Linear(in_features=2, out_features=2, bias=True) + (1): Linear(in_features=2, out_features=2, bias=True) + ) + Sequential( + (0): Linear(in_features=2, out_features=2, bias=True) + (1): Linear(in_features=2, out_features=2, bias=True) + ) + """ + for module in self.children(): + module.apply(fn) + fn(self) + return self
    + +
[docs] def cuda(self, device=None): + r"""Moves all model parameters and buffers to the GPU. + + This also makes associated parameters and buffers different objects. So + it should be called before constructing the optimizer if the module will + live on GPU while being optimized. + + Arguments: + device (int, optional): if specified, all parameters will be + copied to that device + + Returns: + Module: self + """ + return self._apply(lambda t: t.cuda(device))
    + +
    [docs] def cpu(self): + r"""Moves all model parameters and buffers to the CPU. + + Returns: + Module: self + """ + return self._apply(lambda t: t.cpu())
    + +
    [docs] def type(self, dst_type): + r"""Casts all parameters and buffers to :attr:`dst_type`. + + Arguments: + dst_type (type or string): the desired type + + Returns: + Module: self + """ + return self._apply(lambda t: t.type(dst_type))
    + +
    [docs] def float(self): + r"""Casts all floating point parameters and buffers to float datatype. + + Returns: + Module: self + """ + return self._apply(lambda t: t.float() if t.is_floating_point() else t)
    + +
    [docs] def double(self): + r"""Casts all floating point parameters and buffers to ``double`` datatype. + + Returns: + Module: self + """ + return self._apply(lambda t: t.double() if t.is_floating_point() else t)
    + +
    [docs] def half(self): + r"""Casts all floating point parameters and buffers to ``half`` datatype. + + Returns: + Module: self + """ + return self._apply(lambda t: t.half() if t.is_floating_point() else t)
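As with :meth:`float` and :meth:`double`, only floating point tensors are cast; integral buffers are left untouched. A small sketch using a module that owns an integral buffer::

    >>> m = nn.BatchNorm1d(4)               # `num_batches_tracked` is an int64 buffer
    >>> m = m.half()
    >>> m.weight.dtype
    torch.float16
    >>> m.num_batches_tracked.dtype         # integral buffer is not cast
    torch.int64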
    + +
+    def to(self, *args, **kwargs):
+        r"""Moves and/or casts the parameters and buffers.
+
+        This can be called as
+
+        .. function:: to(device=None, dtype=None, non_blocking=False)
+
+        .. function:: to(dtype, non_blocking=False)
+
+        .. function:: to(tensor, non_blocking=False)
+
+        Its signature is similar to :meth:`torch.Tensor.to`, but only accepts
+        floating point :attr:`dtype`\ s. In addition, this method will
+        only cast the floating point parameters and buffers to :attr:`dtype`
+        (if given). The integral parameters and buffers will be moved to
+        :attr:`device`, if that is given, but with dtypes unchanged. When
+        :attr:`non_blocking` is set, it tries to convert/move asynchronously
+        with respect to the host if possible, e.g., moving CPU Tensors with
+        pinned memory to CUDA devices.
+
+        See below for examples.
+
+        .. note::
+            This method modifies the module in-place.
+
+        Args:
+            device (:class:`torch.device`): the desired device of the parameters
+                and buffers in this module
+            dtype (:class:`torch.dtype`): the desired floating point type of
+                the floating point parameters and buffers in this module
+            tensor (torch.Tensor): Tensor whose dtype and device are the desired
+                dtype and device for all parameters and buffers in this module
+
+        Returns:
+            Module: self
+
+        Example::
+
+            >>> linear = nn.Linear(2, 2)
+            >>> linear.weight
+            Parameter containing:
+            tensor([[ 0.1913, -0.3420],
+                    [-0.5113, -0.2325]])
+            >>> linear.to(torch.double)
+            Linear(in_features=2, out_features=2, bias=True)
+            >>> linear.weight
+            Parameter containing:
+            tensor([[ 0.1913, -0.3420],
+                    [-0.5113, -0.2325]], dtype=torch.float64)
+            >>> gpu1 = torch.device("cuda:1")
+            >>> linear.to(gpu1, dtype=torch.half, non_blocking=True)
+            Linear(in_features=2, out_features=2, bias=True)
+            >>> linear.weight
+            Parameter containing:
+            tensor([[ 0.1914, -0.3420],
+                    [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')
+            >>> cpu = torch.device("cpu")
+            >>> linear.to(cpu)
+            Linear(in_features=2, out_features=2, bias=True)
+            >>> linear.weight
+            Parameter containing:
+            tensor([[ 0.1914, -0.3420],
+                    [-0.5112, -0.2324]], dtype=torch.float16)
+
+        """
+
+        device, dtype, non_blocking = torch._C._nn._parse_to(*args, **kwargs)
+
+        if dtype is not None:
+            if not dtype.is_floating_point:
+                raise TypeError('nn.Module.to only accepts floating point '
+                                'dtypes, but got desired dtype={}'.format(dtype))
+
+        def convert(t):
+            return t.to(device, dtype if t.is_floating_point() else None, non_blocking)
+
+        return self._apply(convert)
    + +
+    def register_backward_hook(self, hook):
+        r"""Registers a backward hook on the module.
+
+        The hook will be called every time the gradients with respect to module
+        inputs are computed. The hook should have the following signature::
+
+            hook(module, grad_input, grad_output) -> Tensor or None
+
+        The :attr:`grad_input` and :attr:`grad_output` may be tuples if the
+        module has multiple inputs or outputs. The hook should not modify its
+        arguments, but it can optionally return a new gradient with respect to
+        the input that will be used in place of :attr:`grad_input` in subsequent
+        computations.
+
+        Returns:
+            :class:`torch.utils.hooks.RemovableHandle`:
+                a handle that can be used to remove the added hook by calling
+                ``handle.remove()``
+
+        .. warning ::
+
+            The current implementation will not have the presented behavior
+            for a complex :class:`Module` that performs many operations.
+            In some failure cases, :attr:`grad_input` and :attr:`grad_output` will only
+            contain the gradients for a subset of the inputs and outputs.
+            For such a :class:`Module`, you should use :func:`torch.Tensor.register_hook`
+            directly on a specific input or output to get the required gradients.
+
+        """
+        handle = hooks.RemovableHandle(self._backward_hooks)
+        self._backward_hooks[handle.id] = hook
+        return handle
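For example, a minimal sketch of observing gradients during backpropagation and then detaching the hook::

    >>> def grad_hook(module, grad_input, grad_output):
    ...     print('grad_output[0] norm:', grad_output[0].norm().item())
    >>> m = nn.Linear(3, 1)
    >>> handle = m.register_backward_hook(grad_hook)
    >>> m(torch.randn(2, 3)).sum().backward()   # prints the gradient norm
    >>> handle.remove()                         # stop observing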
    + +
+    def register_forward_pre_hook(self, hook):
+        r"""Registers a forward pre-hook on the module.
+
+        The hook will be called every time before :func:`forward` is invoked.
+        It should have the following signature::
+
+            hook(module, input) -> None or modified input
+
+        The hook can modify the input. It can return either a tuple or a
+        single modified value; a single value will be wrapped into a tuple
+        (unless it is already a tuple).
+
+        Returns:
+            :class:`torch.utils.hooks.RemovableHandle`:
+                a handle that can be used to remove the added hook by calling
+                ``handle.remove()``
+        """
+        handle = hooks.RemovableHandle(self._forward_pre_hooks)
+        self._forward_pre_hooks[handle.id] = hook
+        return handle
    + +
+    def register_forward_hook(self, hook):
+        r"""Registers a forward hook on the module.
+
+        The hook will be called every time after :func:`forward` has computed an output.
+        It should have the following signature::
+
+            hook(module, input, output) -> None or modified output
+
+        The hook can modify the output. It can also modify the input in place,
+        but that has no effect on the forward pass, since the hook runs after
+        :func:`forward` has already been called.
+
+        Returns:
+            :class:`torch.utils.hooks.RemovableHandle`:
+                a handle that can be used to remove the added hook by calling
+                ``handle.remove()``
+        """
+        handle = hooks.RemovableHandle(self._forward_hooks)
+        self._forward_hooks[handle.id] = hook
+        return handle
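A common use is capturing intermediate activations without modifying the module itself; a minimal sketch::

    >>> activations = {}
    >>> def save_output(module, input, output):
    ...     activations['fc1'] = output.detach()
    >>> net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
    >>> handle = net[0].register_forward_hook(save_output)
    >>> _ = net(torch.randn(1, 4))
    >>> activations['fc1'].shape
    torch.Size([1, 8])
    >>> handle.remove()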
    + + def _tracing_name(self, tracing_state): + if not tracing_state._traced_module_stack: + return None + module = tracing_state._traced_module_stack[-1] + for name, child in module.named_children(): + if child is self: + return name + return None + + def _slow_forward(self, *input, **kwargs): + tracing_state = torch._C._get_tracing_state() + if not tracing_state: + return self.forward(*input, **kwargs) + if not hasattr(tracing_state, '_traced_module_stack'): + tracing_state._traced_module_stack = [] + name = self._tracing_name(tracing_state) + if name: + tracing_state.push_scope('%s[%s]' % (self._get_name(), name)) + else: + tracing_state.push_scope(self._get_name()) + tracing_state._traced_module_stack.append(self) + try: + result = self.forward(*input, **kwargs) + finally: + tracing_state.pop_scope() + tracing_state._traced_module_stack.pop() + return result + + def __call__(self, *input, **kwargs): + for hook in self._forward_pre_hooks.values(): + result = hook(self, input) + if result is not None: + if not isinstance(result, tuple): + result = (result,) + input = result + if torch._C._get_tracing_state(): + result = self._slow_forward(*input, **kwargs) + else: + result = self.forward(*input, **kwargs) + for hook in self._forward_hooks.values(): + hook_result = hook(self, input, result) + if hook_result is not None: + result = hook_result + if len(self._backward_hooks) > 0: + var = result + while not isinstance(var, torch.Tensor): + if isinstance(var, dict): + var = next((v for v in var.values() if isinstance(v, torch.Tensor))) + else: + var = var[0] + grad_fn = var.grad_fn + if grad_fn is not None: + for hook in self._backward_hooks.values(): + wrapper = functools.partial(hook, self) + functools.update_wrapper(wrapper, hook) + grad_fn.register_hook(wrapper) + return result + + def __setstate__(self, state): + self.__dict__.update(state) + # Support loading old checkpoints that don't have the following attrs: + if '_forward_pre_hooks' not in self.__dict__: + self._forward_pre_hooks = OrderedDict() + if '_state_dict_hooks' not in self.__dict__: + self._state_dict_hooks = OrderedDict() + if '_load_state_dict_pre_hooks' not in self.__dict__: + self._load_state_dict_pre_hooks = OrderedDict() + + def __getattr__(self, name): + if '_parameters' in self.__dict__: + _parameters = self.__dict__['_parameters'] + if name in _parameters: + return _parameters[name] + if '_buffers' in self.__dict__: + _buffers = self.__dict__['_buffers'] + if name in _buffers: + return _buffers[name] + if '_modules' in self.__dict__: + modules = self.__dict__['_modules'] + if name in modules: + return modules[name] + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, name)) + + def __setattr__(self, name, value): + def remove_from(*dicts): + for d in dicts: + if name in d: + del d[name] + + params = self.__dict__.get('_parameters') + if isinstance(value, Parameter): + if params is None: + raise AttributeError( + "cannot assign parameters before Module.__init__() call") + remove_from(self.__dict__, self._buffers, self._modules) + self.register_parameter(name, value) + elif params is not None and name in params: + if value is not None: + raise TypeError("cannot assign '{}' as parameter '{}' " + "(torch.nn.Parameter or None expected)" + .format(torch.typename(value), name)) + self.register_parameter(name, value) + else: + modules = self.__dict__.get('_modules') + if isinstance(value, Module): + if modules is None: + raise AttributeError( + "cannot assign module before 
Module.__init__() call") + remove_from(self.__dict__, self._parameters, self._buffers) + modules[name] = value + elif modules is not None and name in modules: + if value is not None: + raise TypeError("cannot assign '{}' as child module '{}' " + "(torch.nn.Module or None expected)" + .format(torch.typename(value), name)) + modules[name] = value + else: + buffers = self.__dict__.get('_buffers') + if buffers is not None and name in buffers: + if value is not None and not isinstance(value, torch.Tensor): + raise TypeError("cannot assign '{}' as buffer '{}' " + "(torch.Tensor or None expected)" + .format(torch.typename(value), name)) + buffers[name] = value + else: + object.__setattr__(self, name, value) + + def __delattr__(self, name): + if name in self._parameters: + del self._parameters[name] + elif name in self._buffers: + del self._buffers[name] + elif name in self._modules: + del self._modules[name] + else: + object.__delattr__(self, name) + + def _register_state_dict_hook(self, hook): + r"""These hooks will be called with arguments: `self`, `state_dict`, + `prefix`, `local_metadata`, after the `state_dict` of `self` is set. + Note that only parameters and buffers of `self` or its children are + guaranteed to exist in `state_dict`. The hooks may modify `state_dict` + inplace or return a new one. + """ + handle = hooks.RemovableHandle(self._state_dict_hooks) + self._state_dict_hooks[handle.id] = hook + return handle + + def _save_to_state_dict(self, destination, prefix, keep_vars): + r"""Saves module state to `destination` dictionary, containing a state + of the module, but not its descendants. This is called on every + submodule in :meth:`~torch.nn.Module.state_dict`. + + In rare cases, subclasses can achieve class-specific behavior by + overriding this method with custom logic. + + Arguments: + destination (dict): a dict where state will be stored + prefix (str): the prefix for parameters and buffers used in this + module + """ + for name, param in self._parameters.items(): + if param is not None: + destination[prefix + name] = param if keep_vars else param.data + for name, buf in self._buffers.items(): + if buf is not None: + destination[prefix + name] = buf if keep_vars else buf.data + +
    [docs] def state_dict(self, destination=None, prefix='', keep_vars=False): + r"""Returns a dictionary containing a whole state of the module. + + Both parameters and persistent buffers (e.g. running averages) are + included. Keys are corresponding parameter and buffer names. + + Returns: + dict: + a dictionary containing a whole state of the module + + Example:: + + >>> module.state_dict().keys() + ['bias', 'weight'] + + """ + if destination is None: + destination = OrderedDict() + destination._metadata = OrderedDict() + destination._metadata[prefix[:-1]] = local_metadata = dict(version=self._version) + self._save_to_state_dict(destination, prefix, keep_vars) + for name, module in self._modules.items(): + if module is not None: + module.state_dict(destination, prefix + name + '.', keep_vars=keep_vars) + for hook in self._state_dict_hooks.values(): + hook_result = hook(self, destination, prefix, local_metadata) + if hook_result is not None: + destination = hook_result + return destination
+
+    def _register_load_state_dict_pre_hook(self, hook):
+        r"""These hooks will be called with arguments: `state_dict`, `prefix`,
+        `local_metadata`, `strict`, `missing_keys`, `unexpected_keys`,
+        `error_msgs`, before loading `state_dict` into `self`. These arguments
+        are exactly the same as those of `_load_from_state_dict`.
+        """
+        handle = hooks.RemovableHandle(self._load_state_dict_pre_hooks)
+        self._load_state_dict_pre_hooks[handle.id] = hook
+        return handle
+
+    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
+                              missing_keys, unexpected_keys, error_msgs):
+        r"""Copies parameters and buffers from :attr:`state_dict` into only
+        this module, but not its descendants. This is called on every submodule
+        in :meth:`~torch.nn.Module.load_state_dict`. Metadata saved for this
+        module in input :attr:`state_dict` is provided as :attr:`local_metadata`.
+        For state dicts without metadata, :attr:`local_metadata` is empty.
+        Subclasses can achieve class-specific backward compatible loading using
+        the version number at `local_metadata.get("version", None)`.
+
+        .. note::
+            :attr:`state_dict` is not the same object as the input
+            :attr:`state_dict` to :meth:`~torch.nn.Module.load_state_dict`. So
+            it can be modified.
+
+        Arguments:
+            state_dict (dict): a dict containing parameters and
+                persistent buffers.
+            prefix (str): the prefix for parameters and buffers used in this
+                module
+            local_metadata (dict): a dict containing the metadata for this
+                module
+            strict (bool): whether to strictly enforce that the keys in
+                :attr:`state_dict` with :attr:`prefix` match the names of
+                parameters and buffers in this module
+            missing_keys (list of str): if ``strict=True``, add missing keys to
+                this list
+            unexpected_keys (list of str): if ``strict=True``, add unexpected
+                keys to this list
+            error_msgs (list of str): error messages should be added to this
+                list, and will be reported together in
+                :meth:`~torch.nn.Module.load_state_dict`
+        """
+        for hook in self._load_state_dict_pre_hooks.values():
+            hook(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
+
+        local_name_params = itertools.chain(self._parameters.items(), self._buffers.items())
+        local_state = {k: v.data for k, v in local_name_params if v is not None}
+
+        for name, param in local_state.items():
+            key = prefix + name
+            if key in state_dict:
+                input_param = state_dict[key]
+
+                # Backward compatibility: loading 1-dim tensor from 0.3.* to version 0.4+
+                if len(param.shape) == 0 and len(input_param.shape) == 1:
+                    input_param = input_param[0]
+
+                if input_param.shape != param.shape:
+                    # local shape should match the one in checkpoint
+                    error_msgs.append('size mismatch for {}: copying a param with shape {} from checkpoint, '
+                                      'the shape in current model is {}.'
+                                      .format(key, input_param.shape, param.shape))
+                    continue
+
+                if isinstance(input_param, Parameter):
+                    # backwards compatibility for serialized parameters
+                    input_param = input_param.data
+                try:
+                    param.copy_(input_param)
+                except Exception:
+                    error_msgs.append('While copying the parameter named "{}", '
+                                      'whose dimensions in the model are {} and '
+                                      'whose dimensions in the checkpoint are {}, '
+                                      'an exception occurred.'
+                                      .format(key, param.size(), input_param.size()))
+            elif strict:
+                missing_keys.append(key)
+
+        if strict:
+            for key in state_dict.keys():
+                if key.startswith(prefix):
+                    input_name = key[len(prefix):]
+                    input_name = input_name.split('.', 1)[0]  # get the name of param/buffer/child
+                    if input_name not in self._modules and input_name not in local_state:
+                        unexpected_keys.append(key)
+
    [docs] def load_state_dict(self, state_dict, strict=True): + r"""Copies parameters and buffers from :attr:`state_dict` into + this module and its descendants. If :attr:`strict` is ``True``, then + the keys of :attr:`state_dict` must exactly match the keys returned + by this module's :meth:`~torch.nn.Module.state_dict` function. + + Arguments: + state_dict (dict): a dict containing parameters and + persistent buffers. + strict (bool, optional): whether to strictly enforce that the keys + in :attr:`state_dict` match the keys returned by this module's + :meth:`~torch.nn.Module.state_dict` function. Default: ``True`` + + Returns: + ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: + * **missing_keys** is a list of str containing the missing keys + * **unexpected_keys** is a list of str containing the unexpected keys + """ + missing_keys = [] + unexpected_keys = [] + error_msgs = [] + + # copy state_dict so _load_from_state_dict can modify it + metadata = getattr(state_dict, '_metadata', None) + state_dict = state_dict.copy() + if metadata is not None: + state_dict._metadata = metadata + + def load(module, prefix=''): + local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) + module._load_from_state_dict( + state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) + for name, child in module._modules.items(): + if child is not None: + load(child, prefix + name + '.') + + load(self) + load = None # break load->load reference cycle + + if strict: + if len(unexpected_keys) > 0: + error_msgs.insert( + 0, 'Unexpected key(s) in state_dict: {}. '.format( + ', '.join('"{}"'.format(k) for k in unexpected_keys))) + if len(missing_keys) > 0: + error_msgs.insert( + 0, 'Missing key(s) in state_dict: {}. '.format( + ', '.join('"{}"'.format(k) for k in missing_keys))) + + if len(error_msgs) > 0: + raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format( + self.__class__.__name__, "\n\t".join(error_msgs))) + return _IncompatibleKeys(missing_keys, unexpected_keys)
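Together with :meth:`~torch.nn.Module.state_dict`, this is the usual checkpointing round trip; a minimal sketch (``net`` is a placeholder module and the file name is arbitrary), using ``strict=False`` so that mismatched keys are returned rather than raised::

    >>> torch.save(net.state_dict(), 'checkpoint.pth')
    >>> result = net.load_state_dict(torch.load('checkpoint.pth'), strict=False)
    >>> result.missing_keys, result.unexpected_keys
    ([], [])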
    + + def _named_members(self, get_members_fn, prefix='', recurse=True): + r"""Helper method for yielding various names + members of modules.""" + memo = set() + modules = self.named_modules(prefix=prefix) if recurse else [(prefix, self)] + for module_prefix, module in modules: + members = get_members_fn(module) + for k, v in members: + if v is None or v in memo: + continue + memo.add(v) + name = module_prefix + ('.' if module_prefix else '') + k + yield name, v + +
    [docs] def parameters(self, recurse=True): + r"""Returns an iterator over module parameters. + + This is typically passed to an optimizer. + + Args: + recurse (bool): if True, then yields parameters of this module + and all submodules. Otherwise, yields only parameters that + are direct members of this module. + + Yields: + Parameter: module parameter + + Example:: + + >>> for param in model.parameters(): + >>> print(type(param.data), param.size()) + <class 'torch.FloatTensor'> (20L,) + <class 'torch.FloatTensor'> (20L, 1L, 5L, 5L) + + """ + for name, param in self.named_parameters(recurse=recurse): + yield param
    + +
    [docs] def named_parameters(self, prefix='', recurse=True): + r"""Returns an iterator over module parameters, yielding both the + name of the parameter as well as the parameter itself. + + Args: + prefix (str): prefix to prepend to all parameter names. + recurse (bool): if True, then yields parameters of this module + and all submodules. Otherwise, yields only parameters that + are direct members of this module. + + Yields: + (string, Parameter): Tuple containing the name and parameter + + Example:: + + >>> for name, param in self.named_parameters(): + >>> if name in ['bias']: + >>> print(param.size()) + + """ + gen = self._named_members( + lambda module: module._parameters.items(), + prefix=prefix, recurse=recurse) + for elem in gen: + yield elem
    + +
    [docs] def buffers(self, recurse=True): + r"""Returns an iterator over module buffers. + + Args: + recurse (bool): if True, then yields buffers of this module + and all submodules. Otherwise, yields only buffers that + are direct members of this module. + + Yields: + torch.Tensor: module buffer + + Example:: + + >>> for buf in model.buffers(): + >>> print(type(buf.data), buf.size()) + <class 'torch.FloatTensor'> (20L,) + <class 'torch.FloatTensor'> (20L, 1L, 5L, 5L) + + """ + for name, buf in self.named_buffers(recurse=recurse): + yield buf
    + +
    [docs] def named_buffers(self, prefix='', recurse=True): + r"""Returns an iterator over module buffers, yielding both the + name of the buffer as well as the buffer itself. + + Args: + prefix (str): prefix to prepend to all buffer names. + recurse (bool): if True, then yields buffers of this module + and all submodules. Otherwise, yields only buffers that + are direct members of this module. + + Yields: + (string, torch.Tensor): Tuple containing the name and buffer + + Example:: + + >>> for name, buf in self.named_buffers(): + >>> if name in ['running_var']: + >>> print(buf.size()) + + """ + gen = self._named_members( + lambda module: module._buffers.items(), + prefix=prefix, recurse=recurse) + for elem in gen: + yield elem
    + +
    [docs] def children(self): + r"""Returns an iterator over immediate children modules. + + Yields: + Module: a child module + """ + for name, module in self.named_children(): + yield module
    + +
    [docs] def named_children(self): + r"""Returns an iterator over immediate children modules, yielding both + the name of the module as well as the module itself. + + Yields: + (string, Module): Tuple containing a name and child module + + Example:: + + >>> for name, module in model.named_children(): + >>> if name in ['conv4', 'conv5']: + >>> print(module) + + """ + memo = set() + for name, module in self._modules.items(): + if module is not None and module not in memo: + memo.add(module) + yield name, module
    + +
    [docs] def modules(self): + r"""Returns an iterator over all modules in the network. + + Yields: + Module: a module in the network + + Note: + Duplicate modules are returned only once. In the following + example, ``l`` will be returned only once. + + Example:: + + >>> l = nn.Linear(2, 2) + >>> net = nn.Sequential(l, l) + >>> for idx, m in enumerate(net.modules()): + print(idx, '->', m) + + 0 -> Sequential( + (0): Linear(in_features=2, out_features=2, bias=True) + (1): Linear(in_features=2, out_features=2, bias=True) + ) + 1 -> Linear(in_features=2, out_features=2, bias=True) + + """ + for name, module in self.named_modules(): + yield module
    + +
    [docs] def named_modules(self, memo=None, prefix=''): + r"""Returns an iterator over all modules in the network, yielding + both the name of the module as well as the module itself. + + Yields: + (string, Module): Tuple of name and module + + Note: + Duplicate modules are returned only once. In the following + example, ``l`` will be returned only once. + + Example:: + + >>> l = nn.Linear(2, 2) + >>> net = nn.Sequential(l, l) + >>> for idx, m in enumerate(net.named_modules()): + print(idx, '->', m) + + 0 -> ('', Sequential( + (0): Linear(in_features=2, out_features=2, bias=True) + (1): Linear(in_features=2, out_features=2, bias=True) + )) + 1 -> ('0', Linear(in_features=2, out_features=2, bias=True)) + + """ + + if memo is None: + memo = set() + if self not in memo: + memo.add(self) + yield prefix, self + for name, module in self._modules.items(): + if module is None: + continue + submodule_prefix = prefix + ('.' if prefix else '') + name + for m in module.named_modules(memo, submodule_prefix): + yield m
    + +
    [docs] def train(self, mode=True): + r"""Sets the module in training mode. + + This has any effect only on certain modules. See documentations of + particular modules for details of their behaviors in training/evaluation + mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`, + etc. + + Args: + mode (bool): whether to set training mode (``True``) or evaluation + mode (``False``). Default: ``True``. + + Returns: + Module: self + """ + self.training = mode + for module in self.children(): + module.train(mode) + return self
    + +
    [docs] def eval(self): + r"""Sets the module in evaluation mode. + + This has any effect only on certain modules. See documentations of + particular modules for details of their behaviors in training/evaluation + mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`, + etc. + + This is equivalent with :meth:`self.train(False) <torch.nn.Module.train>`. + + Returns: + Module: self + """ + return self.train(False)
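Note that :meth:`eval` only switches layer behavior (e.g. :class:`Dropout`, :class:`BatchNorm`); it does not disable gradient tracking. A typical inference sketch therefore pairs it with :class:`torch.no_grad` (``model`` and ``example_input`` are placeholders)::

    >>> model.eval()                  # Dropout off, BatchNorm uses running stats
    >>> with torch.no_grad():         # separately disable autograd bookkeeping
    ...     prediction = model(example_input)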
    + +
    [docs] def requires_grad_(self, requires_grad=True): + r"""Change if autograd should record operations on parameters in this + module. + + This method sets the parameters' :attr:`requires_grad` attributes + in-place. + + This method is helpful for freezing part of the module for finetuning + or training parts of a model individually (e.g., GAN training). + + Args: + requires_grad (bool): whether autograd should record operations on + parameters in this module. Default: ``True``. + + Returns: + Module: self + """ + for p in self.parameters(): + p.requires_grad_(requires_grad) + return self
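For instance, a minimal finetuning sketch that freezes a feature extractor and optimizes only the head::

    >>> backbone = nn.Sequential(nn.Linear(8, 8), nn.ReLU())
    >>> head = nn.Linear(8, 2)
    >>> _ = backbone.requires_grad_(False)    # freeze: no grads recorded for these params
    >>> optimizer = torch.optim.SGD(head.parameters(), lr=0.01)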
    + +
    [docs] def zero_grad(self): + r"""Sets gradients of all model parameters to zero.""" + for p in self.parameters(): + if p.grad is not None: + p.grad.detach_() + p.grad.zero_()
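In a training loop this is typically called once per step, before the backward pass, since gradients otherwise accumulate across calls to ``backward()``; a sketch (``loader``, ``loss_fn`` and ``optimizer`` are placeholders)::

    >>> for x, y in loader:
    ...     model.zero_grad()         # clear gradients accumulated by the last step
    ...     loss = loss_fn(model(x), y)
    ...     loss.backward()
    ...     optimizer.step()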
    + + def share_memory(self): + return self._apply(lambda t: t.share_memory_()) + + def _get_name(self): + return self.__class__.__name__ + +
    [docs] def extra_repr(self): + r"""Set the extra representation of the module + + To print customized extra information, you should reimplement + this method in your own modules. Both single-line and multi-line + strings are acceptable. + """ + return ''
    + + def __repr__(self): + # We treat the extra repr like the sub-module, one item per line + extra_lines = [] + extra_repr = self.extra_repr() + # empty string will be split into list [''] + if extra_repr: + extra_lines = extra_repr.split('\n') + child_lines = [] + for key, module in self._modules.items(): + mod_str = repr(module) + mod_str = _addindent(mod_str, 2) + child_lines.append('(' + key + '): ' + mod_str) + lines = extra_lines + child_lines + + main_str = self._get_name() + '(' + if lines: + # simple one-liner info, which most builtin Modules will use + if len(extra_lines) == 1 and not child_lines: + main_str += extra_lines[0] + else: + main_str += '\n ' + '\n '.join(lines) + '\n' + + main_str += ')' + return main_str + + def __dir__(self): + module_attrs = dir(self.__class__) + attrs = list(self.__dict__.keys()) + parameters = list(self._parameters.keys()) + modules = list(self._modules.keys()) + buffers = list(self._buffers.keys()) + keys = module_attrs + attrs + parameters + modules + buffers + + # Eliminate attrs that are not legal Python variable names + keys = [key for key in keys if not key[0].isdigit()] + + return sorted(keys)
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/modules/normalization.html b/docs/stable/_modules/torch/nn/modules/normalization.html
new file mode 100644
index 000000000000..84d46ac10ac7
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/modules/normalization.html
@@ -0,0 +1,748 @@
    Source code for torch.nn.modules.normalization

    +import torch
    +import numbers
    +from torch.nn.parameter import Parameter
    +from .module import Module
    +from .. import functional as F
    +from .. import init
    +
    +
    +
    [docs]class LocalResponseNorm(Module): + r"""Applies local response normalization over an input signal composed + of several input planes, where channels occupy the second dimension. + Applies normalization across channels. + + .. math:: + b_{c} = a_{c}\left(k + \frac{\alpha}{n} + \sum_{c'=\max(0, c-n/2)}^{\min(N-1,c+n/2)}a_{c'}^2\right)^{-\beta} + + Args: + size: amount of neighbouring channels used for normalization + alpha: multiplicative factor. Default: 0.0001 + beta: exponent. Default: 0.75 + k: additive factor. Default: 1 + + Shape: + - Input: :math:`(N, C, *)` + - Output: :math:`(N, C, *)` (same shape as input) + + Examples:: + + >>> lrn = nn.LocalResponseNorm(2) + >>> signal_2d = torch.randn(32, 5, 24, 24) + >>> signal_4d = torch.randn(16, 5, 7, 7, 7, 7) + >>> output_2d = lrn(signal_2d) + >>> output_4d = lrn(signal_4d) + + """ + __constants__ = ['size', 'alpha', 'beta', 'k'] + + def __init__(self, size, alpha=1e-4, beta=0.75, k=1.): + super(LocalResponseNorm, self).__init__() + self.size = size + self.alpha = alpha + self.beta = beta + self.k = k + + def forward(self, input): + return F.local_response_norm(input, self.size, self.alpha, self.beta, + self.k) + + def extra_repr(self): + return '{size}, alpha={alpha}, beta={beta}, k={k}'.format(**self.__dict__)
    + + +class CrossMapLRN2d(Module): + + def __init__(self, size, alpha=1e-4, beta=0.75, k=1): + super(CrossMapLRN2d, self).__init__() + self.size = size + self.alpha = alpha + self.beta = beta + self.k = k + + def forward(self, input): + return self._backend.CrossMapLRN2d.apply(input, self.size, self.alpha, self.beta, + self.k) + + def extra_repr(self): + return '{size}, alpha={alpha}, beta={beta}, k={k}'.format(**self.__dict__) + + +
+class LayerNorm(Module):
+    r"""Applies Layer Normalization over a mini-batch of inputs as described in
+    the paper `Layer Normalization`_ .
+
+    .. math::
+        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
+
+    The mean and standard-deviation are calculated separately over the last
+    dimensions, which have to be of the shape specified by
+    :attr:`normalized_shape`.
+    :math:`\gamma` and :math:`\beta` are learnable affine transform parameters of
+    :attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``.
+
+    .. note::
+        Unlike Batch Normalization and Instance Normalization, which apply
+        scalar scale and bias for each entire channel/plane with the
+        :attr:`affine` option, Layer Normalization applies per-element scale and
+        bias with :attr:`elementwise_affine`.
+
+    This layer uses statistics computed from input data in both training and
+    evaluation modes.
+
+    Args:
+        normalized_shape (int or list or torch.Size): input shape from an expected input
+            of size
+
+            .. math::
+                [* \times \text{normalized\_shape}[0] \times \text{normalized\_shape}[1]
+                    \times \ldots \times \text{normalized\_shape}[-1]]
+
+            If a single integer is used, it is treated as a singleton list, and this module will
+            normalize over the last dimension, which is expected to be of that specific size.
+        eps: a value added to the denominator for numerical stability. Default: 1e-5
+        elementwise_affine: a boolean value; when set to ``True``, this module
+            has learnable per-element affine parameters initialized to ones (for weights)
+            and zeros (for biases). Default: ``True``.
+
+    Shape:
+        - Input: :math:`(N, *)`
+        - Output: :math:`(N, *)` (same shape as input)
+
+    Examples::
+
+        >>> input = torch.randn(20, 5, 10, 10)
+        >>> # With Learnable Parameters
+        >>> m = nn.LayerNorm(input.size()[1:])
+        >>> # Without Learnable Parameters
+        >>> m = nn.LayerNorm(input.size()[1:], elementwise_affine=False)
+        >>> # Normalize over last two dimensions
+        >>> m = nn.LayerNorm([10, 10])
+        >>> # Normalize over last dimension of size 10
+        >>> m = nn.LayerNorm(10)
+        >>> # Activating the module
+        >>> output = m(input)
+
+    .. _`Layer Normalization`: https://arxiv.org/abs/1607.06450
+    """
+    __constants__ = ['normalized_shape', 'weight', 'bias', 'eps', 'elementwise_affine']
+
+    def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):
+        super(LayerNorm, self).__init__()
+        if isinstance(normalized_shape, numbers.Integral):
+            normalized_shape = (normalized_shape,)
+        self.normalized_shape = tuple(normalized_shape)
+        self.eps = eps
+        self.elementwise_affine = elementwise_affine
+        if self.elementwise_affine:
+            self.weight = Parameter(torch.Tensor(*normalized_shape))
+            self.bias = Parameter(torch.Tensor(*normalized_shape))
+        else:
+            self.register_parameter('weight', None)
+            self.register_parameter('bias', None)
+        self.reset_parameters()
+
+    def reset_parameters(self):
+        if self.elementwise_affine:
+            init.ones_(self.weight)
+            init.zeros_(self.bias)
+
+    def forward(self, input):
+        return F.layer_norm(
+            input, self.normalized_shape, self.weight, self.bias, self.eps)
+
+    def extra_repr(self):
+        return '{normalized_shape}, eps={eps}, ' \
+            'elementwise_affine={elementwise_affine}'.format(**self.__dict__)
    + + +
+class GroupNorm(Module):
+    r"""Applies Group Normalization over a mini-batch of inputs as described in
+    the paper `Group Normalization`_ .
+
+    .. math::
+        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
+
+    The input channels are separated into :attr:`num_groups` groups, each containing
+    ``num_channels / num_groups`` channels. The mean and standard-deviation are calculated
+    separately over each group. :math:`\gamma` and :math:`\beta` are learnable
+    per-channel affine transform parameter vectors of size :attr:`num_channels` if
+    :attr:`affine` is ``True``.
+
+    This layer uses statistics computed from input data in both training and
+    evaluation modes.
+
+    Args:
+        num_groups (int): number of groups to separate the channels into
+        num_channels (int): number of channels expected in input
+        eps: a value added to the denominator for numerical stability. Default: 1e-5
+        affine: a boolean value; when set to ``True``, this module
+            has learnable per-channel affine parameters initialized to ones (for weights)
+            and zeros (for biases). Default: ``True``.
+
+    Shape:
+        - Input: :math:`(N, C, *)` where :math:`C=\text{num\_channels}`
+        - Output: :math:`(N, C, *)` (same shape as input)
+
+    Examples::
+
+        >>> input = torch.randn(20, 6, 10, 10)
+        >>> # Separate 6 channels into 3 groups
+        >>> m = nn.GroupNorm(3, 6)
+        >>> # Separate 6 channels into 6 groups (equivalent to InstanceNorm)
+        >>> m = nn.GroupNorm(6, 6)
+        >>> # Put all 6 channels into a single group (equivalent to LayerNorm)
+        >>> m = nn.GroupNorm(1, 6)
+        >>> # Activating the module
+        >>> output = m(input)
+
+    .. _`Group Normalization`: https://arxiv.org/abs/1803.08494
+    """
+    __constants__ = ['num_groups', 'num_channels', 'eps', 'affine', 'weight',
+                     'bias']
+
+    def __init__(self, num_groups, num_channels, eps=1e-5, affine=True):
+        super(GroupNorm, self).__init__()
+        self.num_groups = num_groups
+        self.num_channels = num_channels
+        self.eps = eps
+        self.affine = affine
+        if self.affine:
+            self.weight = Parameter(torch.Tensor(num_channels))
+            self.bias = Parameter(torch.Tensor(num_channels))
+        else:
+            self.register_parameter('weight', None)
+            self.register_parameter('bias', None)
+        self.reset_parameters()
+
+    def reset_parameters(self):
+        if self.affine:
+            init.ones_(self.weight)
+            init.zeros_(self.bias)
+
+    def forward(self, input):
+        return F.group_norm(
+            input, self.num_groups, self.weight, self.bias, self.eps)
+
+    def extra_repr(self):
+        return '{num_groups}, {num_channels}, eps={eps}, ' \
+            'affine={affine}'.format(**self.__dict__)
    + + +# TODO: ContrastiveNorm2d +# TODO: DivisiveNorm2d +# TODO: SubtractiveNorm2d +
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/modules/padding.html b/docs/stable/_modules/torch/nn/modules/padding.html
new file mode 100644
index 000000000000..18be56d0d480
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/modules/padding.html
@@ -0,0 +1,955 @@
    Source code for torch.nn.modules.padding

    +from .module import Module
    +from .utils import _pair, _quadruple, _ntuple
    +from .. import functional as F
    +
    +
    +# TODO: grad_output size asserts in THNN
    +
    +
    +class _ConstantPadNd(Module):
    +    __constants__ = ['padding', 'value']
    +
    +    def __init__(self, value):
    +        super(_ConstantPadNd, self).__init__()
    +        self.value = value
    +
    +    def forward(self, input):
    +        return F.pad(input, self.padding, 'constant', self.value)
    +
    +    def extra_repr(self):
    +        return 'padding={}, value={}'.format(self.padding, self.value)
    +
    +
    +
    [docs]class ConstantPad1d(_ConstantPadNd): + r"""Pads the input tensor boundaries with a constant value. + + For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. + + Args: + padding (int, tuple): the size of the padding. If is `int`, uses the same + padding in both boundaries. If a 2-`tuple`, uses + (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`) + + Shape: + - Input: :math:`(N, C, W_{in})` + - Output: :math:`(N, C, W_{out})` where + + :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` + + Examples:: + + >>> m = nn.ConstantPad1d(2, 3.5) + >>> input = torch.randn(1, 2, 4) + >>> input + tensor([[[-1.0491, -0.7152, -0.0749, 0.8530], + [-1.3287, 1.8966, 0.1466, -0.2771]]]) + >>> m(input) + tensor([[[ 3.5000, 3.5000, -1.0491, -0.7152, -0.0749, 0.8530, 3.5000, + 3.5000], + [ 3.5000, 3.5000, -1.3287, 1.8966, 0.1466, -0.2771, 3.5000, + 3.5000]]]) + >>> m = nn.ConstantPad1d(2, 3.5) + >>> input = torch.randn(1, 2, 3) + >>> input + tensor([[[ 1.6616, 1.4523, -1.1255], + [-3.6372, 0.1182, -1.8652]]]) + >>> m(input) + tensor([[[ 3.5000, 3.5000, 1.6616, 1.4523, -1.1255, 3.5000, 3.5000], + [ 3.5000, 3.5000, -3.6372, 0.1182, -1.8652, 3.5000, 3.5000]]]) + >>> # using different paddings for different sides + >>> m = nn.ConstantPad1d((3, 1), 3.5) + >>> m(input) + tensor([[[ 3.5000, 3.5000, 3.5000, 1.6616, 1.4523, -1.1255, 3.5000], + [ 3.5000, 3.5000, 3.5000, -3.6372, 0.1182, -1.8652, 3.5000]]]) + + """ + + def __init__(self, padding, value): + super(ConstantPad1d, self).__init__(value) + self.padding = _pair(padding)
    + + +
    [docs]class ConstantPad2d(_ConstantPadNd): + r"""Pads the input tensor boundaries with a constant value. + + For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. + + Args: + padding (int, tuple): the size of the padding. If is `int`, uses the same + padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`, + :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`) + + Shape: + - Input: :math:`(N, C, H_{in}, W_{in})` + - Output: :math:`(N, C, H_{out}, W_{out})` where + + :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` + + :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` + + Examples:: + + >>> m = nn.ConstantPad2d(2, 3.5) + >>> input = torch.randn(1, 2, 2) + >>> input + tensor([[[ 1.6585, 0.4320], + [-0.8701, -0.4649]]]) + >>> m(input) + tensor([[[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000], + [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000], + [ 3.5000, 3.5000, 1.6585, 0.4320, 3.5000, 3.5000], + [ 3.5000, 3.5000, -0.8701, -0.4649, 3.5000, 3.5000], + [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000], + [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000]]]) + >>> # using different paddings for different sides + >>> m = nn.ConstantPad2d((3, 0, 2, 1), 3.5) + >>> m(input) + tensor([[[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000], + [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000], + [ 3.5000, 3.5000, 3.5000, 1.6585, 0.4320], + [ 3.5000, 3.5000, 3.5000, -0.8701, -0.4649], + [ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000]]]) + + """ + __constants__ = ['padding', 'value'] + + def __init__(self, padding, value): + super(ConstantPad2d, self).__init__(value) + self.padding = _quadruple(padding)
    + + +
    [docs]class ConstantPad3d(_ConstantPadNd): + r"""Pads the input tensor boundaries with a constant value. + + For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. + + Args: + padding (int, tuple): the size of the padding. If is `int`, uses the same + padding in all boundaries. If a 6-`tuple`, uses + (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`, + :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`, + :math:`\text{padding\_front}`, :math:`\text{padding\_back}`) + + Shape: + - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` + - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` where + + :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}` + + :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` + + :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` + + Examples:: + + >>> m = nn.ConstantPad3d(3, 3.5) + >>> input = torch.randn(16, 3, 10, 20, 30) + >>> output = m(input) + >>> # using different paddings for different sides + >>> m = nn.ConstantPad3d((3, 3, 6, 6, 0, 1), 3.5) + >>> output = m(input) + + """ + + def __init__(self, padding, value): + super(ConstantPad3d, self).__init__(value) + self.padding = _ntuple(6)(padding)
    + + +class _ReflectionPadNd(Module): + __constants__ = ['padding'] + + def forward(self, input): + return F.pad(input, self.padding, 'reflect') + + def extra_repr(self): + return '{}'.format(self.padding) + + +
    [docs]class ReflectionPad1d(_ReflectionPadNd): + r"""Pads the input tensor using the reflection of the input boundary. + + For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. + + Args: + padding (int, tuple): the size of the padding. If is `int`, uses the same + padding in all boundaries. If a 2-`tuple`, uses + (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`) + + Shape: + - Input: :math:`(N, C, W_{in})` + - Output: :math:`(N, C, W_{out})` where + + :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` + + Examples:: + + >>> m = nn.ReflectionPad1d(2) + >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4) + >>> input + tensor([[[0., 1., 2., 3.], + [4., 5., 6., 7.]]]) + >>> m(input) + tensor([[[2., 1., 0., 1., 2., 3., 2., 1.], + [6., 5., 4., 5., 6., 7., 6., 5.]]]) + >>> # using different paddings for different sides + >>> m = nn.ReflectionPad1d((3, 1)) + >>> m(input) + tensor([[[3., 2., 1., 0., 1., 2., 3., 2.], + [7., 6., 5., 4., 5., 6., 7., 6.]]]) + + """ + + def __init__(self, padding): + super(ReflectionPad1d, self).__init__() + self.padding = _pair(padding)
    + + +
    [docs]class ReflectionPad2d(_ReflectionPadNd): + r"""Pads the input tensor using the reflection of the input boundary. + + For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. + + Args: + padding (int, tuple): the size of the padding. If is `int`, uses the same + padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`, + :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`) + + Shape: + - Input: :math:`(N, C, H_{in}, W_{in})` + - Output: :math:`(N, C, H_{out}, W_{out})` where + + :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` + + :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` + + Examples:: + + >>> m = nn.ReflectionPad2d(2) + >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3) + >>> input + tensor([[[[0., 1., 2.], + [3., 4., 5.], + [6., 7., 8.]]]]) + >>> m(input) + tensor([[[[8., 7., 6., 7., 8., 7., 6.], + [5., 4., 3., 4., 5., 4., 3.], + [2., 1., 0., 1., 2., 1., 0.], + [5., 4., 3., 4., 5., 4., 3.], + [8., 7., 6., 7., 8., 7., 6.], + [5., 4., 3., 4., 5., 4., 3.], + [2., 1., 0., 1., 2., 1., 0.]]]]) + >>> # using different paddings for different sides + >>> m = nn.ReflectionPad2d((1, 1, 2, 0)) + >>> m(input) + tensor([[[[7., 6., 7., 8., 7.], + [4., 3., 4., 5., 4.], + [1., 0., 1., 2., 1.], + [4., 3., 4., 5., 4.], + [7., 6., 7., 8., 7.]]]]) + + """ + + def __init__(self, padding): + super(ReflectionPad2d, self).__init__() + self.padding = _quadruple(padding)
    + + +class _ReplicationPadNd(Module): + __constants__ = ['padding'] + + def forward(self, input): + return F.pad(input, self.padding, 'replicate') + + def extra_repr(self): + return '{}'.format(self.padding) + + +
    [docs]class ReplicationPad1d(_ReplicationPadNd): + r"""Pads the input tensor using replication of the input boundary. + + For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. + + Args: + padding (int, tuple): the size of the padding. If is `int`, uses the same + padding in all boundaries. If a 2-`tuple`, uses + (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`) + + Shape: + - Input: :math:`(N, C, W_{in})` + - Output: :math:`(N, C, W_{out})` where + + :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` + + Examples:: + + >>> m = nn.ReplicationPad1d(2) + >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4) + >>> input + tensor([[[0., 1., 2., 3.], + [4., 5., 6., 7.]]]) + >>> m(input) + tensor([[[0., 0., 0., 1., 2., 3., 3., 3.], + [4., 4., 4., 5., 6., 7., 7., 7.]]]) + >>> # using different paddings for different sides + >>> m = nn.ReplicationPad1d((3, 1)) + >>> m(input) + tensor([[[0., 0., 0., 0., 1., 2., 3., 3.], + [4., 4., 4., 4., 5., 6., 7., 7.]]]) + + """ + + def __init__(self, padding): + super(ReplicationPad1d, self).__init__() + self.padding = _pair(padding)
    + + +
    [docs]class ReplicationPad2d(_ReplicationPadNd): + r"""Pads the input tensor using replication of the input boundary. + + For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. + + Args: + padding (int, tuple): the size of the padding. If is `int`, uses the same + padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`, + :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`) + + Shape: + - Input: :math:`(N, C, H_{in}, W_{in})` + - Output: :math:`(N, C, H_{out}, W_{out})` where + + :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` + + :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` + + Examples:: + + >>> m = nn.ReplicationPad2d(2) + >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3) + >>> input + tensor([[[[0., 1., 2.], + [3., 4., 5.], + [6., 7., 8.]]]]) + >>> m(input) + tensor([[[[0., 0., 0., 1., 2., 2., 2.], + [0., 0., 0., 1., 2., 2., 2.], + [0., 0., 0., 1., 2., 2., 2.], + [3., 3., 3., 4., 5., 5., 5.], + [6., 6., 6., 7., 8., 8., 8.], + [6., 6., 6., 7., 8., 8., 8.], + [6., 6., 6., 7., 8., 8., 8.]]]]) + >>> # using different paddings for different sides + >>> m = nn.ReplicationPad2d((1, 1, 2, 0)) + >>> m(input) + tensor([[[[0., 0., 1., 2., 2.], + [0., 0., 1., 2., 2.], + [0., 0., 1., 2., 2.], + [3., 3., 4., 5., 5.], + [6., 6., 7., 8., 8.]]]]) + + """ + + def __init__(self, padding): + super(ReplicationPad2d, self).__init__() + self.padding = _quadruple(padding)
    + + +
    [docs]class ReplicationPad3d(_ReplicationPadNd): + r"""Pads the input tensor using replication of the input boundary. + + For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. + + Args: + padding (int, tuple): the size of the padding. If is `int`, uses the same + padding in all boundaries. If a 6-`tuple`, uses + (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`, + :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`, + :math:`\text{padding\_front}`, :math:`\text{padding\_back}`) + + Shape: + - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` + - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` where + + :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}` + + :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` + + :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` + + Examples:: + + >>> m = nn.ReplicationPad3d(3) + >>> input = torch.randn(16, 3, 8, 320, 480) + >>> output = m(input) + >>> # using different paddings for different sides + >>> m = nn.ReplicationPad3d((3, 3, 6, 6, 1, 1)) + >>> output = m(input) + + """ + + def __init__(self, padding): + super(ReplicationPad3d, self).__init__() + self.padding = _ntuple(6)(padding)
    + + +
    [docs]class ZeroPad2d(ConstantPad2d): + r"""Pads the input tensor boundaries with zero. + + For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. + + Args: + padding (int, tuple): the size of the padding. If is `int`, uses the same + padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`, + :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`) + + Shape: + - Input: :math:`(N, C, H_{in}, W_{in})` + - Output: :math:`(N, C, H_{out}, W_{out})` where + + :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` + + :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` + + Examples:: + + >>> m = nn.ZeroPad2d(2) + >>> input = torch.randn(1, 1, 3, 3) + >>> input + tensor([[[[-0.1678, -0.4418, 1.9466], + [ 0.9604, -0.4219, -0.5241], + [-0.9162, -0.5436, -0.6446]]]]) + >>> m(input) + tensor([[[[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], + [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], + [ 0.0000, 0.0000, -0.1678, -0.4418, 1.9466, 0.0000, 0.0000], + [ 0.0000, 0.0000, 0.9604, -0.4219, -0.5241, 0.0000, 0.0000], + [ 0.0000, 0.0000, -0.9162, -0.5436, -0.6446, 0.0000, 0.0000], + [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], + [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]]) + >>> # using different paddings for different sides + >>> m = nn.ZeroPad2d((1, 1, 2, 0)) + >>> m(input) + tensor([[[[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], + [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], + [ 0.0000, -0.1678, -0.4418, 1.9466, 0.0000], + [ 0.0000, 0.9604, -0.4219, -0.5241, 0.0000], + [ 0.0000, -0.9162, -0.5436, -0.6446, 0.0000]]]]) + + """ + + def __init__(self, padding): + super(ZeroPad2d, self).__init__(padding, 0.)
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/modules/pixelshuffle.html b/docs/stable/_modules/torch/nn/modules/pixelshuffle.html
new file mode 100644
index 000000000000..30b1aa9b09f0
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/modules/pixelshuffle.html
@@ -0,0 +1,560 @@
    Source code for torch.nn.modules.pixelshuffle

    +from .module import Module
    +from .. import functional as F
    +
    +
    +
+class PixelShuffle(Module):
+    r"""Rearranges elements in a tensor of shape :math:`(*, C \times r^2, H, W)`
+    to a tensor of shape :math:`(*, C, H \times r, W \times r)`.
+
+    This is useful for implementing efficient sub-pixel convolution
+    with a stride of :math:`1/r`.
+
+    See the paper
+    `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network`_
+    by Shi et al. (2016) for more details.
+
+    Args:
+        upscale_factor (int): factor to increase spatial resolution by
+
+    Shape:
+        - Input: :math:`(N, L, H_{in}, W_{in})` where :math:`L=C \times \text{upscale\_factor}^2`
+        - Output: :math:`(N, C, H_{out}, W_{out})` where
+          :math:`H_{out} = H_{in} \times \text{upscale\_factor}`
+          and :math:`W_{out} = W_{in} \times \text{upscale\_factor}`
+
+    Examples::
+
+        >>> pixel_shuffle = nn.PixelShuffle(3)
+        >>> input = torch.randn(1, 9, 4, 4)
+        >>> output = pixel_shuffle(input)
+        >>> print(output.size())
+        torch.Size([1, 1, 12, 12])
+
+    .. _Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network:
+        https://arxiv.org/abs/1609.05158
+    """
+    __constants__ = ['upscale_factor']
+
+    def __init__(self, upscale_factor):
+        super(PixelShuffle, self).__init__()
+        self.upscale_factor = upscale_factor
+
+    def forward(self, input):
+        return F.pixel_shuffle(input, self.upscale_factor)
+
+    def extra_repr(self):
+        return 'upscale_factor={}'.format(self.upscale_factor)
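To make the sub-pixel convolution idea from the paper concrete, a minimal upsampling head can pair a convolution producing :math:`C \times r^2` channels with a :class:`PixelShuffle`; a sketch (the channel sizes are arbitrary)::

    >>> r = 3
    >>> upsample = nn.Sequential(
    ...     nn.Conv2d(64, 64 * r ** 2, kernel_size=3, padding=1),
    ...     nn.PixelShuffle(r),
    ... )
    >>> feats = torch.randn(1, 64, 16, 16)
    >>> upsample(feats).shape             # spatial size grows by r in each dimension
    torch.Size([1, 64, 48, 48])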
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/modules/pooling.html b/docs/stable/_modules/torch/nn/modules/pooling.html
new file mode 100644
index 000000000000..8e28a3dd97ba
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/modules/pooling.html
@@ -0,0 +1,1574 @@
    Source code for torch.nn.modules.pooling

    +from __future__ import absolute_import
    +from __future__ import division
    +from __future__ import print_function
    +from __future__ import unicode_literals
    +
    +from .module import Module
    +from .utils import _single, _pair, _triple
    +from .. import functional as F
    +
    +
    +class _MaxPoolNd(Module):
    +    __constants__ = ['kernel_size', 'stride', 'padding', 'dilation',
    +                     'return_indices', 'ceil_mode']
    +
    +    def __init__(self, kernel_size, stride=None, padding=0, dilation=1,
    +                 return_indices=False, ceil_mode=False):
    +        super(_MaxPoolNd, self).__init__()
    +        self.kernel_size = kernel_size
    +        self.stride = stride or kernel_size
    +        self.padding = padding
    +        self.dilation = dilation
    +        self.return_indices = return_indices
    +        self.ceil_mode = ceil_mode
    +
    +    def extra_repr(self):
    +        return 'kernel_size={kernel_size}, stride={stride}, padding={padding}' \
    +            ', dilation={dilation}, ceil_mode={ceil_mode}'.format(**self.__dict__)
    +
    +
    +
    [docs]class MaxPool1d(_MaxPoolNd): + r"""Applies a 1D max pooling over an input signal composed of several input + planes. + + In the simplest case, the output value of the layer with input size :math:`(N, C, L)` + and output :math:`(N, C, L_{out})` can be precisely described as: + + .. math:: + out(N_i, C_j, k) = \max_{m=0, \ldots, \text{kernel\_size} - 1} + input(N_i, C_j, stride \times k + m) + + If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides + for :attr:`padding` number of points. :attr:`dilation` controls the spacing between the kernel points. + It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does. + + Args: + kernel_size: the size of the window to take a max over + stride: the stride of the window. Default value is :attr:`kernel_size` + padding: implicit zero padding to be added on both sides + dilation: a parameter that controls the stride of elements in the window + return_indices: if ``True``, will return the max indices along with the outputs. + Useful for :class:`torch.nn.MaxUnpool1d` later + ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape + + Shape: + - Input: :math:`(N, C, L_{in})` + - Output: :math:`(N, C, L_{out})`, where + + .. math:: + L_{out} = \left\lfloor \frac{L_{in} + 2 \times \text{padding} - \text{dilation} + \times (\text{kernel\_size} - 1) - 1}{\text{stride}} + 1\right\rfloor + + Examples:: + + >>> # pool of size=3, stride=2 + >>> m = nn.MaxPool1d(3, stride=2) + >>> input = torch.randn(20, 16, 50) + >>> output = m(input) + + .. _link: + https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md + """ + + def forward(self, input): + return F.max_pool1d(input, self.kernel_size, self.stride, + self.padding, self.dilation, self.ceil_mode, + self.return_indices)
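Editorial note: the :math:`L_{out}` formula above is easy to sanity-check against an actual forward pass; a small sketch with non-default padding and dilation (illustrative values only):

    >>> import math
    >>> import torch
    >>> import torch.nn as nn
    >>> l_in, k, s, p, d = 50, 3, 2, 1, 2
    >>> m = nn.MaxPool1d(k, stride=s, padding=p, dilation=d)
    >>> out = m(torch.randn(20, 16, l_in))
    >>> out.shape[-1] == math.floor((l_in + 2 * p - d * (k - 1) - 1) / s + 1)
    True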
    + + +
    [docs]class MaxPool2d(_MaxPoolNd): + r"""Applies a 2D max pooling over an input signal composed of several input + planes. + + In the simplest case, the output value of the layer with input size :math:`(N, C, H, W)`, + output :math:`(N, C, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kH, kW)` + can be precisely described as: + + .. math:: + \begin{aligned} + out(N_i, C_j, h, w) ={} & \max_{m=0, \ldots, kH-1} \max_{n=0, \ldots, kW-1} \\ + & \text{input}(N_i, C_j, \text{stride[0]} \times h + m, + \text{stride[1]} \times w + n) + \end{aligned} + + If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides + for :attr:`padding` number of points. :attr:`dilation` controls the spacing between the kernel points. + It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does. + + The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be: + + - a single ``int`` -- in which case the same value is used for the height and width dimension + - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension, + and the second `int` for the width dimension + + Args: + kernel_size: the size of the window to take a max over + stride: the stride of the window. Default value is :attr:`kernel_size` + padding: implicit zero padding to be added on both sides + dilation: a parameter that controls the stride of elements in the window + return_indices: if ``True``, will return the max indices along with the outputs. + Useful for :class:`torch.nn.MaxUnpool2d` later + ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape + + Shape: + - Input: :math:`(N, C, H_{in}, W_{in})` + - Output: :math:`(N, C, H_{out}, W_{out})`, where + + .. math:: + H_{out} = \left\lfloor\frac{H_{in} + 2 * \text{padding[0]} - \text{dilation[0]} + \times (\text{kernel\_size[0]} - 1) - 1}{\text{stride[0]}} + 1\right\rfloor + + .. math:: + W_{out} = \left\lfloor\frac{W_{in} + 2 * \text{padding[1]} - \text{dilation[1]} + \times (\text{kernel\_size[1]} - 1) - 1}{\text{stride[1]}} + 1\right\rfloor + + Examples:: + + >>> # pool of square window of size=3, stride=2 + >>> m = nn.MaxPool2d(3, stride=2) + >>> # pool of non-square window + >>> m = nn.MaxPool2d((3, 2), stride=(2, 1)) + >>> input = torch.randn(20, 16, 50, 32) + >>> output = m(input) + + .. _link: + https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md + """ + + def forward(self, input): + return F.max_pool2d(input, self.kernel_size, self.stride, + self.padding, self.dilation, self.ceil_mode, + self.return_indices)
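Editorial note on :attr:`return_indices`: the second return value holds, for each output element, the flat offset of the selected maximum within its input plane, which is exactly the bookkeeping :class:`torch.nn.MaxUnpool2d` consumes. A small sketch (illustrative values only):

    >>> import torch
    >>> import torch.nn as nn
    >>> x = torch.arange(16.).view(1, 1, 4, 4)
    >>> out, idx = nn.MaxPool2d(2, return_indices=True)(x)
    >>> idx.flatten().tolist()   # flat offsets of the four maxima in the 4x4 plane
    [5, 7, 13, 15]
    >>> torch.equal(x.flatten()[idx.flatten()].view_as(out), out)
    True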
    + + +
    [docs]class MaxPool3d(_MaxPoolNd): + r"""Applies a 3D max pooling over an input signal composed of several input + planes. + + In the simplest case, the output value of the layer with input size :math:`(N, C, D, H, W)`, + output :math:`(N, C, D_{out}, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kD, kH, kW)` + can be precisely described as: + + .. math:: + \begin{aligned} + \text{out}(N_i, C_j, d, h, w) ={} & \max_{k=0, \ldots, kD-1} \max_{m=0, \ldots, kH-1} \max_{n=0, \ldots, kW-1} \\ + & \text{input}(N_i, C_j, \text{stride[0]} \times d + k, + \text{stride[1]} \times h + m, \text{stride[2]} \times w + n) + \end{aligned} + + If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides + for :attr:`padding` number of points. :attr:`dilation` controls the spacing between the kernel points. + It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does. + + The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be: + + - a single ``int`` -- in which case the same value is used for the depth, height and width dimension + - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension, + the second `int` for the height dimension and the third `int` for the width dimension + + Args: + kernel_size: the size of the window to take a max over + stride: the stride of the window. Default value is :attr:`kernel_size` + padding: implicit zero padding to be added on all three sides + dilation: a parameter that controls the stride of elements in the window + return_indices: if ``True``, will return the max indices along with the outputs. + Useful for :class:`torch.nn.MaxUnpool3d` later + ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape + + Shape: + - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` + - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, where + + .. math:: + D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] - \text{dilation}[0] \times + (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor + + .. math:: + H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] - \text{dilation}[1] \times + (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor + + .. math:: + W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] - \text{dilation}[2] \times + (\text{kernel\_size}[2] - 1) - 1}{\text{stride}[2]} + 1\right\rfloor + + Examples:: + + >>> # pool of square window of size=3, stride=2 + >>> m = nn.MaxPool3d(3, stride=2) + >>> # pool of non-square window + >>> m = nn.MaxPool3d((3, 2, 2), stride=(2, 1, 2)) + >>> input = torch.randn(20, 16, 50,44, 31) + >>> output = m(input) + + .. _link: + https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md + """ # noqa: E501 + + def forward(self, input): + return F.max_pool3d(input, self.kernel_size, self.stride, + self.padding, self.dilation, self.ceil_mode, + self.return_indices)
    + + +class _MaxUnpoolNd(Module): + + def extra_repr(self): + return 'kernel_size={}, stride={}, padding={}'.format( + self.kernel_size, self.stride, self.padding + ) + + +
    [docs]class MaxUnpool1d(_MaxUnpoolNd): + r"""Computes a partial inverse of :class:`MaxPool1d`. + + :class:`MaxPool1d` is not fully invertible, since the non-maximal values are lost. + + :class:`MaxUnpool1d` takes in as input the output of :class:`MaxPool1d` + including the indices of the maximal values and computes a partial inverse + in which all non-maximal values are set to zero. + + .. note:: :class:`MaxPool1d` can map several input sizes to the same output + sizes. Hence, the inversion process can get ambiguous. + To accommodate this, you can provide the needed output size + as an additional argument :attr:`output_size` in the forward call. + See the Inputs and Example below. + + Args: + kernel_size (int or tuple): Size of the max pooling window. + stride (int or tuple): Stride of the max pooling window. + It is set to :attr:`kernel_size` by default. + padding (int or tuple): Padding that was added to the input + + Inputs: + - `input`: the input Tensor to invert + - `indices`: the indices given out by :class:`~torch.nn.MaxPool1d` + - `output_size` (optional): the targeted output size + + Shape: + - Input: :math:`(N, C, H_{in})` + - Output: :math:`(N, C, H_{out})`, where + + .. math:: + H_{out} = (H_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{kernel\_size}[0] + + or as given by :attr:`output_size` in the call operator + + Example:: + + >>> pool = nn.MaxPool1d(2, stride=2, return_indices=True) + >>> unpool = nn.MaxUnpool1d(2, stride=2) + >>> input = torch.tensor([[[1., 2, 3, 4, 5, 6, 7, 8]]]) + >>> output, indices = pool(input) + >>> unpool(output, indices) + tensor([[[ 0., 2., 0., 4., 0., 6., 0., 8.]]]) + + >>> # Example showcasing the use of output_size + >>> input = torch.tensor([[[1., 2, 3, 4, 5, 6, 7, 8, 9]]]) + >>> output, indices = pool(input) + >>> unpool(output, indices, output_size=input.size()) + tensor([[[ 0., 2., 0., 4., 0., 6., 0., 8., 0.]]]) + + >>> unpool(output, indices) + tensor([[[ 0., 2., 0., 4., 0., 6., 0., 8.]]]) + """ + + def __init__(self, kernel_size, stride=None, padding=0): + super(MaxUnpool1d, self).__init__() + self.kernel_size = _single(kernel_size) + self.stride = _single(stride or kernel_size) + self.padding = _single(padding) + + def forward(self, input, indices, output_size=None): + return F.max_unpool1d(input, indices, self.kernel_size, self.stride, + self.padding, output_size)
    + + +
    [docs]class MaxUnpool2d(_MaxUnpoolNd): + r"""Computes a partial inverse of :class:`MaxPool2d`. + + :class:`MaxPool2d` is not fully invertible, since the non-maximal values are lost. + + :class:`MaxUnpool2d` takes in as input the output of :class:`MaxPool2d` + including the indices of the maximal values and computes a partial inverse + in which all non-maximal values are set to zero. + + .. note:: :class:`MaxPool2d` can map several input sizes to the same output + sizes. Hence, the inversion process can get ambiguous. + To accommodate this, you can provide the needed output size + as an additional argument :attr:`output_size` in the forward call. + See the Inputs and Example below. + + Args: + kernel_size (int or tuple): Size of the max pooling window. + stride (int or tuple): Stride of the max pooling window. + It is set to :attr:`kernel_size` by default. + padding (int or tuple): Padding that was added to the input + + Inputs: + - `input`: the input Tensor to invert + - `indices`: the indices given out by :class:`~torch.nn.MaxPool2d` + - `output_size` (optional): the targeted output size + + Shape: + - Input: :math:`(N, C, H_{in}, W_{in})` + - Output: :math:`(N, C, H_{out}, W_{out})`, where + + .. math:: + H_{out} = (H_{in} - 1) \times \text{stride[0]} - 2 \times \text{padding[0]} + \text{kernel\_size[0]} + + .. math:: + W_{out} = (W_{in} - 1) \times \text{stride[1]} - 2 \times \text{padding[1]} + \text{kernel\_size[1]} + + or as given by :attr:`output_size` in the call operator + + Example:: + + >>> pool = nn.MaxPool2d(2, stride=2, return_indices=True) + >>> unpool = nn.MaxUnpool2d(2, stride=2) + >>> input = torch.tensor([[[[ 1., 2, 3, 4], + [ 5, 6, 7, 8], + [ 9, 10, 11, 12], + [13, 14, 15, 16]]]]) + >>> output, indices = pool(input) + >>> unpool(output, indices) + tensor([[[[ 0., 0., 0., 0.], + [ 0., 6., 0., 8.], + [ 0., 0., 0., 0.], + [ 0., 14., 0., 16.]]]]) + + >>> # specify a different output size than input size + >>> unpool(output, indices, output_size=torch.Size([1, 1, 5, 5])) + tensor([[[[ 0., 0., 0., 0., 0.], + [ 6., 0., 8., 0., 0.], + [ 0., 0., 0., 14., 0.], + [ 16., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0.]]]]) + """ + + def __init__(self, kernel_size, stride=None, padding=0): + super(MaxUnpool2d, self).__init__() + self.kernel_size = _pair(kernel_size) + self.stride = _pair(stride or kernel_size) + self.padding = _pair(padding) + + def forward(self, input, indices, output_size=None): + return F.max_unpool2d(input, indices, self.kernel_size, self.stride, + self.padding, output_size)
    + + +
    [docs]class MaxUnpool3d(_MaxUnpoolNd): + r"""Computes a partial inverse of :class:`MaxPool3d`. + + :class:`MaxPool3d` is not fully invertible, since the non-maximal values are lost. + :class:`MaxUnpool3d` takes in as input the output of :class:`MaxPool3d` + including the indices of the maximal values and computes a partial inverse + in which all non-maximal values are set to zero. + + .. note:: :class:`MaxPool3d` can map several input sizes to the same output + sizes. Hence, the inversion process can get ambiguous. + To accommodate this, you can provide the needed output size + as an additional argument :attr:`output_size` in the forward call. + See the Inputs section below. + + Args: + kernel_size (int or tuple): Size of the max pooling window. + stride (int or tuple): Stride of the max pooling window. + It is set to :attr:`kernel_size` by default. + padding (int or tuple): Padding that was added to the input + + Inputs: + - `input`: the input Tensor to invert + - `indices`: the indices given out by :class:`~torch.nn.MaxPool3d` + - `output_size` (optional): the targeted output size + + Shape: + - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` + - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, where + + .. math:: + D_{out} = (D_{in} - 1) \times \text{stride[0]} - 2 \times \text{padding[0]} + \text{kernel\_size[0]} + + .. math:: + H_{out} = (H_{in} - 1) \times \text{stride[1]} - 2 \times \text{padding[1]} + \text{kernel\_size[1]} + + .. math:: + W_{out} = (W_{in} - 1) \times \text{stride[2]} - 2 \times \text{padding[2]} + \text{kernel\_size[2]} + + or as given by :attr:`output_size` in the call operator + + Example:: + + >>> # pool of square window of size=3, stride=2 + >>> pool = nn.MaxPool3d(3, stride=2, return_indices=True) + >>> unpool = nn.MaxUnpool3d(3, stride=2) + >>> output, indices = pool(torch.randn(20, 16, 51, 33, 15)) + >>> unpooled_output = unpool(output, indices) + >>> unpooled_output.size() + torch.Size([20, 16, 51, 33, 15]) + """ + + def __init__(self, kernel_size, stride=None, padding=0): + super(MaxUnpool3d, self).__init__() + self.kernel_size = _triple(kernel_size) + self.stride = _triple(stride or kernel_size) + self.padding = _triple(padding) + + def forward(self, input, indices, output_size=None): + return F.max_unpool3d(input, indices, self.kernel_size, self.stride, + self.padding, output_size)
    + + +class _AvgPoolNd(Module): + __constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad'] + + def extra_repr(self): + return 'kernel_size={}, stride={}, padding={}'.format( + self.kernel_size, self.stride, self.padding + ) + + +
    [docs]class AvgPool1d(_AvgPoolNd): + r"""Applies a 1D average pooling over an input signal composed of several + input planes. + + In the simplest case, the output value of the layer with input size :math:`(N, C, L)`, + output :math:`(N, C, L_{out})` and :attr:`kernel_size` :math:`k` + can be precisely described as: + + .. math:: + + \text{out}(N_i, C_j, l) = \frac{1}{k} \sum_{m=0}^{k-1} + \text{input}(N_i, C_j, \text{stride} \times l + m) + + If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides + for :attr:`padding` number of points. + + The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding` can each be + an ``int`` or a one-element tuple. + + Args: + kernel_size: the size of the window + stride: the stride of the window. Default value is :attr:`kernel_size` + padding: implicit zero padding to be added on both sides + ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape + count_include_pad: when True, will include the zero-padding in the averaging calculation + + Shape: + - Input: :math:`(N, C, L_{in})` + - Output: :math:`(N, C, L_{out})`, where + + .. math:: + L_{out} = \left\lfloor \frac{L_{in} + + 2 \times \text{padding} - \text{kernel\_size}}{\text{stride}} + 1\right\rfloor + + Examples:: + + >>> # pool with window of size=3, stride=2 + >>> m = nn.AvgPool1d(3, stride=2) + >>> m(torch.tensor([[[1.,2,3,4,5,6,7]]])) + tensor([[[ 2., 4., 6.]]]) + """ + def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False, + count_include_pad=True): + super(AvgPool1d, self).__init__() + self.kernel_size = _single(kernel_size) + self.stride = _single(stride if stride is not None else kernel_size) + self.padding = _single(padding) + self.ceil_mode = ceil_mode + self.count_include_pad = count_include_pad + + def forward(self, input): + return F.avg_pool1d( + input, self.kernel_size, self.stride, self.padding, self.ceil_mode, + self.count_include_pad)
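Editorial note: the interaction between :attr:`padding` and :attr:`count_include_pad` is easiest to see on a constant input. With ``count_include_pad=True`` the border averages are dragged down by the zero padding; with ``False`` only real elements enter the denominator. A small sketch (illustrative values only):

    >>> import torch
    >>> import torch.nn as nn
    >>> x = torch.ones(1, 1, 4)
    >>> nn.AvgPool1d(3, stride=1, padding=1, count_include_pad=True)(x)
    tensor([[[0.6667, 1.0000, 1.0000, 0.6667]]])
    >>> nn.AvgPool1d(3, stride=1, padding=1, count_include_pad=False)(x)
    tensor([[[1., 1., 1., 1.]]])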
    + + +
[docs]class AvgPool2d(_AvgPoolNd):
+    r"""Applies a 2D average pooling over an input signal composed of several input
+    planes.
+
+    In the simplest case, the output value of the layer with input size :math:`(N, C, H, W)`,
+    output :math:`(N, C, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kH, kW)`
+    can be precisely described as:
+
+    .. math::
+
+        out(N_i, C_j, h, w) = \frac{1}{kH * kW} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1}
+                              input(N_i, C_j, stride[0] \times h + m, stride[1] \times w + n)
+
+    If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides
+    for :attr:`padding` number of points.
+
+    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding` can either be:
+
+        - a single ``int`` -- in which case the same value is used for the height and width dimension
+        - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
+          and the second `int` for the width dimension
+
+    Args:
+        kernel_size: the size of the window
+        stride: the stride of the window. Default value is :attr:`kernel_size`
+        padding: implicit zero padding to be added on both sides
+        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
+        count_include_pad: when True, will include the zero-padding in the averaging calculation
+        divisor_override: if specified, it will be used as the divisor, otherwise the size of the
+            pooling region will be used
+
+    Shape:
+        - Input: :math:`(N, C, H_{in}, W_{in})`
+        - Output: :math:`(N, C, H_{out}, W_{out})`, where
+
+          .. math::
+              H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[0] -
+                \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor
+
+          .. math::
+              W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[1] -
+                \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor
+
+    Examples::
+
+        >>> # pool of square window of size=3, stride=2
+        >>> m = nn.AvgPool2d(3, stride=2)
+        >>> # pool of non-square window
+        >>> m = nn.AvgPool2d((3, 2), stride=(2, 1))
+        >>> input = torch.randn(20, 16, 50, 32)
+        >>> output = m(input)
+    """
+    __constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad', 'divisor_override']
+
+    def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
+                 count_include_pad=True, divisor_override=None):
+        super(AvgPool2d, self).__init__()
+        self.kernel_size = kernel_size
+        self.stride = stride or kernel_size
+        self.padding = padding
+        self.ceil_mode = ceil_mode
+        self.count_include_pad = count_include_pad
+        self.divisor_override = divisor_override
+
+    def forward(self, input):
+        return F.avg_pool2d(input, self.kernel_size, self.stride,
+                            self.padding, self.ceil_mode, self.count_include_pad, self.divisor_override)
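Editorial note on :attr:`divisor_override`: by default each window's sum is divided by the number of elements in the pooling region, and :attr:`divisor_override` simply replaces that denominator (so ``divisor_override=1`` yields a plain window sum). A small sketch (illustrative values only):

    >>> import torch
    >>> import torch.nn as nn
    >>> x = torch.arange(16.).view(1, 1, 4, 4)
    >>> nn.AvgPool2d(2)(x)[0, 0, 0, 0]                      # (0 + 1 + 4 + 5) / 4
    tensor(2.5000)
    >>> nn.AvgPool2d(2, divisor_override=1)(x)[0, 0, 0, 0]  # plain window sum
    tensor(10.)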
    + + +
[docs]class AvgPool3d(_AvgPoolNd):
+    r"""Applies a 3D average pooling over an input signal composed of several input
+    planes.
+
+    In the simplest case, the output value of the layer with input size :math:`(N, C, D, H, W)`,
+    output :math:`(N, C, D_{out}, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kD, kH, kW)`
+    can be precisely described as:
+
+    .. math::
+        \begin{aligned}
+            \text{out}(N_i, C_j, d, h, w) ={} & \sum_{k=0}^{kD-1} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1} \\
+                                              & \frac{\text{input}(N_i, C_j, \text{stride}[0] \times d + k,
+                                                      \text{stride}[1] \times h + m, \text{stride}[2] \times w + n)}
+                                                     {kD \times kH \times kW}
+        \end{aligned}
+
+    If :attr:`padding` is non-zero, then the input is implicitly zero-padded on all three sides
+    for :attr:`padding` number of points.
+
+    The parameters :attr:`kernel_size`, :attr:`stride` can either be:
+
+        - a single ``int`` -- in which case the same value is used for the depth, height and width dimension
+        - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
+          the second `int` for the height dimension and the third `int` for the width dimension
+
+    Args:
+        kernel_size: the size of the window
+        stride: the stride of the window. Default value is :attr:`kernel_size`
+        padding: implicit zero padding to be added on all three sides
+        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
+        count_include_pad: when True, will include the zero-padding in the averaging calculation
+        divisor_override: if specified, it will be used as the divisor, otherwise the size of the
+            pooling region will be used
+
+    Shape:
+        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
+        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, where
+
+          .. math::
+              D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] -
+                    \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor
+
+          .. math::
+              H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] -
+                    \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor
+
+          .. math::
+              W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] -
+                    \text{kernel\_size}[2]}{\text{stride}[2]} + 1\right\rfloor
+
+    Examples::
+
+        >>> # pool of square window of size=3, stride=2
+        >>> m = nn.AvgPool3d(3, stride=2)
+        >>> # pool of non-square window
+        >>> m = nn.AvgPool3d((3, 2, 2), stride=(2, 1, 2))
+        >>> input = torch.randn(20, 16, 50, 44, 31)
+        >>> output = m(input)
+    """
+    __constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad', 'divisor_override']
+
+    def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
+                 count_include_pad=True, divisor_override=None):
+        super(AvgPool3d, self).__init__()
+        self.kernel_size = kernel_size
+        self.stride = stride or kernel_size
+        self.padding = padding
+        self.ceil_mode = ceil_mode
+        self.count_include_pad = count_include_pad
+        self.divisor_override = divisor_override
+
+    def forward(self, input):
+        return F.avg_pool3d(input, self.kernel_size, self.stride,
+                            self.padding, self.ceil_mode, self.count_include_pad, self.divisor_override)
+
+    def __setstate__(self, d):
+        super(AvgPool3d, self).__setstate__(d)
+        self.__dict__.setdefault('padding', 0)
+        self.__dict__.setdefault('ceil_mode', False)
+        self.__dict__.setdefault('count_include_pad', True)
    + + +
    [docs]class FractionalMaxPool2d(Module): + r"""Applies a 2D fractional max pooling over an input signal composed of several input planes. + + Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham + + The max-pooling operation is applied in :math:`kH \times kW` regions by a stochastic + step size determined by the target output size. + The number of output features is equal to the number of input planes. + + Args: + kernel_size: the size of the window to take a max over. + Can be a single number k (for a square kernel of k x k) or a tuple `(kh, kw)` + output_size: the target output size of the image of the form `oH x oW`. + Can be a tuple `(oH, oW)` or a single number oH for a square image `oH x oH` + output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given. + This has to be a number or tuple in the range (0, 1) + return_indices: if ``True``, will return the indices along with the outputs. + Useful to pass to :meth:`nn.MaxUnpool2d`. Default: ``False`` + + Examples: + >>> # pool of square window of size=3, and target output size 13x12 + >>> m = nn.FractionalMaxPool2d(3, output_size=(13, 12)) + >>> # pool of square window and target output size being half of input image size + >>> m = nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5)) + >>> input = torch.randn(20, 16, 50, 32) + >>> output = m(input) + + .. _Fractional MaxPooling: + http://arxiv.org/abs/1412.6071 + """ + __constants__ = ['kernel_size', 'return_indices', 'output_size', + 'output_ratio'] + + def __init__(self, kernel_size, output_size=None, output_ratio=None, + return_indices=False, _random_samples=None): + super(FractionalMaxPool2d, self).__init__() + self.kernel_size = _pair(kernel_size) + self.return_indices = return_indices + self.register_buffer('_random_samples', _random_samples) + self.output_size = _pair(output_size) if output_size is not None else None + self.output_ratio = _pair(output_ratio) if output_ratio is not None else None + if output_size is None and output_ratio is None: + raise ValueError("FractionalMaxPool2d requires specifying either " + "an output size, or a pooling ratio") + if output_size is not None and output_ratio is not None: + raise ValueError("only one of output_size and output_ratio may be specified") + if self.output_ratio is not None: + if not (0 < self.output_ratio[0] < 1 and 0 < self.output_ratio[1] < 1): + raise ValueError("output_ratio must be between 0 and 1 (got {})" + .format(output_ratio)) + + def forward(self, input): + return F.fractional_max_pool2d( + input, self.kernel_size, self.output_size, self.output_ratio, + self.return_indices, + _random_samples=self._random_samples)
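Editorial note: only the *positions* of the pooling regions are stochastic; the output size itself is fully determined by :attr:`output_size` or :attr:`output_ratio`. A small sketch confirming the shape for the half-size case (illustrative values only):

    >>> import torch
    >>> import torch.nn as nn
    >>> m = nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5))
    >>> m(torch.randn(20, 16, 50, 32)).shape
    torch.Size([20, 16, 25, 16])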
+
+
+class FractionalMaxPool3d(Module):
+    r"""Applies a 3D fractional max pooling over an input signal composed of several input planes.
+
+    Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham
+
+    The max-pooling operation is applied in :math:`kT \times kH \times kW` regions by a stochastic
+    step size determined by the target output size.
+    The number of output features is equal to the number of input planes.
+
+    Args:
+        kernel_size: the size of the window to take a max over.
+                     Can be a single number k (for a cubic kernel of k x k x k) or a tuple `(kt, kh, kw)`
+        output_size: the target output size of the image of the form `oT x oH x oW`.
+                     Can be a tuple `(oT, oH, oW)` or a single number oH for a cubic output `oH x oH x oH`
+        output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
+                      This has to be a number or tuple in the range (0, 1)
+        return_indices: if ``True``, will return the indices along with the outputs.
+                        Useful to pass to :meth:`nn.MaxUnpool3d`. Default: ``False``
+
+    Examples:
+        >>> # pool of cubic window of size=3, and target output size 13x12x11
+        >>> m = nn.FractionalMaxPool3d(3, output_size=(13, 12, 11))
+        >>> # pool of cubic window and target output size being half of input size
+        >>> m = nn.FractionalMaxPool3d(3, output_ratio=(0.5, 0.5, 0.5))
+        >>> input = torch.randn(20, 16, 50, 32, 16)
+        >>> output = m(input)
+
+    .. _Fractional MaxPooling:
+        http://arxiv.org/abs/1412.6071
+    """
+    __constants__ = ['kernel_size', 'return_indices', 'output_size',
+                     'output_ratio']
+
+    def __init__(self, kernel_size, output_size=None, output_ratio=None,
+                 return_indices=False, _random_samples=None):
+        super(FractionalMaxPool3d, self).__init__()
+        self.kernel_size = _triple(kernel_size)
+        self.return_indices = return_indices
+        self.register_buffer('_random_samples', _random_samples)
+        self.output_size = _triple(output_size) if output_size is not None else None
+        self.output_ratio = _triple(output_ratio) if output_ratio is not None else None
+        if output_size is None and output_ratio is None:
+            raise ValueError("FractionalMaxPool3d requires specifying either "
+                             "an output size, or a pooling ratio")
+        if output_size is not None and output_ratio is not None:
+            raise ValueError("only one of output_size and output_ratio may be specified")
+        if self.output_ratio is not None:
+            if not (0 < self.output_ratio[0] < 1 and 0 < self.output_ratio[1] < 1 and 0 < self.output_ratio[2] < 1):
+                raise ValueError("output_ratio must be between 0 and 1 (got {})"
+                                 .format(output_ratio))
+
+    def forward(self, input):
+        return F.fractional_max_pool3d(
+            input, self.kernel_size, self.output_size, self.output_ratio,
+            self.return_indices,
+            _random_samples=self._random_samples)
+
+
+class _LPPoolNd(Module):
+    __constants__ = ['norm_type', 'kernel_size', 'stride', 'ceil_mode']
+
+    def __init__(self, norm_type, kernel_size, stride=None, ceil_mode=False):
+        super(_LPPoolNd, self).__init__()
+        self.norm_type = norm_type
+        self.kernel_size = kernel_size
+        self.stride = stride
+        self.ceil_mode = ceil_mode
+
+    def extra_repr(self):
+        return 'norm_type={norm_type}, kernel_size={kernel_size}, stride={stride}, ' \
+            'ceil_mode={ceil_mode}'.format(**self.__dict__)
+
+
[docs]class LPPool1d(_LPPoolNd):
+    r"""Applies a 1D power-average pooling over an input signal composed of several input
+    planes.
+
+    On each window, the function computed is:
+
+    .. math::
+        f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}
+
+    - At p = :math:`\infty`, one gets Max Pooling
+    - At p = 1, one gets Sum Pooling (which is proportional to Average Pooling)
+
+    .. note:: If the sum to the power of `p` is zero, the gradient of this function is
+              not defined. This implementation will set the gradient to zero in this case.
+
+    Args:
+        kernel_size: a single int, the size of the window
+        stride: a single int, the stride of the window. Default value is :attr:`kernel_size`
+        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
+
+    Shape:
+        - Input: :math:`(N, C, L_{in})`
+        - Output: :math:`(N, C, L_{out})`, where
+
+          .. math::
+              L_{out} = \left\lfloor\frac{L_{in} - \text{kernel\_size}}{\text{stride}} + 1\right\rfloor
+
+    Examples::
+
+        >>> # power-2 pool of window of length 3, with stride 2.
+        >>> m = nn.LPPool1d(2, 3, stride=2)
+        >>> input = torch.randn(20, 16, 50)
+        >>> output = m(input)
+    """
+
+    def forward(self, input):
+        return F.lp_pool1d(input, float(self.norm_type), self.kernel_size,
+                           self.stride, self.ceil_mode)
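Editorial note: the p = 1 case called out above can be checked directly against :class:`AvgPool1d`: over the same window, the power-average reduces to the window sum, i.e. kernel_size times the window average (for the non-negative input used here). A small sketch (illustrative values only):

    >>> import torch
    >>> import torch.nn as nn
    >>> x = torch.tensor([[[1., 2, 3, 4, 5, 6, 7]]])
    >>> nn.LPPool1d(1, 3, stride=2)(x)    # window sums
    tensor([[[ 6., 12., 18.]]])
    >>> 3 * nn.AvgPool1d(3, stride=2)(x)  # kernel_size * window average
    tensor([[[ 6., 12., 18.]]])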
    + + +
[docs]class LPPool2d(_LPPoolNd):
+    r"""Applies a 2D power-average pooling over an input signal composed of several input
+    planes.
+
+    On each window, the function computed is:
+
+    .. math::
+        f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}
+
+    - At p = :math:`\infty`, one gets Max Pooling
+    - At p = 1, one gets Sum Pooling (which is proportional to average pooling)
+
+    The parameters :attr:`kernel_size`, :attr:`stride` can either be:
+
+        - a single ``int`` -- in which case the same value is used for the height and width dimension
+        - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
+          and the second `int` for the width dimension
+
+    .. note:: If the sum to the power of `p` is zero, the gradient of this function is
+              not defined. This implementation will set the gradient to zero in this case.
+
+    Args:
+        kernel_size: the size of the window
+        stride: the stride of the window. Default value is :attr:`kernel_size`
+        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
+
+    Shape:
+        - Input: :math:`(N, C, H_{in}, W_{in})`
+        - Output: :math:`(N, C, H_{out}, W_{out})`, where
+
+          .. math::
+              H_{out} = \left\lfloor\frac{H_{in} - \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor
+
+          .. math::
+              W_{out} = \left\lfloor\frac{W_{in} - \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor
+
+    Examples::
+
+        >>> # power-2 pool of square window of size=3, stride=2
+        >>> m = nn.LPPool2d(2, 3, stride=2)
+        >>> # pool of non-square window of power 1.2
+        >>> m = nn.LPPool2d(1.2, (3, 2), stride=(2, 1))
+        >>> input = torch.randn(20, 16, 50, 32)
+        >>> output = m(input)
+
+    """
+
+    def forward(self, input):
+        return F.lp_pool2d(input, float(self.norm_type), self.kernel_size,
+                           self.stride, self.ceil_mode)
    + + +class _AdaptiveMaxPoolNd(Module): + __constants__ = ['output_size', 'return_indices'] + + def __init__(self, output_size, return_indices=False): + super(_AdaptiveMaxPoolNd, self).__init__() + self.output_size = output_size + self.return_indices = return_indices + + def extra_repr(self): + return 'output_size={}'.format(self.output_size) + +# FIXME (by @ssnl): Improve adaptive pooling docs: specify what the input and +# output shapes are, and how the operation computes output. + + +
    [docs]class AdaptiveMaxPool1d(_AdaptiveMaxPoolNd): + r"""Applies a 1D adaptive max pooling over an input signal composed of several input planes. + + The output size is H, for any input size. + The number of output features is equal to the number of input planes. + + Args: + output_size: the target output size H + return_indices: if ``True``, will return the indices along with the outputs. + Useful to pass to nn.MaxUnpool1d. Default: ``False`` + + Examples: + >>> # target output size of 5 + >>> m = nn.AdaptiveMaxPool1d(5) + >>> input = torch.randn(1, 64, 8) + >>> output = m(input) + + """ + + def forward(self, input): + return F.adaptive_max_pool1d(input, self.output_size, self.return_indices)
    + + +
[docs]class AdaptiveMaxPool2d(_AdaptiveMaxPoolNd):
+    r"""Applies a 2D adaptive max pooling over an input signal composed of several input planes.
+
+    The output is of size H x W, for any input size.
+    The number of output features is equal to the number of input planes.
+
+    Args:
+        output_size: the target output size of the image of the form H x W.
+                     Can be a tuple (H, W) or a single H for a square image H x H.
+                     H and W can be either an ``int``, or ``None`` which means the size will
+                     be the same as that of the input.
+        return_indices: if ``True``, will return the indices along with the outputs.
+                        Useful to pass to nn.MaxUnpool2d. Default: ``False``
+
+    Examples:
+        >>> # target output size of 5x7
+        >>> m = nn.AdaptiveMaxPool2d((5,7))
+        >>> input = torch.randn(1, 64, 8, 9)
+        >>> output = m(input)
+        >>> # target output size of 7x7 (square)
+        >>> m = nn.AdaptiveMaxPool2d(7)
+        >>> input = torch.randn(1, 64, 10, 9)
+        >>> output = m(input)
+        >>> # target output size of 10x7
+        >>> m = nn.AdaptiveMaxPool2d((None, 7))
+        >>> input = torch.randn(1, 64, 10, 9)
+        >>> output = m(input)
+
+    """
+
+    def forward(self, input):
+        return F.adaptive_max_pool2d(input, self.output_size, self.return_indices)
    + + +
[docs]class AdaptiveMaxPool3d(_AdaptiveMaxPoolNd):
+    r"""Applies a 3D adaptive max pooling over an input signal composed of several input planes.
+
+    The output is of size D x H x W, for any input size.
+    The number of output features is equal to the number of input planes.
+
+    Args:
+        output_size: the target output size of the image of the form D x H x W.
+                     Can be a tuple (D, H, W) or a single D for a cube D x D x D.
+                     D, H and W can be either an ``int``, or ``None`` which means the size will
+                     be the same as that of the input.
+        return_indices: if ``True``, will return the indices along with the outputs.
+                        Useful to pass to nn.MaxUnpool3d. Default: ``False``
+
+    Examples:
+        >>> # target output size of 5x7x9
+        >>> m = nn.AdaptiveMaxPool3d((5,7,9))
+        >>> input = torch.randn(1, 64, 8, 9, 10)
+        >>> output = m(input)
+        >>> # target output size of 7x7x7 (cube)
+        >>> m = nn.AdaptiveMaxPool3d(7)
+        >>> input = torch.randn(1, 64, 10, 9, 8)
+        >>> output = m(input)
+        >>> # target output size of 7x9x8
+        >>> m = nn.AdaptiveMaxPool3d((7, None, None))
+        >>> input = torch.randn(1, 64, 10, 9, 8)
+        >>> output = m(input)
+
+    """
+
+    def forward(self, input):
+        return F.adaptive_max_pool3d(input, self.output_size, self.return_indices)
    + + +class _AdaptiveAvgPoolNd(Module): + __constants__ = ['output_size'] + + def __init__(self, output_size): + super(_AdaptiveAvgPoolNd, self).__init__() + self.output_size = output_size + + def extra_repr(self): + return 'output_size={}'.format(self.output_size) + + +
    [docs]class AdaptiveAvgPool1d(_AdaptiveAvgPoolNd): + r"""Applies a 1D adaptive average pooling over an input signal composed of several input planes. + + The output size is H, for any input size. + The number of output features is equal to the number of input planes. + + Args: + output_size: the target output size H + + Examples: + >>> # target output size of 5 + >>> m = nn.AdaptiveAvgPool1d(5) + >>> input = torch.randn(1, 64, 8) + >>> output = m(input) + + """ + + def forward(self, input): + return F.adaptive_avg_pool1d(input, self.output_size)
    + + +
[docs]class AdaptiveAvgPool2d(_AdaptiveAvgPoolNd):
+    r"""Applies a 2D adaptive average pooling over an input signal composed of several input planes.
+
+    The output is of size H x W, for any input size.
+    The number of output features is equal to the number of input planes.
+
+    Args:
+        output_size: the target output size of the image of the form H x W.
+                     Can be a tuple (H, W) or a single H for a square image H x H.
+                     H and W can be either an ``int``, or ``None`` which means the size will
+                     be the same as that of the input.
+
+    Examples:
+        >>> # target output size of 5x7
+        >>> m = nn.AdaptiveAvgPool2d((5,7))
+        >>> input = torch.randn(1, 64, 8, 9)
+        >>> output = m(input)
+        >>> # target output size of 7x7 (square)
+        >>> m = nn.AdaptiveAvgPool2d(7)
+        >>> input = torch.randn(1, 64, 10, 9)
+        >>> output = m(input)
+        >>> # target output size of 10x7
+        >>> m = nn.AdaptiveAvgPool2d((None, 7))
+        >>> input = torch.randn(1, 64, 10, 9)
+        >>> output = m(input)
+
+    """
+
+    def forward(self, input):
+        return F.adaptive_avg_pool2d(input, self.output_size)
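Editorial note: when the input size is an integer multiple of the target output size, adaptive average pooling coincides with a plain :class:`AvgPool2d` whose kernel and stride equal that multiple, which is a convenient way to reason about what "adaptive" computes. A small sketch (illustrative values only):

    >>> import torch
    >>> import torch.nn as nn
    >>> x = torch.randn(1, 8, 12, 12)
    >>> # 12 / 6 = 2, so each output cell averages a distinct 2x2 block
    >>> torch.allclose(nn.AdaptiveAvgPool2d(6)(x), nn.AvgPool2d(2, stride=2)(x))
    True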
    + + +
[docs]class AdaptiveAvgPool3d(_AdaptiveAvgPoolNd):
+    r"""Applies a 3D adaptive average pooling over an input signal composed of several input planes.
+
+    The output is of size D x H x W, for any input size.
+    The number of output features is equal to the number of input planes.
+
+    Args:
+        output_size: the target output size of the form D x H x W.
+                     Can be a tuple (D, H, W) or a single number D for a cube D x D x D.
+                     D, H and W can be either an ``int``, or ``None`` which means the size will
+                     be the same as that of the input.
+
+    Examples:
+        >>> # target output size of 5x7x9
+        >>> m = nn.AdaptiveAvgPool3d((5,7,9))
+        >>> input = torch.randn(1, 64, 8, 9, 10)
+        >>> output = m(input)
+        >>> # target output size of 7x7x7 (cube)
+        >>> m = nn.AdaptiveAvgPool3d(7)
+        >>> input = torch.randn(1, 64, 10, 9, 8)
+        >>> output = m(input)
+        >>> # target output size of 7x9x8
+        >>> m = nn.AdaptiveAvgPool3d((7, None, None))
+        >>> input = torch.randn(1, 64, 10, 9, 8)
+        >>> output = m(input)
+
+    """
+
+    def forward(self, input):
+        return F.adaptive_avg_pool3d(input, self.output_size)
    +
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/modules/rnn.html b/docs/stable/_modules/torch/nn/modules/rnn.html
new file mode 100644
index 000000000000..f62c7b44ca8b
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/modules/rnn.html
@@ -0,0 +1,1469 @@
+ torch.nn.modules.rnn — PyTorch master documentation

    Source code for torch.nn.modules.rnn

    +import math
    +import torch
    +import warnings
    +import numbers
    +
    +from .module import Module
    +from ..parameter import Parameter
    +from ..utils.rnn import PackedSequence, get_packed_sequence
    +from .. import init
    +from .. import _VF
    +from ..._jit_internal import _parameter_list
    +
    +_rnn_impls = {
    +    'GRU': _VF.gru,
    +    'RNN_TANH': _VF.rnn_tanh,
    +    'RNN_RELU': _VF.rnn_relu,
    +}
    +
    +
    +def apply_permutation(tensor, permutation, dim=1):
    +    # type: (Tensor, Tensor, int) -> Tensor
    +    return tensor.index_select(dim, permutation)
    +
    +
    +class RNNBase(Module):
    +    __constants__ = ['mode', 'input_size', 'hidden_size', 'num_layers', 'bias',
    +                     'batch_first', 'dropout', 'bidirectional']
    +
    +    def __init__(self, mode, input_size, hidden_size,
    +                 num_layers=1, bias=True, batch_first=False,
    +                 dropout=0., bidirectional=False):
    +        super(RNNBase, self).__init__()
    +        self.mode = mode
    +        self.input_size = input_size
    +        self.hidden_size = hidden_size
    +        self.num_layers = num_layers
    +        self.bias = bias
    +        self.batch_first = batch_first
    +        self.dropout = float(dropout)
    +        self.bidirectional = bidirectional
    +        num_directions = 2 if bidirectional else 1
    +
    +        if not isinstance(dropout, numbers.Number) or not 0 <= dropout <= 1 or \
    +                isinstance(dropout, bool):
    +            raise ValueError("dropout should be a number in range [0, 1] "
    +                             "representing the probability of an element being "
    +                             "zeroed")
    +        if dropout > 0 and num_layers == 1:
    +            warnings.warn("dropout option adds dropout after all but last "
    +                          "recurrent layer, so non-zero dropout expects "
    +                          "num_layers greater than 1, but got dropout={} and "
    +                          "num_layers={}".format(dropout, num_layers))
    +
    +        if mode == 'LSTM':
    +            gate_size = 4 * hidden_size
    +        elif mode == 'GRU':
    +            gate_size = 3 * hidden_size
    +        elif mode == 'RNN_TANH':
    +            gate_size = hidden_size
    +        elif mode == 'RNN_RELU':
    +            gate_size = hidden_size
    +        else:
    +            raise ValueError("Unrecognized RNN mode: " + mode)
    +
    +        self._all_weights = []
    +        for layer in range(num_layers):
    +            for direction in range(num_directions):
    +                layer_input_size = input_size if layer == 0 else hidden_size * num_directions
    +
    +                w_ih = Parameter(torch.Tensor(gate_size, layer_input_size))
    +                w_hh = Parameter(torch.Tensor(gate_size, hidden_size))
    +                b_ih = Parameter(torch.Tensor(gate_size))
    +                # Second bias vector included for CuDNN compatibility. Only one
    +                # bias vector is needed in standard definition.
    +                b_hh = Parameter(torch.Tensor(gate_size))
    +                layer_params = (w_ih, w_hh, b_ih, b_hh)
    +
    +                suffix = '_reverse' if direction == 1 else ''
    +                param_names = ['weight_ih_l{}{}', 'weight_hh_l{}{}']
    +                if bias:
    +                    param_names += ['bias_ih_l{}{}', 'bias_hh_l{}{}']
    +                param_names = [x.format(layer, suffix) for x in param_names]
    +
    +                for name, param in zip(param_names, layer_params):
    +                    setattr(self, name, param)
    +                self._all_weights.append(param_names)
    +
    +        self.flatten_parameters()
    +        self.reset_parameters()
    +
    +    def flatten_parameters(self):
+        """Resets parameter data pointers so that they can use faster code paths.
    +
    +        Right now, this works only if the module is on the GPU and cuDNN is enabled.
    +        Otherwise, it's a no-op.
    +        """
    +        any_param = next(self.parameters()).data
    +        if not any_param.is_cuda or not torch.backends.cudnn.is_acceptable(any_param):
    +            return
    +
    +        # If any parameters alias, we fall back to the slower, copying code path. This is
    +        # a sufficient check, because overlapping parameter buffers that don't completely
    +        # alias would break the assumptions of the uniqueness check in
    +        # Module.named_parameters().
    +        all_weights = self._flat_weights
    +        unique_data_ptrs = set(p.data_ptr() for p in all_weights)
    +        if len(unique_data_ptrs) != len(all_weights):
    +            return
    +
    +        with torch.cuda.device_of(any_param):
    +            import torch.backends.cudnn.rnn as rnn
    +
    +            # NB: This is a temporary hack while we still don't have Tensor
    +            # bindings for ATen functions
    +            with torch.no_grad():
    +                # NB: this is an INPLACE function on all_weights, that's why the
    +                # no_grad() is necessary.
    +                torch._cudnn_rnn_flatten_weight(
    +                    all_weights, (4 if self.bias else 2),
    +                    self.input_size, rnn.get_cudnn_mode(self.mode), self.hidden_size, self.num_layers,
    +                    self.batch_first, bool(self.bidirectional))
    +
    +    def _apply(self, fn):
    +        ret = super(RNNBase, self)._apply(fn)
    +        self.flatten_parameters()
    +        return ret
    +
    +    def reset_parameters(self):
    +        stdv = 1.0 / math.sqrt(self.hidden_size)
    +        for weight in self.parameters():
    +            init.uniform_(weight, -stdv, stdv)
    +
    +    def _get_flat_weights_names(self):
    +        return [weight for weights in self._all_weights for weight in weights]
    +
    +    @_parameter_list(_get_flat_weights_names)
    +    def _get_flat_weights(self):
    +        return self._flat_weights
    +
    +    def check_input(self, input, batch_sizes):
    +        # type: (Tensor, Optional[Tensor]) -> None
    +        expected_input_dim = 2 if batch_sizes is not None else 3
    +        if input.dim() != expected_input_dim:
    +            raise RuntimeError(
    +                'input must have {} dimensions, got {}'.format(
    +                    expected_input_dim, input.dim()))
    +        if self.input_size != input.size(-1):
    +            raise RuntimeError(
    +                'input.size(-1) must be equal to input_size. Expected {}, got {}'.format(
    +                    self.input_size, input.size(-1)))
    +
    +    def get_expected_hidden_size(self, input, batch_sizes):
    +        # type: (Tensor, Optional[Tensor]) -> Tuple[int, int, int]
    +        if batch_sizes is not None:
    +            mini_batch = batch_sizes[0]
    +            mini_batch = int(mini_batch)
    +        else:
    +            mini_batch = input.size(0) if self.batch_first else input.size(1)
    +        num_directions = 2 if self.bidirectional else 1
    +        expected_hidden_size = (self.num_layers * num_directions,
    +                                mini_batch, self.hidden_size)
    +        return expected_hidden_size
    +
    +    def check_hidden_size(self, hx, expected_hidden_size, msg='Expected hidden size {}, got {}'):
    +        # type: (Tensor, Tuple[int, int, int], str) -> None
    +        if hx.size() != expected_hidden_size:
    +            raise RuntimeError(msg.format(expected_hidden_size, tuple(hx.size())))
    +
    +    def check_forward_args(self, input, hidden, batch_sizes):
    +        self.check_input(input, batch_sizes)
    +        expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)
    +
    +        self.check_hidden_size(hidden, expected_hidden_size)
    +
    +    def permute_hidden(self, hx, permutation):
    +        if permutation is None:
    +            return hx
    +        return apply_permutation(hx, permutation)
    +
    +    def forward(self, input, hx=None):
    +        is_packed = isinstance(input, PackedSequence)
    +        if is_packed:
    +            input, batch_sizes, sorted_indices, unsorted_indices = input
    +            max_batch_size = batch_sizes[0]
    +            max_batch_size = int(max_batch_size)
    +        else:
    +            batch_sizes = None
    +            max_batch_size = input.size(0) if self.batch_first else input.size(1)
    +            sorted_indices = None
    +            unsorted_indices = None
    +
    +        if hx is None:
    +            num_directions = 2 if self.bidirectional else 1
    +            hx = torch.zeros(self.num_layers * num_directions,
    +                             max_batch_size, self.hidden_size,
    +                             dtype=input.dtype, device=input.device)
    +        else:
    +            # Each batch of the hidden state should match the input sequence that
    +            # the user believes he/she is passing in.
    +            hx = self.permute_hidden(hx, sorted_indices)
    +
    +        self.check_forward_args(input, hx, batch_sizes)
    +        _impl = _rnn_impls[self.mode]
    +        if batch_sizes is None:
    +            result = _impl(input, hx, self._get_flat_weights(), self.bias, self.num_layers,
    +                           self.dropout, self.training, self.bidirectional, self.batch_first)
    +        else:
    +            result = _impl(input, batch_sizes, hx, self._get_flat_weights(), self.bias,
    +                           self.num_layers, self.dropout, self.training, self.bidirectional)
    +        output = result[0]
    +        hidden = result[1]
    +
    +        if is_packed:
    +            output = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
    +        return output, self.permute_hidden(hidden, unsorted_indices)
    +
    +    def extra_repr(self):
    +        s = '{input_size}, {hidden_size}'
    +        if self.num_layers != 1:
    +            s += ', num_layers={num_layers}'
    +        if self.bias is not True:
    +            s += ', bias={bias}'
    +        if self.batch_first is not False:
    +            s += ', batch_first={batch_first}'
    +        if self.dropout != 0:
    +            s += ', dropout={dropout}'
    +        if self.bidirectional is not False:
    +            s += ', bidirectional={bidirectional}'
    +        return s.format(**self.__dict__)
    +
    +    def __setstate__(self, d):
    +        super(RNNBase, self).__setstate__(d)
    +        if 'all_weights' in d:
    +            self._all_weights = d['all_weights']
    +        if isinstance(self._all_weights[0][0], str):
    +            return
    +        num_layers = self.num_layers
    +        num_directions = 2 if self.bidirectional else 1
    +        self._all_weights = []
    +        for layer in range(num_layers):
    +            for direction in range(num_directions):
    +                suffix = '_reverse' if direction == 1 else ''
    +                weights = ['weight_ih_l{}{}', 'weight_hh_l{}{}', 'bias_ih_l{}{}', 'bias_hh_l{}{}']
    +                weights = [x.format(layer, suffix) for x in weights]
    +                if self.bias:
    +                    self._all_weights += [weights]
    +                else:
    +                    self._all_weights += [weights[:2]]
    +
    +    @property
    +    def _flat_weights(self):
    +        return [p for layerparams in self.all_weights for p in layerparams]
    +
    +    @property
    +    def all_weights(self):
    +        return [[getattr(self, weight) for weight in weights] for weights in self._all_weights]
    +
    +
    +
[docs]class RNN(RNNBase):
+    r"""Applies a multi-layer Elman RNN with :math:`tanh` or :math:`ReLU` non-linearity to an
+    input sequence.
+
+
+    For each element in the input sequence, each layer computes the following
+    function:
+
+    .. math::
+        h_t = \text{tanh}(W_{ih} x_t + b_{ih} + W_{hh} h_{(t-1)} + b_{hh})
+
+    where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is
+    the input at time `t`, and :math:`h_{(t-1)}` is the hidden state of the
+    previous layer at time `t-1` or the initial hidden state at time `0`.
+    If :attr:`nonlinearity` is ``'relu'``, then `ReLU` is used instead of `tanh`.
+
+    Args:
+        input_size: The number of expected features in the input `x`
+        hidden_size: The number of features in the hidden state `h`
+        num_layers: Number of recurrent layers. E.g., setting ``num_layers=2``
+            would mean stacking two RNNs together to form a `stacked RNN`,
+            with the second RNN taking in outputs of the first RNN and
+            computing the final results. Default: 1
+        nonlinearity: The non-linearity to use. Can be either ``'tanh'`` or ``'relu'``. Default: ``'tanh'``
+        bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
+            Default: ``True``
+        batch_first: If ``True``, then the input and output tensors are provided
+            as `(batch, seq, feature)`. Default: ``False``
+        dropout: If non-zero, introduces a `Dropout` layer on the outputs of each
+            RNN layer except the last layer, with dropout probability equal to
+            :attr:`dropout`. Default: 0
+        bidirectional: If ``True``, becomes a bidirectional RNN. Default: ``False``
+
+    Inputs: input, h_0
+        - **input** of shape `(seq_len, batch, input_size)`: tensor containing the features
+          of the input sequence. The input can also be a packed variable length
+          sequence. See :func:`torch.nn.utils.rnn.pack_padded_sequence`
+          or :func:`torch.nn.utils.rnn.pack_sequence`
+          for details.
+        - **h_0** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
+          containing the initial hidden state for each element in the batch.
+          Defaults to zero if not provided. If the RNN is bidirectional,
+          num_directions should be 2, else it should be 1.
+
+    Outputs: output, h_n
+        - **output** of shape `(seq_len, batch, num_directions * hidden_size)`: tensor
+          containing the output features (`h_t`) from the last layer of the RNN,
+          for each `t`. If a :class:`torch.nn.utils.rnn.PackedSequence` has
+          been given as the input, the output will also be a packed sequence.
+
+          For the unpacked case, the directions can be separated
+          using ``output.view(seq_len, batch, num_directions, hidden_size)``,
+          with forward and backward being direction `0` and `1` respectively.
+          Similarly, the directions can be separated in the packed case.
+        - **h_n** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
+          containing the hidden state for `t = seq_len`.
+
+          Like *output*, the layers can be separated using
+          ``h_n.view(num_layers, num_directions, batch, hidden_size)``.
+
+    Shape:
+        - Input1: :math:`(L, N, H_{in})` tensor containing input features where
+          :math:`H_{in}=\text{input\_size}` and `L` represents a sequence length.
+        - Input2: :math:`(S, N, H_{out})` tensor containing the initial hidden state
+          for each element in the batch, where :math:`H_{out}=\text{hidden\_size}`
+          and :math:`S=\text{num\_layers} * \text{num\_directions}`.
+          Defaults to zero if not provided. If the RNN is bidirectional,
+          num_directions should be 2, else it should be 1.
+ - Output1: :math:`(L, N, H_{all})` where :math:`H_{all}=\text{num\_directions} * \text{hidden\_size}` + - Output2: :math:`(S, N, H_{out})` tensor containing the next hidden state + for each element in the batch + + Attributes: + weight_ih_l[k]: the learnable input-hidden weights of the k-th layer, + of shape `(hidden_size, input_size)` for `k = 0`. Otherwise, the shape is + `(hidden_size, num_directions * hidden_size)` + weight_hh_l[k]: the learnable hidden-hidden weights of the k-th layer, + of shape `(hidden_size, hidden_size)` + bias_ih_l[k]: the learnable input-hidden bias of the k-th layer, + of shape `(hidden_size)` + bias_hh_l[k]: the learnable hidden-hidden bias of the k-th layer, + of shape `(hidden_size)` + + .. note:: + All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` + where :math:`k = \frac{1}{\text{hidden\_size}}` + + .. include:: cudnn_persistent_rnn.rst + + Examples:: + + >>> rnn = nn.RNN(10, 20, 2) + >>> input = torch.randn(5, 3, 10) + >>> h0 = torch.randn(2, 3, 20) + >>> output, hn = rnn(input, h0) + """ + + def __init__(self, *args, **kwargs): + if 'nonlinearity' in kwargs: + if kwargs['nonlinearity'] == 'tanh': + mode = 'RNN_TANH' + elif kwargs['nonlinearity'] == 'relu': + mode = 'RNN_RELU' + else: + raise ValueError("Unknown nonlinearity '{}'".format( + kwargs['nonlinearity'])) + del kwargs['nonlinearity'] + else: + mode = 'RNN_TANH' + + super(RNN, self).__init__(mode, *args, **kwargs)
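To make the bidirectional shapes concrete, a minimal sketch (assuming ``import torch`` and ``import torch.nn as nn``) of separating the two directions of ``output`` and ``h_n`` as described above::

    >>> rnn = nn.RNN(10, 20, num_layers=2, bidirectional=True)
    >>> input = torch.randn(5, 3, 10)
    >>> h0 = torch.randn(4, 3, 20)                 # num_layers * num_directions = 4
    >>> output, hn = rnn(input, h0)
    >>> output.shape                               # (seq_len, batch, num_directions * hidden_size)
    torch.Size([5, 3, 40])
    >>> output.view(5, 3, 2, 20)[:, :, 0].shape    # forward direction
    torch.Size([5, 3, 20])
    >>> hn.view(2, 2, 3, 20).shape                 # (num_layers, num_directions, batch, hidden_size)
    torch.Size([2, 2, 3, 20])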
    + + +
    [docs]class LSTM(RNNBase): + r"""Applies a multi-layer long short-term memory (LSTM) RNN to an input + sequence. + + + For each element in the input sequence, each layer computes the following + function: + + .. math:: + \begin{array}{ll} \\ + i_t = \sigma(W_{ii} x_t + b_{ii} + W_{hi} h_{(t-1)} + b_{hi}) \\ + f_t = \sigma(W_{if} x_t + b_{if} + W_{hf} h_{(t-1)} + b_{hf}) \\ + g_t = \tanh(W_{ig} x_t + b_{ig} + W_{hg} h_{(t-1)} + b_{hg}) \\ + o_t = \sigma(W_{io} x_t + b_{io} + W_{ho} h_{(t-1)} + b_{ho}) \\ + c_t = f_t * c_{(t-1)} + i_t * g_t \\ + h_t = o_t * \tanh(c_t) \\ + \end{array} + + where :math:`h_t` is the hidden state at time `t`, :math:`c_t` is the cell + state at time `t`, :math:`x_t` is the input at time `t`, :math:`h_{(t-1)}` + is the hidden state of the layer at time `t-1` or the initial hidden + state at time `0`, and :math:`i_t`, :math:`f_t`, :math:`g_t`, + :math:`o_t` are the input, forget, cell, and output gates, respectively. + :math:`\sigma` is the sigmoid function, and :math:`*` is the Hadamard product. + + In a multilayer LSTM, the input :math:`x^{(l)}_t` of the :math:`l` -th layer + (:math:`l >= 2`) is the hidden state :math:`h^{(l-1)}_t` of the previous layer multiplied by + dropout :math:`\delta^{(l-1)}_t` where each :math:`\delta^{(l-1)}_t` is a Bernoulli random + variable which is :math:`0` with probability :attr:`dropout`. + + Args: + input_size: The number of expected features in the input `x` + hidden_size: The number of features in the hidden state `h` + num_layers: Number of recurrent layers. E.g., setting ``num_layers=2`` + would mean stacking two LSTMs together to form a `stacked LSTM`, + with the second LSTM taking in outputs of the first LSTM and + computing the final results. Default: 1 + bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`. + Default: ``True`` + batch_first: If ``True``, then the input and output tensors are provided + as (batch, seq, feature). Default: ``False`` + dropout: If non-zero, introduces a `Dropout` layer on the outputs of each + LSTM layer except the last layer, with dropout probability equal to + :attr:`dropout`. Default: 0 + bidirectional: If ``True``, becomes a bidirectional LSTM. Default: ``False`` + + Inputs: input, (h_0, c_0) + - **input** of shape `(seq_len, batch, input_size)`: tensor containing the features + of the input sequence. + The input can also be a packed variable length sequence. + See :func:`torch.nn.utils.rnn.pack_padded_sequence` or + :func:`torch.nn.utils.rnn.pack_sequence` for details. + - **h_0** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor + containing the initial hidden state for each element in the batch. + If the LSTM is bidirectional, num_directions should be 2, else it should be 1. + - **c_0** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor + containing the initial cell state for each element in the batch. + + If `(h_0, c_0)` is not provided, both **h_0** and **c_0** default to zero. + + + Outputs: output, (h_n, c_n) + - **output** of shape `(seq_len, batch, num_directions * hidden_size)`: tensor + containing the output features `(h_t)` from the last layer of the LSTM, + for each `t`. If a :class:`torch.nn.utils.rnn.PackedSequence` has been + given as the input, the output will also be a packed sequence. + + For the unpacked case, the directions can be separated + using ``output.view(seq_len, batch, num_directions, hidden_size)``, + with forward and backward being direction `0` and `1` respectively. 
+ Similarly, the directions can be separated in the packed case. + - **h_n** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor + containing the hidden state for `t = seq_len`. + + Like *output*, the layers can be separated using + ``h_n.view(num_layers, num_directions, batch, hidden_size)`` and similarly for *c_n*. + - **c_n** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor + containing the cell state for `t = seq_len`. + + Attributes: + weight_ih_l[k] : the learnable input-hidden weights of the :math:`\text{k}^{th}` layer + `(W_ii|W_if|W_ig|W_io)`, of shape `(4*hidden_size, input_size)` for `k = 0`. + Otherwise, the shape is `(4*hidden_size, num_directions * hidden_size)` + weight_hh_l[k] : the learnable hidden-hidden weights of the :math:`\text{k}^{th}` layer + `(W_hi|W_hf|W_hg|W_ho)`, of shape `(4*hidden_size, hidden_size)` + bias_ih_l[k] : the learnable input-hidden bias of the :math:`\text{k}^{th}` layer + `(b_ii|b_if|b_ig|b_io)`, of shape `(4*hidden_size)` + bias_hh_l[k] : the learnable hidden-hidden bias of the :math:`\text{k}^{th}` layer + `(b_hi|b_hf|b_hg|b_ho)`, of shape `(4*hidden_size)` + + .. note:: + All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` + where :math:`k = \frac{1}{\text{hidden\_size}}` + + .. include:: cudnn_persistent_rnn.rst + + Examples:: + + >>> rnn = nn.LSTM(10, 20, 2) + >>> input = torch.randn(5, 3, 10) + >>> h0 = torch.randn(2, 3, 20) + >>> c0 = torch.randn(2, 3, 20) + >>> output, (hn, cn) = rnn(input, (h0, c0)) + """ + __overloads__ = {'forward': ['forward_packed', 'forward_tensor']} + + def __init__(self, *args, **kwargs): + super(LSTM, self).__init__('LSTM', *args, **kwargs) + + def check_forward_args(self, input, hidden, batch_sizes): + # type: (Tensor, Tuple[Tensor, Tensor], Optional[Tensor]) -> None + self.check_input(input, batch_sizes) + expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes) + + self.check_hidden_size(hidden[0], expected_hidden_size, + 'Expected hidden[0] size {}, got {}') + self.check_hidden_size(hidden[1], expected_hidden_size, + 'Expected hidden[1] size {}, got {}') + + def permute_hidden(self, hx, permutation): + # type: (Tuple[Tensor, Tensor], Optional[Tensor]) -> Tuple[Tensor, Tensor] + if permutation is None: + return hx + return apply_permutation(hx[0], permutation), apply_permutation(hx[1], permutation) + + def forward_impl(self, input, hx, batch_sizes, max_batch_size, sorted_indices): + # type: (Tensor, Optional[Tuple[Tensor, Tensor]], Optional[Tensor], int, Optional[Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]] # noqa + if hx is None: + num_directions = 2 if self.bidirectional else 1 + zeros = torch.zeros(self.num_layers * num_directions, + max_batch_size, self.hidden_size, + dtype=input.dtype, device=input.device) + hx = (zeros, zeros) + else: + # Each batch of the hidden state should match the input sequence that + # the user believes he/she is passing in. 
+ hx = self.permute_hidden(hx, sorted_indices) + + self.check_forward_args(input, hx, batch_sizes) + if batch_sizes is None: + result = _VF.lstm(input, hx, self._get_flat_weights(), self.bias, self.num_layers, + self.dropout, self.training, self.bidirectional, self.batch_first) + else: + result = _VF.lstm(input, batch_sizes, hx, self._get_flat_weights(), self.bias, + self.num_layers, self.dropout, self.training, self.bidirectional) + output = result[0] + hidden = result[1:] + + return output, hidden + + @torch._jit_internal.export + def forward_tensor(self, input, hx=None): + # type: (Tensor, Optional[Tuple[Tensor, Tensor]]) -> Tuple[Tensor, Tuple[Tensor, Tensor]] + batch_sizes = None + max_batch_size = input.size(0) if self.batch_first else input.size(1) + sorted_indices = None + unsorted_indices = None + + output, hidden = self.forward_impl(input, hx, batch_sizes, max_batch_size, sorted_indices) + + return output, self.permute_hidden(hidden, unsorted_indices) + + @torch._jit_internal.export + def forward_packed(self, input, hx=None): + # type: (Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]], Optional[Tuple[Tensor, Tensor]]) -> Tuple[Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]], Tuple[Tensor, Tensor]] # noqa + input, batch_sizes, sorted_indices, unsorted_indices = input + max_batch_size = batch_sizes[0] + max_batch_size = int(max_batch_size) + + output, hidden = self.forward_impl(input, hx, batch_sizes, max_batch_size, sorted_indices) + + output = get_packed_sequence(output, batch_sizes, sorted_indices, unsorted_indices) + return output, self.permute_hidden(hidden, unsorted_indices) + + @torch._jit_internal.ignore + def forward(self, input, hx=None): + if isinstance(input, PackedSequence): + return self.forward_packed(input, hx) + else: + return self.forward_tensor(input, hx)
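The packed-sequence path (``forward_packed``) accepts the output of :func:`torch.nn.utils.rnn.pack_sequence` directly; a minimal sketch, assuming ``torch``/``nn`` are imported and the sequences are already sorted by decreasing length::

    >>> lstm = nn.LSTM(10, 20)
    >>> seqs = [torch.randn(5, 10), torch.randn(3, 10)]    # lengths 5 and 3
    >>> packed = nn.utils.rnn.pack_sequence(seqs)
    >>> packed_out, (hn, cn) = lstm(packed)                # dispatched to forward_packed
    >>> out, lengths = nn.utils.rnn.pad_packed_sequence(packed_out)
    >>> out.shape, lengths
    (torch.Size([5, 2, 20]), tensor([5, 3]))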
    + + +
    [docs]class GRU(RNNBase): + r"""Applies a multi-layer gated recurrent unit (GRU) RNN to an input sequence. + + + For each element in the input sequence, each layer computes the following + function: + + .. math:: + \begin{array}{ll} + r_t = \sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\ + z_t = \sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\ + n_t = \tanh(W_{in} x_t + b_{in} + r_t * (W_{hn} h_{(t-1)}+ b_{hn})) \\ + h_t = (1 - z_t) * n_t + z_t * h_{(t-1)} + \end{array} + + where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is the input + at time `t`, :math:`h_{(t-1)}` is the hidden state of the layer + at time `t-1` or the initial hidden state at time `0`, and :math:`r_t`, + :math:`z_t`, :math:`n_t` are the reset, update, and new gates, respectively. + :math:`\sigma` is the sigmoid function, and :math:`*` is the Hadamard product. + + In a multilayer GRU, the input :math:`x^{(l)}_t` of the :math:`l` -th layer + (:math:`l >= 2`) is the hidden state :math:`h^{(l-1)}_t` of the previous layer multiplied by + dropout :math:`\delta^{(l-1)}_t` where each :math:`\delta^{(l-1)}_t` is a Bernoulli random + variable which is :math:`0` with probability :attr:`dropout`. + + Args: + input_size: The number of expected features in the input `x` + hidden_size: The number of features in the hidden state `h` + num_layers: Number of recurrent layers. E.g., setting ``num_layers=2`` + would mean stacking two GRUs together to form a `stacked GRU`, + with the second GRU taking in outputs of the first GRU and + computing the final results. Default: 1 + bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`. + Default: ``True`` + batch_first: If ``True``, then the input and output tensors are provided + as (batch, seq, feature). Default: ``False`` + dropout: If non-zero, introduces a `Dropout` layer on the outputs of each + GRU layer except the last layer, with dropout probability equal to + :attr:`dropout`. Default: 0 + bidirectional: If ``True``, becomes a bidirectional GRU. Default: ``False`` + + Inputs: input, h_0 + - **input** of shape `(seq_len, batch, input_size)`: tensor containing the features + of the input sequence. The input can also be a packed variable length + sequence. See :func:`torch.nn.utils.rnn.pack_padded_sequence` + for details. + - **h_0** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor + containing the initial hidden state for each element in the batch. + Defaults to zero if not provided. If the RNN is bidirectional, + num_directions should be 2, else it should be 1. + + Outputs: output, h_n + - **output** of shape `(seq_len, batch, num_directions * hidden_size)`: tensor + containing the output features h_t from the last layer of the GRU, + for each `t`. If a :class:`torch.nn.utils.rnn.PackedSequence` has been + given as the input, the output will also be a packed sequence. + For the unpacked case, the directions can be separated + using ``output.view(seq_len, batch, num_directions, hidden_size)``, + with forward and backward being direction `0` and `1` respectively. + + Similarly, the directions can be separated in the packed case. + - **h_n** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor + containing the hidden state for `t = seq_len` + + Like *output*, the layers can be separated using + ``h_n.view(num_layers, num_directions, batch, hidden_size)``. 
+ + Shape: + - Input1: :math:`(L, N, H_{in})` tensor containing input features where + :math:`H_{in}=\text{input\_size}` and `L` represents a sequence length. + - Input2: :math:`(S, N, H_{out})` tensor + containing the initial hidden state for each element in the batch, + where :math:`S=\text{num\_layers} * \text{num\_directions}` and + :math:`H_{out}=\text{hidden\_size}`. Defaults to zero if not provided. + If the RNN is bidirectional, num_directions should be 2, else it should be 1. + - Output1: :math:`(L, N, H_{all})` where :math:`H_{all}=\text{num\_directions} * \text{hidden\_size}` + - Output2: :math:`(S, N, H_{out})` tensor containing the next hidden state + for each element in the batch + + Attributes: + weight_ih_l[k] : the learnable input-hidden weights of the :math:`\text{k}^{th}` layer + (W_ir|W_iz|W_in), of shape `(3*hidden_size, input_size)` for `k = 0`. + Otherwise, the shape is `(3*hidden_size, num_directions * hidden_size)` + weight_hh_l[k] : the learnable hidden-hidden weights of the :math:`\text{k}^{th}` layer + (W_hr|W_hz|W_hn), of shape `(3*hidden_size, hidden_size)` + bias_ih_l[k] : the learnable input-hidden bias of the :math:`\text{k}^{th}` layer + (b_ir|b_iz|b_in), of shape `(3*hidden_size)` + bias_hh_l[k] : the learnable hidden-hidden bias of the :math:`\text{k}^{th}` layer + (b_hr|b_hz|b_hn), of shape `(3*hidden_size)` + + .. note:: + All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` + where :math:`k = \frac{1}{\text{hidden\_size}}` + + .. include:: cudnn_persistent_rnn.rst + + Examples:: + + >>> rnn = nn.GRU(10, 20, 2) + >>> input = torch.randn(5, 3, 10) + >>> h0 = torch.randn(2, 3, 20) + >>> output, hn = rnn(input, h0) + """ + + def __init__(self, *args, **kwargs): + super(GRU, self).__init__('GRU', *args, **kwargs)
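For a unidirectional GRU, the last time step of ``output`` is the final hidden state of the last layer, which ties together the ``output`` and ``h_n`` descriptions above; a quick check (assuming ``torch``/``nn`` are imported)::

    >>> rnn = nn.GRU(10, 20, 2)
    >>> output, h_n = rnn(torch.randn(5, 3, 10))
    >>> torch.allclose(output[-1], h_n[-1])        # last layer's final hidden state
    True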
    + + +class RNNCellBase(Module): + __constants__ = ['input_size', 'hidden_size', 'bias'] + + def __init__(self, input_size, hidden_size, bias, num_chunks): + super(RNNCellBase, self).__init__() + self.input_size = input_size + self.hidden_size = hidden_size + self.bias = bias + self.weight_ih = Parameter(torch.Tensor(num_chunks * hidden_size, input_size)) + self.weight_hh = Parameter(torch.Tensor(num_chunks * hidden_size, hidden_size)) + if bias: + self.bias_ih = Parameter(torch.Tensor(num_chunks * hidden_size)) + self.bias_hh = Parameter(torch.Tensor(num_chunks * hidden_size)) + else: + self.register_parameter('bias_ih', None) + self.register_parameter('bias_hh', None) + self.reset_parameters() + + def extra_repr(self): + s = '{input_size}, {hidden_size}' + if 'bias' in self.__dict__ and self.bias is not True: + s += ', bias={bias}' + if 'nonlinearity' in self.__dict__ and self.nonlinearity != "tanh": + s += ', nonlinearity={nonlinearity}' + return s.format(**self.__dict__) + + def check_forward_input(self, input): + if input.size(1) != self.input_size: + raise RuntimeError( + "input has inconsistent input_size: got {}, expected {}".format( + input.size(1), self.input_size)) + + def check_forward_hidden(self, input, hx, hidden_label=''): + # type: (Tensor, Tensor, str) -> None + if input.size(0) != hx.size(0): + raise RuntimeError( + "Input batch size {} doesn't match hidden{} batch size {}".format( + input.size(0), hidden_label, hx.size(0))) + + if hx.size(1) != self.hidden_size: + raise RuntimeError( + "hidden{} has inconsistent hidden_size: got {}, expected {}".format( + hidden_label, hx.size(1), self.hidden_size)) + + def reset_parameters(self): + stdv = 1.0 / math.sqrt(self.hidden_size) + for weight in self.parameters(): + init.uniform_(weight, -stdv, stdv) + + +
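``reset_parameters`` draws every weight and bias from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` with :math:`k = 1/\text{hidden\_size}`, which can be checked directly (a sketch, assuming ``torch``/``nn`` are imported)::

    >>> cell = nn.RNNCell(10, 20)
    >>> bound = (1.0 / 20) ** 0.5                  # sqrt(k) for hidden_size=20
    >>> all(bool((w.abs() <= bound).all()) for w in cell.parameters())
    True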
    [docs]class RNNCell(RNNCellBase): + r"""An Elman RNN cell with tanh or ReLU non-linearity. + + .. math:: + + h' = \tanh(W_{ih} x + b_{ih} + W_{hh} h + b_{hh}) + + If :attr:`nonlinearity` is `'relu'`, then ReLU is used in place of tanh. + + Args: + input_size: The number of expected features in the input `x` + hidden_size: The number of features in the hidden state `h` + bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`. + Default: ``True`` + nonlinearity: The non-linearity to use. Can be either ``'tanh'`` or ``'relu'``. Default: ``'tanh'`` + + Inputs: input, hidden + - **input** of shape `(batch, input_size)`: tensor containing input features + - **hidden** of shape `(batch, hidden_size)`: tensor containing the initial hidden + state for each element in the batch. + Defaults to zero if not provided. + + Outputs: h' + - **h'** of shape `(batch, hidden_size)`: tensor containing the next hidden state + for each element in the batch + + Shape: + - Input1: :math:`(N, H_{in})` tensor containing input features where + :math:`H_{in}` = `input_size` + - Input2: :math:`(N, H_{out})` tensor containing the initial hidden + state for each element in the batch where :math:`H_{out}` = `hidden_size` + Defaults to zero if not provided. + - Output: :math:`(N, H_{out})` tensor containing the next hidden state + for each element in the batch + + Attributes: + weight_ih: the learnable input-hidden weights, of shape + `(hidden_size, input_size)` + weight_hh: the learnable hidden-hidden weights, of shape + `(hidden_size, hidden_size)` + bias_ih: the learnable input-hidden bias, of shape `(hidden_size)` + bias_hh: the learnable hidden-hidden bias, of shape `(hidden_size)` + + .. note:: + All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` + where :math:`k = \frac{1}{\text{hidden\_size}}` + + Examples:: + + >>> rnn = nn.RNNCell(10, 20) + >>> input = torch.randn(6, 3, 10) + >>> hx = torch.randn(3, 20) + >>> output = [] + >>> for i in range(6): + hx = rnn(input[i], hx) + output.append(hx) + """ + __constants__ = ['input_size', 'hidden_size', 'bias', 'nonlinearity'] + + def __init__(self, input_size, hidden_size, bias=True, nonlinearity="tanh"): + super(RNNCell, self).__init__(input_size, hidden_size, bias, num_chunks=1) + self.nonlinearity = nonlinearity + + def forward(self, input, hx=None): + # type: (Tensor, Optional[Tensor]) -> Tensor + self.check_forward_input(input) + if hx is None: + hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device) + self.check_forward_hidden(input, hx, '') + if self.nonlinearity == "tanh": + ret = _VF.rnn_tanh_cell( + input, hx, + self.weight_ih, self.weight_hh, + self.bias_ih, self.bias_hh, + ) + elif self.nonlinearity == "relu": + ret = _VF.rnn_relu_cell( + input, hx, + self.weight_ih, self.weight_hh, + self.bias_ih, self.bias_hh, + ) + else: + ret = input # TODO: remove when jit supports exception flow + raise RuntimeError( + "Unknown nonlinearity: {}".format(self.nonlinearity)) + return ret
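The cell equation :math:`h' = \tanh(W_{ih} x + b_{ih} + W_{hh} h + b_{hh})` can be reproduced by hand from the module's parameters; a minimal sketch (assuming ``torch``/``nn`` are imported)::

    >>> cell = nn.RNNCell(3, 5)
    >>> x, h = torch.randn(2, 3), torch.randn(2, 5)
    >>> manual = torch.tanh(x @ cell.weight_ih.t() + cell.bias_ih
    ...                     + h @ cell.weight_hh.t() + cell.bias_hh)
    >>> torch.allclose(cell(x, h), manual)
    True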
    + + +
    [docs]class LSTMCell(RNNCellBase): + r"""A long short-term memory (LSTM) cell. + + .. math:: + + \begin{array}{ll} + i = \sigma(W_{ii} x + b_{ii} + W_{hi} h + b_{hi}) \\ + f = \sigma(W_{if} x + b_{if} + W_{hf} h + b_{hf}) \\ + g = \tanh(W_{ig} x + b_{ig} + W_{hg} h + b_{hg}) \\ + o = \sigma(W_{io} x + b_{io} + W_{ho} h + b_{ho}) \\ + c' = f * c + i * g \\ + h' = o * \tanh(c') \\ + \end{array} + + where :math:`\sigma` is the sigmoid function, and :math:`*` is the Hadamard product. + + Args: + input_size: The number of expected features in the input `x` + hidden_size: The number of features in the hidden state `h` + bias: If ``False``, then the layer does not use bias weights `b_ih` and + `b_hh`. Default: ``True`` + + Inputs: input, (h_0, c_0) + - **input** of shape `(batch, input_size)`: tensor containing input features + - **h_0** of shape `(batch, hidden_size)`: tensor containing the initial hidden + state for each element in the batch. + - **c_0** of shape `(batch, hidden_size)`: tensor containing the initial cell state + for each element in the batch. + + If `(h_0, c_0)` is not provided, both **h_0** and **c_0** default to zero. + + Outputs: (h_1, c_1) + - **h_1** of shape `(batch, hidden_size)`: tensor containing the next hidden state + for each element in the batch + - **c_1** of shape `(batch, hidden_size)`: tensor containing the next cell state + for each element in the batch + + Attributes: + weight_ih: the learnable input-hidden weights, of shape + `(4*hidden_size, input_size)` + weight_hh: the learnable hidden-hidden weights, of shape + `(4*hidden_size, hidden_size)` + bias_ih: the learnable input-hidden bias, of shape `(4*hidden_size)` + bias_hh: the learnable hidden-hidden bias, of shape `(4*hidden_size)` + + .. note:: + All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` + where :math:`k = \frac{1}{\text{hidden\_size}}` + + Examples:: + + >>> rnn = nn.LSTMCell(10, 20) + >>> input = torch.randn(6, 3, 10) + >>> hx = torch.randn(3, 20) + >>> cx = torch.randn(3, 20) + >>> output = [] + >>> for i in range(6): + hx, cx = rnn(input[i], (hx, cx)) + output.append(hx) + """ + + def __init__(self, input_size, hidden_size, bias=True): + super(LSTMCell, self).__init__(input_size, hidden_size, bias, num_chunks=4) + + def forward(self, input, hx=None): + # type: (Tensor, Optional[Tuple[Tensor, Tensor]]) -> Tuple[Tensor, Tensor] + self.check_forward_input(input) + if hx is None: + zeros = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device) + hx = (zeros, zeros) + self.check_forward_hidden(input, hx[0], '[0]') + self.check_forward_hidden(input, hx[1], '[1]') + return _VF.lstm_cell( + input, hx, + self.weight_ih, self.weight_hh, + self.bias_ih, self.bias_hh, + )
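Because ``num_chunks=4``, the rows of ``weight_ih``/``weight_hh`` hold the gates in the order `(i|f|g|o)` listed above, so one step can be computed by hand (a sketch, assuming ``torch``/``nn`` are imported)::

    >>> cell = nn.LSTMCell(3, 5)
    >>> x, h, c = torch.randn(2, 3), torch.randn(2, 5), torch.randn(2, 5)
    >>> gates = (x @ cell.weight_ih.t() + cell.bias_ih
    ...          + h @ cell.weight_hh.t() + cell.bias_hh)
    >>> i, f, g, o = gates.chunk(4, 1)
    >>> c1 = torch.sigmoid(f) * c + torch.sigmoid(i) * torch.tanh(g)
    >>> h1 = torch.sigmoid(o) * torch.tanh(c1)
    >>> h_ref, c_ref = cell(x, (h, c))
    >>> torch.allclose(h1, h_ref) and torch.allclose(c1, c_ref)
    True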
    + + +
    [docs]class GRUCell(RNNCellBase): + r"""A gated recurrent unit (GRU) cell + + .. math:: + + \begin{array}{ll} + r = \sigma(W_{ir} x + b_{ir} + W_{hr} h + b_{hr}) \\ + z = \sigma(W_{iz} x + b_{iz} + W_{hz} h + b_{hz}) \\ + n = \tanh(W_{in} x + b_{in} + r * (W_{hn} h + b_{hn})) \\ + h' = (1 - z) * n + z * h + \end{array} + + where :math:`\sigma` is the sigmoid function, and :math:`*` is the Hadamard product. + + Args: + input_size: The number of expected features in the input `x` + hidden_size: The number of features in the hidden state `h` + bias: If ``False``, then the layer does not use bias weights `b_ih` and + `b_hh`. Default: ``True`` + + Inputs: input, hidden + - **input** of shape `(batch, input_size)`: tensor containing input features + - **hidden** of shape `(batch, hidden_size)`: tensor containing the initial hidden + state for each element in the batch. + Defaults to zero if not provided. + + Outputs: h' + - **h'** of shape `(batch, hidden_size)`: tensor containing the next hidden state + for each element in the batch + + Shape: + - Input1: :math:`(N, H_{in})` tensor containing input features where + :math:`H_{in}` = `input_size` + - Input2: :math:`(N, H_{out})` tensor containing the initial hidden + state for each element in the batch where :math:`H_{out}` = `hidden_size` + Defaults to zero if not provided. + - Output: :math:`(N, H_{out})` tensor containing the next hidden state + for each element in the batch + + Attributes: + weight_ih: the learnable input-hidden weights, of shape + `(3*hidden_size, input_size)` + weight_hh: the learnable hidden-hidden weights, of shape + `(3*hidden_size, hidden_size)` + bias_ih: the learnable input-hidden bias, of shape `(3*hidden_size)` + bias_hh: the learnable hidden-hidden bias, of shape `(3*hidden_size)` + + .. note:: + All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` + where :math:`k = \frac{1}{\text{hidden\_size}}` + + Examples:: + + >>> rnn = nn.GRUCell(10, 20) + >>> input = torch.randn(6, 3, 10) + >>> hx = torch.randn(3, 20) + >>> output = [] + >>> for i in range(6): + hx = rnn(input[i], hx) + output.append(hx) + """ + + def __init__(self, input_size, hidden_size, bias=True): + super(GRUCell, self).__init__(input_size, hidden_size, bias, num_chunks=3) + + def forward(self, input, hx=None): + # type: (Tensor, Optional[Tensor]) -> Tensor + self.check_forward_input(input) + if hx is None: + hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device) + self.check_forward_hidden(input, hx, '') + return _VF.gru_cell( + input, hx, + self.weight_ih, self.weight_hh, + self.bias_ih, self.bias_hh, + )
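Likewise, with ``num_chunks=3`` the gate rows are ordered `(r|z|n)`; note that the reset gate multiplies only the hidden-side term inside the :math:`\tanh`. A hand-computed step (a sketch, assuming ``torch``/``nn`` are imported)::

    >>> cell = nn.GRUCell(3, 5)
    >>> x, h = torch.randn(2, 3), torch.randn(2, 5)
    >>> gi = x @ cell.weight_ih.t() + cell.bias_ih
    >>> gh = h @ cell.weight_hh.t() + cell.bias_hh
    >>> i_r, i_z, i_n = gi.chunk(3, 1)
    >>> h_r, h_z, h_n = gh.chunk(3, 1)
    >>> r, z = torch.sigmoid(i_r + h_r), torch.sigmoid(i_z + h_z)
    >>> n = torch.tanh(i_n + r * h_n)
    >>> torch.allclose((1 - z) * n + z * h, cell(x, h))
    True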
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/modules/sparse.html b/docs/stable/_modules/torch/nn/modules/sparse.html
new file mode 100644
index 000000000000..919ca8271f2a
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/modules/sparse.html
@@ -0,0 +1,848 @@
+torch.nn.modules.sparse — PyTorch master documentation

    Source code for torch.nn.modules.sparse

    +import torch
    +from torch.nn.parameter import Parameter
    +
    +from .module import Module
    +from .. import functional as F
    +from .. import init
    +
    +
    +
    [docs]class Embedding(Module): + r"""A simple lookup table that stores embeddings of a fixed dictionary and size. + + This module is often used to store word embeddings and retrieve them using indices. + The input to the module is a list of indices, and the output is the corresponding + word embeddings. + + Args: + num_embeddings (int): size of the dictionary of embeddings + embedding_dim (int): the size of each embedding vector + padding_idx (int, optional): If given, pads the output with the embedding vector at :attr:`padding_idx` + (initialized to zeros) whenever it encounters the index. + max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm` + is renormalized to have norm :attr:`max_norm`. + norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``. + scale_grad_by_freq (boolean, optional): If given, this will scale gradients by the inverse of frequency of + the words in the mini-batch. Default ``False``. + sparse (bool, optional): If ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor. + See Notes for more details regarding sparse gradients. + + Attributes: + weight (Tensor): the learnable weights of the module of shape (num_embeddings, embedding_dim) + initialized from :math:`\mathcal{N}(0, 1)` + + Shape: + - Input: :math:`(*)`, LongTensor of arbitrary shape containing the indices to extract + - Output: :math:`(*, H)`, where `*` is the input shape and :math:`H=\text{embedding\_dim}` + + .. note:: + Keep in mind that only a limited number of optimizers support + sparse gradients: currently it's :class:`optim.SGD` (`CUDA` and `CPU`), + :class:`optim.SparseAdam` (`CUDA` and `CPU`) and :class:`optim.Adagrad` (`CPU`) + + .. note:: + With :attr:`padding_idx` set, the embedding vector at + :attr:`padding_idx` is initialized to all zeros. However, note that this + vector can be modified afterwards, e.g., using a customized + initialization method, and thus changing the vector used to pad the + output. The gradient for this vector from :class:`~torch.nn.Embedding` + is always zero. 
+ + Examples:: + + >>> # an Embedding module containing 10 tensors of size 3 + >>> embedding = nn.Embedding(10, 3) + >>> # a batch of 2 samples of 4 indices each + >>> input = torch.LongTensor([[1,2,4,5],[4,3,2,9]]) + >>> embedding(input) + tensor([[[-0.0251, -1.6902, 0.7172], + [-0.6431, 0.0748, 0.6969], + [ 1.4970, 1.3448, -0.9685], + [-0.3677, -2.7265, -0.1685]], + + [[ 1.4970, 1.3448, -0.9685], + [ 0.4362, -0.4004, 0.9400], + [-0.6431, 0.0748, 0.6969], + [ 0.9124, -2.3616, 1.1151]]]) + + + >>> # example with padding_idx + >>> embedding = nn.Embedding(10, 3, padding_idx=0) + >>> input = torch.LongTensor([[0,2,0,5]]) + >>> embedding(input) + tensor([[[ 0.0000, 0.0000, 0.0000], + [ 0.1535, -2.0309, 0.9315], + [ 0.0000, 0.0000, 0.0000], + [-0.1655, 0.9897, 0.0635]]]) + """ + __constants__ = ['num_embeddings', 'embedding_dim', 'padding_idx', 'max_norm', + 'norm_type', 'scale_grad_by_freq', 'sparse'] + + def __init__(self, num_embeddings, embedding_dim, padding_idx=None, + max_norm=None, norm_type=2., scale_grad_by_freq=False, + sparse=False, _weight=None): + super(Embedding, self).__init__() + self.num_embeddings = num_embeddings + self.embedding_dim = embedding_dim + if padding_idx is not None: + if padding_idx > 0: + assert padding_idx < self.num_embeddings, 'Padding_idx must be within num_embeddings' + elif padding_idx < 0: + assert padding_idx >= -self.num_embeddings, 'Padding_idx must be within num_embeddings' + padding_idx = self.num_embeddings + padding_idx + self.padding_idx = padding_idx + self.max_norm = max_norm + self.norm_type = norm_type + self.scale_grad_by_freq = scale_grad_by_freq + if _weight is None: + self.weight = Parameter(torch.Tensor(num_embeddings, embedding_dim)) + self.reset_parameters() + else: + assert list(_weight.shape) == [num_embeddings, embedding_dim], \ + 'Shape of weight does not match num_embeddings and embedding_dim' + self.weight = Parameter(_weight) + self.sparse = sparse + + def reset_parameters(self): + init.normal_(self.weight) + if self.padding_idx is not None: + with torch.no_grad(): + self.weight[self.padding_idx].fill_(0) + + def forward(self, input): + return F.embedding( + input, self.weight, self.padding_idx, self.max_norm, + self.norm_type, self.scale_grad_by_freq, self.sparse) + + def extra_repr(self): + s = '{num_embeddings}, {embedding_dim}' + if self.padding_idx is not None: + s += ', padding_idx={padding_idx}' + if self.max_norm is not None: + s += ', max_norm={max_norm}' + if self.norm_type != 2: + s += ', norm_type={norm_type}' + if self.scale_grad_by_freq is not False: + s += ', scale_grad_by_freq={scale_grad_by_freq}' + if self.sparse is not False: + s += ', sparse=True' + return s.format(**self.__dict__) + +
    [docs] @classmethod + def from_pretrained(cls, embeddings, freeze=True, padding_idx=None, + max_norm=None, norm_type=2., scale_grad_by_freq=False, + sparse=False): + r"""Creates Embedding instance from given 2-dimensional FloatTensor. + + Args: + embeddings (Tensor): FloatTensor containing weights for the Embedding. + First dimension is being passed to Embedding as ``num_embeddings``, second as ``embedding_dim``. + freeze (boolean, optional): If ``True``, the tensor does not get updated in the learning process. + Equivalent to ``embedding.weight.requires_grad = False``. Default: ``True`` + padding_idx (int, optional): See module initialization documentation. + max_norm (float, optional): See module initialization documentation. + norm_type (float, optional): See module initialization documentation. Default ``2``. + scale_grad_by_freq (boolean, optional): See module initialization documentation. Default ``False``. + sparse (bool, optional): See module initialization documentation. + + Examples:: + + >>> # FloatTensor containing pretrained weights + >>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]]) + >>> embedding = nn.Embedding.from_pretrained(weight) + >>> # Get embeddings for index 1 + >>> input = torch.LongTensor([1]) + >>> embedding(input) + tensor([[ 4.0000, 5.1000, 6.3000]]) + """ + assert embeddings.dim() == 2, \ + 'Embeddings parameter is expected to be 2-dimensional' + rows, cols = embeddings.shape + embedding = cls( + num_embeddings=rows, + embedding_dim=cols, + _weight=embeddings, + padding_idx=padding_idx, + max_norm=max_norm, + norm_type=norm_type, + scale_grad_by_freq=scale_grad_by_freq, + sparse=sparse) + embedding.weight.requires_grad = not freeze + return embedding
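The note above about :attr:`padding_idx` can be verified: the padding row always receives a zero gradient, even when its index appears in the input (a sketch, assuming ``torch``/``nn`` are imported)::

    >>> embedding = nn.Embedding(10, 3, padding_idx=0)
    >>> out = embedding(torch.LongTensor([0, 1, 2]))
    >>> out.sum().backward()
    >>> embedding.weight.grad[0]                   # padding row: gradient stays zero
    tensor([0., 0., 0.])
    >>> embedding.weight.grad[1]                   # ordinary row used once
    tensor([1., 1., 1.])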
    + + +
[docs]class EmbeddingBag(Module): + r"""Computes sums or means of 'bags' of embeddings, without instantiating the + intermediate embeddings. + + For bags of constant length and no :attr:`per_sample_weights`, this class + + * with ``mode="sum"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.sum(dim=0)``, + * with ``mode="mean"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.mean(dim=0)``, + * with ``mode="max"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.max(dim=0)``. + + However, :class:`~torch.nn.EmbeddingBag` is much more time and memory efficient than using a chain of these + operations. + + EmbeddingBag also supports per-sample weights as an argument to the forward + pass. This scales the output of the Embedding before performing a weighted + reduction as specified by ``mode``. If :attr:`per_sample_weights` is passed, the + only supported ``mode`` is ``"sum"``, which computes a weighted sum according to + :attr:`per_sample_weights`. + + Args: + num_embeddings (int): size of the dictionary of embeddings + embedding_dim (int): the size of each embedding vector + max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm` + is renormalized to have norm :attr:`max_norm`. + norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``. + scale_grad_by_freq (boolean, optional): if given, this will scale gradients by the inverse of frequency of + the words in the mini-batch. Default ``False``. + Note: this option is not supported when ``mode="max"``. + mode (string, optional): ``"sum"``, ``"mean"`` or ``"max"``. Specifies the way to reduce the bag. + ``"sum"`` computes the weighted sum, taking :attr:`per_sample_weights` + into consideration. ``"mean"`` computes the average of the values + in the bag, ``"max"`` computes the max value over each bag. + Default: ``"mean"`` + sparse (bool, optional): if ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor. See + Notes for more details regarding sparse gradients. Note: this option is not + supported when ``mode="max"``. + + Attributes: + weight (Tensor): the learnable weights of the module of shape `(num_embeddings, embedding_dim)` + initialized from :math:`\mathcal{N}(0, 1)`. + + Inputs: :attr:`input` (LongTensor), :attr:`offsets` (LongTensor, optional), and + :attr:`per_sample_weights` (Tensor, optional) + + - If :attr:`input` is 2D of shape `(B, N)`, + + it will be treated as ``B`` bags (sequences) each of fixed length ``N``, and + this will return ``B`` values aggregated in a way depending on the :attr:`mode`. + :attr:`offsets` is ignored and required to be ``None`` in this case. + + - If :attr:`input` is 1D of shape `(N)`, + + it will be treated as a concatenation of multiple bags (sequences). + :attr:`offsets` is required to be a 1D tensor containing the + starting index positions of each bag in :attr:`input`. Therefore, + for :attr:`offsets` of shape `(B)`, :attr:`input` will be viewed as + having ``B`` bags. Empty bags (i.e., having 0-length) will have + returned vectors filled by zeros. + + per_sample_weights (Tensor, optional): a tensor of float / double weights, or None + to indicate all weights should be taken to be ``1``. If specified, :attr:`per_sample_weights` + must have exactly the same shape as input and is treated as having the same + :attr:`offsets`, if those are not ``None``. Only supported for ``mode='sum'``.
+ + + Output shape: `(B, embedding_dim)` + + Examples:: + + >>> # an Embedding module containing 10 tensors of size 3 + >>> embedding_sum = nn.EmbeddingBag(10, 3, mode='sum') + >>> # a batch of 2 samples of 4 indices each + >>> input = torch.LongTensor([1,2,4,5,4,3,2,9]) + >>> offsets = torch.LongTensor([0,4]) + >>> embedding_sum(input, offsets) + tensor([[-0.8861, -5.4350, -0.0523], + [ 1.1306, -2.5798, -1.0044]]) + """ + __constants__ = ['num_embeddings', 'embedding_dim', 'max_norm', 'norm_type', + 'scale_grad_by_freq', 'mode', 'sparse'] + + def __init__(self, num_embeddings, embedding_dim, + max_norm=None, norm_type=2., scale_grad_by_freq=False, + mode='mean', sparse=False, _weight=None): + super(EmbeddingBag, self).__init__() + self.num_embeddings = num_embeddings + self.embedding_dim = embedding_dim + self.max_norm = max_norm + self.norm_type = norm_type + self.scale_grad_by_freq = scale_grad_by_freq + if _weight is None: + self.weight = Parameter(torch.Tensor(num_embeddings, embedding_dim)) + self.reset_parameters() + else: + assert list(_weight.shape) == [num_embeddings, embedding_dim], \ + 'Shape of weight does not match num_embeddings and embedding_dim' + self.weight = Parameter(_weight) + self.mode = mode + self.sparse = sparse + + def reset_parameters(self): + init.normal_(self.weight) + + def forward(self, input, offsets=None, per_sample_weights=None): + # type: (Tensor, Optional[Tensor], Optional[Tensor]) -> Tensor + return F.embedding_bag(input, self.weight, offsets, + self.max_norm, self.norm_type, + self.scale_grad_by_freq, self.mode, self.sparse, + per_sample_weights) + + def extra_repr(self): + s = '{num_embeddings}, {embedding_dim}' + if self.max_norm is not None: + s += ', max_norm={max_norm}' + if self.norm_type != 2: + s += ', norm_type={norm_type}' + if self.scale_grad_by_freq is not False: + s += ', scale_grad_by_freq={scale_grad_by_freq}' + s += ', mode={mode}' + return s.format(**self.__dict__) + +
    [docs] @classmethod + def from_pretrained(cls, embeddings, freeze=True, max_norm=None, + norm_type=2., scale_grad_by_freq=False, + mode='mean', sparse=False): + r"""Creates EmbeddingBag instance from given 2-dimensional FloatTensor. + + Args: + embeddings (Tensor): FloatTensor containing weights for the EmbeddingBag. + First dimension is being passed to EmbeddingBag as 'num_embeddings', second as 'embedding_dim'. + freeze (boolean, optional): If ``True``, the tensor does not get updated in the learning process. + Equivalent to ``embeddingbag.weight.requires_grad = False``. Default: ``True`` + max_norm (float, optional): See module initialization documentation. Default: ``None`` + norm_type (float, optional): See module initialization documentation. Default ``2``. + scale_grad_by_freq (boolean, optional): See module initialization documentation. Default ``False``. + mode (string, optional): See module initialization documentation. Default: ``"mean"`` + sparse (bool, optional): See module initialization documentation. Default: ``False``. + + Examples:: + + >>> # FloatTensor containing pretrained weights + >>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]]) + >>> embeddingbag = nn.EmbeddingBag.from_pretrained(weight) + >>> # Get embeddings for index 1 + >>> input = torch.LongTensor([[1, 0]]) + >>> embeddingbag(input) + tensor([[ 2.5000, 3.7000, 4.6500]]) + """ + assert embeddings.dim() == 2, \ + 'Embeddings parameter is expected to be 2-dimensional' + rows, cols = embeddings.shape + embeddingbag = cls( + num_embeddings=rows, + embedding_dim=cols, + _weight=embeddings, + max_norm=max_norm, + norm_type=norm_type, + scale_grad_by_freq=scale_grad_by_freq, + mode=mode, + sparse=sparse) + embeddingbag.weight.requires_grad = not freeze + return embeddingbag
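With ``mode='sum'``, :attr:`per_sample_weights` scales each looked-up vector before the per-bag reduction, as described above; a minimal sketch (assuming ``torch``/``nn`` are imported)::

    >>> bag = nn.EmbeddingBag(10, 3, mode='sum')
    >>> input = torch.LongTensor([1, 2, 4, 5])
    >>> offsets = torch.LongTensor([0, 2])         # two bags: [1, 2] and [4, 5]
    >>> w = torch.tensor([0.5, 0.5, 1.0, 2.0])
    >>> out = bag(input, offsets, per_sample_weights=w)
    >>> torch.allclose(out[0], 0.5 * bag.weight[1] + 0.5 * bag.weight[2])
    True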
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/modules/transformer.html b/docs/stable/_modules/torch/nn/modules/transformer.html
new file mode 100644
index 000000000000..cc9f22157632
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/modules/transformer.html
@@ -0,0 +1,853 @@
+torch.nn.modules.transformer — PyTorch master documentation

    Source code for torch.nn.modules.transformer

    +import torch
    +import copy
    +from .. import functional as F
    +from .module import Module
    +from .activation import MultiheadAttention
    +from .container import ModuleList
    +from ..init import xavier_uniform_
    +from .dropout import Dropout
    +from .linear import Linear
    +from .normalization import LayerNorm
    +
    +
[docs]class Transformer(Module): + r"""A transformer model. Users can modify the attributes as needed. The architecture + is based on the paper "Attention Is All You Need". Ashish Vaswani, Noam Shazeer, + Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and + Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information + Processing Systems, pages 6000-6010. + + Args: + d_model: the number of expected features in the encoder/decoder inputs (default=512). + nhead: the number of heads in the multiheadattention models (default=8). + num_encoder_layers: the number of sub-encoder-layers in the encoder (default=6). + num_decoder_layers: the number of sub-decoder-layers in the decoder (default=6). + dim_feedforward: the dimension of the feedforward network model (default=2048). + dropout: the dropout value (default=0.1). + custom_encoder: custom encoder (default=None). + custom_decoder: custom decoder (default=None). + + Examples:: + + >>> transformer_model = nn.Transformer() + >>> transformer_model = nn.Transformer(nhead=16, num_encoder_layers=12) + """ + + def __init__(self, d_model=512, nhead=8, num_encoder_layers=6, + num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, + custom_encoder=None, custom_decoder=None): + super(Transformer, self).__init__() + + if custom_encoder is not None: + self.encoder = custom_encoder + else: + encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout) + encoder_norm = LayerNorm(d_model) + self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) + + if custom_decoder is not None: + self.decoder = custom_decoder + else: + decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout) + decoder_norm = LayerNorm(d_model) + self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm) + + self._reset_parameters() + + self.d_model = d_model + self.nhead = nhead + +
[docs] def forward(self, src, tgt, src_mask=None, tgt_mask=None, + memory_mask=None, src_key_padding_mask=None, + tgt_key_padding_mask=None, memory_key_padding_mask=None): + r"""Take in and process masked source/target sequences. + + Args: + src: the sequence to the encoder (required). + tgt: the sequence to the decoder (required). + src_mask: the additive mask for the src sequence (optional). + tgt_mask: the additive mask for the tgt sequence (optional). + memory_mask: the additive mask for the encoder output (optional). + src_key_padding_mask: the ByteTensor mask for src keys per batch (optional). + tgt_key_padding_mask: the ByteTensor mask for tgt keys per batch (optional). + memory_key_padding_mask: the ByteTensor mask for memory keys per batch (optional). + + Shape: + - src: :math:`(S, N, E)`. + - tgt: :math:`(T, N, E)`. + - src_mask: :math:`(S, S)`. + - tgt_mask: :math:`(T, T)`. + - memory_mask: :math:`(T, S)`. + - src_key_padding_mask: :math:`(N, S)`. + - tgt_key_padding_mask: :math:`(N, T)`. + - memory_key_padding_mask: :math:`(N, S)`. + + Note: [src/tgt/memory]_mask should be filled with + float('-inf') for the masked positions and float(0.0) otherwise. These masks + ensure that predictions for position i depend only on the unmasked positions + j and are applied identically for each sequence in a batch. + [src/tgt/memory]_key_padding_mask should be a ByteTensor where True values are positions + that should be masked with float('-inf') and False values will be unchanged. + This mask ensures that no information will be taken from position i if + it is masked, and has a separate mask for each sequence in a batch. + + - output: :math:`(T, N, E)`. + + Note: Due to the multi-head attention architecture in the transformer model, + the output sequence length of a transformer is the same as the input sequence + (i.e. target) length of the decoder. + + where S is the source sequence length, T is the target sequence length, N is the + batch size, E is the feature number. + + Examples: + >>> output = transformer_model(src, tgt, src_mask=src_mask, tgt_mask=tgt_mask) + """ + + if src.size(1) != tgt.size(1): + raise RuntimeError("the batch number of src and tgt must be equal") + + if src.size(2) != self.d_model or tgt.size(2) != self.d_model: + raise RuntimeError("the feature number of src and tgt must be equal to d_model") + + memory = self.encoder(src, mask=src_mask, src_key_padding_mask=src_key_padding_mask) + output = self.decoder(tgt, memory, tgt_mask=tgt_mask, memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, + memory_key_padding_mask=memory_key_padding_mask) + return output
    + +
    [docs] def generate_square_subsequent_mask(self, sz): + r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf'). + Unmasked positions are filled with float(0.0). + """ + mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1) + mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)) + return mask
+ + def _reset_parameters(self): + r"""Initialize parameters in the transformer model.""" + + for p in self.parameters(): + if p.dim() > 1: + xavier_uniform_(p)
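The additive mask format described in :meth:`forward` is exactly what ``generate_square_subsequent_mask`` produces; for example, with ``sz=4`` (assuming ``torch``/``nn`` are imported)::

    >>> model = nn.Transformer(d_model=16, nhead=4)
    >>> model.generate_square_subsequent_mask(4)
    tensor([[0., -inf, -inf, -inf],
            [0., 0., -inf, -inf],
            [0., 0., 0., -inf],
            [0., 0., 0., 0.]])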
    + + +
[docs]class TransformerEncoder(Module): + r"""TransformerEncoder is a stack of N encoder layers. + + Args: + encoder_layer: an instance of the TransformerEncoderLayer() class (required). + num_layers: the number of sub-encoder-layers in the encoder (required). + norm: the layer normalization component (optional). + + Examples:: + + >>> encoder_layer = nn.TransformerEncoderLayer(d_model, nhead) + >>> transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers) + """ + + def __init__(self, encoder_layer, num_layers, norm=None): + super(TransformerEncoder, self).__init__() + self.layers = _get_clones(encoder_layer, num_layers) + self.num_layers = num_layers + self.norm = norm + +
[docs] def forward(self, src, mask=None, src_key_padding_mask=None): + r"""Pass the input through the encoder layers in turn. + + Args: + src: the sequence to the encoder (required). + mask: the mask for the src sequence (optional). + src_key_padding_mask: the mask for the src keys per batch (optional). + + Shape: + see the docs in Transformer class. + """ + output = src + + for i in range(self.num_layers): + output = self.layers[i](output, src_mask=mask, + src_key_padding_mask=src_key_padding_mask) + + if self.norm: + output = self.norm(output) + + return output
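A minimal end-to-end use of the encoder stack (assuming ``torch``/``nn`` are imported); the output keeps the :math:`(S, N, E)` shape of ``src``::

    >>> encoder_layer = nn.TransformerEncoderLayer(d_model=16, nhead=4)
    >>> encoder = nn.TransformerEncoder(encoder_layer, num_layers=2, norm=nn.LayerNorm(16))
    >>> src = torch.randn(10, 32, 16)              # (S, N, E)
    >>> encoder(src).shape
    torch.Size([10, 32, 16])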
    + + +
[docs]class TransformerDecoder(Module): + r"""TransformerDecoder is a stack of N decoder layers. + + Args: + decoder_layer: an instance of the TransformerDecoderLayer() class (required). + num_layers: the number of sub-decoder-layers in the decoder (required). + norm: the layer normalization component (optional). + + Examples:: + + >>> decoder_layer = nn.TransformerDecoderLayer(d_model, nhead) + >>> transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers) + """ + + def __init__(self, decoder_layer, num_layers, norm=None): + super(TransformerDecoder, self).__init__() + self.layers = _get_clones(decoder_layer, num_layers) + self.num_layers = num_layers + self.norm = norm + +
[docs] def forward(self, tgt, memory, tgt_mask=None, + memory_mask=None, tgt_key_padding_mask=None, + memory_key_padding_mask=None): + r"""Pass the inputs (and mask) through the decoder layer in turn. + + Args: + tgt: the sequence to the decoder (required). + memory: the sequence from the last layer of the encoder (required). + tgt_mask: the mask for the tgt sequence (optional). + memory_mask: the mask for the memory sequence (optional). + tgt_key_padding_mask: the mask for the tgt keys per batch (optional). + memory_key_padding_mask: the mask for the memory keys per batch (optional). + + Shape: + see the docs in Transformer class. + """ + output = tgt + + for i in range(self.num_layers): + output = self.layers[i](output, memory, tgt_mask=tgt_mask, + memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, + memory_key_padding_mask=memory_key_padding_mask) + + if self.norm: + output = self.norm(output) + + return output
    + +
[docs]class TransformerEncoderLayer(Module): + r"""TransformerEncoderLayer is made up of self-attn and feedforward network. + This standard encoder layer is based on the paper "Attention Is All You Need". + Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, + Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in + Neural Information Processing Systems, pages 6000-6010. Users may modify it or implement + it in a different way during application. + + Args: + d_model: the number of expected features in the input (required). + nhead: the number of heads in the multiheadattention models (required). + dim_feedforward: the dimension of the feedforward network model (default=2048). + dropout: the dropout value (default=0.1). + + Examples:: + + >>> encoder_layer = nn.TransformerEncoderLayer(d_model, nhead) + """ + + def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1): + super(TransformerEncoderLayer, self).__init__() + self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout) + # Implementation of Feedforward model + self.linear1 = Linear(d_model, dim_feedforward) + self.dropout = Dropout(dropout) + self.linear2 = Linear(dim_feedforward, d_model) + + self.norm1 = LayerNorm(d_model) + self.norm2 = LayerNorm(d_model) + self.dropout1 = Dropout(dropout) + self.dropout2 = Dropout(dropout) + +
[docs] def forward(self, src, src_mask=None, src_key_padding_mask=None): + r"""Pass the input through the encoder layer. + + Args: + src: the sequence to the encoder layer (required). + src_mask: the mask for the src sequence (optional). + src_key_padding_mask: the mask for the src keys per batch (optional). + + Shape: + see the docs in Transformer class. + """ + src2 = self.self_attn(src, src, src, attn_mask=src_mask, + key_padding_mask=src_key_padding_mask)[0] + src = src + self.dropout1(src2) + src = self.norm1(src) + src2 = self.linear2(self.dropout(F.relu(self.linear1(src)))) + src = src + self.dropout2(src2) + src = self.norm2(src) + return src
    + + +
[docs]class TransformerDecoderLayer(Module): + r"""TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network. + This standard decoder layer is based on the paper "Attention Is All You Need". + Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, + Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in + Neural Information Processing Systems, pages 6000-6010. Users may modify it or implement + it in a different way during application. + + Args: + d_model: the number of expected features in the input (required). + nhead: the number of heads in the multiheadattention models (required). + dim_feedforward: the dimension of the feedforward network model (default=2048). + dropout: the dropout value (default=0.1). + + Examples:: + + >>> decoder_layer = nn.TransformerDecoderLayer(d_model, nhead) + """ + + def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1): + super(TransformerDecoderLayer, self).__init__() + self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout) + self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=dropout) + # Implementation of Feedforward model + self.linear1 = Linear(d_model, dim_feedforward) + self.dropout = Dropout(dropout) + self.linear2 = Linear(dim_feedforward, d_model) + + self.norm1 = LayerNorm(d_model) + self.norm2 = LayerNorm(d_model) + self.norm3 = LayerNorm(d_model) + self.dropout1 = Dropout(dropout) + self.dropout2 = Dropout(dropout) + self.dropout3 = Dropout(dropout) + +
[docs] def forward(self, tgt, memory, tgt_mask=None, memory_mask=None, + tgt_key_padding_mask=None, memory_key_padding_mask=None): + r"""Pass the inputs (and mask) through the decoder layer. + + Args: + tgt: the sequence to the decoder layer (required). + memory: the sequence from the last layer of the encoder (required). + tgt_mask: the mask for the tgt sequence (optional). + memory_mask: the mask for the memory sequence (optional). + tgt_key_padding_mask: the mask for the tgt keys per batch (optional). + memory_key_padding_mask: the mask for the memory keys per batch (optional). + + Shape: + see the docs in Transformer class. + """ + tgt2 = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask)[0] + tgt = tgt + self.dropout1(tgt2) + tgt = self.norm1(tgt) + tgt2 = self.multihead_attn(tgt, memory, memory, attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask)[0] + tgt = tgt + self.dropout2(tgt2) + tgt = self.norm2(tgt) + tgt2 = self.linear2(self.dropout(F.relu(self.linear1(tgt)))) + tgt = tgt + self.dropout3(tgt2) + tgt = self.norm3(tgt) + return tgt
+ + +def _get_clones(module, N): + return ModuleList([copy.deepcopy(module) for _ in range(N)]) +
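Putting the pieces together, a full forward pass through :class:`Transformer` with a causal target mask (a sketch, assuming ``torch``/``nn`` are imported); the output takes the target's :math:`(T, N, E)` shape::

    >>> model = nn.Transformer(d_model=32, nhead=4, num_encoder_layers=2, num_decoder_layers=2)
    >>> src, tgt = torch.randn(10, 8, 32), torch.randn(7, 8, 32)   # (S, N, E), (T, N, E)
    >>> tgt_mask = model.generate_square_subsequent_mask(7)
    >>> model(src, tgt, tgt_mask=tgt_mask).shape
    torch.Size([7, 8, 32])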
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/modules/upsampling.html b/docs/stable/_modules/torch/nn/modules/upsampling.html
new file mode 100644
index 000000000000..08ffb9d605e4
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/modules/upsampling.html
@@ -0,0 +1,744 @@
+torch.nn.modules.upsampling — PyTorch master documentation

    Source code for torch.nn.modules.upsampling

    +from .module import Module
    +from .. import functional as F
    +
    +
    +
[docs]class Upsample(Module): + r"""Upsamples given multi-channel 1D (temporal), 2D (spatial) or 3D (volumetric) data. + + The input data is assumed to be of the form + `minibatch x channels x [optional depth] x [optional height] x width`. + Hence, for spatial inputs, we expect a 4D Tensor and for volumetric inputs, we expect a 5D Tensor. + + The algorithms available for upsampling are nearest neighbor and linear, + bilinear, bicubic and trilinear for 3D, 4D and 5D input Tensor, + respectively. + + One can either give a :attr:`scale_factor` or the target output :attr:`size` to + calculate the output size. (You cannot give both, as it is ambiguous.) + + Args: + size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int], optional): + output spatial sizes + scale_factor (float or Tuple[float] or Tuple[float, float] or Tuple[float, float, float], optional): + multiplier for spatial size. Has to match input size if it is a tuple. + mode (str, optional): the upsampling algorithm: one of ``'nearest'``, + ``'linear'``, ``'bilinear'``, ``'bicubic'`` and ``'trilinear'``. + Default: ``'nearest'`` + align_corners (bool, optional): if ``True``, the corner pixels of the input + and output tensors are aligned, thus preserving the values at + those pixels. This only has an effect when :attr:`mode` is + ``'linear'``, ``'bilinear'``, or ``'trilinear'``. Default: ``False`` + + Shape: + - Input: :math:`(N, C, W_{in})`, :math:`(N, C, H_{in}, W_{in})` or :math:`(N, C, D_{in}, H_{in}, W_{in})` + - Output: :math:`(N, C, W_{out})`, :math:`(N, C, H_{out}, W_{out})` + or :math:`(N, C, D_{out}, H_{out}, W_{out})`, where + + .. math:: + D_{out} = \left\lfloor D_{in} \times \text{scale\_factor} \right\rfloor + + .. math:: + H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor + + .. math:: + W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor + + .. warning:: + With ``align_corners = True``, the linearly interpolating modes + (`linear`, `bilinear`, `bicubic`, and `trilinear`) don't proportionally + align the output and input pixels, and thus the output values can depend + on the input size. This was the default behavior for these modes up to + version 0.3.1. Since then, the default behavior is + ``align_corners = False``. See below for concrete examples on how this + affects the outputs. + + .. note:: + If you want downsampling/general resizing, you should use :func:`~nn.functional.interpolate`.
+ + Examples:: + + >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2) + >>> input + tensor([[[[ 1., 2.], + [ 3., 4.]]]]) + + >>> m = nn.Upsample(scale_factor=2, mode='nearest') + >>> m(input) + tensor([[[[ 1., 1., 2., 2.], + [ 1., 1., 2., 2.], + [ 3., 3., 4., 4.], + [ 3., 3., 4., 4.]]]]) + + >>> m = nn.Upsample(scale_factor=2, mode='bilinear') # align_corners=False + >>> m(input) + tensor([[[[ 1.0000, 1.2500, 1.7500, 2.0000], + [ 1.5000, 1.7500, 2.2500, 2.5000], + [ 2.5000, 2.7500, 3.2500, 3.5000], + [ 3.0000, 3.2500, 3.7500, 4.0000]]]]) + + >>> m = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) + >>> m(input) + tensor([[[[ 1.0000, 1.3333, 1.6667, 2.0000], + [ 1.6667, 2.0000, 2.3333, 2.6667], + [ 2.3333, 2.6667, 3.0000, 3.3333], + [ 3.0000, 3.3333, 3.6667, 4.0000]]]]) + + >>> # Try scaling the same data in a larger tensor + >>> + >>> input_3x3 = torch.zeros(3, 3).view(1, 1, 3, 3) + >>> input_3x3[:, :, :2, :2].copy_(input) + tensor([[[[ 1., 2.], + [ 3., 4.]]]]) + >>> input_3x3 + tensor([[[[ 1., 2., 0.], + [ 3., 4., 0.], + [ 0., 0., 0.]]]]) + + >>> m = nn.Upsample(scale_factor=2, mode='bilinear') # align_corners=False + >>> # Notice that values in top left corner are the same with the small input (except at boundary) + >>> m(input_3x3) + tensor([[[[ 1.0000, 1.2500, 1.7500, 1.5000, 0.5000, 0.0000], + [ 1.5000, 1.7500, 2.2500, 1.8750, 0.6250, 0.0000], + [ 2.5000, 2.7500, 3.2500, 2.6250, 0.8750, 0.0000], + [ 2.2500, 2.4375, 2.8125, 2.2500, 0.7500, 0.0000], + [ 0.7500, 0.8125, 0.9375, 0.7500, 0.2500, 0.0000], + [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]]) + + >>> m = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) + >>> # Notice that values in top left corner are now changed + >>> m(input_3x3) + tensor([[[[ 1.0000, 1.4000, 1.8000, 1.6000, 0.8000, 0.0000], + [ 1.8000, 2.2000, 2.6000, 2.2400, 1.1200, 0.0000], + [ 2.6000, 3.0000, 3.4000, 2.8800, 1.4400, 0.0000], + [ 2.4000, 2.7200, 3.0400, 2.5600, 1.2800, 0.0000], + [ 1.2000, 1.3600, 1.5200, 1.2800, 0.6400, 0.0000], + [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]]) + """ + __constants__ = ['size', 'scale_factor', 'mode', 'align_corners', 'name'] + + def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=None): + super(Upsample, self).__init__() + self.name = type(self).__name__ + self.size = size + if isinstance(scale_factor, tuple): + self.scale_factor = tuple(float(factor) for factor in scale_factor) + else: + self.scale_factor = float(scale_factor) if scale_factor else None + self.mode = mode + self.align_corners = align_corners + + def forward(self, input): + return F.interpolate(input, self.size, self.scale_factor, self.mode, self.align_corners) + + def extra_repr(self): + if self.scale_factor is not None: + info = 'scale_factor=' + str(self.scale_factor) + else: + info = 'size=' + str(self.size) + info += ', mode=' + self.mode + return info
    + + +
[docs]class UpsamplingNearest2d(Upsample): + r"""Applies a 2D nearest neighbor upsampling to an input signal composed of several input + channels. + + To specify the scale, it takes either the :attr:`size` or the :attr:`scale_factor` + as its constructor argument. + + When :attr:`size` is given, it is the output size of the image `(h, w)`. + + Args: + size (int or Tuple[int, int], optional): output spatial sizes + scale_factor (float or Tuple[float, float], optional): multiplier for + spatial size. + + .. warning:: + This class is deprecated in favor of :func:`~nn.functional.interpolate`. + + Shape: + - Input: :math:`(N, C, H_{in}, W_{in})` + - Output: :math:`(N, C, H_{out}, W_{out})` where + + .. math:: + H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor + + .. math:: + W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor + + Examples:: + + >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2) + >>> input + tensor([[[[ 1., 2.], + [ 3., 4.]]]]) + + >>> m = nn.UpsamplingNearest2d(scale_factor=2) + >>> m(input) + tensor([[[[ 1., 1., 2., 2.], + [ 1., 1., 2., 2.], + [ 3., 3., 4., 4.], + [ 3., 3., 4., 4.]]]]) + """ + def __init__(self, size=None, scale_factor=None): + super(UpsamplingNearest2d, self).__init__(size, scale_factor, mode='nearest')
    + + +
[docs]class UpsamplingBilinear2d(Upsample): + r"""Applies a 2D bilinear upsampling to an input signal composed of several input + channels. + + To specify the scale, it takes either the :attr:`size` or the :attr:`scale_factor` + as its constructor argument. + + When :attr:`size` is given, it is the output size of the image `(h, w)`. + + Args: + size (int or Tuple[int, int], optional): output spatial sizes + scale_factor (float or Tuple[float, float], optional): multiplier for + spatial size. + + .. warning:: + This class is deprecated in favor of :func:`~nn.functional.interpolate`. It is + equivalent to ``nn.functional.interpolate(..., mode='bilinear', align_corners=True)``. + + Shape: + - Input: :math:`(N, C, H_{in}, W_{in})` + - Output: :math:`(N, C, H_{out}, W_{out})` where + + .. math:: + H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor + + .. math:: + W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor + + Examples:: + + >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2) + >>> input + tensor([[[[ 1., 2.], + [ 3., 4.]]]]) + + >>> m = nn.UpsamplingBilinear2d(scale_factor=2) + >>> m(input) + tensor([[[[ 1.0000, 1.3333, 1.6667, 2.0000], + [ 1.6667, 2.0000, 2.3333, 2.6667], + [ 2.3333, 2.6667, 3.0000, 3.3333], + [ 3.0000, 3.3333, 3.6667, 4.0000]]]]) + """ + def __init__(self, size=None, scale_factor=None): + super(UpsamplingBilinear2d, self).__init__(size, scale_factor, mode='bilinear', align_corners=True)
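Both deprecation warnings above point to :func:`~nn.functional.interpolate`; a small sketch checking the equivalence claimed for the bilinear module, reusing the input from the docstring examples:

    import torch
    import torch.nn.functional as F

    x = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
    a = torch.nn.UpsamplingBilinear2d(scale_factor=2)(x)
    b = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
    assert torch.allclose(a, b)  # the module is a thin wrapper over interpolate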
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/parallel/data_parallel.html b/docs/stable/_modules/torch/nn/parallel/data_parallel.html
new file mode 100644
index 000000000000..6b357c5ffb6a
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/parallel/data_parallel.html
@@ -0,0 +1,722 @@
+ torch.nn.parallel.data_parallel — PyTorch master documentation

    Source code for torch.nn.parallel.data_parallel

    +import operator
    +import torch
    +import warnings
    +from itertools import chain
    +from ..modules import Module
    +from .scatter_gather import scatter_kwargs, gather
    +from .replicate import replicate
    +from .parallel_apply import parallel_apply
    +from torch.cuda._utils import _get_device_index
    +
    +
    +def _check_balance(device_ids):
    +    imbalance_warn = """
    +    There is an imbalance between your GPUs. You may want to exclude GPU {} which
    +    has less than 75% of the memory or cores of GPU {}. You can do so by setting
    +    the device_ids argument to DataParallel, or by setting the CUDA_VISIBLE_DEVICES
    +    environment variable."""
    +    device_ids = list(map(lambda x: _get_device_index(x, True), device_ids))
    +    dev_props = [torch.cuda.get_device_properties(i) for i in device_ids]
    +
    +    def warn_imbalance(get_prop):
    +        values = [get_prop(props) for props in dev_props]
    +        min_pos, min_val = min(enumerate(values), key=operator.itemgetter(1))
    +        max_pos, max_val = max(enumerate(values), key=operator.itemgetter(1))
    +        if min_val / max_val < 0.75:
    +            warnings.warn(imbalance_warn.format(device_ids[min_pos], device_ids[max_pos]))
    +            return True
    +        return False
    +
    +    if warn_imbalance(lambda props: props.total_memory):
    +        return
    +    if warn_imbalance(lambda props: props.multi_processor_count):
    +        return
    +
    +
    +
[docs]class DataParallel(Module): + r"""Implements data parallelism at the module level. + + This container parallelizes the application of the given :attr:`module` by + splitting the input across the specified devices by chunking in the batch + dimension (other objects will be copied once per device). In the forward + pass, the module is replicated on each device, and each replica handles a + portion of the input. During the backward pass, gradients from each replica + are summed into the original module. + + The batch size should be larger than the number of GPUs used. + + See also: :ref:`cuda-nn-dataparallel-instead` + + Arbitrary positional and keyword inputs are allowed to be passed into + DataParallel but some types are specially handled. Tensors will be + **scattered** along the dim specified (default 0). Tuple, list and dict types will + be shallow copied. The other types will be shared among different threads + and can be corrupted if written to in the model's forward pass. + + The parallelized :attr:`module` must have its parameters and buffers on + ``device_ids[0]`` before running this :class:`~torch.nn.DataParallel` + module. + + .. warning:: + In each forward, :attr:`module` is **replicated** on each device, so any + updates to the running module in ``forward`` will be lost. For example, + if :attr:`module` has a counter attribute that is incremented in each + ``forward``, it will always stay at the initial value because the update + is done on the replicas, which are destroyed after ``forward``. However, + :class:`~torch.nn.DataParallel` guarantees that the replica on + ``device[0]`` will have its parameters and buffers sharing storage with + the base parallelized :attr:`module`. So **in-place** updates to the + parameters or buffers on ``device[0]`` will be recorded. E.g., + :class:`~torch.nn.BatchNorm2d` and :func:`~torch.nn.utils.spectral_norm` + rely on this behavior to update the buffers. + + .. warning:: + Forward and backward hooks defined on :attr:`module` and its submodules + will be invoked ``len(device_ids)`` times, each with inputs located on + a particular device. In particular, the hooks are only guaranteed to be + executed in the correct order with respect to operations on the corresponding + devices. For example, it is not guaranteed that hooks set via + :meth:`~torch.nn.Module.register_forward_pre_hook` will be executed before + `all` ``len(device_ids)`` :meth:`~torch.nn.Module.forward` calls, only + that each such hook will be executed before the corresponding + :meth:`~torch.nn.Module.forward` call of that device. + + .. warning:: + When :attr:`module` returns a scalar (i.e., 0-dimensional tensor) in + :func:`forward`, this wrapper will return a vector of length equal to the + number of devices used in data parallelism, containing the result from + each device. + + .. note:: + There is a subtlety in using the + ``pack sequence -> recurrent network -> unpack sequence`` pattern in a + :class:`~torch.nn.Module` wrapped in :class:`~torch.nn.DataParallel`. + See the :ref:`pack-rnn-unpack-with-data-parallelism` section in the FAQ for + details.
+ + + Args: + module (Module): module to be parallelized + device_ids (list of int or torch.device): CUDA devices (default: all devices) + output_device (int or torch.device): device location of output (default: device_ids[0]) + + Attributes: + module (Module): the module to be parallelized + + Example:: + + >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2]) + >>> output = net(input_var) # input_var can be on any device, including CPU + """ + + # TODO: update notes/cuda.rst when this class handles 8+ GPUs well + + def __init__(self, module, device_ids=None, output_device=None, dim=0): + super(DataParallel, self).__init__() + + if not torch.cuda.is_available(): + self.module = module + self.device_ids = [] + return + + if device_ids is None: + device_ids = list(range(torch.cuda.device_count())) + if output_device is None: + output_device = device_ids[0] + + self.dim = dim + self.module = module + self.device_ids = list(map(lambda x: _get_device_index(x, True), device_ids)) + self.output_device = _get_device_index(output_device, True) + self.src_device_obj = torch.device("cuda:{}".format(self.device_ids[0])) + + _check_balance(self.device_ids) + + if len(self.device_ids) == 1: + self.module.cuda(device_ids[0]) + + def forward(self, *inputs, **kwargs): + if not self.device_ids: + return self.module(*inputs, **kwargs) + + for t in chain(self.module.parameters(), self.module.buffers()): + if t.device != self.src_device_obj: + raise RuntimeError("module must have its parameters and buffers " + "on device {} (device_ids[0]) but found one of " + "them on device: {}".format(self.src_device_obj, t.device)) + + inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids) + if len(self.device_ids) == 1: + return self.module(*inputs[0], **kwargs[0]) + replicas = self.replicate(self.module, self.device_ids[:len(inputs)]) + outputs = self.parallel_apply(replicas, inputs, kwargs) + return self.gather(outputs, self.output_device) + + def replicate(self, module, device_ids): + return replicate(module, device_ids, not torch.is_grad_enabled()) + + def scatter(self, inputs, kwargs, device_ids): + return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim) + + def parallel_apply(self, replicas, inputs, kwargs): + return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)]) + + def gather(self, outputs, output_device): + return gather(outputs, output_device, dim=self.dim)
    + + +
[docs]def data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None): + r"""Evaluates module(input) in parallel across the GPUs given in device_ids. + + This is the functional version of the DataParallel module. + + Args: + module (Module): the module to evaluate in parallel + inputs (Tensor): inputs to the module + device_ids (list of int or torch.device): GPU ids on which to replicate the module + output_device (int or torch.device): GPU location of the output. Use -1 to indicate the CPU. + (default: device_ids[0]) + Returns: + a Tensor containing the result of module(input) located on + output_device + """ + if not isinstance(inputs, tuple): + inputs = (inputs,) + + if device_ids is None: + device_ids = list(range(torch.cuda.device_count())) + + if output_device is None: + output_device = device_ids[0] + + device_ids = list(map(lambda x: _get_device_index(x, True), device_ids)) + output_device = _get_device_index(output_device, True) + src_device_obj = torch.device("cuda:{}".format(device_ids[0])) + + for t in chain(module.parameters(), module.buffers()): + if t.device != src_device_obj: + raise RuntimeError("module must have its parameters and buffers " + "on device {} (device_ids[0]) but found one of " + "them on device: {}".format(src_device_obj, t.device)) + + inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim) + if len(device_ids) == 1: + return module(*inputs[0], **module_kwargs[0]) + used_device_ids = device_ids[:len(inputs)] + replicas = replicate(module, used_device_ids) + outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids) + return gather(outputs, output_device, dim)
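A minimal sketch of the functional form above, assuming a host with two visible GPUs; the model and tensor sizes are illustrative stand-ins:

    import torch
    from torch.nn.parallel import data_parallel

    model = torch.nn.Linear(10, 5).cuda(0)        # parameters must live on device_ids[0]
    inputs = torch.randn(8, 10, device='cuda:0')  # the batch is scattered along dim 0
    out = data_parallel(model, inputs, device_ids=[0, 1], output_device=0)
    print(out.shape)  # torch.Size([8, 5])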
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/parallel/distributed.html b/docs/stable/_modules/torch/nn/parallel/distributed.html
new file mode 100644
index 000000000000..5890abf783cc
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/parallel/distributed.html
@@ -0,0 +1,1044 @@
+ torch.nn.parallel.distributed — PyTorch master documentation

    Source code for torch.nn.parallel.distributed

    +from contextlib import contextmanager
    +import copy
    +import itertools
    +
    +import torch
    +
    +import torch.cuda.comm
    +import torch.distributed as dist
    +
    +if dist.is_available():
    +    from torch.distributed.distributed_c10d import _get_default_group
    +
    +from ..modules import Module
    +from .replicate import replicate
    +from .scatter_gather import scatter_kwargs, gather
    +from .parallel_apply import parallel_apply
    +from torch.cuda._utils import _get_device_index
    +
    +
    +def _find_tensors(obj):
    +    r"""
    +    Recursively find all tensors contained in the specified object.
    +    """
    +    if isinstance(obj, torch.Tensor):
    +        return [obj]
    +    if isinstance(obj, (list, tuple)):
    +        return itertools.chain(*map(_find_tensors, obj))
    +    if isinstance(obj, dict):
    +        return itertools.chain(*map(_find_tensors, obj.values()))
    +    return []
    +
    +
    +
[docs]class DistributedDataParallel(Module): + r"""Implements distributed data parallelism that is based on the + ``torch.distributed`` package at the module level. + + This container parallelizes the application of the given module by + splitting the input across the specified devices by chunking in the batch + dimension. The module is replicated on each machine and each device, and + each such replica handles a portion of the input. During the backward + pass, gradients from each node are averaged. + + The batch size should be larger than the number of GPUs used locally. + + See also: :ref:`distributed-basics` and :ref:`cuda-nn-dataparallel-instead`. + The same constraints on input as in :class:`torch.nn.DataParallel` apply. + + Creation of this class requires ``torch.distributed`` to already be + initialized, by calling :func:`torch.distributed.init_process_group`. + + ``DistributedDataParallel`` can be used in the following two ways: + + (1) Single-Process Multi-GPU + + In this case, a single process will be + spawned on each host/node and each process will operate on all the GPUs + of the node where it's running. To use ``DistributedDataParallel`` in + this way, you can simply construct the model as follows: + + >>> torch.distributed.init_process_group(backend="nccl") + >>> model = DistributedDataParallel(model) # device_ids will include all GPU devices by default + + (2) Multi-Process Single-GPU + + This is the highly recommended way to use ``DistributedDataParallel``, with + multiple processes, each of which operates on a single GPU. This is + currently the fastest approach to do data parallel training using PyTorch + and applies to both single-node (multi-GPU) and multi-node data + parallel training. It is proven to be significantly faster than + :class:`torch.nn.DataParallel` for single-node multi-GPU data + parallel training. + + Here is how to use it: on each host with N GPUs, you should spawn up N + processes, while ensuring that each process individually works on a single GPU + from 0 to N-1. Therefore, it is your job to ensure that your training script + operates on a single given GPU by calling: + + >>> torch.cuda.set_device(i) + + where i is from 0 to N-1. In each process, you should refer to the following + to construct this module: + + >>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...') + >>> model = DistributedDataParallel(model, device_ids=[i], output_device=i) + + In order to spawn up multiple processes per node, you can use either + ``torch.distributed.launch`` or ``torch.multiprocessing.spawn``. + + .. note:: The ``nccl`` backend is currently the fastest and most highly + recommended backend to be used with Multi-Process Single-GPU + distributed training; this applies to both single-node and multi-node + distributed training. + + .. note:: This module also supports mixed-precision distributed training. + This means that your model can have different types of parameters, such + as a mix of fp16 and fp32 types; gradient reduction on these + mixed-type parameters will just work fine. + Also note that the ``nccl`` backend is currently the fastest and most highly + recommended backend for fp16/fp32 mixed-precision training. + + .. note:: If you use ``torch.save`` on one process to checkpoint the module, + and ``torch.load`` on some other processes to recover it, make sure that + ``map_location`` is configured properly for every process.
Without + ``map_location``, ``torch.load`` would recover the module to the devices + where the module was saved from. + + .. warning:: + This module works only with the ``gloo`` and ``nccl`` backends. + + .. warning:: + The constructor, the forward method, and differentiation of the output (or a + function of the output of this module) are distributed synchronization + points. Take that into account in case different processes might be + executing different code. + + .. warning:: + This module assumes all parameters are registered in the model by the + time it is created. No parameters should be added or removed later. + The same applies to buffers. + + .. warning:: + This module assumes that the parameters registered in the model of each + distributed process are in the same order. The module itself will + conduct gradient all-reduction following the reverse order of the + registered parameters of the model. In other words, it is the users' + responsibility to ensure that each distributed process has the exact + same model and thus the exact same parameter registration order. + + .. warning:: + This module assumes all buffers and gradients are dense. + + .. warning:: + This module doesn't work with :func:`torch.autograd.grad` (i.e. it will + only work if gradients are to be accumulated in ``.grad`` attributes of + parameters). + + .. warning:: + + If you plan on using this module with a ``nccl`` backend or a ``gloo`` + backend (that uses Infiniband), together with a DataLoader that uses + multiple workers, please change the multiprocessing start method to + ``forkserver`` (Python 3 only) or ``spawn``. Unfortunately + Gloo (that uses Infiniband) and NCCL2 are not fork safe, and you will + likely experience deadlocks if you don't change this setting. + + .. warning:: + Forward and backward hooks defined on :attr:`module` and its submodules + won't be invoked anymore, unless the hooks are initialized in the + :meth:`forward` method. + + .. warning:: + You should never try to change your model's parameters after wrapping + up your model with DistributedDataParallel. When wrapping up your model + with DistributedDataParallel, the constructor of + DistributedDataParallel will register the additional gradient + reduction functions on all the parameters of the model itself at the + time of construction. If you change the model's parameters after + the DistributedDataParallel construction, this is not supported and + unexpected behaviors can happen, since some parameters' gradient + reduction functions might not get called. + + .. note:: + Parameters are never broadcast between processes. The module performs + an all-reduce step on gradients and assumes that they will be modified + by the optimizer in all processes in the same way. Buffers + (e.g. BatchNorm stats) are broadcast from the module in the process of rank + 0 to all other replicas in the system in every iteration. + + Args: + module (Module): module to be parallelized + device_ids (list of int or torch.device): CUDA devices. This should + only be provided when the input module resides on a single + CUDA device. For single-device modules, the ``i``th + :attr:`module` replica is placed on ``device_ids[i]``. For + multi-device modules and CPU modules, device_ids must be None + or an empty list, and input data for the forward pass must be + placed on the correct device. (default: all devices for + single-device modules) + output_device (int or torch.device): device location of output for + single-device CUDA modules.
For multi-device modules and + CPU modules, it must be None, and the module itself + dictates the output location. (default: device_ids[0] for + single-device modules) + broadcast_buffers (bool): flag that enables syncing (broadcasting) buffers of + the module at the beginning of the forward function. + (default: ``True``) + process_group: the process group to be used for distributed data + all-reduction. If ``None``, the default process group, which + is created by ``torch.distributed.init_process_group``, + will be used. (default: ``None``) + bucket_cap_mb: DistributedDataParallel will bucket parameters into + multiple buckets so that gradient reduction of each + bucket can potentially overlap with backward computation. + :attr:`bucket_cap_mb` controls the bucket size in megabytes (MB). + (default: 25) + find_unused_parameters (bool): Traverse the autograd graph of all tensors + contained in the return value of the wrapped + module's ``forward`` function. + Parameters that don't receive gradients as + part of this graph are preemptively marked + as being ready to be reduced. Note that all + ``forward`` outputs that are derived from + module parameters must participate in + calculating loss and later the gradient + computation. If they don't, this wrapper will + hang waiting for autograd to produce gradients + for those parameters. Any outputs derived from + module parameters that are otherwise unused can + be detached from the autograd graph using + ``torch.Tensor.detach``. (default: ``False``) + check_reduction: when set to ``True``, it enables DistributedDataParallel + to automatically check if the previous iteration's + backward reductions were successfully issued at the + beginning of every iteration's forward function. + You normally don't need this option enabled unless you + are observing weird behaviors such as different ranks + getting different gradients, which should not + happen if DistributedDataParallel is correctly used. + (default: ``False``) + + Attributes: + module (Module): the module to be parallelized + + Example:: + + >>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...') + >>> net = torch.nn.parallel.DistributedDataParallel(model, process_group=pg) + """ + def __init__(self, module, device_ids=None, + output_device=None, dim=0, broadcast_buffers=True, + process_group=None, bucket_cap_mb=25, + find_unused_parameters=False, + check_reduction=False): + + super(DistributedDataParallel, self).__init__() + + self.is_multi_device_module = len({p.device for p in module.parameters()}) > 1 + self.is_cuda = all([p.device.type == 'cuda' for p in module.parameters()]) + + if not self.is_cuda or self.is_multi_device_module: + assert not device_ids and not output_device, ( + "DistributedDataParallel device_ids and output_device arguments " + "only work with single-device CUDA modules, but got " + "device_ids {}, output_device {}, and module parameters {}."
+ ).format(device_ids, output_device, {p.device for p in module.parameters()}) + + self.device_ids = None + self.output_device = None + else: + # Use all devices by default for single-device CUDA modules + if device_ids is None: + device_ids = list(range(torch.cuda.device_count())) + + self.device_ids = list(map(lambda x: _get_device_index(x, True), device_ids)) + + if output_device is None: + output_device = device_ids[0] + + self.output_device = _get_device_index(output_device, True) + + if self.is_multi_device_module: + assert self.is_cuda, ( + "DistributedDataParallel with multi-device module only works " + "with CUDA devices, but module parameters locate in {}." + ).format({p.device for p in module.parameters()}) + + if process_group is None: + self.process_group = _get_default_group() + else: + self.process_group = process_group + + self.dim = dim + self.module = module + self.broadcast_buffers = broadcast_buffers + self.find_unused_parameters = find_unused_parameters + self.require_backward_grad_sync = True + self.require_forward_param_sync = True + + if check_reduction: + # This argument is no longer used since the reducer + # will ensure reduction completes even if some parameters + # do not receive gradients. + pass + + MB = 1024 * 1024 + + # used for intra-node param sync and inter-node sync as well + self.broadcast_bucket_size = int(250 * MB) + + # reduction bucket size + self.bucket_bytes_cap = int(bucket_cap_mb * MB) + + # Sync params and buffers + module_states = list(self.module.state_dict().values()) + if len(module_states) > 0: + self._distributed_broadcast_coalesced( + module_states, + self.broadcast_bucket_size) + + self._ddp_init_helper() + + def _ddp_init_helper(self): + """ + Initialization helper function that does the following: + + (1) replicating the module from device[0] to the other devices + (2) bucketing the parameters for reductions + (3) resetting the bucketing states + (4) registering the grad hooks + (5) passing a handle of DDP to SyncBatchNorm Layer + """ + if self.device_ids and len(self.device_ids) > 1: + # only create replicas for single-device CUDA modules + # + # TODO: we don't need to replicate params in here. they're always going to + # be broadcasted using larger blocks in broadcast_coalesced, so it might be + # better to not pollute the caches with these small blocks + self._module_copies = replicate(self.module, self.device_ids, detach=True) + self._module_copies[0] = self.module + + for module_copy in self._module_copies[1:]: + for param, copy_param in zip(self.module.parameters(), module_copy.parameters()): + copy_param.requires_grad = param.requires_grad + + else: + self._module_copies = [self.module] + + self.modules_params = [list(m.parameters()) for m in self._module_copies] + self.modules_buffers = [list(m.buffers()) for m in self._module_copies] + + # Build tuple of (module, parameter) for all parameters that require grads. + modules_and_parameters = [ + [ + (module, parameter) + for module in replica.modules() + for parameter in filter( + lambda parameter: parameter.requires_grad, + module.parameters(recurse=False)) + ] for replica in self._module_copies] + + # Build list of parameters. + parameters = [ + list(parameter for _, parameter in replica) + for replica in modules_and_parameters] + + # Checks if a module will produce a sparse gradient. 
+ def produces_sparse_gradient(module): + if isinstance(module, torch.nn.Embedding): + return module.sparse + if isinstance(module, torch.nn.EmbeddingBag): + return module.sparse + return False + + # Build list of booleans indicating whether or not to expect sparse + # gradients for the corresponding parameters. + expect_sparse_gradient = [ + list(produces_sparse_gradient(module) for module, _ in replica) + for replica in modules_and_parameters] + + # The bucket size limit is specified in the constructor. + # Additionally, we allow for a single small bucket for parameters + # that are defined first, such that their gradients don't spill into + # a much larger bucket, adding unnecessary latency after gradient + # computation finishes. Experiments showed 1MB is a reasonable value. + bucket_indices = dist._compute_bucket_assignment_by_size( + parameters[0], + [1024 * 1024, self.bucket_bytes_cap], + expect_sparse_gradient[0]) + + # Note: reverse list of buckets because we want to approximate the + # order in which their gradients are produced, and assume they + # are used in the forward pass in the order they are defined. + self.reducer = dist.Reducer( + parameters, + list(reversed(bucket_indices)), + self.process_group, + expect_sparse_gradient) + + # passing a handle to torch.nn.SyncBatchNorm layer + self._passing_sync_batchnorm_handle(self._module_copies) + + def __getstate__(self): + self._check_default_group() + attrs = copy.copy(self.__dict__) + del attrs['process_group'] + del attrs['reducer'] + return attrs + + def __setstate__(self, state): + # If serializable, then the process group should be the default one + self.process_group = _get_default_group() + super(DistributedDataParallel, self).__setstate__(state) + self.__dict__.setdefault('require_forward_param_sync', True) + self.__dict__.setdefault('require_backward_grad_sync', True) + self._ddp_init_helper() + + def _check_default_group(self): + pickle_not_supported = False + try: + if self.process_group != _get_default_group(): + pickle_not_supported = True + except RuntimeError: + pickle_not_supported = True + + if pickle_not_supported: + raise RuntimeError("DDP Pickling/Unpickling are only supported " + "when using DDP with the default process " + "group. That is, when you have called " + "init_process_group and have not passed " + "process_group argument to DDP constructor") + +
    [docs] @contextmanager + def no_sync(self): + r""" + A context manager to disable gradient synchronizations across DDP + processes. Within this context, gradients will be accumulated on module + variables, which will later be synchronized in the first + forward-backward pass exiting the context. + + Example:: + + >>> ddp = torch.nn.DistributedDataParallel(model, pg) + >>> with ddp.no_sync(): + ... for input in inputs: + ... ddp(input).backward() # no synchronization, accumulate grads + ... ddp(another_input).backward() # synchronize grads + """ + old_require_backward_grad_sync = self.require_backward_grad_sync + self.require_backward_grad_sync = False + try: + yield + finally: + self.require_backward_grad_sync = old_require_backward_grad_sync
    + + def forward(self, *inputs, **kwargs): + if self.require_forward_param_sync: + self._sync_params() + + if self.device_ids: + inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids) + if len(self.device_ids) == 1: + output = self.module(*inputs[0], **kwargs[0]) + else: + outputs = self.parallel_apply(self._module_copies[:len(inputs)], inputs, kwargs) + output = self.gather(outputs, self.output_device) + else: + output = self.module(*inputs, **kwargs) + + if torch.is_grad_enabled() and self.require_backward_grad_sync: + self.require_forward_param_sync = True + # We'll return the output object verbatim since it is a freeform + # object. We need to find any tensors in this object, though, + # because we need to figure out which parameters were used during + # this forward pass, to ensure we short circuit reduction for any + # unused parameters. Only if `find_unused_parameters` is set. + if self.find_unused_parameters: + self.reducer.prepare_for_backward(list(_find_tensors(output))) + else: + self.reducer.prepare_for_backward([]) + else: + self.require_forward_param_sync = False + + return output + + def scatter(self, inputs, kwargs, device_ids): + return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim) + + def parallel_apply(self, replicas, inputs, kwargs): + return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)]) + + def gather(self, outputs, output_device): + return gather(outputs, output_device, dim=self.dim) + + def train(self, mode=True): + super(DistributedDataParallel, self).train(mode) + for module in self._module_copies[1:]: + module.train(mode) + + def _distributed_broadcast_coalesced(self, tensors, buffer_size): + dist._broadcast_coalesced(self.process_group, tensors, buffer_size) + + def _sync_params(self): + with torch.no_grad(): + # only do intra-node parameters sync for replicated single-device + # CUDA modules + if self.device_ids and len(self.device_ids) > 1: + # intra-node parameter sync + result = torch.cuda.comm.broadcast_coalesced( + self.modules_params[0], + self.device_ids, + self.broadcast_bucket_size) + for tensors, module_params in zip(result[1:], + self.modules_params[1:]): + for tensor, param in zip(tensors, module_params): + param.set_(tensor) + # Assume we have just run the optimizer and zeroed the + # grads of the parameters on the root model. We need + # to zero the grads on all model replicas as well. + # This snippet is copied from torch.optim.Optimizer. + if param.grad is not None: + param.grad.detach_() + param.grad.zero_() + + # module buffer sync + if self.broadcast_buffers and len(self.modules_buffers[0]) > 0: + # Synchronize buffers across processes. + # The process with rank 0 is considered the authoritative copy. 
+ self._distributed_broadcast_coalesced( + self.modules_buffers[0], + self.broadcast_bucket_size) + # only do intra-node buffer sync for replicated single-device + # CUDA modules + if self.device_ids and len(self.device_ids) > 1: + # intra-node buffer sync + result = torch.cuda.comm.broadcast_coalesced( + self.modules_buffers[0], + self.device_ids, + self.broadcast_bucket_size) + for tensors, module_buffers in zip(result[1:], + self.modules_buffers[1:]): + for tensor, buffer in zip(tensors, module_buffers): + buffer.set_(tensor) + + def _passing_sync_batchnorm_handle(self, module_copies): + for dev_idx, module in enumerate(module_copies): + for layer in module.modules(): + if isinstance(layer, torch.nn.modules.SyncBatchNorm): + assert self.is_cuda, "SyncBatchNorm layers only work with CUDA modules" + layer._specify_ddp_gpu_num( + len(self.device_ids) if self.device_ids else 1)
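A minimal end-to-end sketch of the recommended Multi-Process Single-GPU pattern described in the docstring above, assuming a host with two GPUs; the address, port, and model are illustrative placeholders:

    import torch
    import torch.distributed as dist
    import torch.multiprocessing as mp

    def worker(rank, world_size):
        dist.init_process_group(backend='nccl', init_method='tcp://127.0.0.1:23456',
                                world_size=world_size, rank=rank)
        torch.cuda.set_device(rank)                   # one GPU per process
        model = torch.nn.Linear(10, 10).cuda(rank)
        ddp = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank],
                                                        output_device=rank)
        out = ddp(torch.randn(20, 10, device=rank))
        out.sum().backward()                          # gradients are all-reduced here

    if __name__ == '__main__':
        mp.spawn(worker, args=(2,), nprocs=2)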
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/parameter.html b/docs/stable/_modules/torch/nn/parameter.html
new file mode 100644
index 000000000000..6f9b0c5cc5ea
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/parameter.html
@@ -0,0 +1,558 @@
+ torch.nn.parameter — PyTorch master documentation

    Source code for torch.nn.parameter

    +import torch
    +from collections import OrderedDict
    +
    +
    +
[docs]class Parameter(torch.Tensor): + r"""A kind of Tensor that is to be considered a module parameter. + + Parameters are :class:`~torch.Tensor` subclasses that have a + very special property when used with :class:`Module` s: when they're + assigned as Module attributes, they are automatically added to the list of + its parameters, and will appear e.g. in the :meth:`~Module.parameters` iterator. + Assigning a Tensor doesn't have such an effect. This is because one might + want to cache some temporary state, like the last hidden state of the RNN, in + the model. If there were no such class as :class:`Parameter`, these + temporaries would get registered too. + + Arguments: + data (Tensor): parameter tensor. + requires_grad (bool, optional): if the parameter requires gradient. See + :ref:`excluding-subgraphs` for more details. Default: `True` + """ + + def __new__(cls, data=None, requires_grad=True): + if data is None: + data = torch.Tensor() + return torch.Tensor._make_subclass(cls, data, requires_grad) + + def __deepcopy__(self, memo): + if id(self) in memo: + return memo[id(self)] + else: + result = type(self)(self.data.clone(), self.requires_grad) + memo[id(self)] = result + return result + + def __repr__(self): + return 'Parameter containing:\n' + super(Parameter, self).__repr__() + + def __reduce_ex__(self, proto): + # See Note [Don't serialize hooks] + return ( + torch._utils._rebuild_parameter, + (self.data, self.requires_grad, OrderedDict()) + )
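A minimal sketch of the registration behavior described above; the module and attribute names are illustrative:

    import torch
    import torch.nn as nn

    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.weight = nn.Parameter(torch.randn(3, 3))  # auto-registered
            self.cache = torch.zeros(3, 3)                 # plain Tensor: not registered

    net = Net()
    print([name for name, _ in net.named_parameters()])  # ['weight']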
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/utils/clip_grad.html b/docs/stable/_modules/torch/nn/utils/clip_grad.html
new file mode 100644
index 000000000000..614efa5bfa16
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/utils/clip_grad.html
@@ -0,0 +1,584 @@
+ torch.nn.utils.clip_grad — PyTorch master documentation

    Source code for torch.nn.utils.clip_grad

    +import warnings
    +import torch
    +from torch._six import inf
    +
    +
    +
    [docs]def clip_grad_norm_(parameters, max_norm, norm_type=2): + r"""Clips gradient norm of an iterable of parameters. + + The norm is computed over all gradients together, as if they were + concatenated into a single vector. Gradients are modified in-place. + + Arguments: + parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a + single Tensor that will have gradients normalized + max_norm (float or int): max norm of the gradients + norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for + infinity norm. + + Returns: + Total norm of the parameters (viewed as a single vector). + """ + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + parameters = list(filter(lambda p: p.grad is not None, parameters)) + max_norm = float(max_norm) + norm_type = float(norm_type) + if norm_type == inf: + total_norm = max(p.grad.data.abs().max() for p in parameters) + else: + total_norm = 0 + for p in parameters: + param_norm = p.grad.data.norm(norm_type) + total_norm += param_norm.item() ** norm_type + total_norm = total_norm ** (1. / norm_type) + clip_coef = max_norm / (total_norm + 1e-6) + if clip_coef < 1: + for p in parameters: + p.grad.data.mul_(clip_coef) + return total_norm
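A minimal sketch of norm clipping inside a training step; the model, data, and ``max_norm`` value are illustrative stand-ins:

    import torch

    model = torch.nn.Linear(4, 2)
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    loss = model(torch.randn(8, 4)).pow(2).mean()
    loss.backward()
    # Rescales all gradients in-place so that their total norm is at most 1.0.
    total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
    opt.step()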
    + + +def clip_grad_norm(parameters, max_norm, norm_type=2): + r"""Clips gradient norm of an iterable of parameters. + + .. warning:: + This method is now deprecated in favor of + :func:`torch.nn.utils.clip_grad_norm_`. + """ + warnings.warn("torch.nn.utils.clip_grad_norm is now deprecated in favor " + "of torch.nn.utils.clip_grad_norm_.", stacklevel=2) + return clip_grad_norm_(parameters, max_norm, norm_type) + + +
[docs]def clip_grad_value_(parameters, clip_value): + r"""Clips the gradients of an iterable of parameters at a specified value. + + Gradients are modified in-place. + + Arguments: + parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a + single Tensor that will have gradients clipped + clip_value (float or int): maximum allowed value of the gradients. + The gradients are clipped in the range + :math:`\left[\text{-clip\_value}, \text{clip\_value}\right]` + """ + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + clip_value = float(clip_value) + for p in filter(lambda p: p.grad is not None, parameters): + p.grad.data.clamp_(min=-clip_value, max=clip_value)
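Value clipping caps each gradient element independently, in contrast to the norm-based rescaling above; continuing the previous sketch with an assumed threshold of 0.5:

    torch.nn.utils.clip_grad_value_(model.parameters(), clip_value=0.5)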
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/utils/convert_parameters.html b/docs/stable/_modules/torch/nn/utils/convert_parameters.html
new file mode 100644
index 000000000000..f009174139b8
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/utils/convert_parameters.html
@@ -0,0 +1,597 @@
+ torch.nn.utils.convert_parameters — PyTorch master documentation

    Source code for torch.nn.utils.convert_parameters

    +import torch
    +
    +
    +
    [docs]def parameters_to_vector(parameters): + r"""Convert parameters to one vector + + Arguments: + parameters (Iterable[Tensor]): an iterator of Tensors that are the + parameters of a model. + + Returns: + The parameters represented by a single vector + """ + # Flag for the device where the parameter is located + param_device = None + + vec = [] + for param in parameters: + # Ensure the parameters are located in the same device + param_device = _check_param_device(param, param_device) + + vec.append(param.view(-1)) + return torch.cat(vec)
    + + +
[docs]def vector_to_parameters(vec, parameters): + r"""Convert one vector to the parameters. + + Arguments: + vec (Tensor): a single vector representing the parameters of a model. + parameters (Iterable[Tensor]): an iterator of Tensors that are the + parameters of a model. + """ + # Ensure vec is of type Tensor + if not isinstance(vec, torch.Tensor): + raise TypeError('expected torch.Tensor, but got: {}' + .format(torch.typename(vec))) + # Flag for the device where the parameter is located + param_device = None + + # Pointer for slicing the vector for each parameter + pointer = 0 + for param in parameters: + # Ensure the parameters are located on the same device + param_device = _check_param_device(param, param_device) + + # The length of the parameter + num_param = param.numel() + # Slice the vector, reshape it, and replace the old data of the parameter + param.data = vec[pointer:pointer + num_param].view_as(param).data + + # Increment the pointer + pointer += num_param
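A minimal round-trip sketch for the two conversion functions above; the model is an illustrative stand-in:

    import torch
    from torch.nn.utils import parameters_to_vector, vector_to_parameters

    model = torch.nn.Linear(3, 2)
    vec = parameters_to_vector(model.parameters())  # 1-D tensor with 3*2 + 2 = 8 elements
    vector_to_parameters(vec, model.parameters())   # writes the values back in place
    print(vec.shape)  # torch.Size([8])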
+ + +def _check_param_device(param, old_param_device): + r"""This helper function checks that the parameters are located + on the same device. Currently, the conversion between model parameters + and single vector form is not supported for multiple allocations, + e.g. parameters on different GPUs, or a mixture of CPU/GPU. + + Arguments: + param (Tensor): a parameter Tensor of a model + old_param_device (int): the device where the first parameter of a + model is allocated. + + Returns: + old_param_device (int): the device of the first parameter seen + """ + + # First parameter encountered + if old_param_device is None: + old_param_device = param.get_device() if param.is_cuda else -1 + else: + warn = False + if param.is_cuda: # Check if on the same GPU + warn = (param.get_device() != old_param_device) + else: # Check if on the CPU + warn = (old_param_device != -1) + if warn: + raise TypeError('Found two parameters on different devices, ' + 'this is currently not supported.') + return old_param_device +
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/utils/rnn.html b/docs/stable/_modules/torch/nn/utils/rnn.html
new file mode 100644
index 000000000000..cb575860ebfe
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/utils/rnn.html
@@ -0,0 +1,938 @@
+ torch.nn.utils.rnn — PyTorch master documentation

    Source code for torch.nn.utils.rnn

    +from collections import namedtuple
    +import warnings
    +
    +import torch
    +from .. import _VF
    +from ..._jit_internal import Optional
    +
    +
    +PackedSequence_ = namedtuple('PackedSequence',
    +                             ['data', 'batch_sizes', 'sorted_indices', 'unsorted_indices'])
    +
    +# type annotation for PackedSequence_ to make it compatible with TorchScript
    +PackedSequence_.__annotations__ = {'data': torch.Tensor, 'batch_sizes': torch.Tensor,
    +                                   'sorted_indices': Optional[torch.Tensor],
    +                                   'unsorted_indices': Optional[torch.Tensor]}
    +
    +def bind(optional, fn):
    +    if optional is None:
    +        return None
    +    return fn(optional)
    +
    +
    +
[docs]class PackedSequence(PackedSequence_): + r"""Holds the data and the list of :attr:`batch_sizes` of a packed sequence. + + All RNN modules accept packed sequences as inputs. + + Note: + Instances of this class should never be created manually. They are meant + to be instantiated by functions like :func:`pack_padded_sequence`. + + Batch sizes represent the number of elements at each sequence step in + the batch, not the varying sequence lengths passed to + :func:`pack_padded_sequence`. For instance, given data ``abc`` and ``x``, + the :class:`PackedSequence` would contain data ``axbc`` with + ``batch_sizes=[2,1,1]``. + + Attributes: + data (Tensor): Tensor containing the packed sequence + batch_sizes (Tensor): Tensor of integers holding + information about the batch size at each sequence step + sorted_indices (Tensor, optional): Tensor of integers holding how this + :class:`PackedSequence` is constructed from sequences. + unsorted_indices (Tensor, optional): Tensor of integers holding how + to recover the original sequences in the correct order. + + .. note:: + :attr:`data` can be on an arbitrary device and of an arbitrary dtype. + :attr:`sorted_indices` and :attr:`unsorted_indices` must be ``torch.int64`` + tensors on the same device as :attr:`data`. + + However, :attr:`batch_sizes` should always be a CPU ``torch.int64`` tensor. + + This invariant is maintained throughout the :class:`PackedSequence` class, + and by all functions that construct a :class:`PackedSequence` in PyTorch + (i.e., they only pass in tensors conforming to this constraint). + + """ + + # NOTE [ device and dtype of a PackedSequence ] + # + # See the note above in doc string (starting with ":attr:`data` can be on + # an arbitrary device..."). + + def __new__(cls, data, batch_sizes=None, sorted_indices=None, unsorted_indices=None): + # PackedSequence used to only have __init__(self, data, batch_sizes) + # without a __new__ like this. So to preserve BC for calling in keyword + # arg style (e.g., `PackedSequence(data=..., batch_sizes=...)`), we have + # to provide two arguments with exact names `data` and `batch_sizes`. + + # NB: if unsorted_indices is provided, it should be the inverse permutation + # to sorted_indices. Don't assert it here because the PackedSequence ctor + # should only be used internally. + if unsorted_indices is None: + unsorted_indices = invert_permutation(sorted_indices) + + # support being called as `PackedSequence(data, batch_sizes, sorted_indices)` + if batch_sizes is not None: + return super(PackedSequence, cls).__new__( + cls, data, batch_sizes, sorted_indices, unsorted_indices) + + # support being called as `PackedSequence((data, batch_sizes), *, sorted_indices)` + else: + assert isinstance(data, (list, tuple)) and len(data) == 2 + return super(PackedSequence, cls).__new__( + cls, data[0], data[1], sorted_indices) + + def pin_memory(self): + # Why not convert `batch_sizes`? + # See NOTE [ device and dtype of a PackedSequence ] + return type(self)(self.data.pin_memory(), self.batch_sizes, + bind(self.sorted_indices, lambda t: t.pin_memory()), + bind(self.unsorted_indices, lambda t: t.pin_memory())) + + def cuda(self, *args, **kwargs): + """Returns a GPU copy if `self.data` is not already on the GPU""" + if self.is_cuda: + return self + else: + # Why not convert `batch_sizes`?
+ # See NOTE [ device and dtype of a PackedSequence ] + return type(self)(self.data.cuda(*args, **kwargs), self.batch_sizes, + bind(self.sorted_indices, lambda t: t.cuda(*args, **kwargs)), + bind(self.unsorted_indices, lambda t: t.cuda(*args, **kwargs))) + + def cpu(self): + """Returns a CPU copy if `self.data` not already on the CPU""" + if self.is_cuda: + # Why not convert `batch_sizes`? + # See NOTE [ device and dtype of a PackedSequence ] + return type(self)(self.data.cpu(), self.batch_sizes, + bind(self.sorted_indices, lambda t: t.cpu()), + bind(self.unsorted_indices, lambda t: t.cpu())) + else: + return self + + def double(self): + r"""Returns copy with `self.data` cast to double type""" + + # Why not convert `batch_sizes`? + # See NOTE [ device and dtype of a PackedSequence ] + return type(self)(self.data.double(), self.batch_sizes, + self.sorted_indices, self.unsorted_indices) + + def float(self): + r"""Returns copy with `self.data` cast to float type""" + + # Why not convert `batch_sizes`? + # See NOTE [ device and dtype of a PackedSequence ] + return type(self)(self.data.float(), self.batch_sizes, + self.sorted_indices, self.unsorted_indices) + + def half(self): + r"""Returns copy with `self.data` cast to half type""" + + # Why not convert `batch_sizes`? + # See NOTE [ device and dtype of a PackedSequence ] + return type(self)(self.data.half(), self.batch_sizes, + self.sorted_indices, self.unsorted_indices) + + def long(self): + r"""Returns copy with `self.data` cast to long type""" + + # Why not convert `batch_sizes`? + # See NOTE [ device and dtype of a PackedSequence ] + return type(self)(self.data.long(), self.batch_sizes, + self.sorted_indices, self.unsorted_indices) + + def int(self): + r"""Returns copy with `self.data` cast to int type""" + + # Why not convert `batch_sizes`? + # See NOTE [ device and dtype of a PackedSequence ] + return type(self)(self.data.int(), self.batch_sizes, + self.sorted_indices, self.unsorted_indices) + + def short(self): + r"""Returns copy with `self.data` cast to short type""" + + # Why not convert `batch_sizes`? + # See NOTE [ device and dtype of a PackedSequence ] + return type(self)(self.data.short(), self.batch_sizes, + self.sorted_indices, self.unsorted_indices) + + def char(self): + r"""Returns copy with `self.data` cast to char type""" + + # Why not convert `batch_sizes`? + # See NOTE [ device and dtype of a PackedSequence ] + return type(self)(self.data.char(), self.batch_sizes, + self.sorted_indices, self.unsorted_indices) + + def byte(self): + r"""Returns copy with `self.data` cast to byte type""" + + # Why not convert `batch_sizes`? + # See NOTE [ device and dtype of a PackedSequence ] + return type(self)(self.data.byte(), self.batch_sizes, + self.sorted_indices, self.unsorted_indices) + + def to(self, *args, **kwargs): + r"""Performs dtype and/or device conversion on `self.data`. + + It has similar signature as :meth:`torch.Tensor.to`. + + .. note:: + + If the ``self.data`` Tensor already has the correct :class:`torch.dtype` + and :class:`torch.device`, then ``self`` is returned. + Otherwise, returns a copy with the desired configuration. + """ + + # Why not convert `batch_sizes`? 
+ # See NOTE [ device and dtype of a PackedSequence ] + data = self.data.to(*args, **kwargs) + sorted_indices = self.sorted_indices + unsorted_indices = self.unsorted_indices + device_kw = 'device' + if device_kw in kwargs: + sorted_indices = bind(sorted_indices, lambda t: t.to(kwargs[device_kw])) + unsorted_indices = bind(unsorted_indices, lambda t: t.to(kwargs[device_kw])) + if data is self.data: + return self + else: + return type(self)(data, self.batch_sizes, + sorted_indices, unsorted_indices) + + @property + def is_cuda(self): + r"""Returns true if `self.data` stored on a gpu""" + return self.data.is_cuda + + def is_pinned(self): + r"""Returns true if `self.data` stored on in pinned memory""" + return self.data.is_pinned()
+
+
+def invert_permutation(permutation):
+    if permutation is None:
+        return None
+    output = torch.empty_like(permutation)
+    output.scatter_(0, permutation,
+                    torch.arange(0, permutation.numel(), device=permutation.device))
+    return output
+
+
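A quick illustration of the scatter-based inverse above (a sketch, not part of the source): the scatter writes position ``i`` into slot ``perm[i]``, so composing the two index selections recovers the identity.

    >>> import torch
    >>> perm = torch.tensor([2, 0, 1])
    >>> inv = torch.empty_like(perm)
    >>> inv.scatter_(0, perm, torch.arange(perm.numel()))   # inv[perm[i]] = i
    tensor([1, 2, 0])
    >>> perm[inv]                                           # identity permutation
    tensor([0, 1, 2])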
[docs]def pack_padded_sequence(input, lengths, batch_first=False, enforce_sorted=True):
+    # type: (Tensor, Tensor, bool, bool) -> PackedSequence
+    r"""Packs a Tensor containing padded sequences of variable length.
+
+    :attr:`input` can be of size ``T x B x *`` where `T` is the length of the
+    longest sequence (equal to ``lengths[0]``), ``B`` is the batch size, and
+    ``*`` is any number of dimensions (including 0). If ``batch_first`` is
+    ``True``, ``B x T x *`` :attr:`input` is expected.
+
+    For unsorted sequences, use `enforce_sorted = False`. If :attr:`enforce_sorted` is
+    ``True``, the sequences should be sorted by length in decreasing order, i.e.
+    ``input[:,0]`` should be the longest sequence, and ``input[:,B-1]`` the shortest
+    one. `enforce_sorted = True` is only necessary for ONNX export.
+
+    Note:
+        This function accepts any input that has at least two dimensions. You
+        can apply it to pack the labels, and use the output of the RNN with
+        them to compute the loss directly. A Tensor can be retrieved from
+        a :class:`PackedSequence` object by accessing its ``.data`` attribute.
+
+    Arguments:
+        input (Tensor): padded batch of variable length sequences.
+        lengths (Tensor): list of sequence lengths of each batch element.
+        batch_first (bool, optional): if ``True``, the input is expected in ``B x T x *``
+            format.
+        enforce_sorted (bool, optional): if ``True``, the input is expected to
+            contain sequences sorted by length in decreasing order. If
+            ``False``, this condition is not checked. Default: ``True``.
+
+    Returns:
+        a :class:`PackedSequence` object
+    """
+    if torch._C._get_tracing_state() and not isinstance(lengths, torch.Tensor):
+        warnings.warn('pack_padded_sequence has been called with a Python list of '
+                      'sequence lengths. The tracer cannot track the data flow of Python '
+                      'values, and it will treat them as constants, likely rendering '
+                      'the trace incorrect for any other combination of lengths.',
+                      stacklevel=2)
+    lengths = torch.as_tensor(lengths, dtype=torch.int64)
+    if enforce_sorted:
+        sorted_indices = None
+    else:
+        lengths, sorted_indices = torch.sort(lengths, descending=True)
+        sorted_indices = sorted_indices.to(input.device)
+        batch_dim = 0 if batch_first else 1
+        input = input.index_select(batch_dim, sorted_indices)
+
+    data, batch_sizes = \
+        _VF._pack_padded_sequence(input, lengths, batch_first)
+    return PackedSequence(data, batch_sizes, sorted_indices, None)
+
+
+
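A minimal usage sketch (illustrative, not part of the source): packing a padded batch of two sequences of lengths 3 and 2 interleaves the time steps and records how many sequences are still active at each step.

    >>> import torch
    >>> from torch.nn.utils.rnn import pack_padded_sequence
    >>> padded = torch.tensor([[1, 2, 3],
    ...                        [4, 5, 0]])          # two sequences, lengths 3 and 2
    >>> packed = pack_padded_sequence(padded, [3, 2], batch_first=True)
    >>> packed.data
    tensor([1, 4, 2, 5, 3])
    >>> packed.batch_sizes
    tensor([2, 2, 1])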
[docs]def pad_packed_sequence(sequence, batch_first=False, padding_value=0.0, total_length=None):
+    # type: (PackedSequence, bool, float, Optional[int]) -> Tuple[Tensor, Tensor]
+    r"""Pads a packed batch of variable length sequences.
+
+    It is an inverse operation to :func:`pack_padded_sequence`.
+
+    The returned Tensor's data will be of size ``T x B x *``, where `T` is the length
+    of the longest sequence and `B` is the batch size. If ``batch_first`` is True,
+    the data will be transposed into ``B x T x *`` format.
+
+    Batch elements will be ordered decreasingly by their length.
+
+    .. note::
+        :attr:`total_length` is useful to implement the
+        ``pack sequence -> recurrent network -> unpack sequence`` pattern in a
+        :class:`~torch.nn.Module` wrapped in :class:`~torch.nn.DataParallel`.
+        See :ref:`this FAQ section <pack-rnn-unpack-with-data-parallelism>` for
+        details.
+
+    Arguments:
+        sequence (PackedSequence): batch to pad
+        batch_first (bool, optional): if ``True``, the output will be in ``B x T x *``
+            format.
+        padding_value (float, optional): values for padded elements.
+        total_length (int, optional): if not ``None``, the output will be padded to
+            have length :attr:`total_length`. This method will throw :class:`ValueError`
+            if :attr:`total_length` is less than the max sequence length in
+            :attr:`sequence`.
+
+    Returns:
+        Tuple of Tensor containing the padded sequence, and a Tensor
+        containing the list of lengths of each sequence in the batch.
+    """
+    max_seq_length = sequence.batch_sizes.size(0)
+    if total_length is not None:
+        if total_length < max_seq_length:
+            raise ValueError("Expected total_length to be at least the length "
+                             "of the longest sequence in input, but got "
+                             "total_length={} and max sequence length being {}"
+                             .format(total_length, max_seq_length))
+        max_seq_length = total_length
+    padded_output, lengths = _VF._pad_packed_sequence(
+        sequence.data, sequence.batch_sizes, batch_first, padding_value, max_seq_length)
+    unsorted_indices = sequence.unsorted_indices
+    if unsorted_indices is not None:
+        batch_dim = 0 if batch_first else 1
+        return padded_output.index_select(batch_dim, unsorted_indices), lengths[unsorted_indices]
+    return padded_output, lengths
+
+
+
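The typical round trip looks like the following sketch (an illustration assuming an LSTM; any RNN module accepts a :class:`PackedSequence` directly):

    >>> import torch
    >>> import torch.nn as nn
    >>> from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
    >>> rnn = nn.LSTM(input_size=5, hidden_size=8)
    >>> x = torch.randn(4, 3, 5)                    # T x B x *, lengths sorted decreasingly
    >>> packed = pack_padded_sequence(x, [4, 3, 2])
    >>> out, _ = rnn(packed)                        # the RNN consumes the packed input
    >>> unpacked, lengths = pad_packed_sequence(out)
    >>> unpacked.shape, lengths
    (torch.Size([4, 3, 8]), tensor([4, 3, 2]))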
[docs]def pad_sequence(sequences, batch_first=False, padding_value=0):
+    r"""Pad a list of variable length Tensors with ``padding_value``
+
+    ``pad_sequence`` stacks a list of Tensors along a new dimension,
+    and pads them to equal length. For example, if the input is a list of
+    sequences with size ``L x *``, the output is of size ``T x B x *`` if
+    ``batch_first`` is ``False``, and ``B x T x *`` otherwise.
+
+    `B` is the batch size. It is equal to the number of elements in ``sequences``.
+    `T` is the length of the longest sequence.
+    `L` is the length of a sequence.
+    `*` is any number of trailing dimensions, including none.
+
+    Example:
+        >>> from torch.nn.utils.rnn import pad_sequence
+        >>> a = torch.ones(25, 300)
+        >>> b = torch.ones(22, 300)
+        >>> c = torch.ones(15, 300)
+        >>> pad_sequence([a, b, c]).size()
+        torch.Size([25, 3, 300])
+
+    Note:
+        This function returns a Tensor of size ``T x B x *`` or ``B x T x *``
+        where `T` is the length of the longest sequence. This function assumes
+        the trailing dimensions and type of all the Tensors in sequences are the same.
+
+    Arguments:
+        sequences (list[Tensor]): list of variable length sequences.
+        batch_first (bool, optional): output will be in ``B x T x *`` if True, or in
+            ``T x B x *`` otherwise
+        padding_value (float, optional): value for padded elements. Default: 0.
+
+    Returns:
+        Tensor of size ``T x B x *`` if :attr:`batch_first` is ``False``.
+        Tensor of size ``B x T x *`` otherwise
+    """
+
+    # assuming trailing dimensions and type of all the Tensors
+    # in sequences are the same and fetching those from sequences[0]
+    max_size = sequences[0].size()
+    trailing_dims = max_size[1:]
+    max_len = max([s.size(0) for s in sequences])
+    if batch_first:
+        out_dims = (len(sequences), max_len) + trailing_dims
+    else:
+        out_dims = (max_len, len(sequences)) + trailing_dims
+
+    out_tensor = sequences[0].data.new(*out_dims).fill_(padding_value)
+    for i, tensor in enumerate(sequences):
+        length = tensor.size(0)
+        # use index notation to prevent duplicate references to the tensor
+        if batch_first:
+            out_tensor[i, :length, ...] = tensor
+        else:
+            out_tensor[:length, i, ...] = tensor
+
+    return out_tensor
+
+
+
[docs]def pack_sequence(sequences, enforce_sorted=True):
+    r"""Packs a list of variable length Tensors
+
+    ``sequences`` should be a list of Tensors of size ``L x *``, where `L` is
+    the length of a sequence and `*` is any number of trailing dimensions,
+    including zero.
+
+    For unsorted sequences, use `enforce_sorted = False`. If ``enforce_sorted``
+    is ``True``, the sequences should be sorted in the order of decreasing length.
+    ``enforce_sorted = True`` is only necessary for ONNX export.
+
+    Example:
+        >>> from torch.nn.utils.rnn import pack_sequence
+        >>> a = torch.tensor([1,2,3])
+        >>> b = torch.tensor([4,5])
+        >>> c = torch.tensor([6])
+        >>> pack_sequence([a, b, c])
+        PackedSequence(data=tensor([ 1,  4,  6,  2,  5,  3]), batch_sizes=tensor([ 3,  2,  1]))
+
+    Arguments:
+        sequences (list[Tensor]): A list of sequences of decreasing length.
+        enforce_sorted (bool, optional): if ``True``, checks that the input
+            contains sequences sorted by length in a decreasing order. If
+            ``False``, this condition is not checked. Default: ``True``.
+
+    Returns:
+        a :class:`PackedSequence` object
+    """
+    lengths = [v.size(0) for v in sequences]
+    return pack_padded_sequence(pad_sequence(sequences), lengths, enforce_sorted=enforce_sorted)
+
+
+def get_packed_sequence(data, batch_sizes, sorted_indices, unsorted_indices):
+    return PackedSequence(data, batch_sizes, sorted_indices, unsorted_indices)
+
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/utils/spectral_norm.html b/docs/stable/_modules/torch/nn/utils/spectral_norm.html
new file mode 100644
index 000000000000..c69c7989707b
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/utils/spectral_norm.html
@@ -0,0 +1,802 @@
+torch.nn.utils.spectral_norm — PyTorch master documentation

    Source code for torch.nn.utils.spectral_norm

    +"""
    +Spectral Normalization from https://arxiv.org/abs/1802.05957
    +"""
    +import torch
    +from torch.nn.functional import normalize
    +
    +
    +class SpectralNorm(object):
    +    # Invariant before and after each forward call:
    +    #   u = normalize(W @ v)
    +    # NB: At initialization, this invariant is not enforced
    +
    +    _version = 1
    +    # At version 1:
    +    #   made  `W` not a buffer,
    +    #   added `v` as a buffer, and
    +    #   made eval mode use `W = u @ W_orig @ v` rather than the stored `W`.
    +
    +    def __init__(self, name='weight', n_power_iterations=1, dim=0, eps=1e-12):
    +        self.name = name
    +        self.dim = dim
    +        if n_power_iterations <= 0:
    +            raise ValueError('Expected n_power_iterations to be positive, but '
    +                             'got n_power_iterations={}'.format(n_power_iterations))
    +        self.n_power_iterations = n_power_iterations
    +        self.eps = eps
    +
    +    def reshape_weight_to_matrix(self, weight):
    +        weight_mat = weight
    +        if self.dim != 0:
    +            # permute dim to front
    +            weight_mat = weight_mat.permute(self.dim,
    +                                            *[d for d in range(weight_mat.dim()) if d != self.dim])
    +        height = weight_mat.size(0)
    +        return weight_mat.reshape(height, -1)
    +
    +    def compute_weight(self, module, do_power_iteration):
    +        # NB: If `do_power_iteration` is set, the `u` and `v` vectors are
    +        #     updated in power iteration **in-place**. This is very important
    +        #     because in `DataParallel` forward, the vectors (being buffers) are
    +        #     broadcast from the parallelized module to each module replica,
    +        #     which is a new module object created on the fly. And each replica
    +        #     runs its own spectral norm power iteration. So simply assigning
    +        #     the updated vectors to the module this function runs on will cause
    +        #     the update to be lost forever. And the next time the parallelized
    +        #     module is replicated, the same randomly initialized vectors are
    +        #     broadcast and used!
    +        #
    +        #     Therefore, to make the change propagate back, we rely on two
    +        #     important behaviors (also enforced via tests):
    +        #       1. `DataParallel` doesn't clone storage if the broadcast tensor
    +        #          is already on correct device; and it makes sure that the
    +        #          parallelized module is already on `device[0]`.
    +        #       2. If the out tensor in `out=` kwarg has correct shape, it will
    +        #          just fill in the values.
    +        #     Therefore, since the same power iteration is performed on all
    +        #     devices, simply updating the tensors in-place will make sure that
    +        #     the module replica on `device[0]` will update the _u vector on the
+        #     parallelized module (by shared storage).
    +        #
    +        #    However, after we update `u` and `v` in-place, we need to **clone**
    +        #    them before using them to normalize the weight. This is to support
    +        #    backproping through two forward passes, e.g., the common pattern in
    +        #    GAN training: loss = D(real) - D(fake). Otherwise, engine will
    +        #    complain that variables needed to do backward for the first forward
    +        #    (i.e., the `u` and `v` vectors) are changed in the second forward.
    +        weight = getattr(module, self.name + '_orig')
    +        u = getattr(module, self.name + '_u')
    +        v = getattr(module, self.name + '_v')
    +        weight_mat = self.reshape_weight_to_matrix(weight)
    +
    +        if do_power_iteration:
    +            with torch.no_grad():
    +                for _ in range(self.n_power_iterations):
+                    # The spectral norm of the weight equals `u^T W v`, where `u` and `v`
    +                    # are the first left and right singular vectors.
    +                    # This power iteration produces approximations of `u` and `v`.
    +                    v = normalize(torch.mv(weight_mat.t(), u), dim=0, eps=self.eps, out=v)
    +                    u = normalize(torch.mv(weight_mat, v), dim=0, eps=self.eps, out=u)
    +                if self.n_power_iterations > 0:
    +                    # See above on why we need to clone
    +                    u = u.clone()
    +                    v = v.clone()
    +
    +        sigma = torch.dot(u, torch.mv(weight_mat, v))
    +        weight = weight / sigma
    +        return weight
    +
    +    def remove(self, module):
    +        with torch.no_grad():
    +            weight = self.compute_weight(module, do_power_iteration=False)
    +        delattr(module, self.name)
    +        delattr(module, self.name + '_u')
    +        delattr(module, self.name + '_v')
    +        delattr(module, self.name + '_orig')
    +        module.register_parameter(self.name, torch.nn.Parameter(weight.detach()))
    +
    +    def __call__(self, module, inputs):
    +        setattr(module, self.name, self.compute_weight(module, do_power_iteration=module.training))
    +
    +    def _solve_v_and_rescale(self, weight_mat, u, target_sigma):
+        # Tries to return a vector `v` s.t. `u = normalize(W @ v)`
    +        # (the invariant at top of this class) and `u @ W @ v = sigma`.
    +        # This uses pinverse in case W^T W is not invertible.
    +        v = torch.chain_matmul(weight_mat.t().mm(weight_mat).pinverse(), weight_mat.t(), u.unsqueeze(1)).squeeze(1)
    +        return v.mul_(target_sigma / torch.dot(u, torch.mv(weight_mat, v)))
    +
    +    @staticmethod
    +    def apply(module, name, n_power_iterations, dim, eps):
    +        for k, hook in module._forward_pre_hooks.items():
    +            if isinstance(hook, SpectralNorm) and hook.name == name:
    +                raise RuntimeError("Cannot register two spectral_norm hooks on "
    +                                   "the same parameter {}".format(name))
    +
    +        fn = SpectralNorm(name, n_power_iterations, dim, eps)
    +        weight = module._parameters[name]
    +
    +        with torch.no_grad():
    +            weight_mat = fn.reshape_weight_to_matrix(weight)
    +
    +            h, w = weight_mat.size()
    +            # randomly initialize `u` and `v`
    +            u = normalize(weight.new_empty(h).normal_(0, 1), dim=0, eps=fn.eps)
    +            v = normalize(weight.new_empty(w).normal_(0, 1), dim=0, eps=fn.eps)
    +
    +        delattr(module, fn.name)
    +        module.register_parameter(fn.name + "_orig", weight)
    +        # We still need to assign weight back as fn.name because all sorts of
    +        # things may assume that it exists, e.g., when initializing weights.
    +        # However, we can't directly assign as it could be an nn.Parameter and
    +        # gets added as a parameter. Instead, we register weight.data as a plain
    +        # attribute.
    +        setattr(module, fn.name, weight.data)
    +        module.register_buffer(fn.name + "_u", u)
    +        module.register_buffer(fn.name + "_v", v)
    +
    +        module.register_forward_pre_hook(fn)
    +        module._register_state_dict_hook(SpectralNormStateDictHook(fn))
    +        module._register_load_state_dict_pre_hook(SpectralNormLoadStateDictPreHook(fn))
    +        return fn
    +
    +
    +# This is a top level class because Py2 pickle doesn't like inner class nor an
    +# instancemethod.
    +class SpectralNormLoadStateDictPreHook(object):
    +    # See docstring of SpectralNorm._version on the changes to spectral_norm.
    +    def __init__(self, fn):
    +        self.fn = fn
    +
    +    # For state_dict with version None, (assuming that it has gone through at
    +    # least one training forward), we have
    +    #
    +    #    u = normalize(W_orig @ v)
    +    #    W = W_orig / sigma, where sigma = u @ W_orig @ v
    +    #
    +    # To compute `v`, we solve `W_orig @ x = u`, and let
    +    #    v = x / (u @ W_orig @ x) * (W / W_orig).
    +    def __call__(self, state_dict, prefix, local_metadata, strict,
    +                 missing_keys, unexpected_keys, error_msgs):
    +        fn = self.fn
    +        version = local_metadata.get('spectral_norm', {}).get(fn.name + '.version', None)
    +        if version is None or version < 1:
    +            weight_key = prefix + fn.name
    +            if version is None and all(weight_key + s in state_dict for s in ('_orig', '_u', '_v')) and \
    +                    weight_key not in state_dict:
    +                # Detect if it is the updated state dict and just missing metadata.
    +                # This could happen if the users are crafting a state dict themselves,
    +                # so we just pretend that this is the newest.
    +                return
    +            has_missing_keys = False
    +            for suffix in ('_orig', '', '_u'):
    +                key = weight_key + suffix
    +                if key not in state_dict:
    +                    has_missing_keys = True
    +                    if strict:
    +                        missing_keys.append(key)
    +            if has_missing_keys:
    +                return
    +            with torch.no_grad():
    +                weight_orig = state_dict[weight_key + '_orig']
    +                weight = state_dict.pop(weight_key)
    +                sigma = (weight_orig / weight).mean()
    +                weight_mat = fn.reshape_weight_to_matrix(weight_orig)
    +                u = state_dict[weight_key + '_u']
    +                v = fn._solve_v_and_rescale(weight_mat, u, sigma)
    +                state_dict[weight_key + '_v'] = v
    +
    +
    +# This is a top level class because Py2 pickle doesn't like inner class nor an
    +# instancemethod.
    +class SpectralNormStateDictHook(object):
    +    # See docstring of SpectralNorm._version on the changes to spectral_norm.
    +    def __init__(self, fn):
    +        self.fn = fn
    +
    +    def __call__(self, module, state_dict, prefix, local_metadata):
    +        if 'spectral_norm' not in local_metadata:
    +            local_metadata['spectral_norm'] = {}
    +        key = self.fn.name + '.version'
    +        if key in local_metadata['spectral_norm']:
    +            raise RuntimeError("Unexpected key in metadata['spectral_norm']: {}".format(key))
    +        local_metadata['spectral_norm'][key] = self.fn._version
    +
    +
    +
[docs]def spectral_norm(module, name='weight', n_power_iterations=1, eps=1e-12, dim=None):
+    r"""Applies spectral normalization to a parameter in the given module.
+
+    .. math::
+        \mathbf{W}_{SN} = \dfrac{\mathbf{W}}{\sigma(\mathbf{W})},
+        \sigma(\mathbf{W}) = \max_{\mathbf{h}: \mathbf{h} \ne 0} \dfrac{\|\mathbf{W} \mathbf{h}\|_2}{\|\mathbf{h}\|_2}
+
+    Spectral normalization stabilizes the training of discriminators (critics)
+    in Generative Adversarial Networks (GANs) by rescaling the weight tensor
+    with the spectral norm :math:`\sigma` of the weight matrix, calculated using
+    the power iteration method. If the dimension of the weight tensor is greater
+    than 2, it is reshaped to 2D in the power iteration method to get the
+    spectral norm. This is implemented via a hook that calculates the spectral
+    norm and rescales the weight before every :meth:`~Module.forward` call.
+
+    See `Spectral Normalization for Generative Adversarial Networks`_ .
+
+    .. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957
+
+    Args:
+        module (nn.Module): containing module
+        name (str, optional): name of weight parameter
+        n_power_iterations (int, optional): number of power iterations to
+            calculate spectral norm
+        eps (float, optional): epsilon for numerical stability in
+            calculating norms
+        dim (int, optional): dimension corresponding to number of outputs,
+            the default is ``0``, except for modules that are instances of
+            ConvTranspose{1,2,3}d, when it is ``1``
+
+    Returns:
+        The original module with the spectral norm hook
+
+    Example::
+
+        >>> m = spectral_norm(nn.Linear(20, 40))
+        >>> m
+        Linear(in_features=20, out_features=40, bias=True)
+        >>> m.weight_u.size()
+        torch.Size([40])
+
+    """
+    if dim is None:
+        if isinstance(module, (torch.nn.ConvTranspose1d,
+                               torch.nn.ConvTranspose2d,
+                               torch.nn.ConvTranspose3d)):
+            dim = 1
+        else:
+            dim = 0
+    SpectralNorm.apply(module, name, n_power_iterations, dim, eps)
+    return module
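As a sanity check (a sketch, not part of the source): each forward call in training mode runs one power iteration, so after enough calls the largest singular value of the normalized weight is approximately 1.

    >>> import torch
    >>> import torch.nn as nn
    >>> from torch.nn.utils import spectral_norm
    >>> m = spectral_norm(nn.Linear(20, 40))
    >>> for _ in range(50):                  # each forward runs one power iteration
    ...     _ = m(torch.randn(8, 20))
    >>> sigma = torch.svd(m.weight)[1][0]    # top singular value, close to 1.0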
+
+
+
[docs]def remove_spectral_norm(module, name='weight'):
+    r"""Removes the spectral normalization reparameterization from a module.
+
+    Args:
+        module (Module): containing module
+        name (str, optional): name of weight parameter
+
+    Example:
+        >>> m = spectral_norm(nn.Linear(40, 10))
+        >>> remove_spectral_norm(m)
+    """
+    for k, hook in module._forward_pre_hooks.items():
+        if isinstance(hook, SpectralNorm) and hook.name == name:
+            hook.remove(module)
+            del module._forward_pre_hooks[k]
+            break
+    else:
+        raise ValueError("spectral_norm of '{}' not found in {}".format(
+            name, module))
+
+    for k, hook in module._state_dict_hooks.items():
+        if isinstance(hook, SpectralNormStateDictHook) and hook.fn.name == name:
+            del module._state_dict_hooks[k]
+            break
+
+    for k, hook in module._load_state_dict_pre_hooks.items():
+        if isinstance(hook, SpectralNormLoadStateDictPreHook) and hook.fn.name == name:
+            del module._load_state_dict_pre_hooks[k]
+            break
+
+    return module
    +
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/nn/utils/weight_norm.html b/docs/stable/_modules/torch/nn/utils/weight_norm.html
new file mode 100644
index 000000000000..1a23198494f0
--- /dev/null
+++ b/docs/stable/_modules/torch/nn/utils/weight_norm.html
@@ -0,0 +1,633 @@
+torch.nn.utils.weight_norm — PyTorch master documentation

    Source code for torch.nn.utils.weight_norm

    +r"""
    +Weight Normalization from https://arxiv.org/abs/1602.07868
    +"""
    +from torch.nn.parameter import Parameter
    +from torch import _weight_norm, norm_except_dim
    +
    +
    +class WeightNorm(object):
    +    def __init__(self, name, dim):
    +        if dim is None:
    +            dim = -1
    +        self.name = name
    +        self.dim = dim
    +
    +    def compute_weight(self, module):
    +        g = getattr(module, self.name + '_g')
    +        v = getattr(module, self.name + '_v')
    +        return _weight_norm(v, g, self.dim)
    +
    +    @staticmethod
    +    def apply(module, name, dim):
    +        for k, hook in module._forward_pre_hooks.items():
    +            if isinstance(hook, WeightNorm) and hook.name == name:
    +                raise RuntimeError("Cannot register two weight_norm hooks on "
    +                                   "the same parameter {}".format(name))
    +
    +        if dim is None:
    +            dim = -1
    +
    +        fn = WeightNorm(name, dim)
    +
    +        weight = getattr(module, name)
    +
    +        # remove w from parameter list
    +        del module._parameters[name]
    +
    +        # add g and v as new parameters and express w as g/||v|| * v
    +        module.register_parameter(name + '_g', Parameter(norm_except_dim(weight, 2, dim).data))
    +        module.register_parameter(name + '_v', Parameter(weight.data))
    +        setattr(module, name, fn.compute_weight(module))
    +
    +        # recompute weight before every forward()
    +        module.register_forward_pre_hook(fn)
    +
    +        return fn
    +
    +    def remove(self, module):
    +        weight = self.compute_weight(module)
    +        delattr(module, self.name)
    +        del module._parameters[self.name + '_g']
    +        del module._parameters[self.name + '_v']
    +        module.register_parameter(self.name, Parameter(weight.data))
    +
    +    def __call__(self, module, inputs):
    +        setattr(module, self.name, self.compute_weight(module))
    +
    +
    +
[docs]def weight_norm(module, name='weight', dim=0):
+    r"""Applies weight normalization to a parameter in the given module.
+
+    .. math::
+        \mathbf{w} = g \dfrac{\mathbf{v}}{\|\mathbf{v}\|}
+
+    Weight normalization is a reparameterization that decouples the magnitude
+    of a weight tensor from its direction. This replaces the parameter specified
+    by :attr:`name` (e.g. ``'weight'``) with two parameters: one specifying the magnitude
+    (e.g. ``'weight_g'``) and one specifying the direction (e.g. ``'weight_v'``).
+    Weight normalization is implemented via a hook that recomputes the weight
+    tensor from the magnitude and direction before every :meth:`~Module.forward`
+    call.
+
+    By default, with ``dim=0``, the norm is computed independently per output
+    channel/plane. To compute a norm over the entire weight tensor, use
+    ``dim=None``.
+
+    See https://arxiv.org/abs/1602.07868
+
+    Args:
+        module (Module): containing module
+        name (str, optional): name of weight parameter
+        dim (int, optional): dimension over which to compute the norm
+
+    Returns:
+        The original module with the weight norm hook
+
+    Example::
+
+        >>> m = weight_norm(nn.Linear(20, 40), name='weight')
+        >>> m
+        Linear(in_features=20, out_features=40, bias=True)
+        >>> m.weight_g.size()
+        torch.Size([40, 1])
+        >>> m.weight_v.size()
+        torch.Size([40, 20])
+
+    """
+    WeightNorm.apply(module, name, dim)
+    return module
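The reparameterization can be verified directly (a sketch, not part of the source); with the default ``dim=0`` the norm is taken over each output row of the Linear weight:

    >>> import torch
    >>> import torch.nn as nn
    >>> from torch.nn.utils import weight_norm
    >>> m = weight_norm(nn.Linear(20, 40))
    >>> w = m.weight_g * m.weight_v / m.weight_v.norm(dim=1, keepdim=True)
    >>> torch.allclose(w, m.weight)
    True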
+
+
+
[docs]def remove_weight_norm(module, name='weight'):
+    r"""Removes the weight normalization reparameterization from a module.
+
+    Args:
+        module (Module): containing module
+        name (str, optional): name of weight parameter
+
+    Example:
+        >>> m = weight_norm(nn.Linear(20, 40))
+        >>> remove_weight_norm(m)
+    """
+    for k, hook in module._forward_pre_hooks.items():
+        if isinstance(hook, WeightNorm) and hook.name == name:
+            hook.remove(module)
+            del module._forward_pre_hooks[k]
+            return module
+
+    raise ValueError("weight_norm of '{}' not found in {}"
+                     .format(name, module))
    +
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/onnx.html b/docs/stable/_modules/torch/onnx.html
new file mode 100644
index 000000000000..264310b44ed6
--- /dev/null
+++ b/docs/stable/_modules/torch/onnx.html
@@ -0,0 +1,698 @@
+torch.onnx — PyTorch master documentation

    Source code for torch.onnx

    +import torch._C as _C
    +
    +TensorProtoDataType = _C._onnx.TensorProtoDataType
    +OperatorExportTypes = _C._onnx.OperatorExportTypes
    +PYTORCH_ONNX_CAFFE2_BUNDLE = _C._onnx.PYTORCH_ONNX_CAFFE2_BUNDLE
    +
    +ONNX_ARCHIVE_MODEL_PROTO_NAME = "__MODEL_PROTO"
    +
    +# TODO: Update these variables when there 
    +# is a new ir_version and producer_version
    +# and use these values in the exporter
    +ir_version = 4
    +producer_name = "pytorch"
    +producer_version = "1.2"
    +
    +
    +class ExportTypes:
    +    PROTOBUF_FILE = 1
    +    ZIP_ARCHIVE = 2
    +    COMPRESSED_ZIP_ARCHIVE = 3
    +    DIRECTORY = 4
    +
    +
    +def _export(*args, **kwargs):
    +    from torch.onnx import utils
    +    result = utils._export(*args, **kwargs)
    +    return result
    +
    +
    +
[docs]def export(model, args, f, export_params=True, verbose=False, training=False,
+           input_names=None, output_names=None, aten=False, export_raw_ir=False,
+           operator_export_type=None, opset_version=None, _retain_param_name=True,
+           do_constant_folding=False, example_outputs=None, strip_doc_string=True, dynamic_axes=None):
+    r"""
+    Export a model into ONNX format. This exporter runs your model
+    once in order to get a trace of its execution to be exported;
+    at the moment, it supports a limited set of dynamic models (e.g., RNNs.)
+
+    See also: :ref:`onnx-export`
+
+    Arguments:
+        model (torch.nn.Module): the model to be exported.
+        args (tuple of arguments): the inputs to
+            the model, e.g., such that ``model(*args)`` is a valid
+            invocation of the model. Any non-Tensor arguments will
+            be hard-coded into the exported model; any Tensor arguments
+            will become inputs of the exported model, in the order they
+            occur in args. If args is a Tensor, this is equivalent
+            to having called it with a 1-ary tuple of that Tensor.
+            (Note: passing keyword arguments to the model is not currently
+            supported. Give us a shout if you need it.)
+        f: a file-like object (has to implement fileno that returns a file descriptor)
+            or a string containing a file name. A binary Protobuf will be written
+            to this file.
+        export_params (bool, default True): if specified, all parameters will
+            be exported. Set this to False if you want to export an untrained model.
+            In this case, the exported model will first take all of its parameters
+            as arguments, with the ordering as specified by ``model.state_dict().values()``
+        verbose (bool, default False): if specified, we will print out a debug
+            description of the trace being exported.
+        training (bool, default False): export the model in training mode. At
+            the moment, ONNX is oriented towards exporting models for inference
+            only, so you will generally not need to set this to True.
+        input_names (list of strings, default empty list): names to assign to the
+            input nodes of the graph, in order
+        output_names (list of strings, default empty list): names to assign to the
+            output nodes of the graph, in order
+        aten (bool, default False): [DEPRECATED. use operator_export_type] export the
+            model in aten mode. If using aten mode, all the ops originally exported
+            by the functions in symbolic_opset<version>.py are exported as ATen ops.
+        export_raw_ir (bool, default False): [DEPRECATED. use operator_export_type]
+            export the internal IR directly instead of converting it to ONNX ops.
+        operator_export_type (enum, default OperatorExportTypes.ONNX):
+            OperatorExportTypes.ONNX: all ops are exported as regular ONNX ops.
+            OperatorExportTypes.ONNX_ATEN: all ops are exported as ATen ops.
+            OperatorExportTypes.ONNX_ATEN_FALLBACK: if a symbolic is missing,
+                fall back on the ATen op.
+            OperatorExportTypes.RAW: export raw ir.
+        opset_version (int, default is 9): by default we export the model to the
+            opset version of the onnx submodule. Since ONNX's latest opset may
+            evolve before the next stable release, by default we export to one stable
+            opset version. Right now, the supported stable opset version is 9.
+            The opset_version must be _onnx_master_opset or in _onnx_stable_opsets,
+            which are defined in torch/onnx/symbolic_helper.py
+        do_constant_folding (bool, default False): If True, the constant-folding
+            optimization is applied to the model during export. Constant-folding
+            optimization will replace some of the ops that have all constant
+            inputs with pre-computed constant nodes.
+        example_outputs (tuple of Tensors, default None): example outputs of the
+            model that is being exported. example_outputs must be provided
+            when exporting a ScriptModule or TorchScript Function.
+        strip_doc_string (bool, default True): if True, strips the field
+            "doc_string" from the exported model, which contains information
+            about the stack trace.
+        dynamic_axes (dict<string, dict<int, string>> or dict<string, list(int)>, default empty dict):
+            a dictionary to specify dynamic axes of input/output, such that:
+            - KEY: input and/or output names
+            - VALUE: indices of dynamic axes for the given key and potentially the names to
+              be used for exported dynamic axes. In general the value is defined according
+              to one of the following ways or a combination of both:
+              (1). A list of integers specifying the dynamic axes of the provided input.
+                   In this scenario automated names will be generated and applied to the
+                   dynamic axes of the provided input/output during export.
+              OR (2). An inner dictionary that specifies a mapping FROM the index of a
+                   dynamic axis in the corresponding input/output TO the name that is
+                   desired to be applied on such axis of such input/output during export.
+
+            Example. If we have the following shapes for inputs and outputs:
+                shape(input_1) = ('b', 3, 'w', 'h')
+                and shape(input_2) = ('b', 4)
+                and shape(output) = ('b', 'd', 5)
+
+            Then dynamic axes can be defined either as:
+                (a). ONLY INDICES:
+                    dynamic_axes = {'input_1':[0, 2, 3], 'input_2':[0], 'output':[0, 1]}
+
+                    where automatic names will be generated for exported dynamic axes
+
+                (b). INDICES WITH CORRESPONDING NAMES:
+                    dynamic_axes = {'input_1':{0:'batch', 1:'width', 2:'height'},
+                                    'input_2':{0:'batch'},
+                                    'output':{0:'batch', 1:'detections'}}
+
+                    where provided names will be applied to exported dynamic axes
+
+                (c). MIXED MODE OF (a) and (b):
+                    dynamic_axes = {'input_1':[0, 2, 3], 'input_2':{0:'batch'}, 'output':[0,1]}
+    """
+
+    from torch.onnx import utils
+    return utils.export(model, args, f, export_params, verbose, training,
+                        input_names, output_names, aten, export_raw_ir,
+                        operator_export_type, opset_version, _retain_param_name,
+                        do_constant_folding, example_outputs,
+                        strip_doc_string, dynamic_axes)
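A minimal export sketch (illustrative, not part of the source; the file name and axis names are arbitrary), exporting a small model with a dynamic batch dimension:

    >>> import torch
    >>> import torch.nn as nn
    >>> model = nn.Sequential(nn.Linear(10, 5), nn.ReLU())
    >>> dummy_input = torch.randn(1, 10)             # shapes are traced from this example input
    >>> torch.onnx.export(model, dummy_input, "model.onnx",
    ...                   input_names=['input'], output_names=['output'],
    ...                   dynamic_axes={'input': {0: 'batch'}, 'output': {0: 'batch'}})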
+
+
+def export_to_pretty_string(*args, **kwargs):
+    from torch.onnx import utils
+    return utils.export_to_pretty_string(*args, **kwargs)
+
+
+def _export_to_pretty_string(*args, **kwargs):
+    from torch.onnx import utils
+    return utils._export_to_pretty_string(*args, **kwargs)
+
+
+def _optimize_trace(trace, operator_export_type):
+    from torch.onnx import utils
+    trace.set_graph(utils._optimize_graph(trace.graph(), operator_export_type))
+
+
[docs]def set_training(model, mode):
+    r"""
+    A context manager to temporarily set the training mode of 'model'
+    to 'mode', resetting it when we exit the with-block. A no-op if
+    mode is None.
+    """
+    from torch.onnx import utils
+    return utils.set_training(model, mode)
+
+
+def _run_symbolic_function(*args, **kwargs):
+    from torch.onnx import utils
+    return utils._run_symbolic_function(*args, **kwargs)
+
+
+def _run_symbolic_method(*args, **kwargs):
+    from torch.onnx import utils
+    return utils._run_symbolic_method(*args, **kwargs)
+
+
[docs]def is_in_onnx_export():
+    r"""
+    Check whether it is in the middle of ONNX export.
+    This function returns True in the middle of torch.onnx.export().
+    torch.onnx.export should be executed with a single thread.
+    """
+    from torch.onnx import utils
+    return utils.is_in_onnx_export()
+
+
+
[docs]def register_custom_op_symbolic(symbolic_name, symbolic_fn, opset_version):
+    from torch.onnx import utils
+    return utils.register_custom_op_symbolic(symbolic_name, symbolic_fn, opset_version)
    +
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/onnx/operators.html b/docs/stable/_modules/torch/onnx/operators.html
new file mode 100644
index 000000000000..079c680c963c
--- /dev/null
+++ b/docs/stable/_modules/torch/onnx/operators.html
@@ -0,0 +1,536 @@
+torch.onnx.operators — PyTorch master documentation

    Source code for torch.onnx.operators

    +r"""This file provides a location for operators that help exporting
    +models via onnx. E.g. shape_as_tensor and reshape_from_tensor_shape
+are to make all dynamic size operations traceable.
    +
    +NOTE: at one point these functions were implemented differently.
    +Since then we have implemented these directly in ATen, so this
    +file is kept purely for backward-compatibility.
    +"""
    +
    +import torch
    +import torch.onnx
    +import torch.onnx.utils
    +
    +
    +
[docs]def shape_as_tensor(x):
+    return torch._shape_as_tensor(x)
+
+
+def reshape_from_tensor_shape(x, shape):
+    return torch._reshape_from_tensor(x, shape)
+
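For example (a sketch, not part of the source), both helpers simply defer to their ATen counterparts:

    >>> import torch
    >>> from torch.onnx.operators import shape_as_tensor, reshape_from_tensor_shape
    >>> x = torch.randn(2, 3, 4)
    >>> shape_as_tensor(x)
    tensor([2, 3, 4])
    >>> reshape_from_tensor_shape(x, torch.tensor([6, 4])).shape
    torch.Size([6, 4])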
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/optim/adadelta.html b/docs/stable/_modules/torch/optim/adadelta.html
new file mode 100644
index 000000000000..eefec9861b15
--- /dev/null
+++ b/docs/stable/_modules/torch/optim/adadelta.html
@@ -0,0 +1,592 @@
+torch.optim.adadelta — PyTorch master documentation

    Source code for torch.optim.adadelta

    +import torch
    +
    +from .optimizer import Optimizer
    +
    +
    +
[docs]class Adadelta(Optimizer):
+    """Implements Adadelta algorithm.
+
+    It has been proposed in `ADADELTA: An Adaptive Learning Rate Method`__.
+
+    Arguments:
+        params (iterable): iterable of parameters to optimize or dicts defining
+            parameter groups
+        rho (float, optional): coefficient used for computing a running average
+            of squared gradients (default: 0.9)
+        eps (float, optional): term added to the denominator to improve
+            numerical stability (default: 1e-6)
+        lr (float, optional): coefficient that scales delta before it is applied
+            to the parameters (default: 1.0)
+        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
+
+    __ https://arxiv.org/abs/1212.5701
+    """
+
+    def __init__(self, params, lr=1.0, rho=0.9, eps=1e-6, weight_decay=0):
+        if not 0.0 <= lr:
+            raise ValueError("Invalid learning rate: {}".format(lr))
+        if not 0.0 <= rho <= 1.0:
+            raise ValueError("Invalid rho value: {}".format(rho))
+        if not 0.0 <= eps:
+            raise ValueError("Invalid epsilon value: {}".format(eps))
+        if not 0.0 <= weight_decay:
+            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
+
+        defaults = dict(lr=lr, rho=rho, eps=eps, weight_decay=weight_decay)
+        super(Adadelta, self).__init__(params, defaults)
+
[docs]    def step(self, closure=None):
+        """Performs a single optimization step.
+
+        Arguments:
+            closure (callable, optional): A closure that reevaluates the model
+                and returns the loss.
+        """
+        loss = None
+        if closure is not None:
+            loss = closure()
+
+        for group in self.param_groups:
+            for p in group['params']:
+                if p.grad is None:
+                    continue
+                grad = p.grad.data
+                if grad.is_sparse:
+                    raise RuntimeError('Adadelta does not support sparse gradients')
+                state = self.state[p]
+
+                # State initialization
+                if len(state) == 0:
+                    state['step'] = 0
+                    state['square_avg'] = torch.zeros_like(p.data)
+                    state['acc_delta'] = torch.zeros_like(p.data)
+
+                square_avg, acc_delta = state['square_avg'], state['acc_delta']
+                rho, eps = group['rho'], group['eps']
+
+                state['step'] += 1
+
+                if group['weight_decay'] != 0:
+                    grad = grad.add(group['weight_decay'], p.data)
+
+                square_avg.mul_(rho).addcmul_(1 - rho, grad, grad)
+                std = square_avg.add(eps).sqrt_()
+                delta = acc_delta.add(eps).sqrt_().div_(std).mul_(grad)
+                p.data.add_(-group['lr'], delta)
+                acc_delta.mul_(rho).addcmul_(1 - rho, delta, delta)
+
+        return loss
    +
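A minimal usage sketch (illustrative, not part of the source):

    >>> import torch
    >>> from torch.optim import Adadelta
    >>> w = torch.randn(3, requires_grad=True)
    >>> opt = Adadelta([w], lr=1.0, rho=0.9)
    >>> loss = (w ** 2).sum()
    >>> loss.backward()
    >>> opt.step()                 # accumulates squared grads and squared deltas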
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/optim/adagrad.html b/docs/stable/_modules/torch/optim/adagrad.html
new file mode 100644
index 000000000000..5b01a4894130
--- /dev/null
+++ b/docs/stable/_modules/torch/optim/adagrad.html
@@ -0,0 +1,610 @@
+torch.optim.adagrad — PyTorch master documentation

    Source code for torch.optim.adagrad

    +import torch
    +from .optimizer import Optimizer
    +
    +
    +
[docs]class Adagrad(Optimizer):
+    """Implements Adagrad algorithm.
+
+    It has been proposed in `Adaptive Subgradient Methods for Online Learning
+    and Stochastic Optimization`_.
+
+    Arguments:
+        params (iterable): iterable of parameters to optimize or dicts defining
+            parameter groups
+        lr (float, optional): learning rate (default: 1e-2)
+        lr_decay (float, optional): learning rate decay (default: 0)
+        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
+        initial_accumulator_value (float, optional): initial value of the
+            sum-of-squared-gradients accumulator (default: 0)
+
+    .. _Adaptive Subgradient Methods for Online Learning and Stochastic
+        Optimization: http://jmlr.org/papers/v12/duchi11a.html
+    """
+
+    def __init__(self, params, lr=1e-2, lr_decay=0, weight_decay=0, initial_accumulator_value=0):
+        if not 0.0 <= lr:
+            raise ValueError("Invalid learning rate: {}".format(lr))
+        if not 0.0 <= lr_decay:
+            raise ValueError("Invalid lr_decay value: {}".format(lr_decay))
+        if not 0.0 <= weight_decay:
+            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
+        if not 0.0 <= initial_accumulator_value:
+            raise ValueError("Invalid initial_accumulator_value value: {}".format(initial_accumulator_value))
+
+        defaults = dict(lr=lr, lr_decay=lr_decay, weight_decay=weight_decay,
+                        initial_accumulator_value=initial_accumulator_value)
+        super(Adagrad, self).__init__(params, defaults)
+
+        for group in self.param_groups:
+            for p in group['params']:
+                state = self.state[p]
+                state['step'] = 0
+                state['sum'] = torch.full_like(p.data, initial_accumulator_value)
+
+    def share_memory(self):
+        for group in self.param_groups:
+            for p in group['params']:
+                state = self.state[p]
+                state['sum'].share_memory_()
+
[docs]    def step(self, closure=None):
+        """Performs a single optimization step.
+
+        Arguments:
+            closure (callable, optional): A closure that reevaluates the model
+                and returns the loss.
+        """
+        loss = None
+        if closure is not None:
+            loss = closure()
+
+        for group in self.param_groups:
+            for p in group['params']:
+                if p.grad is None:
+                    continue
+
+                grad = p.grad.data
+                state = self.state[p]
+
+                state['step'] += 1
+
+                if group['weight_decay'] != 0:
+                    if p.grad.data.is_sparse:
+                        raise RuntimeError("weight_decay option is not compatible with sparse gradients")
+                    grad = grad.add(group['weight_decay'], p.data)
+
+                clr = group['lr'] / (1 + (state['step'] - 1) * group['lr_decay'])
+
+                if grad.is_sparse:
+                    grad = grad.coalesce()  # the update is non-linear so indices must be unique
+                    grad_indices = grad._indices()
+                    grad_values = grad._values()
+                    size = grad.size()
+
+                    def make_sparse(values):
+                        constructor = grad.new
+                        if grad_indices.dim() == 0 or values.dim() == 0:
+                            return constructor().resize_as_(grad)
+                        return constructor(grad_indices, values, size)
+                    state['sum'].add_(make_sparse(grad_values.pow(2)))
+                    std = state['sum'].sparse_mask(grad)
+                    std_values = std._values().sqrt_().add_(1e-10)
+                    p.data.add_(-clr, make_sparse(grad_values / std_values))
+                else:
+                    state['sum'].addcmul_(1, grad, grad)
+                    std = state['sum'].sqrt().add_(1e-10)
+                    p.data.addcdiv_(-clr, grad, std)
+
+        return loss
    +
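Unlike Adadelta above, the step handles sparse gradients (the ``grad.is_sparse`` branch), which makes Adagrad usable with sparse embeddings. A sketch (illustrative, not part of the source):

    >>> import torch
    >>> import torch.nn as nn
    >>> emb = nn.Embedding(1000, 16, sparse=True)    # backward produces sparse gradients
    >>> opt = torch.optim.Adagrad(emb.parameters(), lr=0.1)
    >>> loss = emb(torch.tensor([1, 5, 9])).sum()
    >>> loss.backward()
    >>> opt.step()                                   # exercises the sparse branch above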
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/optim/adam.html b/docs/stable/_modules/torch/optim/adam.html
new file mode 100644
index 000000000000..50623a37eca9
--- /dev/null
+++ b/docs/stable/_modules/torch/optim/adam.html
@@ -0,0 +1,623 @@
+torch.optim.adam — PyTorch master documentation

    Source code for torch.optim.adam

    +import math
    +import torch
    +from .optimizer import Optimizer
    +
    +
    +
[docs]class Adam(Optimizer):
+    r"""Implements Adam algorithm.
+
+    It has been proposed in `Adam: A Method for Stochastic Optimization`_.
+
+    Arguments:
+        params (iterable): iterable of parameters to optimize or dicts defining
+            parameter groups
+        lr (float, optional): learning rate (default: 1e-3)
+        betas (Tuple[float, float], optional): coefficients used for computing
+            running averages of gradient and its square (default: (0.9, 0.999))
+        eps (float, optional): term added to the denominator to improve
+            numerical stability (default: 1e-8)
+        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
+        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
+            algorithm from the paper `On the Convergence of Adam and Beyond`_
+            (default: False)
+
+    .. _Adam\: A Method for Stochastic Optimization:
+        https://arxiv.org/abs/1412.6980
+    .. _On the Convergence of Adam and Beyond:
+        https://openreview.net/forum?id=ryQu7f-RZ
+    """
+
+    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
+                 weight_decay=0, amsgrad=False):
+        if not 0.0 <= lr:
+            raise ValueError("Invalid learning rate: {}".format(lr))
+        if not 0.0 <= eps:
+            raise ValueError("Invalid epsilon value: {}".format(eps))
+        if not 0.0 <= betas[0] < 1.0:
+            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
+        if not 0.0 <= betas[1] < 1.0:
+            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
+        defaults = dict(lr=lr, betas=betas, eps=eps,
+                        weight_decay=weight_decay, amsgrad=amsgrad)
+        super(Adam, self).__init__(params, defaults)
+
+    def __setstate__(self, state):
+        super(Adam, self).__setstate__(state)
+        for group in self.param_groups:
+            group.setdefault('amsgrad', False)
+
[docs]    def step(self, closure=None):
+        """Performs a single optimization step.
+
+        Arguments:
+            closure (callable, optional): A closure that reevaluates the model
+                and returns the loss.
+        """
+        loss = None
+        if closure is not None:
+            loss = closure()
+
+        for group in self.param_groups:
+            for p in group['params']:
+                if p.grad is None:
+                    continue
+                grad = p.grad.data
+                if grad.is_sparse:
+                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
+                amsgrad = group['amsgrad']
+
+                state = self.state[p]
+
+                # State initialization
+                if len(state) == 0:
+                    state['step'] = 0
+                    # Exponential moving average of gradient values
+                    state['exp_avg'] = torch.zeros_like(p.data)
+                    # Exponential moving average of squared gradient values
+                    state['exp_avg_sq'] = torch.zeros_like(p.data)
+                    if amsgrad:
+                        # Maintains max of all exp. moving avg. of sq. grad. values
+                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)
+
+                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
+                if amsgrad:
+                    max_exp_avg_sq = state['max_exp_avg_sq']
+                beta1, beta2 = group['betas']
+
+                state['step'] += 1
+
+                if group['weight_decay'] != 0:
+                    grad.add_(group['weight_decay'], p.data)
+
+                # Decay the first and second moment running average coefficient
+                exp_avg.mul_(beta1).add_(1 - beta1, grad)
+                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
+                if amsgrad:
+                    # Maintains the maximum of all 2nd moment running avg. till now
+                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
+                    # Use the max. for normalizing running avg. of gradient
+                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
+                else:
+                    denom = exp_avg_sq.sqrt().add_(group['eps'])
+
+                bias_correction1 = 1 - beta1 ** state['step']
+                bias_correction2 = 1 - beta2 ** state['step']
+                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
+
+                p.data.addcdiv_(-step_size, exp_avg, denom)
+
+        return loss
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/optim/adamax.html b/docs/stable/_modules/torch/optim/adamax.html
new file mode 100644
index 000000000000..209cc4346c33
--- /dev/null
+++ b/docs/stable/_modules/torch/optim/adamax.html
@@ -0,0 +1,602 @@

    Source code for torch.optim.adamax

    +import torch
    +from .optimizer import Optimizer
    +
    +
    +
+class Adamax(Optimizer):
+    """Implements Adamax algorithm (a variant of Adam based on infinity norm).
+
+    It has been proposed in `Adam: A Method for Stochastic Optimization`__.
+
+    Arguments:
+        params (iterable): iterable of parameters to optimize or dicts defining
+            parameter groups
+        lr (float, optional): learning rate (default: 2e-3)
+        betas (Tuple[float, float], optional): coefficients used for computing
+            running averages of gradient and its square (default: (0.9, 0.999))
+        eps (float, optional): term added to the denominator to improve
+            numerical stability (default: 1e-8)
+        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
+
+    __ https://arxiv.org/abs/1412.6980
+    """
+
+    def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,
+                 weight_decay=0):
+        if not 0.0 <= lr:
+            raise ValueError("Invalid learning rate: {}".format(lr))
+        if not 0.0 <= eps:
+            raise ValueError("Invalid epsilon value: {}".format(eps))
+        if not 0.0 <= betas[0] < 1.0:
+            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
+        if not 0.0 <= betas[1] < 1.0:
+            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
+        if not 0.0 <= weight_decay:
+            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
+
+        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
+        super(Adamax, self).__init__(params, defaults)
+
+    def step(self, closure=None):
+        """Performs a single optimization step.
+
+        Arguments:
+            closure (callable, optional): A closure that reevaluates the model
+                and returns the loss.
+        """
+        loss = None
+        if closure is not None:
+            loss = closure()
+
+        for group in self.param_groups:
+            for p in group['params']:
+                if p.grad is None:
+                    continue
+                grad = p.grad.data
+                if grad.is_sparse:
+                    raise RuntimeError('Adamax does not support sparse gradients')
+                state = self.state[p]
+
+                # State initialization
+                if len(state) == 0:
+                    state['step'] = 0
+                    state['exp_avg'] = torch.zeros_like(p.data)
+                    state['exp_inf'] = torch.zeros_like(p.data)
+
+                exp_avg, exp_inf = state['exp_avg'], state['exp_inf']
+                beta1, beta2 = group['betas']
+                eps = group['eps']
+
+                state['step'] += 1
+
+                if group['weight_decay'] != 0:
+                    grad = grad.add(group['weight_decay'], p.data)
+
+                # Update biased first moment estimate.
+                exp_avg.mul_(beta1).add_(1 - beta1, grad)
+                # Update the exponentially weighted infinity norm.
+                norm_buf = torch.cat([
+                    exp_inf.mul_(beta2).unsqueeze(0),
+                    grad.abs().add_(eps).unsqueeze_(0)
+                ], 0)
+                torch.max(norm_buf, 0, keepdim=False, out=(exp_inf, exp_inf.new().long()))
+
+                bias_correction = 1 - beta1 ** state['step']
+                clr = group['lr'] / bias_correction
+
+                p.data.addcdiv_(-clr, exp_avg, exp_inf)
+
+        return loss
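Adamax is a drop-in replacement for Adam that tracks an exponentially
weighted infinity norm instead of the squared-gradient average; note its
larger default learning rate of 2e-3. A short illustrative sketch (the
parameter and toy objective are assumptions):

    >>> import torch
    >>> w = torch.randn(5, requires_grad=True)
    >>> optimizer = torch.optim.Adamax([w], lr=2e-3)
    >>> optimizer.zero_grad()
    >>> loss = (w ** 2).sum()  # toy objective
    >>> loss.backward()
    >>> optimizer.step()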
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/optim/adamw.html b/docs/stable/_modules/torch/optim/adamw.html
new file mode 100644
index 000000000000..679141f67854
--- /dev/null
+++ b/docs/stable/_modules/torch/optim/adamw.html
@@ -0,0 +1,628 @@

    Source code for torch.optim.adamw

    +import math
    +import torch
    +from .optimizer import Optimizer
    +
    +
    +
+class AdamW(Optimizer):
+    r"""Implements AdamW algorithm.
+
+    The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.
+    The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_.
+
+    Arguments:
+        params (iterable): iterable of parameters to optimize or dicts defining
+            parameter groups
+        lr (float, optional): learning rate (default: 1e-3)
+        betas (Tuple[float, float], optional): coefficients used for computing
+            running averages of gradient and its square (default: (0.9, 0.999))
+        eps (float, optional): term added to the denominator to improve
+            numerical stability (default: 1e-8)
+        weight_decay (float, optional): weight decay coefficient (default: 1e-2)
+        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
+            algorithm from the paper `On the Convergence of Adam and Beyond`_
+            (default: False)
+
+    .. _Adam\: A Method for Stochastic Optimization:
+        https://arxiv.org/abs/1412.6980
+    .. _Decoupled Weight Decay Regularization:
+        https://arxiv.org/abs/1711.05101
+    .. _On the Convergence of Adam and Beyond:
+        https://openreview.net/forum?id=ryQu7f-RZ
+    """
+
+    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
+                 weight_decay=1e-2, amsgrad=False):
+        if not 0.0 <= lr:
+            raise ValueError("Invalid learning rate: {}".format(lr))
+        if not 0.0 <= eps:
+            raise ValueError("Invalid epsilon value: {}".format(eps))
+        if not 0.0 <= betas[0] < 1.0:
+            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
+        if not 0.0 <= betas[1] < 1.0:
+            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
+        defaults = dict(lr=lr, betas=betas, eps=eps,
+                        weight_decay=weight_decay, amsgrad=amsgrad)
+        super(AdamW, self).__init__(params, defaults)
+
+    def __setstate__(self, state):
+        super(AdamW, self).__setstate__(state)
+        for group in self.param_groups:
+            group.setdefault('amsgrad', False)
+
+    def step(self, closure=None):
+        """Performs a single optimization step.
+
+        Arguments:
+            closure (callable, optional): A closure that reevaluates the model
+                and returns the loss.
+        """
+        loss = None
+        if closure is not None:
+            loss = closure()
+
+        for group in self.param_groups:
+            for p in group['params']:
+                if p.grad is None:
+                    continue
+
+                # Perform stepweight decay
+                p.data.mul_(1 - group['lr'] * group['weight_decay'])
+
+                # Perform optimization step
+                grad = p.grad.data
+                if grad.is_sparse:
+                    raise RuntimeError('AdamW does not support sparse gradients, please consider SparseAdam instead')
+                amsgrad = group['amsgrad']
+
+                state = self.state[p]
+
+                # State initialization
+                if len(state) == 0:
+                    state['step'] = 0
+                    # Exponential moving average of gradient values
+                    state['exp_avg'] = torch.zeros_like(p.data)
+                    # Exponential moving average of squared gradient values
+                    state['exp_avg_sq'] = torch.zeros_like(p.data)
+                    if amsgrad:
+                        # Maintains max of all exp. moving avg. of sq. grad. values
+                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)
+
+                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
+                if amsgrad:
+                    max_exp_avg_sq = state['max_exp_avg_sq']
+                beta1, beta2 = group['betas']
+
+                state['step'] += 1
+
+                # Decay the first and second moment running average coefficient
+                exp_avg.mul_(beta1).add_(1 - beta1, grad)
+                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
+                if amsgrad:
+                    # Maintains the maximum of all 2nd moment running avg. till now
+                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
+                    # Use the max. for normalizing running avg. of gradient
+                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
+                else:
+                    denom = exp_avg_sq.sqrt().add_(group['eps'])
+
+                bias_correction1 = 1 - beta1 ** state['step']
+                bias_correction2 = 1 - beta2 ** state['step']
+                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
+
+                p.data.addcdiv_(-step_size, exp_avg, denom)
+
+        return loss
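Unlike ``Adam``, where ``weight_decay`` is folded into the gradient as an
L2 penalty, ``AdamW`` decays the weights directly
(``p.data.mul_(1 - lr * weight_decay)``) before the Adam-style update, as
shown in the code above. A brief illustrative sketch (model and loss are
assumptions):

    >>> import torch
    >>> import torch.nn as nn
    >>> model = nn.Linear(10, 1)
    >>> optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3, weight_decay=1e-2)
    >>> for _ in range(5):
    >>>     optimizer.zero_grad()
    >>>     loss = (model(torch.randn(16, 10)) ** 2).mean()
    >>>     loss.backward()
    >>>     optimizer.step()  # decoupled decay, then the Adam-style update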
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/optim/asgd.html b/docs/stable/_modules/torch/optim/asgd.html
new file mode 100644
index 000000000000..76d262c0f5d7
--- /dev/null
+++ b/docs/stable/_modules/torch/optim/asgd.html
@@ -0,0 +1,598 @@

    Source code for torch.optim.asgd

    +import math
    +import torch
    +from .optimizer import Optimizer
    +
    +
    +
+class ASGD(Optimizer):
+    """Implements Averaged Stochastic Gradient Descent.
+
+    It has been proposed in `Acceleration of stochastic approximation by
+    averaging`_.
+
+    Arguments:
+        params (iterable): iterable of parameters to optimize or dicts defining
+            parameter groups
+        lr (float, optional): learning rate (default: 1e-2)
+        lambd (float, optional): decay term (default: 1e-4)
+        alpha (float, optional): power for eta update (default: 0.75)
+        t0 (float, optional): point at which to start averaging (default: 1e6)
+        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
+
+    .. _Acceleration of stochastic approximation by averaging:
+        http://dl.acm.org/citation.cfm?id=131098
+    """
+
+    def __init__(self, params, lr=1e-2, lambd=1e-4, alpha=0.75, t0=1e6, weight_decay=0):
+        if not 0.0 <= lr:
+            raise ValueError("Invalid learning rate: {}".format(lr))
+        if not 0.0 <= weight_decay:
+            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
+
+        defaults = dict(lr=lr, lambd=lambd, alpha=alpha, t0=t0,
+                        weight_decay=weight_decay)
+        super(ASGD, self).__init__(params, defaults)
+
+    def step(self, closure=None):
+        """Performs a single optimization step.
+
+        Arguments:
+            closure (callable, optional): A closure that reevaluates the model
+                and returns the loss.
+        """
+        loss = None
+        if closure is not None:
+            loss = closure()
+
+        for group in self.param_groups:
+            for p in group['params']:
+                if p.grad is None:
+                    continue
+                grad = p.grad.data
+                if grad.is_sparse:
+                    raise RuntimeError('ASGD does not support sparse gradients')
+                state = self.state[p]
+
+                # State initialization
+                if len(state) == 0:
+                    state['step'] = 0
+                    state['eta'] = group['lr']
+                    state['mu'] = 1
+                    state['ax'] = torch.zeros_like(p.data)
+
+                state['step'] += 1
+
+                if group['weight_decay'] != 0:
+                    grad = grad.add(group['weight_decay'], p.data)
+
+                # decay term
+                p.data.mul_(1 - group['lambd'] * state['eta'])
+
+                # update parameter
+                p.data.add_(-state['eta'], grad)
+
+                # averaging
+                if state['mu'] != 1:
+                    state['ax'].add_(p.data.sub(state['ax']).mul(state['mu']))
+                else:
+                    state['ax'].copy_(p.data)
+
+                # update eta and mu
+                state['eta'] = (group['lr'] /
+                                math.pow((1 + group['lambd'] * group['lr'] * state['step']), group['alpha']))
+                state['mu'] = 1 / max(1, state['step'] - group['t0'])
+
+        return loss
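As the code above shows, the averaged iterate is maintained per parameter
under the ``'ax'`` state key. An illustrative sketch (the parameter, toy
objective, and ``t0=0`` choice are assumptions; ``t0=0`` simply starts
averaging immediately):

    >>> import torch
    >>> w = torch.randn(5, requires_grad=True)
    >>> optimizer = torch.optim.ASGD([w], lr=1e-2, t0=0)
    >>> for _ in range(10):
    >>>     optimizer.zero_grad()
    >>>     loss = (w ** 2).sum()
    >>>     loss.backward()
    >>>     optimizer.step()
    >>> averaged_w = optimizer.state[w]['ax']  # averaged parameter value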
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/optim/lbfgs.html b/docs/stable/_modules/torch/optim/lbfgs.html
new file mode 100644
index 000000000000..18fac532a870
--- /dev/null
+++ b/docs/stable/_modules/torch/optim/lbfgs.html
@@ -0,0 +1,984 @@

    Source code for torch.optim.lbfgs

    +import torch
    +from functools import reduce
    +from .optimizer import Optimizer
    +
    +
    +def _cubic_interpolate(x1, f1, g1, x2, f2, g2, bounds=None):
    +    # ported from https://github.com/torch/optim/blob/master/polyinterp.lua
    +    # Compute bounds of interpolation area
    +    if bounds is not None:
    +        xmin_bound, xmax_bound = bounds
    +    else:
    +        xmin_bound, xmax_bound = (x1, x2) if x1 <= x2 else (x2, x1)
    +
    +    # Code for most common case: cubic interpolation of 2 points
    +    #   w/ function and derivative values for both
    +    # Solution in this case (where x2 is the farthest point):
    +    #   d1 = g1 + g2 - 3*(f1-f2)/(x1-x2);
    +    #   d2 = sqrt(d1^2 - g1*g2);
    +    #   min_pos = x2 - (x2 - x1)*((g2 + d2 - d1)/(g2 - g1 + 2*d2));
    +    #   t_new = min(max(min_pos,xmin_bound),xmax_bound);
    +    d1 = g1 + g2 - 3 * (f1 - f2) / (x1 - x2)
    +    d2_square = d1**2 - g1 * g2
    +    if d2_square >= 0:
    +        d2 = d2_square.sqrt()
    +        if x1 <= x2:
    +            min_pos = x2 - (x2 - x1) * ((g2 + d2 - d1) / (g2 - g1 + 2 * d2))
    +        else:
    +            min_pos = x1 - (x1 - x2) * ((g1 + d2 - d1) / (g1 - g2 + 2 * d2))
    +        return min(max(min_pos, xmin_bound), xmax_bound)
    +    else:
    +        return (xmin_bound + xmax_bound) / 2.
    +
    +
    +def _strong_wolfe(obj_func,
    +                  x,
    +                  t,
    +                  d,
    +                  f,
    +                  g,
    +                  gtd,
    +                  c1=1e-4,
    +                  c2=0.9,
    +                  tolerance_change=1e-9,
    +                  max_ls=25):
    +    # ported from https://github.com/torch/optim/blob/master/lswolfe.lua
    +    d_norm = d.abs().max()
    +    g = g.clone()
    +    # evaluate objective and gradient using initial step
    +    f_new, g_new = obj_func(x, t, d)
    +    ls_func_evals = 1
    +    gtd_new = g_new.dot(d)
    +
    +    # bracket an interval containing a point satisfying the Wolfe criteria
    +    t_prev, f_prev, g_prev, gtd_prev = 0, f, g, gtd
    +    done = False
    +    ls_iter = 0
    +    while ls_iter < max_ls:
    +        # check conditions
    +        if f_new > (f + c1 * t * gtd) or (ls_iter > 1 and f_new >= f_prev):
    +            bracket = [t_prev, t]
    +            bracket_f = [f_prev, f_new]
    +            bracket_g = [g_prev, g_new.clone()]
    +            bracket_gtd = [gtd_prev, gtd_new]
    +            break
    +
    +        if abs(gtd_new) <= -c2 * gtd:
    +            bracket = [t]
    +            bracket_f = [f_new]
    +            bracket_g = [g_new]
    +            done = True
    +            break
    +
    +        if gtd_new >= 0:
    +            bracket = [t_prev, t]
    +            bracket_f = [f_prev, f_new]
    +            bracket_g = [g_prev, g_new.clone()]
    +            bracket_gtd = [gtd_prev, gtd_new]
    +            break
    +
    +        # interpolate
    +        min_step = t + 0.01 * (t - t_prev)
    +        max_step = t * 10
    +        tmp = t
    +        t = _cubic_interpolate(
    +            t_prev,
    +            f_prev,
    +            gtd_prev,
    +            t,
    +            f_new,
    +            gtd_new,
    +            bounds=(min_step, max_step))
    +
    +        # next step
    +        t_prev = tmp
    +        f_prev = f_new
    +        g_prev = g_new.clone()
    +        gtd_prev = gtd_new
    +        f_new, g_new = obj_func(x, t, d)
    +        ls_func_evals += 1
    +        gtd_new = g_new.dot(d)
    +        ls_iter += 1
    +
    +    # reached max number of iterations?
    +    if ls_iter == max_ls:
    +        bracket = [0, t]
    +        bracket_f = [f, f_new]
    +        bracket_g = [g, g_new]
    +
    +    # zoom phase: we now have a point satisfying the criteria, or
    +    # a bracket around it. We refine the bracket until we find the
    +    # exact point satisfying the criteria
    +    insuf_progress = False
    +    # find high and low points in bracket
    +    low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[-1] else (1, 0)
    +    while not done and ls_iter < max_ls:
    +        # compute new trial value
    +        t = _cubic_interpolate(bracket[0], bracket_f[0], bracket_gtd[0],
    +                               bracket[1], bracket_f[1], bracket_gtd[1])
    +
    +        # test that we are making sufficient progress:
+        # in case `t` is too close to a boundary, we mark that we are making
    +        # insufficient progress, and if
    +        #   + we have made insufficient progress in the last step, or
+        #   + `t` is at one of the boundaries,
    +        # we will move `t` to a position which is `0.1 * len(bracket)`
    +        # away from the nearest boundary point.
    +        eps = 0.1 * (max(bracket) - min(bracket))
    +        if min(max(bracket) - t, t - min(bracket)) < eps:
    +            # interpolation close to boundary
    +            if insuf_progress or t >= max(bracket) or t <= min(bracket):
    +                # evaluate at 0.1 away from boundary
    +                if abs(t - max(bracket)) < abs(t - min(bracket)):
    +                    t = max(bracket) - eps
    +                else:
    +                    t = min(bracket) + eps
    +                insuf_progress = False
    +            else:
    +                insuf_progress = True
    +        else:
    +            insuf_progress = False
    +
    +        # Evaluate new point
    +        f_new, g_new = obj_func(x, t, d)
    +        ls_func_evals += 1
    +        gtd_new = g_new.dot(d)
    +        ls_iter += 1
    +
    +        if f_new > (f + c1 * t * gtd) or f_new >= bracket_f[low_pos]:
    +            # Armijo condition not satisfied or not lower than lowest point
    +            bracket[high_pos] = t
    +            bracket_f[high_pos] = f_new
    +            bracket_g[high_pos] = g_new.clone()
    +            bracket_gtd[high_pos] = gtd_new
    +            low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[1] else (1, 0)
    +        else:
    +            if abs(gtd_new) <= -c2 * gtd:
    +                # Wolfe conditions satisfied
    +                done = True
    +            elif gtd_new * (bracket[high_pos] - bracket[low_pos]) >= 0:
    +                # old high becomes new low
    +                bracket[high_pos] = bracket[low_pos]
    +                bracket_f[high_pos] = bracket_f[low_pos]
    +                bracket_g[high_pos] = bracket_g[low_pos]
    +                bracket_gtd[high_pos] = bracket_gtd[low_pos]
    +
    +            # new point becomes new low
    +            bracket[low_pos] = t
    +            bracket_f[low_pos] = f_new
    +            bracket_g[low_pos] = g_new.clone()
    +            bracket_gtd[low_pos] = gtd_new
    +
+        # the line-search bracket has become too small
    +        if abs(bracket[1] - bracket[0]) * d_norm < tolerance_change:
    +            break
    +
    +    # return stuff
    +    t = bracket[low_pos]
    +    f_new = bracket_f[low_pos]
    +    g_new = bracket_g[low_pos]
    +    return f_new, g_new, t, ls_func_evals
    +
    +
    +
+class LBFGS(Optimizer):
+    """Implements L-BFGS algorithm, heavily inspired by `minFunc
+    <https://www.cs.ubc.ca/~schmidtm/Software/minFunc.html>`.
+
+    .. warning::
+        This optimizer doesn't support per-parameter options and parameter
+        groups (there can be only one).
+
+    .. warning::
+        Right now all parameters have to be on a single device. This will be
+        improved in the future.
+
+    .. note::
+        This is a very memory intensive optimizer (it requires additional
+        ``param_bytes * (history_size + 1)`` bytes). If it doesn't fit in memory
+        try reducing the history size, or use a different algorithm.
+
+    Arguments:
+        lr (float): learning rate (default: 1)
+        max_iter (int): maximal number of iterations per optimization step
+            (default: 20)
+        max_eval (int): maximal number of function evaluations per optimization
+            step (default: max_iter * 1.25).
+        tolerance_grad (float): termination tolerance on first order optimality
+            (default: 1e-5).
+        tolerance_change (float): termination tolerance on function
+            value/parameter changes (default: 1e-9).
+        history_size (int): update history size (default: 100).
+        line_search_fn (str): either 'strong_wolfe' or None (default: None).
+    """
+
+    def __init__(self,
+                 params,
+                 lr=1,
+                 max_iter=20,
+                 max_eval=None,
+                 tolerance_grad=1e-5,
+                 tolerance_change=1e-9,
+                 history_size=100,
+                 line_search_fn=None):
+        if max_eval is None:
+            max_eval = max_iter * 5 // 4
+        defaults = dict(
+            lr=lr,
+            max_iter=max_iter,
+            max_eval=max_eval,
+            tolerance_grad=tolerance_grad,
+            tolerance_change=tolerance_change,
+            history_size=history_size,
+            line_search_fn=line_search_fn)
+        super(LBFGS, self).__init__(params, defaults)
+
+        if len(self.param_groups) != 1:
+            raise ValueError("LBFGS doesn't support per-parameter options "
+                             "(parameter groups)")
+
+        self._params = self.param_groups[0]['params']
+        self._numel_cache = None
+
+    def _numel(self):
+        if self._numel_cache is None:
+            self._numel_cache = reduce(lambda total, p: total + p.numel(), self._params, 0)
+        return self._numel_cache
+
+    def _gather_flat_grad(self):
+        views = []
+        for p in self._params:
+            if p.grad is None:
+                view = p.new(p.numel()).zero_()
+            elif p.grad.is_sparse:
+                view = p.grad.to_dense().view(-1)
+            else:
+                view = p.grad.view(-1)
+            views.append(view)
+        return torch.cat(views, 0)
+
+    def _add_grad(self, step_size, update):
+        offset = 0
+        for p in self._params:
+            numel = p.numel()
+            # view as to avoid deprecated pointwise semantics
+            p.data.add_(step_size, update[offset:offset + numel].view_as(p.data))
+            offset += numel
+        assert offset == self._numel()
+
+    def _clone_param(self):
+        return [p.clone() for p in self._params]
+
+    def _set_param(self, params_data):
+        for p, pdata in zip(self._params, params_data):
+            p.data.copy_(pdata)
+
+    def _directional_evaluate(self, closure, x, t, d):
+        self._add_grad(t, d)
+        loss = float(closure())
+        flat_grad = self._gather_flat_grad()
+        self._set_param(x)
+        return loss, flat_grad
+
+    def step(self, closure):
+        """Performs a single optimization step.
+
+        Arguments:
+            closure (callable): A closure that reevaluates the model
+                and returns the loss.
+        """
+        assert len(self.param_groups) == 1
+
+        group = self.param_groups[0]
+        lr = group['lr']
+        max_iter = group['max_iter']
+        max_eval = group['max_eval']
+        tolerance_grad = group['tolerance_grad']
+        tolerance_change = group['tolerance_change']
+        line_search_fn = group['line_search_fn']
+        history_size = group['history_size']
+
+        # NOTE: LBFGS has only global state, but we register it as state for
+        # the first param, because this helps with casting in load_state_dict
+        state = self.state[self._params[0]]
+        state.setdefault('func_evals', 0)
+        state.setdefault('n_iter', 0)
+
+        # evaluate initial f(x) and df/dx
+        orig_loss = closure()
+        loss = float(orig_loss)
+        current_evals = 1
+        state['func_evals'] += 1
+
+        flat_grad = self._gather_flat_grad()
+        opt_cond = flat_grad.abs().max() <= tolerance_grad
+
+        # optimal condition
+        if opt_cond:
+            return orig_loss
+
+        # tensors cached in state (for tracing)
+        d = state.get('d')
+        t = state.get('t')
+        old_dirs = state.get('old_dirs')
+        old_stps = state.get('old_stps')
+        ro = state.get('ro')
+        H_diag = state.get('H_diag')
+        prev_flat_grad = state.get('prev_flat_grad')
+        prev_loss = state.get('prev_loss')
+
+        n_iter = 0
+        # optimize for a max of max_iter iterations
+        while n_iter < max_iter:
+            # keep track of nb of iterations
+            n_iter += 1
+            state['n_iter'] += 1
+
+            ############################################################
+            # compute gradient descent direction
+            ############################################################
+            if state['n_iter'] == 1:
+                d = flat_grad.neg()
+                old_dirs = []
+                old_stps = []
+                ro = []
+                H_diag = 1
+            else:
+                # do lbfgs update (update memory)
+                y = flat_grad.sub(prev_flat_grad)
+                s = d.mul(t)
+                ys = y.dot(s)  # y*s
+                if ys > 1e-10:
+                    # updating memory
+                    if len(old_dirs) == history_size:
+                        # shift history by one (limited-memory)
+                        old_dirs.pop(0)
+                        old_stps.pop(0)
+                        ro.pop(0)
+
+                    # store new direction/step
+                    old_dirs.append(y)
+                    old_stps.append(s)
+                    ro.append(1. / ys)
+
+                    # update scale of initial Hessian approximation
+                    H_diag = ys / y.dot(y)  # (y*y)
+
+                # compute the approximate (L-BFGS) inverse Hessian
+                # multiplied by the gradient
+                num_old = len(old_dirs)
+
+                if 'al' not in state:
+                    state['al'] = [None] * history_size
+                al = state['al']
+
+                # iteration in L-BFGS loop collapsed to use just one buffer
+                q = flat_grad.neg()
+                for i in range(num_old - 1, -1, -1):
+                    al[i] = old_stps[i].dot(q) * ro[i]
+                    q.add_(-al[i], old_dirs[i])
+
+                # multiply by initial Hessian
+                # r/d is the final direction
+                d = r = torch.mul(q, H_diag)
+                for i in range(num_old):
+                    be_i = old_dirs[i].dot(r) * ro[i]
+                    r.add_(al[i] - be_i, old_stps[i])
+
+            if prev_flat_grad is None:
+                prev_flat_grad = flat_grad.clone()
+            else:
+                prev_flat_grad.copy_(flat_grad)
+            prev_loss = loss
+
+            ############################################################
+            # compute step length
+            ############################################################
+            # reset initial guess for step size
+            if state['n_iter'] == 1:
+                t = min(1., 1. / flat_grad.abs().sum()) * lr
+            else:
+                t = lr
+
+            # directional derivative
+            gtd = flat_grad.dot(d)  # g * d
+
+            # directional derivative is below tolerance
+            if gtd > -tolerance_change:
+                break
+
+            # optional line search: user function
+            ls_func_evals = 0
+            if line_search_fn is not None:
+                # perform line search, using user function
+                if line_search_fn != "strong_wolfe":
+                    raise RuntimeError("only 'strong_wolfe' is supported")
+                else:
+                    x_init = self._clone_param()
+
+                    def obj_func(x, t, d):
+                        return self._directional_evaluate(closure, x, t, d)
+
+                    loss, flat_grad, t, ls_func_evals = _strong_wolfe(
+                        obj_func, x_init, t, d, loss, flat_grad, gtd)
+                self._add_grad(t, d)
+                opt_cond = flat_grad.abs().max() <= tolerance_grad
+            else:
+                # no line search, simply move with fixed-step
+                self._add_grad(t, d)
+                if n_iter != max_iter:
+                    # re-evaluate function only if not in last iteration
+                    # the reason we do this: in a stochastic setting,
+                    # no use to re-evaluate that function here
+                    loss = float(closure())
+                    flat_grad = self._gather_flat_grad()
+                    opt_cond = flat_grad.abs().max() <= tolerance_grad
+                    ls_func_evals = 1
+
+            # update func eval
+            current_evals += ls_func_evals
+            state['func_evals'] += ls_func_evals
+
+            ############################################################
+            # check conditions
+            ############################################################
+            if n_iter == max_iter:
+                break
+
+            if current_evals >= max_eval:
+                break
+
+            # optimal condition
+            if opt_cond:
+                break
+
+            # lack of progress
+            if d.mul(t).abs().max() <= tolerance_change:
+                break
+
+            if abs(loss - prev_loss) < tolerance_change:
+                break
+
+        state['d'] = d
+        state['t'] = t
+        state['old_dirs'] = old_dirs
+        state['old_stps'] = old_stps
+        state['ro'] = ro
+        state['H_diag'] = H_diag
+        state['prev_flat_grad'] = prev_flat_grad
+        state['prev_loss'] = prev_loss
+
+        return orig_loss
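Because ``step`` takes a mandatory closure and may evaluate it several
times per step (especially with the strong Wolfe line search), gradients
must be recomputed inside the closure. An illustrative sketch on a toy
quadratic (the variable and objective are assumptions):

    >>> import torch
    >>> x = torch.randn(10, requires_grad=True)
    >>> optimizer = torch.optim.LBFGS([x], lr=1, line_search_fn='strong_wolfe')
    >>> def closure():
    >>>     optimizer.zero_grad()
    >>>     loss = (x ** 2).sum()
    >>>     loss.backward()
    >>>     return loss
    >>> for _ in range(5):
    >>>     optimizer.step(closure)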
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/optim/lr_scheduler.html b/docs/stable/_modules/torch/optim/lr_scheduler.html
new file mode 100644
index 000000000000..faa854fd6779
--- /dev/null
+++ b/docs/stable/_modules/torch/optim/lr_scheduler.html
@@ -0,0 +1,1275 @@

    Source code for torch.optim.lr_scheduler

    +import types
    +import math
    +from torch._six import inf
    +from functools import partial, wraps
    +import warnings
    +from bisect import bisect_right
    +
    +from .optimizer import Optimizer
    +
    +
    +class _LRScheduler(object):
    +    def __init__(self, optimizer, last_epoch=-1):
    +        if not isinstance(optimizer, Optimizer):
    +            raise TypeError('{} is not an Optimizer'.format(
    +                type(optimizer).__name__))
    +        self.optimizer = optimizer
    +        if last_epoch == -1:
    +            for group in optimizer.param_groups:
    +                group.setdefault('initial_lr', group['lr'])
    +            last_epoch = 0
    +        else:
    +            for i, group in enumerate(optimizer.param_groups):
    +                if 'initial_lr' not in group:
    +                    raise KeyError("param 'initial_lr' is not specified "
    +                                   "in param_groups[{}] when resuming an optimizer".format(i))
    +        self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))
    +        self.last_epoch = last_epoch
    +
    +        # Following https://github.com/pytorch/pytorch/issues/20124
    +        # We would like to ensure that `lr_scheduler.step()` is called after
    +        # `optimizer.step()`
    +        def with_counter(func, opt):
    +            @wraps(func)
    +            def wrapper(*args, **kwargs):
    +                opt._step_count += 1
    +                return func(*args, **kwargs)
    +            wrapper._with_counter = True
    +            return wrapper
    +
    +        self.optimizer.step = with_counter(self.optimizer.step, self.optimizer)
    +        self.optimizer._step_count = 0
    +        self._step_count = 0
    +        self.step(last_epoch)
    +
    +    def state_dict(self):
    +        """Returns the state of the scheduler as a :class:`dict`.
    +
    +        It contains an entry for every variable in self.__dict__ which
    +        is not the optimizer.
    +        """
    +        return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
    +
    +    def load_state_dict(self, state_dict):
+        """Loads the scheduler's state.
    +
    +        Arguments:
    +            state_dict (dict): scheduler state. Should be an object returned
    +                from a call to :meth:`state_dict`.
    +        """
    +        self.__dict__.update(state_dict)
    +
    +    def get_lr(self):
    +        raise NotImplementedError
    +
    +    def step(self, epoch=None):
    +        # Raise a warning if old pattern is detected
    +        # https://github.com/pytorch/pytorch/issues/20124
    +        if self._step_count == 1:
    +            if not hasattr(self.optimizer.step, "_with_counter"):
    +                warnings.warn("Seems like `optimizer.step()` has been overridden after learning rate scheduler "
    +                              "initialization. Please, make sure to call `optimizer.step()` before "
    +                              "`lr_scheduler.step()`. See more details at "
    +                              "https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
    +
    +            # Just check if there were two first lr_scheduler.step() calls before optimizer.step()
    +            elif self.optimizer._step_count < 1:
    +                warnings.warn("Detected call of `lr_scheduler.step()` before `optimizer.step()`. "
    +                              "In PyTorch 1.1.0 and later, you should call them in the opposite order: "
    +                              "`optimizer.step()` before `lr_scheduler.step()`.  Failure to do this "
+                              "will result in PyTorch skipping the first value of the learning rate schedule. "
    +                              "See more details at "
    +                              "https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
    +        self._step_count += 1
    +
    +        if epoch is None:
    +            epoch = self.last_epoch + 1
    +        self.last_epoch = epoch
    +        for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
    +            param_group['lr'] = lr
    +
    +
    +
    [docs]class LambdaLR(_LRScheduler): + """Sets the learning rate of each parameter group to the initial lr + times a given function. When last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + lr_lambda (function or list): A function which computes a multiplicative + factor given an integer parameter epoch, or a list of such + functions, one for each group in optimizer.param_groups. + last_epoch (int): The index of last epoch. Default: -1. + + Example: + >>> # Assuming optimizer has two groups. + >>> lambda1 = lambda epoch: epoch // 30 + >>> lambda2 = lambda epoch: 0.95 ** epoch + >>> scheduler = LambdaLR(optimizer, lr_lambda=[lambda1, lambda2]) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + """ + + def __init__(self, optimizer, lr_lambda, last_epoch=-1): + self.optimizer = optimizer + if not isinstance(lr_lambda, list) and not isinstance(lr_lambda, tuple): + self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups) + else: + if len(lr_lambda) != len(optimizer.param_groups): + raise ValueError("Expected {} lr_lambdas, but got {}".format( + len(optimizer.param_groups), len(lr_lambda))) + self.lr_lambdas = list(lr_lambda) + self.last_epoch = last_epoch + super(LambdaLR, self).__init__(optimizer, last_epoch) + +
    [docs] def state_dict(self): + """Returns the state of the scheduler as a :class:`dict`. + + It contains an entry for every variable in self.__dict__ which + is not the optimizer. + The learning rate lambda functions will only be saved if they are callable objects + and not if they are functions or lambdas. + """ + state_dict = {key: value for key, value in self.__dict__.items() if key not in ('optimizer', 'lr_lambdas')} + state_dict['lr_lambdas'] = [None] * len(self.lr_lambdas) + + for idx, fn in enumerate(self.lr_lambdas): + if not isinstance(fn, types.FunctionType): + state_dict['lr_lambdas'][idx] = fn.__dict__.copy() + + return state_dict
    + +
    [docs] def load_state_dict(self, state_dict): + """Loads the schedulers state. + + Arguments: + state_dict (dict): scheduler state. Should be an object returned + from a call to :meth:`state_dict`. + """ + lr_lambdas = state_dict.pop('lr_lambdas') + self.__dict__.update(state_dict) + + for idx, fn in enumerate(lr_lambdas): + if fn is not None: + self.lr_lambdas[idx].__dict__.update(fn)
    + + def get_lr(self): + return [base_lr * lmbda(self.last_epoch) + for lmbda, base_lr in zip(self.lr_lambdas, self.base_lrs)]
    + + +
    [docs]class StepLR(_LRScheduler): + """Sets the learning rate of each parameter group to the initial lr + decayed by gamma every step_size epochs. When last_epoch=-1, sets + initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + step_size (int): Period of learning rate decay. + gamma (float): Multiplicative factor of learning rate decay. + Default: 0.1. + last_epoch (int): The index of last epoch. Default: -1. + + Example: + >>> # Assuming optimizer uses lr = 0.05 for all groups + >>> # lr = 0.05 if epoch < 30 + >>> # lr = 0.005 if 30 <= epoch < 60 + >>> # lr = 0.0005 if 60 <= epoch < 90 + >>> # ... + >>> scheduler = StepLR(optimizer, step_size=30, gamma=0.1) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + """ + + def __init__(self, optimizer, step_size, gamma=0.1, last_epoch=-1): + self.step_size = step_size + self.gamma = gamma + super(StepLR, self).__init__(optimizer, last_epoch) + + def get_lr(self): + return [base_lr * self.gamma ** (self.last_epoch // self.step_size) + for base_lr in self.base_lrs]
    + + +
    [docs]class MultiStepLR(_LRScheduler): + """Set the learning rate of each parameter group to the initial lr decayed + by gamma once the number of epoch reaches one of the milestones. When + last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + milestones (list): List of epoch indices. Must be increasing. + gamma (float): Multiplicative factor of learning rate decay. + Default: 0.1. + last_epoch (int): The index of last epoch. Default: -1. + + Example: + >>> # Assuming optimizer uses lr = 0.05 for all groups + >>> # lr = 0.05 if epoch < 30 + >>> # lr = 0.005 if 30 <= epoch < 80 + >>> # lr = 0.0005 if epoch >= 80 + >>> scheduler = MultiStepLR(optimizer, milestones=[30,80], gamma=0.1) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + """ + + def __init__(self, optimizer, milestones, gamma=0.1, last_epoch=-1): + if not list(milestones) == sorted(milestones): + raise ValueError('Milestones should be a list of' + ' increasing integers. Got {}', milestones) + self.milestones = milestones + self.gamma = gamma + super(MultiStepLR, self).__init__(optimizer, last_epoch) + + def get_lr(self): + return [base_lr * self.gamma ** bisect_right(self.milestones, self.last_epoch) + for base_lr in self.base_lrs]
    + + +
    [docs]class ExponentialLR(_LRScheduler): + """Set the learning rate of each parameter group to the initial lr decayed + by gamma every epoch. When last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + gamma (float): Multiplicative factor of learning rate decay. + last_epoch (int): The index of last epoch. Default: -1. + """ + + def __init__(self, optimizer, gamma, last_epoch=-1): + self.gamma = gamma + super(ExponentialLR, self).__init__(optimizer, last_epoch) + + def get_lr(self): + return [base_lr * self.gamma ** self.last_epoch + for base_lr in self.base_lrs]
    + + +
    [docs]class CosineAnnealingLR(_LRScheduler): + r"""Set the learning rate of each parameter group using a cosine annealing + schedule, where :math:`\eta_{max}` is set to the initial lr and + :math:`T_{cur}` is the number of epochs since the last restart in SGDR: + + .. math:: + \eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})(1 + + \cos(\frac{T_{cur}}{T_{max}}\pi)) + + When last_epoch=-1, sets initial lr as lr. + + It has been proposed in + `SGDR: Stochastic Gradient Descent with Warm Restarts`_. Note that this only + implements the cosine annealing part of SGDR, and not the restarts. + + Args: + optimizer (Optimizer): Wrapped optimizer. + T_max (int): Maximum number of iterations. + eta_min (float): Minimum learning rate. Default: 0. + last_epoch (int): The index of last epoch. Default: -1. + + .. _SGDR\: Stochastic Gradient Descent with Warm Restarts: + https://arxiv.org/abs/1608.03983 + """ + + def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1): + self.T_max = T_max + self.eta_min = eta_min + super(CosineAnnealingLR, self).__init__(optimizer, last_epoch) + + def get_lr(self): + return [self.eta_min + (base_lr - self.eta_min) * + (1 + math.cos(math.pi * self.last_epoch / self.T_max)) / 2 + for base_lr in self.base_lrs]
    + + +
    [docs]class ReduceLROnPlateau(object): + """Reduce learning rate when a metric has stopped improving. + Models often benefit from reducing the learning rate by a factor + of 2-10 once learning stagnates. This scheduler reads a metrics + quantity and if no improvement is seen for a 'patience' number + of epochs, the learning rate is reduced. + + Args: + optimizer (Optimizer): Wrapped optimizer. + mode (str): One of `min`, `max`. In `min` mode, lr will + be reduced when the quantity monitored has stopped + decreasing; in `max` mode it will be reduced when the + quantity monitored has stopped increasing. Default: 'min'. + factor (float): Factor by which the learning rate will be + reduced. new_lr = lr * factor. Default: 0.1. + patience (int): Number of epochs with no improvement after + which learning rate will be reduced. For example, if + `patience = 2`, then we will ignore the first 2 epochs + with no improvement, and will only decrease the LR after the + 3rd epoch if the loss still hasn't improved then. + Default: 10. + verbose (bool): If ``True``, prints a message to stdout for + each update. Default: ``False``. + threshold (float): Threshold for measuring the new optimum, + to only focus on significant changes. Default: 1e-4. + threshold_mode (str): One of `rel`, `abs`. In `rel` mode, + dynamic_threshold = best * ( 1 + threshold ) in 'max' + mode or best * ( 1 - threshold ) in `min` mode. + In `abs` mode, dynamic_threshold = best + threshold in + `max` mode or best - threshold in `min` mode. Default: 'rel'. + cooldown (int): Number of epochs to wait before resuming + normal operation after lr has been reduced. Default: 0. + min_lr (float or list): A scalar or a list of scalars. A + lower bound on the learning rate of all param groups + or each group respectively. Default: 0. + eps (float): Minimal decay applied to lr. If the difference + between new and old lr is smaller than eps, the update is + ignored. Default: 1e-8. + + Example: + >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) + >>> scheduler = ReduceLROnPlateau(optimizer, 'min') + >>> for epoch in range(10): + >>> train(...) + >>> val_loss = validate(...) 
+ >>> # Note that step should be called after validate() + >>> scheduler.step(val_loss) + """ + + def __init__(self, optimizer, mode='min', factor=0.1, patience=10, + verbose=False, threshold=1e-4, threshold_mode='rel', + cooldown=0, min_lr=0, eps=1e-8): + + if factor >= 1.0: + raise ValueError('Factor should be < 1.0.') + self.factor = factor + + if not isinstance(optimizer, Optimizer): + raise TypeError('{} is not an Optimizer'.format( + type(optimizer).__name__)) + self.optimizer = optimizer + + if isinstance(min_lr, list) or isinstance(min_lr, tuple): + if len(min_lr) != len(optimizer.param_groups): + raise ValueError("expected {} min_lrs, got {}".format( + len(optimizer.param_groups), len(min_lr))) + self.min_lrs = list(min_lr) + else: + self.min_lrs = [min_lr] * len(optimizer.param_groups) + + self.patience = patience + self.verbose = verbose + self.cooldown = cooldown + self.cooldown_counter = 0 + self.mode = mode + self.threshold = threshold + self.threshold_mode = threshold_mode + self.best = None + self.num_bad_epochs = None + self.mode_worse = None # the worse value for the chosen mode + self.is_better = None + self.eps = eps + self.last_epoch = -1 + self._init_is_better(mode=mode, threshold=threshold, + threshold_mode=threshold_mode) + self._reset() + + def _reset(self): + """Resets num_bad_epochs counter and cooldown counter.""" + self.best = self.mode_worse + self.cooldown_counter = 0 + self.num_bad_epochs = 0 + + def step(self, metrics, epoch=None): + # convert `metrics` to float, in case it's a zero-dim Tensor + current = float(metrics) + if epoch is None: + epoch = self.last_epoch = self.last_epoch + 1 + self.last_epoch = epoch + + if self.is_better(current, self.best): + self.best = current + self.num_bad_epochs = 0 + else: + self.num_bad_epochs += 1 + + if self.in_cooldown: + self.cooldown_counter -= 1 + self.num_bad_epochs = 0 # ignore any bad epochs in cooldown + + if self.num_bad_epochs > self.patience: + self._reduce_lr(epoch) + self.cooldown_counter = self.cooldown + self.num_bad_epochs = 0 + + def _reduce_lr(self, epoch): + for i, param_group in enumerate(self.optimizer.param_groups): + old_lr = float(param_group['lr']) + new_lr = max(old_lr * self.factor, self.min_lrs[i]) + if old_lr - new_lr > self.eps: + param_group['lr'] = new_lr + if self.verbose: + print('Epoch {:5d}: reducing learning rate' + ' of group {} to {:.4e}.'.format(epoch, i, new_lr)) + + @property + def in_cooldown(self): + return self.cooldown_counter > 0 + + def _cmp(self, mode, threshold_mode, threshold, a, best): + if mode == 'min' and threshold_mode == 'rel': + rel_epsilon = 1. - threshold + return a < best * rel_epsilon + + elif mode == 'min' and threshold_mode == 'abs': + return a < best - threshold + + elif mode == 'max' and threshold_mode == 'rel': + rel_epsilon = threshold + 1. 
+ return a > best * rel_epsilon + + else: # mode == 'max' and epsilon_mode == 'abs': + return a > best + threshold + + def _init_is_better(self, mode, threshold, threshold_mode): + if mode not in {'min', 'max'}: + raise ValueError('mode ' + mode + ' is unknown!') + if threshold_mode not in {'rel', 'abs'}: + raise ValueError('threshold mode ' + threshold_mode + ' is unknown!') + + if mode == 'min': + self.mode_worse = inf + else: # mode == 'max': + self.mode_worse = -inf + + self.is_better = partial(self._cmp, mode, threshold_mode, threshold) + + def state_dict(self): + return {key: value for key, value in self.__dict__.items() if key not in {'optimizer', 'is_better'}} + + def load_state_dict(self, state_dict): + self.__dict__.update(state_dict) + self._init_is_better(mode=self.mode, threshold=self.threshold, threshold_mode=self.threshold_mode)
    + + +
    [docs]class CyclicLR(_LRScheduler): + """Sets the learning rate of each parameter group according to + cyclical learning rate policy (CLR). The policy cycles the learning + rate between two boundaries with a constant frequency, as detailed in + the paper `Cyclical Learning Rates for Training Neural Networks`_. + The distance between the two boundaries can be scaled on a per-iteration + or per-cycle basis. + + Cyclical learning rate policy changes the learning rate after every batch. + `step` should be called after a batch has been used for training. + + This class has three built-in policies, as put forth in the paper: + "triangular": + A basic triangular cycle w/ no amplitude scaling. + "triangular2": + A basic triangular cycle that scales initial amplitude by half each cycle. + "exp_range": + A cycle that scales initial amplitude by gamma**(cycle iterations) at each + cycle iteration. + + This implementation was adapted from the github repo: `bckenstler/CLR`_ + + Args: + optimizer (Optimizer): Wrapped optimizer. + base_lr (float or list): Initial learning rate which is the + lower boundary in the cycle for each parameter group. + max_lr (float or list): Upper learning rate boundaries in the cycle + for each parameter group. Functionally, + it defines the cycle amplitude (max_lr - base_lr). + The lr at any cycle is the sum of base_lr + and some scaling of the amplitude; therefore + max_lr may not actually be reached depending on + scaling function. + step_size_up (int): Number of training iterations in the + increasing half of a cycle. Default: 2000 + step_size_down (int): Number of training iterations in the + decreasing half of a cycle. If step_size_down is None, + it is set to step_size_up. Default: None + mode (str): One of {triangular, triangular2, exp_range}. + Values correspond to policies detailed above. + If scale_fn is not None, this argument is ignored. + Default: 'triangular' + gamma (float): Constant in 'exp_range' scaling function: + gamma**(cycle iterations) + Default: 1.0 + scale_fn (function): Custom scaling policy defined by a single + argument lambda function, where + 0 <= scale_fn(x) <= 1 for all x >= 0. + If specified, then 'mode' is ignored. + Default: None + scale_mode (str): {'cycle', 'iterations'}. + Defines whether scale_fn is evaluated on + cycle number or cycle iterations (training + iterations since start of cycle). + Default: 'cycle' + cycle_momentum (bool): If ``True``, momentum is cycled inversely + to learning rate between 'base_momentum' and 'max_momentum'. + Default: True + base_momentum (float or list): Lower momentum boundaries in the cycle + for each parameter group. Note that momentum is cycled inversely + to learning rate; at the peak of a cycle, momentum is + 'base_momentum' and learning rate is 'max_lr'. + Default: 0.8 + max_momentum (float or list): Upper momentum boundaries in the cycle + for each parameter group. Functionally, + it defines the cycle amplitude (max_momentum - base_momentum). + The momentum at any cycle is the difference of max_momentum + and some scaling of the amplitude; therefore + base_momentum may not actually be reached depending on + scaling function. Note that momentum is cycled inversely + to learning rate; at the start of a cycle, momentum is 'max_momentum' + and learning rate is 'base_lr' + Default: 0.9 + last_epoch (int): The index of the last batch. This parameter is used when + resuming a training job. 
Since `step()` should be invoked after each + batch instead of after each epoch, this number represents the total + number of *batches* computed, not the total number of epochs computed. + When last_epoch=-1, the schedule is started from the beginning. + Default: -1 + + Example: + >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) + >>> scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.01, max_lr=0.1) + >>> data_loader = torch.utils.data.DataLoader(...) + >>> for epoch in range(10): + >>> for batch in data_loader: + >>> train_batch(...) + >>> scheduler.step() + + + .. _Cyclical Learning Rates for Training Neural Networks: https://arxiv.org/abs/1506.01186 + .. _bckenstler/CLR: https://github.com/bckenstler/CLR + """ + + def __init__(self, + optimizer, + base_lr, + max_lr, + step_size_up=2000, + step_size_down=None, + mode='triangular', + gamma=1., + scale_fn=None, + scale_mode='cycle', + cycle_momentum=True, + base_momentum=0.8, + max_momentum=0.9, + last_epoch=-1): + + if not isinstance(optimizer, Optimizer): + raise TypeError('{} is not an Optimizer'.format( + type(optimizer).__name__)) + self.optimizer = optimizer + + base_lrs = self._format_param('base_lr', optimizer, base_lr) + if last_epoch == -1: + for lr, group in zip(base_lrs, optimizer.param_groups): + group['lr'] = lr + + self.max_lrs = self._format_param('max_lr', optimizer, max_lr) + + step_size_up = float(step_size_up) + step_size_down = float(step_size_down) if step_size_down is not None else step_size_up + self.total_size = step_size_up + step_size_down + self.step_ratio = step_size_up / self.total_size + + if mode not in ['triangular', 'triangular2', 'exp_range'] \ + and scale_fn is None: + raise ValueError('mode is invalid and scale_fn is None') + + self.mode = mode + self.gamma = gamma + + if scale_fn is None: + if self.mode == 'triangular': + self.scale_fn = self._triangular_scale_fn + self.scale_mode = 'cycle' + elif self.mode == 'triangular2': + self.scale_fn = self._triangular2_scale_fn + self.scale_mode = 'cycle' + elif self.mode == 'exp_range': + self.scale_fn = self._exp_range_scale_fn + self.scale_mode = 'iterations' + else: + self.scale_fn = scale_fn + self.scale_mode = scale_mode + + self.cycle_momentum = cycle_momentum + if cycle_momentum: + if 'momentum' not in optimizer.defaults: + raise ValueError('optimizer must support momentum with `cycle_momentum` option enabled') + + base_momentums = self._format_param('base_momentum', optimizer, base_momentum) + if last_epoch == -1: + for momentum, group in zip(base_momentums, optimizer.param_groups): + group['momentum'] = momentum + self.base_momentums = list(map(lambda group: group['momentum'], optimizer.param_groups)) + self.max_momentums = self._format_param('max_momentum', optimizer, max_momentum) + + super(CyclicLR, self).__init__(optimizer, last_epoch) + + def _format_param(self, name, optimizer, param): + """Return correctly formatted lr/momentum for each param group.""" + if isinstance(param, (list, tuple)): + if len(param) != len(optimizer.param_groups): + raise ValueError("expected {} values for {}, got {}".format( + len(optimizer.param_groups), name, len(param))) + return param + else: + return [param] * len(optimizer.param_groups) + + def _triangular_scale_fn(self, x): + return 1. + + def _triangular2_scale_fn(self, x): + return 1 / (2. ** (x - 1)) + + def _exp_range_scale_fn(self, x): + return self.gamma**(x) + +
    [docs] def get_lr(self): + """Calculates the learning rate at batch index. This function treats + `self.last_epoch` as the last batch index. + + If `self.cycle_momentum` is ``True``, this function has a side effect of + updating the optimizer's momentum. + """ + cycle = math.floor(1 + self.last_epoch / self.total_size) + x = 1. + self.last_epoch / self.total_size - cycle + if x <= self.step_ratio: + scale_factor = x / self.step_ratio + else: + scale_factor = (x - 1) / (self.step_ratio - 1) + + lrs = [] + for base_lr, max_lr in zip(self.base_lrs, self.max_lrs): + base_height = (max_lr - base_lr) * scale_factor + if self.scale_mode == 'cycle': + lr = base_lr + base_height * self.scale_fn(cycle) + else: + lr = base_lr + base_height * self.scale_fn(self.last_epoch) + lrs.append(lr) + + if self.cycle_momentum: + momentums = [] + for base_momentum, max_momentum in zip(self.base_momentums, self.max_momentums): + base_height = (max_momentum - base_momentum) * scale_factor + if self.scale_mode == 'cycle': + momentum = max_momentum - base_height * self.scale_fn(cycle) + else: + momentum = max_momentum - base_height * self.scale_fn(self.last_epoch) + momentums.append(momentum) + for param_group, momentum in zip(self.optimizer.param_groups, momentums): + param_group['momentum'] = momentum + + return lrs
+
+
+class CosineAnnealingWarmRestarts(_LRScheduler):
+    r"""Set the learning rate of each parameter group using a cosine annealing
+    schedule, where :math:`\eta_{max}` is set to the initial lr, :math:`T_{cur}`
+    is the number of epochs since the last restart and :math:`T_{i}` is the number
+    of epochs between two warm restarts in SGDR:
+
+    .. math::
+        \eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})(1 +
+        \cos(\frac{T_{cur}}{T_{i}}\pi))
+
+    When :math:`T_{cur}=T_{i}`, set :math:`\eta_t = \eta_{min}`.
+    When :math:`T_{cur}=0` (after restart), set :math:`\eta_t=\eta_{max}`.
+
+    It has been proposed in
+    `SGDR: Stochastic Gradient Descent with Warm Restarts`_.
+
+    Args:
+        optimizer (Optimizer): Wrapped optimizer.
+        T_0 (int): Number of iterations for the first restart.
+        T_mult (int, optional): A factor by which :math:`T_{i}` increases after a restart. Default: 1.
+        eta_min (float, optional): Minimum learning rate. Default: 0.
+        last_epoch (int, optional): The index of the last epoch. Default: -1.
+
+    .. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
+        https://arxiv.org/abs/1608.03983
+    """
+
+    def __init__(self, optimizer, T_0, T_mult=1, eta_min=0, last_epoch=-1):
+        if T_0 <= 0 or not isinstance(T_0, int):
+            raise ValueError("Expected positive integer T_0, but got {}".format(T_0))
+        if T_mult < 1 or not isinstance(T_mult, int):
+            raise ValueError("Expected integer T_mult >= 1, but got {}".format(T_mult))
+        self.T_0 = T_0
+        self.T_i = T_0
+        self.T_mult = T_mult
+        self.eta_min = eta_min
+        super(CosineAnnealingWarmRestarts, self).__init__(optimizer, last_epoch)
+        self.T_cur = last_epoch
+
+    def get_lr(self):
+        return [self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * self.T_cur / self.T_i)) / 2
+                for base_lr in self.base_lrs]
+
+    def step(self, epoch=None):
+        """Step could be called after every batch update.
+
+        Example:
+            >>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult)
+            >>> iters = len(dataloader)
+            >>> for epoch in range(20):
+            >>>     for i, sample in enumerate(dataloader):
+            >>>         inputs, labels = sample['inputs'], sample['labels']
+            >>>         scheduler.step(epoch + i / iters)
+            >>>         optimizer.zero_grad()
+            >>>         outputs = net(inputs)
+            >>>         loss = criterion(outputs, labels)
+            >>>         loss.backward()
+            >>>         optimizer.step()
+
+        This function can be called in an interleaved way.
+
+        Example:
+            >>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult)
+            >>> for epoch in range(20):
+            >>>     scheduler.step()
+            >>> scheduler.step(26)
+            >>> scheduler.step()  # equivalent to scheduler.step(27), not scheduler.step(20)
+        """
+        if epoch is None:
+            epoch = self.last_epoch + 1
+            self.T_cur = self.T_cur + 1
+            if self.T_cur >= self.T_i:
+                self.T_cur = self.T_cur - self.T_i
+                self.T_i = self.T_i * self.T_mult
+        else:
+            if epoch < 0:
+                raise ValueError("Expected non-negative epoch, but got {}".format(epoch))
+            if epoch >= self.T_0:
+                if self.T_mult == 1:
+                    self.T_cur = epoch % self.T_0
+                else:
+                    n = int(math.log((epoch / self.T_0 * (self.T_mult - 1) + 1), self.T_mult))
+                    self.T_cur = epoch - self.T_0 * (self.T_mult ** n - 1) / (self.T_mult - 1)
+                    self.T_i = self.T_0 * self.T_mult ** n
+            else:
+                self.T_i = self.T_0
+                self.T_cur = epoch
+        self.last_epoch = math.floor(epoch)
+        for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
+            param_group['lr'] = lr
+
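To make the restart arithmetic concrete: with the illustrative values T_0=10 and T_mult=2, T_i grows as 10, 20, 40, so restarts fall after epochs 10, 30, and 70 (`train_one_epoch` is a placeholder):

    >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    >>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=2)
    >>> for epoch in range(70):
    >>>     train_one_epoch(...)   # hypothetical training loop body
    >>>     scheduler.step()       # lr anneals over 10, then 20, then 40 epochs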
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/optim/optimizer.html b/docs/stable/_modules/torch/optim/optimizer.html
new file mode 100644
index 000000000000..6897119b0285
--- /dev/null
+++ b/docs/stable/_modules/torch/optim/optimizer.html
@@ -0,0 +1,732 @@
    Source code for torch.optim.optimizer

    +from collections import defaultdict
    +from torch._six import container_abcs
    +
    +import torch
    +from copy import deepcopy
    +from itertools import chain
    +
    +
    +class _RequiredParameter(object):
    +    """Singleton class representing a required parameter for an Optimizer."""
    +    def __repr__(self):
    +        return "<required parameter>"
    +
    +required = _RequiredParameter()
    +
    +
    +
+class Optimizer(object):
+    r"""Base class for all optimizers.
+
+    .. warning::
+        Parameters need to be specified as collections that have a deterministic
+        ordering that is consistent between runs. Examples of objects that don't
+        satisfy those properties are sets and iterators over values of dictionaries.
+
+    Arguments:
+        params (iterable): an iterable of :class:`torch.Tensor` s or
+            :class:`dict` s. Specifies what Tensors should be optimized.
+        defaults (dict): a dict containing default values of optimization
+            options (used when a parameter group doesn't specify them).
+    """
+
+    def __init__(self, params, defaults):
+        torch._C._log_api_usage_once("python.optimizer")
+        self.defaults = defaults
+
+        if isinstance(params, torch.Tensor):
+            raise TypeError("params argument given to the optimizer should be "
+                            "an iterable of Tensors or dicts, but got " +
+                            torch.typename(params))
+
+        self.state = defaultdict(dict)
+        self.param_groups = []
+
+        param_groups = list(params)
+        if len(param_groups) == 0:
+            raise ValueError("optimizer got an empty parameter list")
+        if not isinstance(param_groups[0], dict):
+            param_groups = [{'params': param_groups}]
+
+        for param_group in param_groups:
+            self.add_param_group(param_group)
+
+    def __getstate__(self):
+        return {
+            'defaults': self.defaults,
+            'state': self.state,
+            'param_groups': self.param_groups,
+        }
+
+    def __setstate__(self, state):
+        self.__dict__.update(state)
+
+    def __repr__(self):
+        format_string = self.__class__.__name__ + ' ('
+        for i, group in enumerate(self.param_groups):
+            format_string += '\n'
+            format_string += 'Parameter Group {0}\n'.format(i)
+            for key in sorted(group.keys()):
+                if key != 'params':
+                    format_string += '    {0}: {1}\n'.format(key, group[key])
+        format_string += ')'
+        return format_string
+
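A sketch of the two accepted forms of `params`, and of how `defaults` fills in per-group options (the submodule names are illustrative):

    >>> # plain iterable of Tensors: a single group, all options from defaults
    >>> opt = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.9)
    >>> # list of dicts: per-group overrides; unspecified options fall back to defaults
    >>> opt = torch.optim.SGD(
    >>>     [{'params': model.base.parameters()},
    >>>      {'params': model.head.parameters(), 'lr': 1e-3}],
    >>>     lr=1e-2, momentum=0.9)  # head group uses lr=1e-3; momentum=0.9 everywhere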
    [docs] def state_dict(self): + r"""Returns the state of the optimizer as a :class:`dict`. + + It contains two entries: + + * state - a dict holding current optimization state. Its content + differs between optimizer classes. + * param_groups - a dict containing all parameter groups + """ + # Save ids instead of Tensors + def pack_group(group): + packed = {k: v for k, v in group.items() if k != 'params'} + packed['params'] = [id(p) for p in group['params']] + return packed + param_groups = [pack_group(g) for g in self.param_groups] + # Remap state to use ids as keys + packed_state = {(id(k) if isinstance(k, torch.Tensor) else k): v + for k, v in self.state.items()} + return { + 'state': packed_state, + 'param_groups': param_groups, + }
    + +
    [docs] def load_state_dict(self, state_dict): + r"""Loads the optimizer state. + + Arguments: + state_dict (dict): optimizer state. Should be an object returned + from a call to :meth:`state_dict`. + """ + # deepcopy, to be consistent with module API + state_dict = deepcopy(state_dict) + # Validate the state_dict + groups = self.param_groups + saved_groups = state_dict['param_groups'] + + if len(groups) != len(saved_groups): + raise ValueError("loaded state dict has a different number of " + "parameter groups") + param_lens = (len(g['params']) for g in groups) + saved_lens = (len(g['params']) for g in saved_groups) + if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)): + raise ValueError("loaded state dict contains a parameter group " + "that doesn't match the size of optimizer's group") + + # Update the state + id_map = {old_id: p for old_id, p in + zip(chain(*(g['params'] for g in saved_groups)), + chain(*(g['params'] for g in groups)))} + + def cast(param, value): + r"""Make a deep copy of value, casting all tensors to device of param.""" + if isinstance(value, torch.Tensor): + # Floating-point types are a bit special here. They are the only ones + # that are assumed to always match the type of params. + if param.is_floating_point(): + value = value.to(param.dtype) + value = value.to(param.device) + return value + elif isinstance(value, dict): + return {k: cast(param, v) for k, v in value.items()} + elif isinstance(value, container_abcs.Iterable): + return type(value)(cast(param, v) for v in value) + else: + return value + + # Copy state assigned to params (and cast tensors to appropriate types). + # State that is not assigned to params is copied as is (needed for + # backward compatibility). + state = defaultdict(dict) + for k, v in state_dict['state'].items(): + if k in id_map: + param = id_map[k] + state[param] = cast(param, v) + else: + state[k] = v + + # Update parameter groups, setting their 'params' value + def update_group(group, new_group): + new_group['params'] = group['params'] + return new_group + param_groups = [ + update_group(g, ng) for g, ng in zip(groups, saved_groups)] + self.__setstate__({'state': state, 'param_groups': param_groups})
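Together, these two methods give a checkpoint round trip; a sketch (the file name is illustrative):

    >>> opt = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
    >>> torch.save(opt.state_dict(), 'opt.pt')     # after some training steps
    >>> opt = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
    >>> opt.load_state_dict(torch.load('opt.pt'))  # per-parameter state, e.g. momentum buffers, restored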
    + +
    [docs] def zero_grad(self): + r"""Clears the gradients of all optimized :class:`torch.Tensor` s.""" + for group in self.param_groups: + for p in group['params']: + if p.grad is not None: + p.grad.detach_() + p.grad.zero_()
    + +
    [docs] def step(self, closure): + r"""Performs a single optimization step (parameter update). + + Arguments: + closure (callable): A closure that reevaluates the model and + returns the loss. Optional for most optimizers. + """ + raise NotImplementedError
    + +
    [docs] def add_param_group(self, param_group): + r"""Add a param group to the :class:`Optimizer` s `param_groups`. + + This can be useful when fine tuning a pre-trained network as frozen layers can be made + trainable and added to the :class:`Optimizer` as training progresses. + + Arguments: + param_group (dict): Specifies what Tensors should be optimized along with group + specific optimization options. + """ + assert isinstance(param_group, dict), "param group must be a dict" + + params = param_group['params'] + if isinstance(params, torch.Tensor): + param_group['params'] = [params] + elif isinstance(params, set): + raise TypeError('optimizer parameters need to be organized in ordered collections, but ' + 'the ordering of tensors in sets will change between runs. Please use a list instead.') + else: + param_group['params'] = list(params) + + for param in param_group['params']: + if not isinstance(param, torch.Tensor): + raise TypeError("optimizer can only optimize Tensors, " + "but one of the params is " + torch.typename(param)) + if not param.is_leaf: + raise ValueError("can't optimize a non-leaf Tensor") + + for name, default in self.defaults.items(): + if default is required and name not in param_group: + raise ValueError("parameter group didn't specify a value of required optimization parameter " + + name) + else: + param_group.setdefault(name, default) + + param_set = set() + for group in self.param_groups: + param_set.update(set(group['params'])) + + if not param_set.isdisjoint(set(param_group['params'])): + raise ValueError("some parameters appear in more than one parameter group") + + self.param_groups.append(param_group)
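A fine-tuning sketch of the kind the docstring describes (the `head`/`backbone` attributes are hypothetical):

    >>> optimizer = torch.optim.SGD(model.head.parameters(), lr=1e-2, momentum=0.9)
    >>> # later in training: unfreeze the backbone and give it a smaller lr
    >>> for p in model.backbone.parameters():
    >>>     p.requires_grad_(True)
    >>> optimizer.add_param_group({'params': model.backbone.parameters(), 'lr': 1e-4})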
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/optim/rmsprop.html b/docs/stable/_modules/torch/optim/rmsprop.html
new file mode 100644
index 000000000000..b2881190442c
--- /dev/null
+++ b/docs/stable/_modules/torch/optim/rmsprop.html
@@ -0,0 +1,616 @@
    Source code for torch.optim.rmsprop

    +import torch
    +from .optimizer import Optimizer
    +
    +
    +
+class RMSprop(Optimizer):
+    """Implements RMSprop algorithm.
+
+    Proposed by G. Hinton in his
+    `course <http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_.
+
+    The centered version first appears in `Generating Sequences
+    With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_.
+
+    Arguments:
+        params (iterable): iterable of parameters to optimize or dicts defining
+            parameter groups
+        lr (float, optional): learning rate (default: 1e-2)
+        momentum (float, optional): momentum factor (default: 0)
+        alpha (float, optional): smoothing constant (default: 0.99)
+        eps (float, optional): term added to the denominator to improve
+            numerical stability (default: 1e-8)
+        centered (bool, optional): if ``True``, compute the centered RMSprop:
+            the gradient is normalized by an estimate of its variance
+        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
+
+    """
+
+    def __init__(self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False):
+        if not 0.0 <= lr:
+            raise ValueError("Invalid learning rate: {}".format(lr))
+        if not 0.0 <= eps:
+            raise ValueError("Invalid epsilon value: {}".format(eps))
+        if not 0.0 <= momentum:
+            raise ValueError("Invalid momentum value: {}".format(momentum))
+        if not 0.0 <= weight_decay:
+            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
+        if not 0.0 <= alpha:
+            raise ValueError("Invalid alpha value: {}".format(alpha))
+
+        defaults = dict(lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay)
+        super(RMSprop, self).__init__(params, defaults)
+
+    def __setstate__(self, state):
+        super(RMSprop, self).__setstate__(state)
+        for group in self.param_groups:
+            group.setdefault('momentum', 0)
+            group.setdefault('centered', False)
+
    [docs] def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.data + if grad.is_sparse: + raise RuntimeError('RMSprop does not support sparse gradients') + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + state['square_avg'] = torch.zeros_like(p.data) + if group['momentum'] > 0: + state['momentum_buffer'] = torch.zeros_like(p.data) + if group['centered']: + state['grad_avg'] = torch.zeros_like(p.data) + + square_avg = state['square_avg'] + alpha = group['alpha'] + + state['step'] += 1 + + if group['weight_decay'] != 0: + grad = grad.add(group['weight_decay'], p.data) + + square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad) + + if group['centered']: + grad_avg = state['grad_avg'] + grad_avg.mul_(alpha).add_(1 - alpha, grad) + avg = square_avg.addcmul(-1, grad_avg, grad_avg).sqrt().add_(group['eps']) + else: + avg = square_avg.sqrt().add_(group['eps']) + + if group['momentum'] > 0: + buf = state['momentum_buffer'] + buf.mul_(group['momentum']).addcdiv_(grad, avg) + p.data.add_(-group['lr'], buf) + else: + p.data.addcdiv_(-group['lr'], grad, avg) + + return loss
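A minimal training-step sketch for this optimizer (`model`, `loss_fn`, `input`, and `target` are placeholders):

    >>> optimizer = torch.optim.RMSprop(model.parameters(), lr=1e-2, alpha=0.99)
    >>> optimizer.zero_grad()
    >>> loss_fn(model(input), target).backward()
    >>> optimizer.step()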
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/optim/rprop.html b/docs/stable/_modules/torch/optim/rprop.html
new file mode 100644
index 000000000000..5e6faaa7fb59
--- /dev/null
+++ b/docs/stable/_modules/torch/optim/rprop.html
@@ -0,0 +1,592 @@
    Source code for torch.optim.rprop

    +import torch
    +from .optimizer import Optimizer
    +
    +
    +
+class Rprop(Optimizer):
+    """Implements the resilient backpropagation algorithm.
+
+    Arguments:
+        params (iterable): iterable of parameters to optimize or dicts defining
+            parameter groups
+        lr (float, optional): learning rate (default: 1e-2)
+        etas (Tuple[float, float], optional): pair of (etaminus, etaplus), the
+            multiplicative decrease and increase factors
+            (default: (0.5, 1.2))
+        step_sizes (Tuple[float, float], optional): a pair of minimal and
+            maximal allowed step sizes (default: (1e-6, 50))
+    """
+
+    def __init__(self, params, lr=1e-2, etas=(0.5, 1.2), step_sizes=(1e-6, 50)):
+        if not 0.0 <= lr:
+            raise ValueError("Invalid learning rate: {}".format(lr))
+        if not 0.0 < etas[0] < 1.0 < etas[1]:
+            raise ValueError("Invalid eta values: {}, {}".format(etas[0], etas[1]))
+
+        defaults = dict(lr=lr, etas=etas, step_sizes=step_sizes)
+        super(Rprop, self).__init__(params, defaults)
+
    [docs] def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.data + if grad.is_sparse: + raise RuntimeError('Rprop does not support sparse gradients') + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + state['prev'] = torch.zeros_like(p.data) + state['step_size'] = grad.new().resize_as_(grad).fill_(group['lr']) + + etaminus, etaplus = group['etas'] + step_size_min, step_size_max = group['step_sizes'] + step_size = state['step_size'] + + state['step'] += 1 + + sign = grad.mul(state['prev']).sign() + sign[sign.gt(0)] = etaplus + sign[sign.lt(0)] = etaminus + sign[sign.eq(0)] = 1 + + # update stepsizes with step size updates + step_size.mul_(sign).clamp_(step_size_min, step_size_max) + + # for dir<0, dfdx=0 + # for dir>=0 dfdx=dfdx + grad = grad.clone() + grad[sign.eq(etaminus)] = 0 + + # update parameters + p.data.addcmul_(-1, grad.sign(), step_size) + + state['prev'].copy_(grad) + + return loss
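Because Rprop adapts each weight's step size from the sign of its gradient alone, it is normally used with full-batch gradients; a sketch (names are placeholders):

    >>> optimizer = torch.optim.Rprop(model.parameters(), lr=1e-2, etas=(0.5, 1.2))
    >>> optimizer.zero_grad()
    >>> loss_fn(model(inputs), targets).backward()  # gradients over the full dataset
    >>> optimizer.step()  # per-weight step sizes grow or shrink with the gradient sign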
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/optim/sgd.html b/docs/stable/_modules/torch/optim/sgd.html
new file mode 100644
index 000000000000..5aef0b8eb97c
--- /dev/null
+++ b/docs/stable/_modules/torch/optim/sgd.html
@@ -0,0 +1,622 @@
    Source code for torch.optim.sgd

    +import torch
    +from .optimizer import Optimizer, required
    +
    +
    +
+class SGD(Optimizer):
+    r"""Implements stochastic gradient descent (optionally with momentum).
+
+    Nesterov momentum is based on the formula from
+    `On the importance of initialization and momentum in deep learning`__.
+
+    Args:
+        params (iterable): iterable of parameters to optimize or dicts defining
+            parameter groups
+        lr (float): learning rate
+        momentum (float, optional): momentum factor (default: 0)
+        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
+        dampening (float, optional): dampening for momentum (default: 0)
+        nesterov (bool, optional): enables Nesterov momentum (default: False)
+
+    Example:
+        >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
+        >>> optimizer.zero_grad()
+        >>> loss_fn(model(input), target).backward()
+        >>> optimizer.step()
+
+    __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf
+
+    .. note::
+        The implementation of SGD with Momentum/Nesterov subtly differs from
+        Sutskever et al. and implementations in some other frameworks.
+
+        Considering the specific case of Momentum, the update can be written as
+
+        .. math::
+            v = \rho * v + g \\
+            p = p - lr * v
+
+        where p, g, v and :math:`\rho` denote the parameters, gradient,
+        velocity, and momentum respectively.
+
+        This is in contrast to Sutskever et al. and
+        other frameworks which employ an update of the form
+
+        .. math::
+            v = \rho * v + lr * g \\
+            p = p - v
+
+        The Nesterov version is analogously modified.
+    """
+
+    def __init__(self, params, lr=required, momentum=0, dampening=0,
+                 weight_decay=0, nesterov=False):
+        if lr is not required and lr < 0.0:
+            raise ValueError("Invalid learning rate: {}".format(lr))
+        if momentum < 0.0:
+            raise ValueError("Invalid momentum value: {}".format(momentum))
+        if weight_decay < 0.0:
+            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
+
+        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
+                        weight_decay=weight_decay, nesterov=nesterov)
+        if nesterov and (momentum <= 0 or dampening != 0):
+            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
+        super(SGD, self).__init__(params, defaults)
+
+    def __setstate__(self, state):
+        super(SGD, self).__setstate__(state)
+        for group in self.param_groups:
+            group.setdefault('nesterov', False)
+
    [docs] def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + weight_decay = group['weight_decay'] + momentum = group['momentum'] + dampening = group['dampening'] + nesterov = group['nesterov'] + + for p in group['params']: + if p.grad is None: + continue + d_p = p.grad.data + if weight_decay != 0: + d_p.add_(weight_decay, p.data) + if momentum != 0: + param_state = self.state[p] + if 'momentum_buffer' not in param_state: + buf = param_state['momentum_buffer'] = torch.clone(d_p).detach() + else: + buf = param_state['momentum_buffer'] + buf.mul_(momentum).add_(1 - dampening, d_p) + if nesterov: + d_p = d_p.add(momentum, buf) + else: + d_p = buf + + p.data.add_(-group['lr'], d_p) + + return loss
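To see the docstring's note concretely, take a constant gradient g = 1 with rho = 0.9 and lr = 0.1 (illustrative numbers):

    PyTorch:    v <- 0.9*v + 1,      step = lr*v  ->  0.1, 0.19, 0.271, ...
    Sutskever:  v <- 0.9*v + 0.1*1,  step = v     ->  0.1, 0.19, 0.271, ...

The trajectories coincide while lr is constant and diverge once lr changes mid-training, because this implementation rescales the whole velocity by the new lr while the Sutskever form only scales new gradient contributions.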
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/optim/sparse_adam.html b/docs/stable/_modules/torch/optim/sparse_adam.html
new file mode 100644
index 000000000000..427b3495a43c
--- /dev/null
+++ b/docs/stable/_modules/torch/optim/sparse_adam.html
@@ -0,0 +1,618 @@
    Source code for torch.optim.sparse_adam

    +import math
    +import torch
    +from .optimizer import Optimizer
    +
    +
    +
    [docs]class SparseAdam(Optimizer): + r"""Implements lazy version of Adam algorithm suitable for sparse tensors. + + In this variant, only moments that show up in the gradient get updated, and + only those portions of the gradient get applied to the parameters. + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + + .. _Adam\: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + """ + + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8): + if not 0.0 < lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 < eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + defaults = dict(lr=lr, betas=betas, eps=eps) + super(SparseAdam, self).__init__(params, defaults) + +
    [docs] def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.data + if not grad.is_sparse: + raise RuntimeError('SparseAdam does not support dense gradients, please consider Adam instead') + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p.data) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p.data) + + state['step'] += 1 + + grad = grad.coalesce() # the update is non-linear so indices must be unique + grad_indices = grad._indices() + grad_values = grad._values() + size = grad.size() + + def make_sparse(values): + constructor = grad.new + if grad_indices.dim() == 0 or values.dim() == 0: + return constructor().resize_as_(grad) + return constructor(grad_indices, values, size) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + + # Decay the first and second moment running average coefficient + # old <- b * old + (1 - b) * new + # <==> old += (1 - b) * (new - old) + old_exp_avg_values = exp_avg.sparse_mask(grad)._values() + exp_avg_update_values = grad_values.sub(old_exp_avg_values).mul_(1 - beta1) + exp_avg.add_(make_sparse(exp_avg_update_values)) + old_exp_avg_sq_values = exp_avg_sq.sparse_mask(grad)._values() + exp_avg_sq_update_values = grad_values.pow(2).sub_(old_exp_avg_sq_values).mul_(1 - beta2) + exp_avg_sq.add_(make_sparse(exp_avg_sq_update_values)) + + # Dense addition again is intended, avoiding another sparse_mask + numer = exp_avg_update_values.add_(old_exp_avg_values) + exp_avg_sq_update_values.add_(old_exp_avg_sq_values) + denom = exp_avg_sq_update_values.sqrt_().add_(group['eps']) + del exp_avg_update_values, exp_avg_sq_update_values + + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1 + + p.data.add_(make_sparse(-step_size * numer.div_(denom))) + + return loss
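A sketch pairing this optimizer with a sparse-gradient module (the embedding sizes are illustrative):

    >>> emb = torch.nn.Embedding(10000, 64, sparse=True)  # sparse=True yields sparse grads
    >>> optimizer = torch.optim.SparseAdam(emb.parameters(), lr=1e-3)
    >>> loss = emb(torch.tensor([1, 2, 3])).sum()
    >>> loss.backward()
    >>> optimizer.step()  # only rows 1, 2, 3 have their moments and values updated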
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/quasirandom.html b/docs/stable/_modules/torch/quasirandom.html
new file mode 100644
index 000000000000..0677b501a31a
--- /dev/null
+++ b/docs/stable/_modules/torch/quasirandom.html
@@ -0,0 +1,636 @@
    Source code for torch.quasirandom

    +import torch
    +
    +
    +
    [docs]class SobolEngine(object): + r""" + The :class:`torch.quasirandom.SobolEngine` is an engine for generating + (scrambled) Sobol sequences. Sobol sequences are an example of low + discrepancy quasi-random sequences. + + This implementation of an engine for Sobol sequences is capable of + sampling sequences up to a maximum dimension of 1111. It uses direction + numbers to generate these sequences, and these numbers have been adapted + from `here <http://web.maths.unsw.edu.au/~fkuo/sobol/joe-kuo-old.1111>`_. + + References: + - Art B. Owen. Scrambling Sobol and Niederreiter-Xing points. + Journal of Complexity, 14(4):466-489, December 1998. + + - I. M. Sobol. The distribution of points in a cube and the accurate + evaluation of integrals. + Zh. Vychisl. Mat. i Mat. Phys., 7:784-802, 1967. + + Args: + dimension (Int): The dimensionality of the sequence to be drawn + scramble (bool, optional): Setting this to ``True`` will produce + scrambled Sobol sequences. Scrambling is + capable of producing better Sobol + sequences. Default: ``False``. + seed (Int, optional): This is the seed for the scrambling. The seed + of the random number generator is set to this, + if specified. Default: ``None`` + + Examples:: + + >>> soboleng = torch.quasirandom.SobolEngine(dimension=5) + >>> soboleng.draw(3) + tensor([[0.5000, 0.5000, 0.5000, 0.5000, 0.5000], + [0.7500, 0.2500, 0.7500, 0.2500, 0.7500], + [0.2500, 0.7500, 0.2500, 0.7500, 0.2500]]) + """ + MAXBIT = 30 + MAXDIM = 1111 + + def __init__(self, dimension, scramble=False, seed=None): + if dimension > self.MAXDIM or dimension < 1: + raise ValueError("Supported range of dimensionality " + "for SobolEngine is [1, {}]".format(self.MAXDIM)) + + self.seed = seed + self.scramble = scramble + self.dimension = dimension + + self.sobolstate = torch.zeros(dimension, self.MAXBIT, dtype=torch.long) + torch._sobol_engine_initialize_state_(self.sobolstate, self.dimension) + + if self.scramble: + g = torch.Generator() + if self.seed is not None: + g.manual_seed(self.seed) + + self.shift = torch.mv(torch.randint(2, (self.dimension, self.MAXBIT), generator=g), + torch.pow(2, torch.arange(0, self.MAXBIT))) + + ltm = torch.randint(2, (self.dimension, self.MAXBIT, self.MAXBIT), generator=g).tril() + + torch._sobol_engine_scramble_(self.sobolstate, ltm, self.dimension) + else: + self.shift = torch.zeros(self.dimension, dtype=torch.long) + + self.quasi = self.shift.clone() + self.num_generated = 0 + +
    [docs] def draw(self, n=1, out=None, dtype=torch.float32): + r""" + Function to draw a sequence of :attr:`n` points from a Sobol sequence. + Note that the samples are dependent on the previous samples. The size + of the result is :math:`(n, dimension)`. + + Args: + n (Int, optional): The length of sequence of points to draw. + Default: 1 + out (Tensor, optional): The output tensor + dtype (:class:`torch.dtype`, optional): the desired data type of the + returned tensor. + Default: ``torch.float32`` + """ + result, self.quasi = torch._sobol_engine_draw(self.quasi, n, self.sobolstate, + self.dimension, self.num_generated, dtype=dtype) + self.num_generated += n + if out is not None: + out.resize_as_(result).copy_(result) + return out + return result
    + +
    [docs] def reset(self): + r""" + Function to reset the ``SobolEngine`` to base state. + """ + self.quasi.copy_(self.shift) + self.num_generated = 0 + return self
    + +
    [docs] def fast_forward(self, n): + r""" + Function to fast-forward the state of the ``SobolEngine`` by + :attr:`n` steps. This is equivalent to drawing :attr:`n` samples + without using the samples. + + Args: + n (Int): The number of steps to fast-forward by. + """ + torch._sobol_engine_ff_(self.quasi, n, self.sobolstate, self.dimension, self.num_generated) + self.num_generated += n + return self
    + + def __repr__(self): + fmt_string = ['dimension={}'.format(self.dimension)] + if self.scramble: + fmt_string += ['scramble=True'] + if self.seed is not None: + fmt_string += ['seed={}'.format(self.seed)] + return self.__class__.__name__ + '(' + ', '.join(fmt_string) + ')'
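A sketch of how `draw`, `fast_forward`, and `reset` interact (dimension and counts are illustrative):

    >>> eng = torch.quasirandom.SobolEngine(dimension=2, scramble=True, seed=0)
    >>> eng.fast_forward(8)  # skip the first 8 points without materializing them
    >>> x = eng.draw(4)      # points 9..12 of the sequence, shape (4, 2)
    >>> eng.reset()          # back to the base state
    >>> y = eng.draw(1)      # the first point again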
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/random.html b/docs/stable/_modules/torch/random.html
new file mode 100644
index 000000000000..07aa39991713
--- /dev/null
+++ b/docs/stable/_modules/torch/random.html
@@ -0,0 +1,635 @@
    Source code for torch.random

    +import contextlib
    +import warnings
    +
    +from torch._C import default_generator
    +
    +
    +
    [docs]def set_rng_state(new_state): + r"""Sets the random number generator state. + + Args: + new_state (torch.ByteTensor): The desired state + """ + default_generator.set_state(new_state)
    + + +
    [docs]def get_rng_state(): + r"""Returns the random number generator state as a `torch.ByteTensor`.""" + return default_generator.get_state()
    + + +
    [docs]def manual_seed(seed): + r"""Sets the seed for generating random numbers. Returns a + `torch.Generator` object. + + Args: + seed (int): The desired seed. + """ + seed = int(seed) + import torch.cuda + + if not torch.cuda._in_bad_fork: + torch.cuda.manual_seed_all(seed) + + return default_generator.manual_seed(seed)
    + + +
    [docs]def seed(): + r"""Sets the seed for generating random numbers to a non-deterministic + random number. Returns a 64 bit number used to seed the RNG. + """ + seed = default_generator.seed() + import torch.cuda + + if not torch.cuda._in_bad_fork: + torch.cuda.manual_seed_all(seed) + + return seed
    + + +
    [docs]def initial_seed(): + r"""Returns the initial seed for generating random numbers as a + Python `long`. + """ + return default_generator.initial_seed()
    + + +_fork_rng_warned_already = False + + +@contextlib.contextmanager +def fork_rng(devices=None, enabled=True, _caller="fork_rng", _devices_kw="devices"): + """ + Forks the RNG, so that when you return, the RNG is reset + to the state that it was previously in. + + Arguments: + devices (iterable of CUDA IDs): CUDA devices for which to fork + the RNG. CPU RNG state is always forked. By default, :meth:`fork_rng` operates + on all devices, but will emit a warning if your machine has a lot + of devices, since this function will run very slowly in that case. + If you explicitly specify devices, this warning will be suppressed + enabled (bool): if ``False``, the RNG is not forked. This is a convenience + argument for easily disabling the context manager without having + to delete it and unindent your Python code under it. + """ + + import torch.cuda + global _fork_rng_warned_already + + # Internal arguments: + # _caller: the function which called fork_rng, which the user used + # _devices_kw: the devices keyword of _caller + + if not enabled: + yield + return + + if devices is None: + num_devices = torch.cuda.device_count() + if num_devices > 1 and not _fork_rng_warned_already: + warnings.warn( + ("CUDA reports that you have {num_devices} available devices, and you " + "have used {caller} without explicitly specifying which devices are being used. " + "For safety, we initialize *every* CUDA device by default, which " + "can be quite slow if you have a lot of GPUs. If you know that you are only " + "making use of a few CUDA devices, set the environment variable CUDA_VISIBLE_DEVICES " + "or the '{devices_kw}' keyword argument of {caller} with the set of devices " + "you are actually using. For example, if you are using CPU only, " + "set CUDA_VISIBLE_DEVICES= or devices=[]; if you are using " + "GPU 0 only, set CUDA_VISIBLE_DEVICES=0 or devices=[0]. To initialize " + "all devices and suppress this warning, set the '{devices_kw}' keyword argument " + "to `range(torch.cuda.device_count())`." + ).format(num_devices=num_devices, caller=_caller, devices_kw=_devices_kw)) + _fork_rng_warned_already = True + devices = list(range(num_devices)) + else: + # Protect against user passing us a generator; we need to traverse this + # multiple times but a generator will be exhausted upon first traversal + devices = list(devices) + + cpu_rng_state = torch.get_rng_state() + gpu_rng_states = [] + for device in devices: + gpu_rng_states.append(torch.cuda.get_rng_state(device)) + + try: + yield + finally: + torch.set_rng_state(cpu_rng_state) + for device, gpu_rng_state in zip(devices, gpu_rng_states): + torch.cuda.set_rng_state(gpu_rng_state, device) +
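A usage sketch of the context manager (CPU-only here, so no CUDA states are forked):

    >>> torch.manual_seed(0)
    >>> before = torch.get_rng_state()
    >>> with torch.random.fork_rng(devices=[]):
    >>>     noise = torch.randn(3)  # consumes RNG state only inside the fork
    >>> assert torch.equal(torch.get_rng_state(), before)  # state restored on exit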
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/serialization.html b/docs/stable/_modules/torch/serialization.html
new file mode 100644
index 000000000000..a186d363e20d
--- /dev/null
+++ b/docs/stable/_modules/torch/serialization.html
@@ -0,0 +1,1098 @@
    Source code for torch.serialization

    +import difflib
    +import inspect
    +import os
    +import io
    +import shutil
    +import struct
    +import sys
    +import torch
    +import tarfile
    +import zipfile
    +import tempfile
    +import warnings
    +from contextlib import closing, contextmanager
    +from ._utils import _import_dotted_name
    +from ._six import string_classes as _string_classes
    +if sys.version_info[0] == 2:
    +    import cPickle as pickle
    +else:
    +    import pickle
    +    import pathlib
    +
    +DEFAULT_PROTOCOL = 2
    +
    +LONG_SIZE = struct.Struct('=l').size
    +INT_SIZE = struct.Struct('=i').size
    +SHORT_SIZE = struct.Struct('=h').size
    +
    +MAGIC_NUMBER = 0x1950a86a20f9469cfc6c
    +PROTOCOL_VERSION = 1001
    +STORAGE_KEY_SEPARATOR = ','
    +
    +
    +class SourceChangeWarning(Warning):
    +    pass
    +
    +
    +@contextmanager
    +def mkdtemp():
    +    path = tempfile.mkdtemp()
    +    yield path
    +    shutil.rmtree(path)
    +
    +
    +_package_registry = []
    +
    +
    +def register_package(priority, tagger, deserializer):
    +    queue_elem = (priority, tagger, deserializer)
    +    _package_registry.append(queue_elem)
    +    _package_registry.sort()
    +
    +
    +def _cpu_tag(obj):
    +    if type(obj).__module__ == 'torch':
    +        return 'cpu'
    +
    +
    +def _cuda_tag(obj):
    +    if type(obj).__module__ == 'torch.cuda':
    +        return 'cuda:' + str(obj.get_device())
    +
    +
    +def _cpu_deserialize(obj, location):
    +    if location == 'cpu':
    +        return obj
    +
    +
    +def validate_cuda_device(location):
    +    if isinstance(location, torch.device):
    +        location = str(location)
    +    if not isinstance(location, _string_classes):
    +        raise ValueError("location should be a string or torch.device")
    +    if location[5:] == '':
    +        device = 0
    +    else:
    +        device = max(int(location[5:]), 0)
    +
    +    if not torch.cuda.is_available():
    +        raise RuntimeError('Attempting to deserialize object on a CUDA '
    +                           'device but torch.cuda.is_available() is False. '
    +                           'If you are running on a CPU-only machine, '
    +                           'please use torch.load with map_location=torch.device(\'cpu\') '
    +                           'to map your storages to the CPU.')
    +    if device >= torch.cuda.device_count():
    +        raise RuntimeError('Attempting to deserialize object on CUDA device '
    +                           '{} but torch.cuda.device_count() is {}. Please use '
    +                           'torch.load with map_location to map your storages '
    +                           'to an existing device.'.format(
    +                               device, torch.cuda.device_count()))
    +    return device
    +
    +
    +def _cuda_deserialize(obj, location):
    +    if location.startswith('cuda'):
    +        device = validate_cuda_device(location)
    +        if getattr(obj, "_torch_load_uninitialized", False):
    +            storage_type = getattr(torch.cuda, type(obj).__name__)
    +            with torch.cuda.device(device):
    +                return storage_type(obj.size())
    +        else:
    +            return obj.cuda(device)
    +
    +
    +register_package(10, _cpu_tag, _cpu_deserialize)
    +register_package(20, _cuda_tag, _cuda_deserialize)
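A sketch of the extension hook this registry enables (the 'mydevice' tag, tagger, and mover are purely hypothetical):

    >>> def _mydevice_tag(storage):
    >>>     return None  # defer to the builtin cpu/cuda taggers
    >>> def _mydevice_deserialize(storage, location):
    >>>     if location == 'mydevice':
    >>>         return storage  # move the storage onto the custom backend here
    >>> torch.serialization.register_package(30, _mydevice_tag, _mydevice_deserialize)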
    +
    +
    +def location_tag(storage):
    +    for _, tagger, _ in _package_registry:
    +        location = tagger(storage)
    +        if location:
    +            return location
    +    raise RuntimeError("don't know how to determine data location of " +
    +                       torch.typename(storage))
    +
    +
    +def default_restore_location(storage, location):
    +    for _, _, fn in _package_registry:
    +        result = fn(storage, location)
    +        if result is not None:
    +            return result
    +    raise RuntimeError("don't know how to restore data location of " +
    +                       torch.typename(storage) + " (tagged with " +
    +                       location + ")")
    +
    +
    +def normalize_storage_type(storage_type):
    +    return getattr(torch, storage_type.__name__)
    +
    +
    +def storage_to_tensor_type(storage):
    +    storage_type = type(storage)
    +    module = _import_dotted_name(storage_type.__module__)
    +    return getattr(module, storage_type.__name__.replace('Storage', 'Tensor'))
    +
    +
    +def _with_file_like(f, mode, body):
    +    """
    +    Executes a body function with a file object for f, opening
    +    it in 'mode' if it is a string filename.
    +    """
    +    new_fd = False
    +    if isinstance(f, str) or \
    +            (sys.version_info[0] == 2 and isinstance(f, unicode)) or \
    +            (sys.version_info[0] == 3 and isinstance(f, pathlib.Path)):
    +        new_fd = True
    +        f = open(f, mode)
    +    try:
    +        return body(f)
    +    finally:
    +        if new_fd:
    +            f.close()
    +
    +
    +def _is_compressed_file(f):
    +    compress_modules = ['gzip']
    +    try:
    +        return f.__module__ in compress_modules
    +    except AttributeError:
    +        return False
    +
    +
    +def _should_read_directly(f):
    +    """
    +    Checks if f is a file that should be read directly. It should be read
+    directly if it is backed by a real file (has a fileno) and is not
+    a compressed file (e.g. gzip)
    +    """
    +    if _is_compressed_file(f):
    +        return False
    +    try:
    +        return f.fileno() >= 0
    +    except io.UnsupportedOperation:
    +        return False
    +    except AttributeError:
    +        return False
    +
    +
    +def _check_seekable(f):
    +
    +    def raise_err_msg(patterns, e):
    +        for p in patterns:
    +            if p in str(e):
    +                msg = (str(e) + ". You can only torch.load from a file that is seekable." +
    +                                " Please pre-load the data into a buffer like io.BytesIO and" +
    +                                " try to load from it instead.")
    +                raise type(e)(msg)
    +        raise e
    +
    +    try:
    +        f.seek(f.tell())
    +        return True
    +    except (io.UnsupportedOperation, AttributeError) as e:
    +        raise_err_msg(["seek", "tell"], e)
    +
    +
    +
    [docs]def save(obj, f, pickle_module=pickle, pickle_protocol=DEFAULT_PROTOCOL): + """Saves an object to a disk file. + + See also: :ref:`recommend-saving-models` + + Args: + obj: saved object + f: a file-like object (has to implement write and flush) or a string + containing a file name + pickle_module: module used for pickling metadata and objects + pickle_protocol: can be specified to override the default protocol + + .. warning:: + If you are using Python 2, :func:`torch.save` does NOT support :class:`StringIO.StringIO` + as a valid file-like object. This is because the write method should return + the number of bytes written; :meth:`StringIO.write()` does not do this. + + Please use something like :class:`io.BytesIO` instead. + + Example: + >>> # Save to file + >>> x = torch.tensor([0, 1, 2, 3, 4]) + >>> torch.save(x, 'tensor.pt') + >>> # Save to io.BytesIO buffer + >>> buffer = io.BytesIO() + >>> torch.save(x, buffer) + """ + return _with_file_like(f, "wb", lambda f: _save(obj, f, pickle_module, pickle_protocol))
    + + +def _save(obj, f, pickle_module, pickle_protocol): + if sys.version_info[0] == 2: + import StringIO + if isinstance(f, StringIO.StringIO): + msg = ('torch.save received unsupported StringIO.StringIO file object, whose ' + 'write method does not return the number of bytes written. ' + 'Please use something like io.BytesIO for torch.save instead.') + raise RuntimeError(msg) + + import torch.nn as nn + serialized_container_types = {} + serialized_storages = {} + + def persistent_id(obj): + # FIXME: the docs say that persistent_id should only return a string + # but torch store returns tuples. This works only in the binary protocol + # see + # https://docs.python.org/2/library/pickle.html#pickling-and-unpickling-external-objects + # https://github.com/python/cpython/blob/master/Lib/pickle.py#L527-L537 + if isinstance(obj, type) and issubclass(obj, nn.Module): + if obj in serialized_container_types: + return None + serialized_container_types[obj] = True + source_file = source = None + try: + source_file = inspect.getsourcefile(obj) + source = inspect.getsource(obj) + except Exception: # saving the source is optional, so we can ignore any errors + warnings.warn("Couldn't retrieve source code for container of " + "type " + obj.__name__ + ". It won't be checked " + "for correctness upon loading.") + return ('module', obj, source_file, source) + elif torch.is_storage(obj): + storage_type = normalize_storage_type(type(obj)) + # Offset is always 0, but we keep it for backwards compatibility + # with the old serialization format (which supported storage views) + offset = 0 + obj_key = str(obj._cdata) + location = location_tag(obj) + serialized_storages[obj_key] = obj + is_view = obj._cdata != obj._cdata + if is_view: + view_metadata = (str(obj._cdata), offset, obj.size()) + else: + view_metadata = None + + return ('storage', + storage_type, + obj_key, + location, + obj.size(), + view_metadata) + return None + + sys_info = dict( + protocol_version=PROTOCOL_VERSION, + little_endian=sys.byteorder == 'little', + type_sizes=dict( + short=SHORT_SIZE, + int=INT_SIZE, + long=LONG_SIZE, + ), + ) + + pickle_module.dump(MAGIC_NUMBER, f, protocol=pickle_protocol) + pickle_module.dump(PROTOCOL_VERSION, f, protocol=pickle_protocol) + pickle_module.dump(sys_info, f, protocol=pickle_protocol) + pickler = pickle_module.Pickler(f, protocol=pickle_protocol) + pickler.persistent_id = persistent_id + pickler.dump(obj) + + serialized_storage_keys = sorted(serialized_storages.keys()) + pickle_module.dump(serialized_storage_keys, f, protocol=pickle_protocol) + f.flush() + for key in serialized_storage_keys: + serialized_storages[key]._write_file(f, _should_read_directly(f)) + + +
+def load(f, map_location=None, pickle_module=pickle, **pickle_load_args):
+    """Loads an object saved with :func:`torch.save` from a file.
+
+    :func:`torch.load` uses Python's unpickling facilities but treats storages,
+    which underlie tensors, specially. They are first deserialized on the
+    CPU and are then moved to the device they were saved from. If this fails
+    (e.g. because the run time system doesn't have certain devices), an exception
+    is raised. However, storages can be dynamically remapped to an alternative
+    set of devices using the :attr:`map_location` argument.
+
+    If :attr:`map_location` is a callable, it will be called once for each serialized
+    storage with two arguments: storage and location. The storage argument
+    will be the initial deserialization of the storage, residing on the CPU.
+    Each serialized storage has a location tag associated with it which
+    identifies the device it was saved from, and this tag is the second
+    argument passed to :attr:`map_location`. The builtin location tags are ``'cpu'``
+    for CPU tensors and ``'cuda:device_id'`` (e.g. ``'cuda:2'``) for CUDA tensors.
+    :attr:`map_location` should return either ``None`` or a storage. If
+    :attr:`map_location` returns a storage, it will be used as the final deserialized
+    object, already moved to the right device. Otherwise, :func:`torch.load` will
+    fall back to the default behavior, as if :attr:`map_location` wasn't specified.
+
+    If :attr:`map_location` is a :class:`torch.device` object or a string containing
+    a device tag, it indicates the location where all tensors should be loaded.
+
+    Otherwise, if :attr:`map_location` is a dict, it will be used to remap location tags
+    appearing in the file (keys), to ones that specify where to put the
+    storages (values).
+
+    User extensions can register their own location tags and tagging and
+    deserialization methods using :func:`torch.serialization.register_package`.
+
+    Args:
+        f: a file-like object (has to implement :meth:`read`, :meth:`readline`,
+            :meth:`tell`, and :meth:`seek`), or a string containing a file name
+        map_location: a function, :class:`torch.device`, string or a dict specifying how to remap storage
+            locations
+        pickle_module: module used for unpickling metadata and objects (has to
+            match the :attr:`pickle_module` used to serialize file)
+        pickle_load_args: optional keyword arguments passed over to
+            :func:`pickle_module.load` and :func:`pickle_module.Unpickler`, e.g.,
+            :attr:`encoding=...`.
+
+    .. note::
+        When you call :func:`torch.load()` on a file which contains GPU tensors, those tensors
+        will be loaded to GPU by default. You can call ``torch.load(.., map_location='cpu')``
+        and then :meth:`load_state_dict` to avoid GPU RAM surge when loading a model checkpoint.
+
+    .. note::
+        In Python 3, when loading files saved by Python 2, you may encounter
+        ``UnicodeDecodeError: 'ascii' codec can't decode byte 0x...``. This is
+        caused by the difference of handling in byte strings in Python 2 and
+        Python 3. You may use extra :attr:`encoding` keyword argument to specify how
+        these objects should be loaded, e.g., :attr:`encoding='latin1'` decodes them
+        to strings using ``latin1`` encoding, and :attr:`encoding='bytes'` keeps them
+        as byte arrays which can be decoded later with ``byte_array.decode(...)``.
+
+    Example:
+        >>> torch.load('tensors.pt')
+        # Load all tensors onto the CPU
+        >>> torch.load('tensors.pt', map_location=torch.device('cpu'))
+        # Load all tensors onto the CPU, using a function
+        >>> torch.load('tensors.pt', map_location=lambda storage, loc: storage)
+        # Load all tensors onto GPU 1
+        >>> torch.load('tensors.pt', map_location=lambda storage, loc: storage.cuda(1))
+        # Map tensors from GPU 1 to GPU 0
+        >>> torch.load('tensors.pt', map_location={'cuda:1':'cuda:0'})
+        # Load tensor from io.BytesIO object
+        >>> with open('tensor.pt', 'rb') as f:
+                buffer = io.BytesIO(f.read())
+        >>> torch.load(buffer)
+    """
+    new_fd = False
+    if isinstance(f, str) or \
+            (sys.version_info[0] == 2 and isinstance(f, unicode)):
+        new_fd = True
+        f = open(f, 'rb')
+    elif (sys.version_info[0] == 3 and isinstance(f, pathlib.Path)):
+        new_fd = True
+        f = f.open('rb')
+    try:
+        return _load(f, map_location, pickle_module, **pickle_load_args)
+    finally:
+        if new_fd:
+            f.close()
    + + +def _load(f, map_location, pickle_module, **pickle_load_args): + deserialized_objects = {} + + if map_location is None: + restore_location = default_restore_location + elif isinstance(map_location, dict): + def restore_location(storage, location): + location = map_location.get(location, location) + return default_restore_location(storage, location) + elif isinstance(map_location, _string_classes): + def restore_location(storage, location): + return default_restore_location(storage, map_location) + elif isinstance(map_location, torch.device): + def restore_location(storage, location): + return default_restore_location(storage, str(map_location)) + else: + def restore_location(storage, location): + result = map_location(storage, location) + if result is None: + result = default_restore_location(storage, location) + return result + + def _check_container_source(container_type, source_file, original_source): + try: + current_source = inspect.getsource(container_type) + except Exception: # saving the source is optional, so we can ignore any errors + warnings.warn("Couldn't retrieve source code for container of " + "type " + container_type.__name__ + ". It won't be checked " + "for correctness upon loading.") + return + if original_source != current_source: + if container_type.dump_patches: + file_name = container_type.__name__ + '.patch' + diff = difflib.unified_diff(current_source.split('\n'), + original_source.split('\n'), + source_file, + source_file, lineterm="") + lines = '\n'.join(diff) + try: + with open(file_name, 'a+') as f: + file_size = f.seek(0, 2) + f.seek(0) + if file_size == 0: + f.write(lines) + elif file_size != len(lines) or f.read() != lines: + raise IOError + msg = ("Saved a reverse patch to " + file_name + ". " + "Run `patch -p0 < " + file_name + "` to revert your " + "changes.") + except IOError: + msg = ("Tried to save a patch, but couldn't create a " + "writable file " + file_name + ". Make sure it " + "doesn't exist and your working directory is " + "writable.") + else: + msg = ("you can retrieve the original source code by " + "accessing the object's source attribute or set " + "`torch.nn.Module.dump_patches = True` and use the " + "patch tool to revert the changes.") + msg = ("source code of class '{}' has changed. 
{}" + .format(torch.typename(container_type), msg)) + warnings.warn(msg, SourceChangeWarning) + + def legacy_load(f): + deserialized_objects = {} + + def persistent_load(saved_id): + if isinstance(saved_id, tuple): + # Ignore containers that don't have any sources saved + if all(saved_id[1:]): + _check_container_source(*saved_id) + return saved_id[0] + return deserialized_objects[int(saved_id)] + + with closing(tarfile.open(fileobj=f, mode='r:', format=tarfile.PAX_FORMAT)) as tar, \ + mkdtemp() as tmpdir: + + tar.extract('storages', path=tmpdir) + with open(os.path.join(tmpdir, 'storages'), 'rb', 0) as f: + num_storages = pickle_module.load(f, **pickle_load_args) + for i in range(num_storages): + args = pickle_module.load(f, **pickle_load_args) + key, location, storage_type = args + obj = storage_type._new_with_file(f) + obj = restore_location(obj, location) + deserialized_objects[key] = obj + + storage_views = pickle_module.load(f, **pickle_load_args) + for target_cdata, root_cdata, offset, size in storage_views: + root = deserialized_objects[root_cdata] + deserialized_objects[target_cdata] = root[offset:offset + size] + + tar.extract('tensors', path=tmpdir) + with open(os.path.join(tmpdir, 'tensors'), 'rb', 0) as f: + num_tensors = pickle_module.load(f, **pickle_load_args) + for _ in range(num_tensors): + args = pickle_module.load(f, **pickle_load_args) + key, storage_id, original_tensor_type = args + storage = deserialized_objects[storage_id] + tensor_type = storage_to_tensor_type(storage) + ndim, = struct.unpack('<i', f.read(4)) + # skip next 4 bytes; legacy encoding treated ndim as 8 bytes + f.read(4) + size = struct.unpack('<{}q'.format(ndim), f.read(8 * ndim)) + stride = struct.unpack('<{}q'.format(ndim), f.read(8 * ndim)) + storage_offset, = struct.unpack('<q', f.read(8)) + tensor = tensor_type().set_(storage, storage_offset, size, stride) + deserialized_objects[key] = tensor + + pickle_file = tar.extractfile('pickle') + unpickler = pickle_module.Unpickler(pickle_file, **pickle_load_args) + unpickler.persistent_load = persistent_load + result = unpickler.load() + return result + + deserialized_objects = {} + + def maybe_decode_ascii(bytes_str): + # When using encoding='bytes' in Py3, some **internal** keys stored as + # strings in Py2 are loaded as bytes. This function decodes them with + # ascii encoding, one that Py3 uses by default. + # + # NOTE: This should only be used on internal keys (e.g., `typename` and + # `location` in `persistent_load` below! 
+ if isinstance(bytes_str, bytes): + return bytes_str.decode('ascii') + return bytes_str + + def persistent_load(saved_id): + assert isinstance(saved_id, tuple) + typename = maybe_decode_ascii(saved_id[0]) + data = saved_id[1:] + + if typename == 'module': + # Ignore containers that don't have any sources saved + if all(data[1:]): + _check_container_source(*data) + return data[0] + elif typename == 'storage': + data_type, root_key, location, size, view_metadata = data + location = maybe_decode_ascii(location) + if root_key not in deserialized_objects: + obj = data_type(size) + obj._torch_load_uninitialized = True + deserialized_objects[root_key] = restore_location(obj, location) + storage = deserialized_objects[root_key] + if view_metadata is not None: + view_key, offset, view_size = view_metadata + if view_key not in deserialized_objects: + deserialized_objects[view_key] = storage[offset:offset + view_size] + return deserialized_objects[view_key] + else: + return storage + else: + raise RuntimeError("Unknown saved id type: %s" % saved_id[0]) + + _check_seekable(f) + f_should_read_directly = _should_read_directly(f) + + if f_should_read_directly and f.tell() == 0: + # legacy_load requires that f has fileno() + # only if offset is zero we can attempt the legacy tar file loader + try: + return legacy_load(f) + except tarfile.TarError: + if zipfile.is_zipfile(f): + # .zip is used for torch.jit.save and will throw an un-pickling error here + raise RuntimeError("{} is a zip archive (did you mean to use torch.jit.load()?)".format(f.name)) + # if not a tarfile, reset file offset and proceed + f.seek(0) + + magic_number = pickle_module.load(f, **pickle_load_args) + if magic_number != MAGIC_NUMBER: + raise RuntimeError("Invalid magic number; corrupt file?") + protocol_version = pickle_module.load(f, **pickle_load_args) + if protocol_version != PROTOCOL_VERSION: + raise RuntimeError("Invalid protocol version: %s" % protocol_version) + + _sys_info = pickle_module.load(f, **pickle_load_args) + unpickler = pickle_module.Unpickler(f, **pickle_load_args) + unpickler.persistent_load = persistent_load + result = unpickler.load() + + deserialized_storage_keys = pickle_module.load(f, **pickle_load_args) + + offset = f.tell() if f_should_read_directly else None + for key in deserialized_storage_keys: + assert key in deserialized_objects + deserialized_objects[key]._set_from_file(f, offset, f_should_read_directly) + if offset is not None: + offset = f.tell() + + return result +
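Reading the ``restore_location`` dispatch at the top of ``_load``, all four accepted forms of ``map_location`` can express "load everything onto the CPU"; an illustrative sketch (file name reused from the docstring examples):

    >>> torch.load('tensors.pt', map_location='cpu')                         # string tag
    >>> torch.load('tensors.pt', map_location=torch.device('cpu'))           # device object
    >>> torch.load('tensors.pt', map_location={'cuda:0': 'cpu'})             # dict: remaps only the listed tags
    >>> torch.load('tensors.pt', map_location=lambda storage, loc: storage)  # callable; storage arrives on CPU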
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/sparse.html b/docs/stable/_modules/torch/sparse.html
new file mode 100644
index 000000000000..a314ada0d3ff
--- /dev/null
+++ b/docs/stable/_modules/torch/sparse.html
@@ -0,0 +1,648 @@

    Source code for torch.sparse

    +# The Tensor classes are added to this module by python_tensor.cpp
    +import torch
    +
    +__all__ = [
    +    'addmm',
    +    'mm',
    +    'sum',
    +]
    +
    +
    +
[docs]def addmm(mat, mat1, mat2, beta=1, alpha=1):
+    r"""
+    This function does the exact same thing as :func:`torch.addmm` in the forward,
+    except that it supports backward for sparse matrix :attr:`mat1`. :attr:`mat1`
+    needs to have `sparse_dim = 2`. Note that the gradient of :attr:`mat1` is a
+    coalesced sparse tensor.
+
+    Args:
+        mat (Tensor): a dense matrix to be added
+        mat1 (SparseTensor): a sparse matrix to be multiplied
+        mat2 (Tensor): a dense matrix to be multiplied
+        beta (Number, optional): multiplier for :attr:`mat` (:math:`\beta`)
+        alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
+    """
+    return torch._sparse_addmm(mat, mat1, mat2, beta=beta, alpha=alpha)
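Unlike ``mm`` and ``sum`` below, ``addmm`` ships without a usage example on this page; a minimal sketch under assumed shapes (all names and sizes are illustrative):

    >>> import torch
    >>> mat = torch.randn(2, 4)                                    # dense (n x p) matrix to add
    >>> mat1 = torch.randn(2, 3).to_sparse().requires_grad_(True)  # sparse (n x m)
    >>> mat2 = torch.randn(3, 4, requires_grad=True)               # dense (m x p)
    >>> out = torch.sparse.addmm(mat, mat1, mat2)                  # mat + mat1 @ mat2
    >>> out.sum().backward()                                       # mat1.grad is a coalesced sparse tensor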
    + + +
[docs]def mm(mat1, mat2):
+    r"""
+    Performs a matrix multiplication of the sparse matrix :attr:`mat1`
+    and the dense matrix :attr:`mat2`. Similar to :func:`torch.mm`, if :attr:`mat1` is a
+    :math:`(n \times m)` tensor and :attr:`mat2` is a :math:`(m \times p)` tensor, ``out`` will be a
+    :math:`(n \times p)` dense tensor. :attr:`mat1` needs to have `sparse_dim = 2`.
+    This function also supports backward for both matrices. Note that the gradient of
+    :attr:`mat1` is a coalesced sparse tensor.
+
+    Args:
+        mat1 (SparseTensor): the first sparse matrix to be multiplied
+        mat2 (Tensor): the second dense matrix to be multiplied
+
+    Example::
+
+        >>> a = torch.randn(2, 3).to_sparse().requires_grad_(True)
+        >>> a
+        tensor(indices=tensor([[0, 0, 0, 1, 1, 1],
+                               [0, 1, 2, 0, 1, 2]]),
+               values=tensor([ 1.5901,  0.0183, -0.6146,  1.8061, -0.0112,  0.6302]),
+               size=(2, 3), nnz=6, layout=torch.sparse_coo, requires_grad=True)
+
+        >>> b = torch.randn(3, 2, requires_grad=True)
+        >>> b
+        tensor([[-0.6479,  0.7874],
+                [-1.2056,  0.5641],
+                [-1.1716, -0.9923]], requires_grad=True)
+
+        >>> y = torch.sparse.mm(a, b)
+        >>> y
+        tensor([[-0.3323,  1.8723],
+                [-1.8951,  0.7904]], grad_fn=<SparseAddmmBackward>)
+        >>> y.sum().backward()
+        >>> a.grad
+        tensor(indices=tensor([[0, 0, 0, 1, 1, 1],
+                               [0, 1, 2, 0, 1, 2]]),
+               values=tensor([ 0.1394, -0.6415, -2.1639,  0.1394, -0.6415, -2.1639]),
+               size=(2, 3), nnz=6, layout=torch.sparse_coo)
+    """
+    return torch._sparse_mm(mat1, mat2)
    + + +
[docs]def sum(input, dim=None, dtype=None):
+    r"""
+    Returns the sum of each row of the SparseTensor :attr:`input` in the given
+    dimensions :attr:`dim`. If :attr:`dim` is a list of dimensions,
+    reduce over all of them. When summing over all ``sparse_dim``, this method
+    returns a dense Tensor instead of a SparseTensor.
+
+    All summed :attr:`dim` are squeezed (see :func:`torch.squeeze`), resulting in an output
+    tensor with :attr:`dim` fewer dimensions than :attr:`input`.
+
+    During backward, only gradients at the ``nnz`` locations of :attr:`input`
+    will propagate back. Note that the gradient of :attr:`input` is coalesced.
+
+    Args:
+        input (Tensor): the input SparseTensor
+        dim (int or tuple of ints): a dimension or a list of dimensions to reduce. Default: reduce
+            over all dims.
+        dtype (:class:`torch.dtype`, optional): the desired data type of the returned Tensor.
+            Default: dtype of :attr:`input`.
+
+    Example::
+
+        >>> nnz = 3
+        >>> dims = [5, 5, 2, 3]
+        >>> I = torch.cat([torch.randint(0, dims[0], size=(nnz,)),
+                           torch.randint(0, dims[1], size=(nnz,))], 0).reshape(2, nnz)
+        >>> V = torch.randn(nnz, dims[2], dims[3])
+        >>> size = torch.Size(dims)
+        >>> S = torch.sparse_coo_tensor(I, V, size)
+        >>> S
+        tensor(indices=tensor([[2, 0, 3],
+                               [2, 4, 1]]),
+               values=tensor([[[-0.6438, -1.6467,  1.4004],
+                               [ 0.3411,  0.0918, -0.2312]],
+
+                              [[ 0.5348,  0.0634, -2.0494],
+                               [-0.7125, -1.0646,  2.1844]],
+
+                              [[ 0.1276,  0.1874, -0.6334],
+                               [-1.9682, -0.5340,  0.7483]]]),
+               size=(5, 5, 2, 3), nnz=3, layout=torch.sparse_coo)
+
+        # when summing over only part of the sparse dims, a SparseTensor is returned
+        >>> torch.sparse.sum(S, [1, 3])
+        tensor(indices=tensor([[0, 2, 3]]),
+               values=tensor([[-1.4512,  0.4073],
+                              [-0.8901,  0.2017],
+                              [-0.3183, -1.7539]]),
+               size=(5, 2), nnz=3, layout=torch.sparse_coo)
+
+        # when summing over all sparse dims, a dense Tensor is returned
+        # with the summed dims squeezed
+        >>> torch.sparse.sum(S, [0, 1, 3])
+        tensor([-2.6596, -1.1450])
+    """
+    if dtype is None:
+        if dim is not None:
+            return torch._sparse_sum(input, dim)
+        else:
+            return torch._sparse_sum(input)
+    else:
+        if dim is not None:
+            return torch._sparse_sum(input, dim, dtype=dtype)
+        else:
+            return torch._sparse_sum(input, dtype=dtype)
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/storage.html b/docs/stable/_modules/torch/storage.html
new file mode 100644
index 000000000000..46709ebfa72b
--- /dev/null
+++ b/docs/stable/_modules/torch/storage.html
@@ -0,0 +1,652 @@

    Source code for torch.storage

    +import io
    +
    +import torch
    +from ._utils import _type, _cuda
    +
    +
    +class _StorageBase(object):
    +    is_cuda = False
    +    is_sparse = False
    +
    +    def __str__(self):
    +        content = ' ' + '\n '.join(str(self[i]) for i in range(len(self)))
    +        return content + '\n[{} of size {}]'.format(torch.typename(self), len(self))
    +
    +    def __repr__(self):
    +        return str(self)
    +
    +    def __iter__(self):
    +        return iter(map(lambda i: self[i], range(self.size())))
    +
    +    def __copy__(self):
    +        return self.clone()
    +
    +    def __deepcopy__(self, memo):
    +        memo = memo.setdefault('torch', {})
    +        if self._cdata in memo:
    +            return memo[self._cdata]
    +        new_storage = self.clone()
    +        memo[self._cdata] = new_storage
    +        return new_storage
    +
    +    def __reduce__(self):
    +        b = io.BytesIO()
    +        torch.save(self, b)
    +        return (_load_from_bytes, (b.getvalue(),))
    +
    +    def __sizeof__(self):
    +        return super(_StorageBase, self).__sizeof__() + self.element_size() * self.size()
    +
    +    def clone(self):
    +        """Returns a copy of this storage"""
    +        device = self.get_device() if self.is_cuda else -1
    +        with torch.cuda.device(device):
    +            return type(self)(self.size()).copy_(self)
    +
    +    def tolist(self):
    +        """Returns a list containing the elements of this storage"""
    +        return [v for v in self]
    +
    +    def cpu(self):
    +        """Returns a CPU copy of this storage if it's not already on the CPU"""
    +        return self.type(getattr(torch, self.__class__.__name__))
    +
    +    def double(self):
    +        """Casts this storage to double type"""
    +        return self.type(type(self).__module__ + '.DoubleStorage')
    +
    +    def float(self):
    +        """Casts this storage to float type"""
    +        return self.type(type(self).__module__ + '.FloatStorage')
    +
    +    def half(self):
    +        """Casts this storage to half type"""
    +        return self.type(type(self).__module__ + '.HalfStorage')
    +
    +    def long(self):
    +        """Casts this storage to long type"""
    +        return self.type(type(self).__module__ + '.LongStorage')
    +
    +    def int(self):
    +        """Casts this storage to int type"""
    +        return self.type(type(self).__module__ + '.IntStorage')
    +
    +    def short(self):
    +        """Casts this storage to short type"""
    +        return self.type(type(self).__module__ + '.ShortStorage')
    +
    +    def char(self):
    +        """Casts this storage to char type"""
    +        return self.type(type(self).__module__ + '.CharStorage')
    +
    +    def byte(self):
    +        """Casts this storage to byte type"""
    +        return self.type(type(self).__module__ + '.ByteStorage')
    +
    +    def bool(self):
    +        """Casts this storage to bool type"""
    +        return self.type(type(self).__module__ + '.BoolStorage')
    +
    +    def bfloat16(self):
    +        """Casts this storage to bfloat16 type"""
    +        return self.type(type(self).__module__ + '.BFloat16Storage')
    +
    +    def pin_memory(self):
    +        """Copies the storage to pinned memory, if it's not already pinned."""
    +        if self.is_cuda:
+            raise TypeError("cannot pin '{0}'; only CPU memory can be pinned"
    +                            .format(self.type()))
    +        import torch.cuda
    +        allocator = torch.cuda._host_allocator()
    +        return type(self)(self.size(), allocator=allocator).copy_(self)
    +
    +    def share_memory_(self):
    +        """Moves the storage to shared memory.
    +
    +        This is a no-op for storages already in shared memory and for CUDA
    +        storages, which do not need to be moved for sharing across processes.
    +        Storages in shared memory cannot be resized.
    +
    +        Returns: self
    +        """
    +        from torch.multiprocessing import get_sharing_strategy
    +        if self.is_cuda:
    +            pass  # CUDA doesn't use POSIX shared memory
    +        elif get_sharing_strategy() == 'file_system':
    +            self._share_filename_()
    +        else:
    +            self._share_fd_()
    +        return self
    +
    +    @classmethod
    +    def _new_shared(cls, size):
    +        """Creates a new storage in shared memory with the same data type"""
    +        from torch.multiprocessing import get_sharing_strategy
    +        if cls.is_cuda:
    +            return cls(size)
    +        elif get_sharing_strategy() == 'file_system':
    +            return cls._new_using_filename(size)
    +        else:
    +            return cls._new_using_fd(size)
    +
    +
    +def _load_from_bytes(b):
    +    return torch.load(io.BytesIO(b))
    +
    +
    +_StorageBase.type = _type
    +_StorageBase.cuda = _cuda
    +
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/tensor.html b/docs/stable/_modules/torch/tensor.html
new file mode 100644
index 000000000000..4339332358a5
--- /dev/null
+++ b/docs/stable/_modules/torch/tensor.html
@@ -0,0 +1,1001 @@

    Source code for torch.tensor

    +import sys
    +import torch
    +import torch._C as _C
    +from collections import OrderedDict
    +import torch.utils.hooks as hooks
    +import warnings
    +import weakref
    +from torch._six import imap
    +from torch._C import _add_docstr
    +from numbers import Number
    +
    +
    +# NB: If you subclass Tensor, and want to share the subclassed class
    +# across processes, you must also update torch/multiprocessing/reductions.py
    +# to define a ForkingPickler serialization mode for the class.
    +#
    +# NB: If you add a new method to Tensor, you must update
    +# torch/__init__.py.in to add a type annotation for your method;
    +# otherwise, it will not show up in autocomplete.
    +class Tensor(torch._C._TensorBase):
    +    def __deepcopy__(self, memo):
    +        if not self.is_leaf:
    +            raise RuntimeError("Only Tensors created explicitly by the user "
    +                               "(graph leaves) support the deepcopy protocol at the moment")
    +        if id(self) in memo:
    +            return memo[id(self)]
    +        with torch.no_grad():
    +            if self.is_sparse:
    +                new_tensor = self.clone()
    +            else:
    +                new_storage = self.storage().__deepcopy__(memo)
    +                new_tensor = self.new()
    +                new_tensor.set_(new_storage, self.storage_offset(), self.size(), self.stride())
    +            memo[id(self)] = new_tensor
    +            new_tensor.requires_grad = self.requires_grad
    +            return new_tensor
    +
    +    def __reduce_ex__(self, proto):
    +        # See Note [Don't serialize hooks]
    +        torch.utils.hooks.warn_if_has_hooks(self)
    +        if self.is_quantized:
    +            args = (self.storage(),
    +                    self.storage_offset(),
    +                    tuple(self.size()),
    +                    self.stride(),
    +                    self.q_scale(),
    +                    self.q_zero_point(),
    +                    self.requires_grad,
    +                    OrderedDict())  # TODO: self.qscheme()
    +            return (torch._utils._rebuild_qtensor, args)
    +        else:
    +            args = (self.storage(),
    +                    self.storage_offset(),
    +                    tuple(self.size()),
    +                    self.stride(),
    +                    self.requires_grad,
    +                    OrderedDict())  # previously was self._backward_hooks
    +            return (torch._utils._rebuild_tensor_v2, args)
    +
    +    def __setstate__(self, state):
    +        # Warning: this method is NOT called when you torch.load() a tensor;
    +        # that is managed by _rebuild_tensor_v2
    +        if not self.is_leaf:
    +            raise RuntimeError('__setstate__ can be only called on leaf Tensors')
    +        if len(state) == 4:
    +            # legacy serialization of Tensor
    +            self.set_(*state)
    +            return
    +        elif len(state) == 5:
    +            # legacy serialization of Variable
    +            self.data = state[0]
    +            state = (state[3], state[4], state[2])
    +        # The setting of _backward_hooks is expected to be a no-op.
    +        # See Note [Don't serialize hooks]
    +        self.requires_grad, _, self._backward_hooks = state
    +
    +    def __repr__(self):
    +        # All strings are unicode in Python 3, while we have to encode unicode
+        # strings in Python 2. If we can't, let Python decide the best
    +        # characters to replace unicode characters with.
    +        if sys.version_info > (3,):
    +            return torch._tensor_str._str(self)
    +        else:
    +            if hasattr(sys.stdout, 'encoding'):
    +                return torch._tensor_str._str(self).encode(
    +                    sys.stdout.encoding or 'UTF-8', 'replace')
    +            else:
    +                return torch._tensor_str._str(self).encode('UTF-8', 'replace')
    +
    +
[docs]    def backward(self, gradient=None, retain_graph=None, create_graph=False):
+        r"""Computes the gradient of the current tensor w.r.t. graph leaves.
+
+        The graph is differentiated using the chain rule. If the tensor is
+        non-scalar (i.e. its data has more than one element) and requires
+        gradient, the function additionally requires specifying ``gradient``.
+        It should be a tensor of matching type and location that contains
+        the gradient of the differentiated function w.r.t. ``self``.
+
+        This function accumulates gradients in the leaves; you might need to
+        zero them before calling it.
+
+        Arguments:
+            gradient (Tensor or None): Gradient w.r.t. the
+                tensor. If it is a tensor, it will be automatically converted
+                to a Tensor that does not require grad unless ``create_graph`` is True.
+                None values can be specified for scalar Tensors or ones that
+                don't require grad. If a None value would be acceptable then
+                this argument is optional.
+            retain_graph (bool, optional): If ``False``, the graph used to compute
+                the grads will be freed. Note that in nearly all cases setting
+                this option to True is not needed and often can be worked around
+                in a much more efficient way. Defaults to the value of
+                ``create_graph``.
+            create_graph (bool, optional): If ``True``, graph of the derivative will
+                be constructed, allowing one to compute higher order derivative
+                products. Defaults to ``False``.
+        """
+        torch.autograd.backward(self, gradient, retain_graph, create_graph)
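The ``gradient`` argument is easiest to see on a non-scalar output; a small sketch (values chosen arbitrarily):

    >>> x = torch.ones(2, requires_grad=True)
    >>> y = x * 3                           # non-scalar, so backward needs a gradient
    >>> y.backward(torch.tensor([1., 2.]))  # supply dL/dy explicitly
    >>> x.grad                              # chain rule gives 3 * dL/dy
    tensor([3., 6.])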
    + +
[docs]    def register_hook(self, hook):
+        r"""Registers a backward hook.
+
+        The hook will be called every time a gradient with respect to the
+        Tensor is computed. The hook should have the following signature::
+
+            hook(grad) -> Tensor or None
+
+        The hook should not modify its argument, but it can optionally return
+        a new gradient which will be used in place of :attr:`grad`.
+
+        This function returns a handle with a method ``handle.remove()``
+        that removes the hook from the tensor.
+
+        Example::
+
+            >>> v = torch.tensor([0., 0., 0.], requires_grad=True)
+            >>> h = v.register_hook(lambda grad: grad * 2)  # double the gradient
+            >>> v.backward(torch.tensor([1., 2., 3.]))
+            >>> v.grad
+            tensor([2., 4., 6.])
+
+            >>> h.remove()  # removes the hook
+        """
+        if not self.requires_grad:
+            raise RuntimeError("cannot register a hook on a tensor that "
+                               "doesn't require gradient")
+        if self._backward_hooks is None:
+            self._backward_hooks = OrderedDict()
+            if self.grad_fn is not None:
+                self.grad_fn._register_hook_dict(self)
+        handle = hooks.RemovableHandle(self._backward_hooks)
+        self._backward_hooks[handle.id] = hook
+        return handle
    + + def reinforce(self, reward): + def trim(str): + return '\n'.join([line.strip() for line in str.split('\n')]) + + raise RuntimeError(trim(r"""reinforce() was removed. + Use torch.distributions instead. + See https://pytorch.org/docs/master/distributions.html + + Instead of: + + probs = policy_network(state) + action = probs.multinomial() + next_state, reward = env.step(action) + action.reinforce(reward) + action.backward() + + Use: + + probs = policy_network(state) + # NOTE: categorical is equivalent to what used to be called multinomial + m = torch.distributions.Categorical(probs) + action = m.sample() + next_state, reward = env.step(action) + loss = -m.log_prob(action) * reward + loss.backward() + """)) + + detach = _add_docstr(_C._TensorBase.detach, r""" + Returns a new Tensor, detached from the current graph. + + The result will never require gradient. + + .. note:: + + Returned Tensor shares the same storage with the original one. + In-place modifications on either of them will be seen, and may trigger + errors in correctness checks. + IMPORTANT NOTE: Previously, in-place size / stride / storage changes + (such as `resize_` / `resize_as_` / `set_` / `transpose_`) to the returned tensor + also update the original tensor. Now, these in-place changes will not update the + original tensor anymore, and will instead trigger an error. + For sparse tensors: + In-place indices / values changes (such as `zero_` / `copy_` / `add_`) to the + returned tensor will not update the original tensor anymore, and will instead + trigger an error. + """) + + detach_ = _add_docstr(_C._TensorBase.detach_, r""" + Detaches the Tensor from the graph that created it, making it a leaf. + Views cannot be detached in-place. + """) + +
    [docs] def retain_grad(self): + r"""Enables .grad attribute for non-leaf Tensors.""" + if self.grad_fn is None: # no-op for leaves + return + if not self.requires_grad: + raise RuntimeError("can't retain_grad on Tensor that has requires_grad=False") + if hasattr(self, 'retains_grad'): + return + weak_self = weakref.ref(self) + + def retain_grad_hook(grad): + var = weak_self() + if var is None: + return + if var._grad is None: + var._grad = grad.clone() + else: + var._grad = var._grad + grad + + self.register_hook(retain_grad_hook) + self.retains_grad = True
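A quick illustration of ``retain_grad`` on a non-leaf tensor (sketch; the printed gradient is what the hook above accumulates):

    >>> x = torch.randn(3, requires_grad=True)
    >>> y = 2 * x           # non-leaf: y.grad would normally stay None
    >>> y.retain_grad()
    >>> y.sum().backward()
    >>> y.grad
    tensor([1., 1., 1.])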
    + +
    [docs] def is_pinned(self): + r"""Returns true if this tensor resides in pinned memory""" + storage = self.storage() + return storage.is_pinned() if storage else False
    + +
    [docs] def is_shared(self): + r"""Checks if tensor is in shared memory. + + This is always ``True`` for CUDA tensors. + """ + return self.storage().is_shared()
    + +
    [docs] def share_memory_(self): + r"""Moves the underlying storage to shared memory. + + This is a no-op if the underlying storage is already in shared memory + and for CUDA tensors. Tensors in shared memory cannot be resized. + """ + self.storage().share_memory_() + return self
    + + def __reversed__(self): + r"""Reverses the tensor along dimension 0.""" + if self.dim() == 0: + return self + else: + return self.flip(0) + +
    [docs] def norm(self, p="fro", dim=None, keepdim=False, dtype=None): + r"""See :func:`torch.norm`""" + return torch.norm(self, p, dim, keepdim, dtype=dtype)
    + +
    [docs] def lu(self, pivot=True, get_infos=False): + r"""See :func:`torch.lu`""" + # If get_infos is True, then we don't need to check for errors and vice versa + LU, pivots, infos = torch._lu_with_info(self, pivot=pivot, check_errors=(not get_infos)) + if get_infos: + return LU, pivots, infos + else: + return LU, pivots
    + +
    [docs] def gels(self, A): + r"""See :func:`torch.lstsq`""" + warnings.warn("torch.gels is deprecated in favour of torch.lstsq and will be " + "removed in the next release. Please use torch.lstsq instead.", stacklevel=2) + return super(Tensor, self).lstsq(A)
    + +
    [docs] def stft(self, n_fft, hop_length=None, win_length=None, window=None, + center=True, pad_mode='reflect', normalized=False, onesided=True): + r"""See :func:`torch.stft` + + .. warning:: + This function changed signature at version 0.4.1. Calling with + the previous signature may cause error or return incorrect result. + """ + return torch.stft(self, n_fft, hop_length, win_length, window, center, + pad_mode, normalized, onesided)
    + + def resize(self, *sizes): + warnings.warn("non-inplace resize is deprecated") + from torch.autograd._functions import Resize + return Resize.apply(self, sizes) + + def resize_as(self, tensor): + warnings.warn("non-inplace resize_as is deprecated") + from torch.autograd._functions import Resize + return Resize.apply(self, tensor.size()) + +
    [docs] def split(self, split_size, dim=0): + r"""See :func:`torch.split` + """ + if isinstance(split_size, int): + return super(Tensor, self).split(split_size, dim) + else: + return super(Tensor, self).split_with_sizes(split_size, dim)
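As the dispatch above shows, an ``int`` argument goes to ``split`` while a list goes to ``split_with_sizes``; a minimal sketch:

    >>> x = torch.arange(10)
    >>> [t.size(0) for t in x.split(4)]          # int: equal chunks, the last may be smaller
    [4, 4, 2]
    >>> [t.size(0) for t in x.split([2, 3, 5])]  # list: explicit chunk sizes
    [2, 3, 5]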
    + +
    [docs] def unique(self, sorted=True, return_inverse=False, return_counts=False, dim=None): + r"""Returns the unique elements of the input tensor. + + See :func:`torch.unique` + """ + return torch.unique(self, sorted=sorted, return_inverse=return_inverse, return_counts=return_counts, dim=dim)
    + +
    [docs] def unique_consecutive(self, return_inverse=False, return_counts=False, dim=None): + r"""Eliminates all but the first element from every consecutive group of equivalent elements. + + See :func:`torch.unique_consecutive` + """ + return torch.unique_consecutive(self, return_inverse=return_inverse, return_counts=return_counts, dim=dim)
    + + def __rsub__(self, other): + return _C._VariableFunctions.rsub(self, other) + + def __rdiv__(self, other): + if self.dtype.is_floating_point: + return self.reciprocal() * other + else: + return (self.double().reciprocal() * other).type_as(self) + + __rtruediv__ = __rdiv__ + __itruediv__ = _C._TensorBase.__idiv__ + + __pow__ = _C._TensorBase.pow + + def __format__(self, format_spec): + if self.dim() == 0: + return self.item().__format__(format_spec) + return object.__format__(self, format_spec) + + def __ipow__(self, other): + raise NotImplementedError("in-place pow not implemented") + + def __rpow__(self, other): + return self.new_tensor(other) ** self + + def __floordiv__(self, other): + result = self / other + if result.dtype.is_floating_point: + result = result.trunc() + return result + + def __rfloordiv__(self, other): + result = other / self + if result.dtype.is_floating_point: + result = result.trunc() + return result + + __neg__ = _C._TensorBase.neg + + __eq__ = _C._TensorBase.eq + __ne__ = _C._TensorBase.ne + __lt__ = _C._TensorBase.lt + __le__ = _C._TensorBase.le + __gt__ = _C._TensorBase.gt + __ge__ = _C._TensorBase.ge + __abs__ = _C._TensorBase.abs + + def __len__(self): + if self.dim() == 0: + raise TypeError("len() of a 0-d tensor") + return self.shape[0] + + def __iter__(self): + # NB: we use 'imap' and not 'map' here, so that in Python 2 we get a + # generator and don't eagerly perform all the indexes. This could + # save us work, and also helps keep trace ordering deterministic + # (e.g., if you zip(*hiddens), the eager map will force all the + # indexes of hiddens[0] before hiddens[1], while the generator + # map will interleave them.) + if self.dim() == 0: + raise TypeError('iteration over a 0-d tensor') + if torch._C._get_tracing_state(): + warnings.warn('Iterating over a tensor might cause the trace to be incorrect. ' + 'Passing a tensor of different shape won\'t change the number of ' + 'iterations executed (and might lead to errors or silently give ' + 'incorrect results).', category=RuntimeWarning) + return iter(imap(lambda i: self[i], range(self.size(0)))) + + def __hash__(self): + return id(self) + + def __dir__(self): + tensor_methods = dir(self.__class__) + tensor_methods.remove('volatile') # deprecated + attrs = list(self.__dict__.keys()) + keys = tensor_methods + attrs + + # property only available dense, cuda tensors + if (not self.is_cuda) or self.is_sparse: + keys.remove("__cuda_array_interface__") + + return sorted(keys) + + # Numpy array interface, to support `numpy.asarray(tensor) -> ndarray` + __array_priority__ = 1000 # prefer Tensor ops over numpy ones + + def __array__(self, dtype=None): + if dtype is None: + return self.numpy() + else: + return self.numpy().astype(dtype, copy=False) + + # Wrap Numpy array again in a suitable tensor when done, to support e.g. + # `numpy.sin(tensor) -> tensor` or `numpy.greater(tensor, 0) -> ByteTensor` + def __array_wrap__(self, array): + if array.dtype == bool: + # Workaround, torch has no built-in bool tensor + array = array.astype('uint8') + return torch.from_numpy(array) + + def __contains__(self, element): + r"""Check if `element` is present in tensor + + Arguments: + element (Tensor or scalar): element to be checked + for presence in current tensor" + """ + if isinstance(element, (torch.Tensor, Number)): + return (element == self).any().item() + return NotImplemented + + @property + def __cuda_array_interface__(self): + """Array view description for cuda tensors. 
+ + See: + https://numba.pydata.org/numba-doc/latest/cuda/cuda_array_interface.html + """ + + # raise AttributeError for unsupported tensors, so that + # hasattr(cpu_tensor, "__cuda_array_interface__") is False. + if not self.is_cuda: + raise AttributeError( + "Can't get __cuda_array_interface__ on non-CUDA tensor type: %s " + "If CUDA data is required use tensor.cuda() to copy tensor to device memory." % + self.type() + ) + + if self.is_sparse: + raise AttributeError( + "Can't get __cuda_array_interface__ on sparse type: %s " + "Use Tensor.to_dense() to convert to a dense tensor first." % + self.type() + ) + + # RuntimeError, matching tensor.__array__() behavior. + if self.requires_grad: + raise RuntimeError( + "Can't get __cuda_array_interface__ on Variable that requires grad. " + "If gradients aren't required, use var.detach() to get Variable that doesn't require grad." + ) + + # CUDA devices are little-endian and tensors are stored in native byte + # order. 1-byte entries are endian-agnostic. + typestr = { + torch.float16: "<f2", + torch.float32: "<f4", + torch.float64: "<f8", + torch.uint8: "|u1", + torch.int8: "|i1", + torch.int16: "<i2", + torch.int32: "<i4", + torch.int64: "<i8", + }[self.dtype] + + itemsize = self.storage().element_size() + + shape = self.shape + strides = tuple(s * itemsize for s in self.stride()) + data = (self.data_ptr(), False) # read-only is false + + return dict(typestr=typestr, shape=shape, strides=strides, data=data, version=0) + + __module__ = 'torch' +
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/utils/checkpoint.html b/docs/stable/_modules/torch/utils/checkpoint.html
new file mode 100644
index 000000000000..2b186a834f9c
--- /dev/null
+++ b/docs/stable/_modules/torch/utils/checkpoint.html
@@ -0,0 +1,750 @@

    Source code for torch.utils.checkpoint

    +from __future__ import absolute_import, division, print_function, unicode_literals
    +import torch
    +import warnings
    +
    +
    +def detach_variable(inputs):
    +    if isinstance(inputs, tuple):
    +        out = []
    +        for inp in inputs:
    +            if not isinstance(inp, torch.Tensor):
    +                out.append(inp)
    +                continue
    +
    +            x = inp.detach()
    +            x.requires_grad = inp.requires_grad
    +            out.append(x)
    +        return tuple(out)
    +    else:
+        raise RuntimeError(
+            "Only tuple of tensors is supported. Got unsupported input type: " + type(inputs).__name__)
    +
    +
    +def check_backward_validity(inputs):
    +    if not any(inp.requires_grad for inp in inputs if isinstance(inp, torch.Tensor)):
    +        warnings.warn("None of the inputs have requires_grad=True. Gradients will be None")
    +
    +
    +# We can't know if the run_fn will internally move some args to different devices,
    +# which would require logic to preserve rng states for those devices as well.
    +# We could paranoically stash and restore ALL the rng states for all visible devices,
    +# but that seems very wasteful for most cases.  Compromise:  Stash the RNG state for
    +# the device of all Tensor args.
    +#
    +# To consider:  maybe get_device_states and set_device_states should reside in torch/random.py?
    +def get_device_states(*args):
    +    # This will not error out if "arg" is a CPU tensor or a non-tensor type because
    +    # the conditionals short-circuit.
    +    fwd_gpu_devices = list(set(arg.get_device() for arg in args
    +                               if isinstance(arg, torch.Tensor) and arg.is_cuda))
    +
    +    fwd_gpu_states = []
    +    for device in fwd_gpu_devices:
    +        with torch.cuda.device(device):
    +            fwd_gpu_states.append(torch.cuda.get_rng_state())
    +
    +    return fwd_gpu_devices, fwd_gpu_states
    +
    +
    +def set_device_states(devices, states):
    +    for device, state in zip(devices, states):
    +        with torch.cuda.device(device):
    +            torch.cuda.set_rng_state(state)
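A hedged round-trip sketch of the two helpers above (assumes at least one visible CUDA device):

    >>> t = torch.randn(2, device='cuda')       # a tensor arg whose device must be covered
    >>> devices, states = get_device_states(t)  # stash per-device CUDA RNG state
    >>> _ = torch.randn(2, device='cuda')       # advances the CUDA RNG
    >>> set_device_states(devices, states)      # restore, so the next draw repeats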
    +
    +
    +class CheckpointFunction(torch.autograd.Function):
    +
    +    @staticmethod
    +    def forward(ctx, run_function, preserve_rng_state, *args):
    +        check_backward_validity(args)
    +        ctx.run_function = run_function
    +        ctx.preserve_rng_state = preserve_rng_state
    +        if preserve_rng_state:
    +            ctx.fwd_cpu_state = torch.get_rng_state()
    +            # Don't eagerly initialize the cuda context by accident.
    +            # (If the user intends that the context is initialized later, within their
    +            # run_function, we SHOULD actually stash the cuda state here.  Unfortunately,
    +            # we have no way to anticipate this will happen before we run the function.)
    +            ctx.had_cuda_in_fwd = False
    +            if torch.cuda._initialized:
    +                ctx.had_cuda_in_fwd = True
    +                ctx.fwd_gpu_devices, ctx.fwd_gpu_states = get_device_states(*args)
    +        ctx.save_for_backward(*args)
    +        with torch.no_grad():
    +            outputs = run_function(*args)
    +        return outputs
    +
    +    @staticmethod
    +    def backward(ctx, *args):
    +        if not torch.autograd._is_checkpoint_valid():
    +            raise RuntimeError("Checkpointing is not compatible with .grad(), please use .backward() if possible")
    +        inputs = ctx.saved_tensors
    +        # Stash the surrounding rng state, and mimic the state that was
+        # present at this time during forward.  Restore the surrounding state
    +        # when we're done.
    +        rng_devices = []
    +        if ctx.preserve_rng_state and ctx.had_cuda_in_fwd:
    +            rng_devices = ctx.fwd_gpu_devices
    +        with torch.random.fork_rng(devices=rng_devices, enabled=ctx.preserve_rng_state):
    +            if ctx.preserve_rng_state:
    +                torch.set_rng_state(ctx.fwd_cpu_state)
    +                if ctx.had_cuda_in_fwd:
    +                    set_device_states(ctx.fwd_gpu_devices, ctx.fwd_gpu_states)
    +            detached_inputs = detach_variable(inputs)
    +            with torch.enable_grad():
    +                outputs = ctx.run_function(*detached_inputs)
    +
    +        if isinstance(outputs, torch.Tensor):
    +            outputs = (outputs,)
    +        torch.autograd.backward(outputs, args)
    +        grads = tuple(inp.grad if isinstance(inp, torch.Tensor) else inp
    +                      for inp in detached_inputs)
    +        return (None, None) + grads
    +
    +
    +
[docs]def checkpoint(function, *args, **kwargs):
+    r"""Checkpoint a model or part of the model
+
+    Checkpointing works by trading compute for memory. Rather than storing all
+    intermediate activations of the entire computation graph for computing
+    backward, the checkpointed part does **not** save intermediate activations,
+    and instead recomputes them in the backward pass. It can be applied on any
+    part of a model.
+
+    Specifically, in the forward pass, :attr:`function` will run in a
+    :func:`torch.no_grad` manner, i.e., not storing the intermediate
+    activations. Instead, the forward pass saves the inputs tuple and the
+    :attr:`function` parameter. In the backward pass, the saved inputs and
+    :attr:`function` are retrieved, and the forward pass is computed on
+    :attr:`function` again, now tracking the intermediate activations, and then
+    the gradients are calculated using these activation values.
+
+    .. warning::
+        Checkpointing doesn't work with :func:`torch.autograd.grad`, but only
+        with :func:`torch.autograd.backward`.
+
+    .. warning::
+        If :attr:`function` invocation during backward does anything different
+        than the one during forward, e.g., due to some global variable, the
+        checkpointed version won't be equivalent, and unfortunately it can't be
+        detected.
+
+    .. warning::
+        At least one of the inputs needs to have :code:`requires_grad=True` if
+        grads are needed for model inputs, otherwise the checkpointed part of the
+        model won't have gradients.
+
+    Args:
+        function: describes what to run in the forward pass of the model or
+            part of the model. It should also know how to handle the inputs
+            passed as the tuple. For example, in LSTM, if user passes
+            ``(activation, hidden)``, :attr:`function` should correctly use the
+            first input as ``activation`` and the second input as ``hidden``
+        preserve_rng_state(bool, optional, default=True): if ``False``, omit
+            stashing and restoring the RNG state during each checkpoint.
+        args: tuple containing inputs to the :attr:`function`
+
+    Returns:
+        Output of running :attr:`function` on :attr:`*args`
+    """
+    # Hack to mix *args with **kwargs in a python 2.7-compliant way
+    preserve = kwargs.pop('preserve_rng_state', True)
+    if kwargs:
+        raise ValueError("Unexpected keyword arguments: " + ",".join(arg for arg in kwargs))
+
+    return CheckpointFunction.apply(function, preserve, *args)
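``checkpoint`` itself ships without a usage example on this page; a minimal sketch (the two-layer block is purely illustrative):

    >>> import torch
    >>> from torch.utils.checkpoint import checkpoint
    >>> block = torch.nn.Sequential(torch.nn.Linear(10, 10), torch.nn.ReLU())
    >>> inp = torch.randn(2, 10, requires_grad=True)  # at least one input needs grad
    >>> out = checkpoint(block, inp)
    >>> out.sum().backward()  # block's forward is re-run here to rebuild activations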
    + + +# TODO(sublee): When releasing PyTorch 1.3, +# fix the function signature to not accept variadic arguments. +# See also: https://github.com/pytorch/pytorch/issues/19260 +
[docs]def checkpoint_sequential(functions, segments, *inputs, **kwargs):
+    r"""A helper function for checkpointing sequential models.
+
+    Sequential models execute a list of modules/functions in order
+    (sequentially). Therefore, we can divide such a model in various segments
+    and checkpoint each segment. All segments except the last will run in a
+    :func:`torch.no_grad` manner, i.e., not storing the intermediate
+    activations. The inputs of each checkpointed segment will be saved for
+    re-running the segment in the backward pass.
+
+    See :func:`~torch.utils.checkpoint.checkpoint` on how checkpointing works.
+
+    .. warning::
+        Checkpointing doesn't work with :func:`torch.autograd.grad`, but only
+        with :func:`torch.autograd.backward`.
+
+    .. warning::
+        At least one of the inputs needs to have :code:`requires_grad=True` if
+        grads are needed for model inputs, otherwise the checkpointed part of the
+        model won't have gradients.
+
+    Args:
+        functions: A :class:`torch.nn.Sequential` or the list of modules or
+            functions (comprising the model) to run sequentially.
+        segments: Number of chunks to create in the model
+        inputs: tuple of Tensors that are inputs to :attr:`functions`
+        preserve_rng_state(bool, optional, default=True): if ``False``, omit
+            stashing and restoring the RNG state during each checkpoint.
+
+    Returns:
+        Output of running :attr:`functions` sequentially on :attr:`*inputs`
+
+    Example:
+        >>> model = nn.Sequential(...)
+        >>> input_var = checkpoint_sequential(model, chunks, input_var)
+    """
+    # Hack to mix *args with **kwargs in a python 2.7-compliant way
+    preserve = kwargs.pop('preserve_rng_state', True)
+    if kwargs:
+        raise ValueError("Unexpected keyword arguments: " + ",".join(arg for arg in kwargs))
+
+    # To accept variadic arguments is not consistent with nn.Sequential.
+    # This interface will be changed at PyTorch 1.3.
+    # See also: https://github.com/pytorch/pytorch/issues/19260
+    if not inputs:
+        warnings.warn('Giving no input to checkpoint_sequential has been deprecated, '
+                      'a TypeError will be raised after PyTorch 1.3',
+                      DeprecationWarning)
+    elif len(inputs) > 1:
+        warnings.warn('Multiple inputs to checkpoint_sequential have been deprecated, '
+                      'a TypeError will be raised after PyTorch 1.3',
+                      DeprecationWarning)
+
+    def run_function(start, end, functions):
+        def forward(*inputs):
+            for j in range(start, end + 1):
+                if isinstance(inputs, tuple):
+                    inputs = functions[j](*inputs)
+                else:
+                    inputs = functions[j](inputs)
+            return inputs
+        return forward
+
+    if isinstance(functions, torch.nn.Sequential):
+        functions = list(functions.children())
+
+    segment_size = len(functions) // segments
+    # the last chunk has to be non-volatile
+    end = -1
+    for start in range(0, segment_size * (segments - 1), segment_size):
+        end = start + segment_size - 1
+        inputs = checkpoint(run_function(start, end, functions), *inputs,
+                            preserve_rng_state=preserve)
+        if not isinstance(inputs, tuple):
+            inputs = (inputs,)
+    return run_function(end + 1, len(functions) - 1, functions)(*inputs)
    +
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/utils/cpp_extension.html b/docs/stable/_modules/torch/utils/cpp_extension.html
new file mode 100644
index 000000000000..21c1545dc859
--- /dev/null
+++ b/docs/stable/_modules/torch/utils/cpp_extension.html
@@ -0,0 +1,1666 @@

    Source code for torch.utils.cpp_extension

    +from __future__ import absolute_import, division, print_function, unicode_literals
    +import copy
    +import glob
    +import imp
    +import os
    +import re
    +import setuptools
    +import subprocess
    +import sys
    +import sysconfig
    +import tempfile
    +import warnings
    +
    +import torch
    +from .file_baton import FileBaton
    +from ._cpp_extension_versioner import ExtensionVersioner
    +
    +from setuptools.command.build_ext import build_ext
    +
    +
    +IS_WINDOWS = sys.platform == 'win32'
    +
    +BUILD_NAMEDTENSOR = os.getenv('BUILD_NAMEDTENSOR', '').upper() == '1'
    +
    +def _find_cuda_home():
    +    '''Finds the CUDA install path.'''
    +    # Guess #1
    +    cuda_home = os.environ.get('CUDA_HOME') or os.environ.get('CUDA_PATH')
    +    if cuda_home is None:
    +        # Guess #2
    +        try:
    +            which = 'where' if IS_WINDOWS else 'which'
    +            nvcc = subprocess.check_output(
    +                [which, 'nvcc']).decode().rstrip('\r\n')
    +            cuda_home = os.path.dirname(os.path.dirname(nvcc))
    +        except Exception:
    +            # Guess #3
    +            if IS_WINDOWS:
    +                cuda_homes = glob.glob(
    +                    'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v*.*')
    +                if len(cuda_homes) == 0:
    +                    cuda_home = ''
    +                else:
    +                    cuda_home = cuda_homes[0]
    +            else:
    +                cuda_home = '/usr/local/cuda'
    +            if not os.path.exists(cuda_home):
    +                cuda_home = None
    +    if cuda_home and not torch.cuda.is_available():
    +        print("No CUDA runtime is found, using CUDA_HOME='{}'".format(cuda_home))
    +    return cuda_home
    +
    +
    +MINIMUM_GCC_VERSION = (4, 9, 0)
    +MINIMUM_MSVC_VERSION = (19, 0, 24215)
    +ABI_INCOMPATIBILITY_WARNING = '''
    +
    +                               !! WARNING !!
    +
    +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    +Your compiler ({}) may be ABI-incompatible with PyTorch!
    +Please use a compiler that is ABI-compatible with GCC 4.9 and above.
    +See https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html.
    +
    +See https://gist.github.com/goldsborough/d466f43e8ffc948ff92de7486c5216d6
    +for instructions on how to install GCC 4.9 or higher.
    +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    +
    +                              !! WARNING !!
    +'''
    +WRONG_COMPILER_WARNING = '''
    +
    +                               !! WARNING !!
    +
    +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+Your compiler ({user_compiler}) is not compatible with the compiler PyTorch was
+built with for this platform, which is {pytorch_compiler} on {platform}. Please
+use {pytorch_compiler} to compile your extension. Alternatively, you may
    +compile PyTorch from source using {user_compiler}, and then you can also use
    +{user_compiler} to compile your extension.
    +
    +See https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md for help
    +with compiling PyTorch from source.
    +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    +
    +                              !! WARNING !!
    +'''
    +CUDA_HOME = _find_cuda_home()
    +CUDNN_HOME = os.environ.get('CUDNN_HOME') or os.environ.get('CUDNN_PATH')
    +# PyTorch releases have the version pattern major.minor.patch, whereas when
    +# PyTorch is built from source, we append the git commit hash, which gives
    +# it the below pattern.
    +BUILT_FROM_SOURCE_VERSION_PATTERN = re.compile(r'\d+\.\d+\.\d+\w+\+\w+')
    +
    +COMMON_MSVC_FLAGS = ['/MD', '/wd4819', '/EHsc']
    +
    +COMMON_NVCC_FLAGS = [
    +    '-D__CUDA_NO_HALF_OPERATORS__',
    +    '-D__CUDA_NO_HALF_CONVERSIONS__',
    +    '-D__CUDA_NO_HALF2_OPERATORS__',
    +    '--expt-relaxed-constexpr'
    +]
    +
    +
    +JIT_EXTENSION_VERSIONER = ExtensionVersioner()
    +
    +
    +def _is_binary_build():
    +    return not BUILT_FROM_SOURCE_VERSION_PATTERN.match(torch.version.__version__)
    +
    +
    +def _accepted_compilers_for_platform():
    +    # gnu-c++ and gnu-cc are the conda gcc compilers
    +    return ['clang++', 'clang'] if sys.platform.startswith('darwin') else ['g++', 'gcc', 'gnu-c++', 'gnu-cc']
    +
    +
    +def get_default_build_root():
    +    '''
+    Returns the path to the root folder under which extensions will be built.
    +
    +    For each extension module built, there will be one folder underneath the
    +    folder returned by this function. For example, if ``p`` is the path
    +    returned by this function and ``ext`` the name of an extension, the build
    +    folder for the extension will be ``p/ext``.
    +    '''
    +    # tempfile.gettempdir() will be /tmp on UNIX and \TEMP on Windows.
    +    return os.path.realpath(os.path.join(tempfile.gettempdir(), 'torch_extensions'))
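For illustration, a minimal sketch of where a JIT-built extension would land by default (the extension name 'my_ext' is hypothetical):

    import os
    from torch.utils.cpp_extension import get_default_build_root

    # On Linux this typically prints /tmp/torch_extensions/my_ext; setting the
    # TORCH_EXTENSIONS_DIR environment variable replaces the root (see load()).
    print(os.path.join(get_default_build_root(), 'my_ext'))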
    +
    +
    +def check_compiler_ok_for_platform(compiler):
    +    '''
    +    Verifies that the compiler is the expected one for the current platform.
    +
    +    Arguments:
    +        compiler (str): The compiler executable to check.
    +
    +    Returns:
    +        True if the compiler is gcc/g++ on Linux or clang/clang++ on macOS,
    +        and always True for Windows.
    +    '''
    +    if IS_WINDOWS:
    +        return True
    +    which = subprocess.check_output(['which', compiler], stderr=subprocess.STDOUT)
    +    # Use os.path.realpath to resolve any symlinks, in particular from 'c++' to e.g. 'g++'.
    +    compiler_path = os.path.realpath(which.decode().strip())
    +    return any(name in compiler_path for name in _accepted_compilers_for_platform())
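A minimal usage sketch, assuming the build will use the compiler named by the CXX environment variable (falling back to c++ as this module does):

    import os
    from torch.utils.cpp_extension import check_compiler_ok_for_platform

    compiler = os.environ.get('CXX', 'c++')
    if not check_compiler_ok_for_platform(compiler):
        # On Linux this means the resolved binary is not gcc/g++;
        # on macOS, not clang/clang++. On Windows the check always passes.
        print('Unexpected compiler for this platform: {}'.format(compiler))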
    +
    +
    +
+def check_compiler_abi_compatibility(compiler):
+    '''
+    Verifies that the given compiler is ABI-compatible with PyTorch.
+
+    Arguments:
+        compiler (str): The compiler executable name to check (e.g. ``g++``).
+            Must be executable in a shell process.
+
+    Returns:
+        False if the compiler is (likely) ABI-incompatible with PyTorch,
+        else True.
+    '''
+    if not _is_binary_build():
+        return True
+    if os.environ.get('TORCH_DONT_CHECK_COMPILER_ABI') in ['ON', '1', 'YES', 'TRUE', 'Y']:
+        return True
+
+    # First check if the compiler is one of the expected ones for the particular platform.
+    if not check_compiler_ok_for_platform(compiler):
+        warnings.warn(WRONG_COMPILER_WARNING.format(
+            user_compiler=compiler,
+            pytorch_compiler=_accepted_compilers_for_platform()[0],
+            platform=sys.platform))
+        return False
+
+    if sys.platform.startswith('darwin'):
+        # There is no particular minimum version we need for clang, so we're good here.
+        return True
+    try:
+        if sys.platform.startswith('linux'):
+            minimum_required_version = MINIMUM_GCC_VERSION
+            version = subprocess.check_output([compiler, '-dumpfullversion', '-dumpversion'])
+            version = version.decode().strip().split('.')
+        else:
+            minimum_required_version = MINIMUM_MSVC_VERSION
+            compiler_info = subprocess.check_output(compiler, stderr=subprocess.STDOUT)
+            match = re.search(r'(\d+)\.(\d+)\.(\d+)', compiler_info.decode().strip())
+            version = (0, 0, 0) if match is None else match.groups()
+    except Exception:
+        _, error, _ = sys.exc_info()
+        warnings.warn('Error checking compiler version for {}: {}'.format(compiler, error))
+        return False
+
+    if tuple(map(int, version)) >= minimum_required_version:
+        return True
+
+    compiler = '{} {}'.format(compiler, ".".join(version))
+    warnings.warn(ABI_INCOMPATIBILITY_WARNING.format(compiler))
+
+    return False
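The same check can be run by hand before a build; a sketch, assuming g++ is on PATH:

    from torch.utils.cpp_extension import check_compiler_abi_compatibility

    # Returns False (and emits one of the warnings above) if g++ is missing,
    # too old, or not the platform's expected toolchain; True otherwise.
    if not check_compiler_abi_compatibility('g++'):
        print('g++ may produce binaries that are ABI-incompatible with PyTorch')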
+
+
+# See below for why we inherit BuildExtension from object.
+# https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj-when
+
+
    [docs]class BuildExtension(build_ext, object): + ''' + A custom :mod:`setuptools` build extension . + + This :class:`setuptools.build_ext` subclass takes care of passing the + minimum required compiler flags (e.g. ``-std=c++11``) as well as mixed + C++/CUDA compilation (and support for CUDA files in general). + + When using :class:`BuildExtension`, it is allowed to supply a dictionary + for ``extra_compile_args`` (rather than the usual list) that maps from + languages (``cxx`` or ``cuda``) to a list of additional compiler flags to + supply to the compiler. This makes it possible to supply different flags to + the C++ and CUDA compiler during mixed compilation. + ''' + + @classmethod + def with_options(cls, **options): + ''' + Returns an alternative constructor that extends any original keyword + arguments to the original constructor with the given options. + ''' + def init_with_options(*args, **kwargs): + kwargs = kwargs.copy() + kwargs.update(options) + return cls(*args, **kwargs) + return init_with_options + + def __init__(self, *args, **kwargs): + super(BuildExtension, self).__init__(*args, **kwargs) + self.no_python_abi_suffix = kwargs.get("no_python_abi_suffix", False) + + def build_extensions(self): + self._check_abi() + for extension in self.extensions: + self._add_compile_flag(extension, '-DTORCH_API_INCLUDE_EXTENSION_H') + if BUILD_NAMEDTENSOR: + self._add_compile_flag(extension, '-DBUILD_NAMEDTENSOR') + self._define_torch_extension_name(extension) + self._add_gnu_cpp_abi_flag(extension) + + # Register .cu and .cuh as valid source extensions. + self.compiler.src_extensions += ['.cu', '.cuh'] + # Save the original _compile method for later. + if self.compiler.compiler_type == 'msvc': + self.compiler._cpp_extensions += ['.cu', '.cuh'] + original_compile = self.compiler.compile + original_spawn = self.compiler.spawn + else: + original_compile = self.compiler._compile + + def unix_wrap_compile(obj, src, ext, cc_args, extra_postargs, pp_opts): + # Copy before we make any modifications. + cflags = copy.deepcopy(extra_postargs) + try: + original_compiler = self.compiler.compiler_so + if _is_cuda_file(src): + nvcc = _join_cuda_home('bin', 'nvcc') + if not isinstance(nvcc, list): + nvcc = [nvcc] + self.compiler.set_executable('compiler_so', nvcc) + if isinstance(cflags, dict): + cflags = cflags['nvcc'] + cflags = COMMON_NVCC_FLAGS + ['--compiler-options', "'-fPIC'"] + cflags + elif isinstance(cflags, dict): + cflags = cflags['cxx'] + # NVCC does not allow multiple -std to be passed, so we avoid + # overriding the option if the user explicitly passed it. + if not any(flag.startswith('-std=') for flag in cflags): + cflags.append('-std=c++11') + + original_compile(obj, src, ext, cc_args, cflags, pp_opts) + finally: + # Put the original compiler back in place. 
+ self.compiler.set_executable('compiler_so', original_compiler) + + def win_wrap_compile(sources, + output_dir=None, + macros=None, + include_dirs=None, + debug=0, + extra_preargs=None, + extra_postargs=None, + depends=None): + + self.cflags = copy.deepcopy(extra_postargs) + extra_postargs = None + + def spawn(cmd): + # Using regex to match src, obj and include files + src_regex = re.compile('/T(p|c)(.*)') + src_list = [ + m.group(2) for m in (src_regex.match(elem) for elem in cmd) + if m + ] + + obj_regex = re.compile('/Fo(.*)') + obj_list = [ + m.group(1) for m in (obj_regex.match(elem) for elem in cmd) + if m + ] + + include_regex = re.compile(r'((\-|\/)I.*)') + include_list = [ + m.group(1) + for m in (include_regex.match(elem) for elem in cmd) if m + ] + + if len(src_list) >= 1 and len(obj_list) >= 1: + src = src_list[0] + obj = obj_list[0] + if _is_cuda_file(src): + nvcc = _join_cuda_home('bin', 'nvcc') + if isinstance(self.cflags, dict): + cflags = self.cflags['nvcc'] + elif isinstance(self.cflags, list): + cflags = self.cflags + else: + cflags = [] + cflags = COMMON_NVCC_FLAGS + cflags + for flag in COMMON_MSVC_FLAGS: + cflags = ['-Xcompiler', flag] + cflags + cmd = [nvcc, '-c', src, '-o', obj] + include_list + cflags + elif isinstance(self.cflags, dict): + cflags = COMMON_MSVC_FLAGS + self.cflags['cxx'] + cmd += cflags + elif isinstance(self.cflags, list): + cflags = COMMON_MSVC_FLAGS + self.cflags + cmd += cflags + + return original_spawn(cmd) + + try: + self.compiler.spawn = spawn + return original_compile(sources, output_dir, macros, + include_dirs, debug, extra_preargs, + extra_postargs, depends) + finally: + self.compiler.spawn = original_spawn + + # Monkey-patch the _compile method. + if self.compiler.compiler_type == 'msvc': + self.compiler.compile = win_wrap_compile + else: + self.compiler._compile = unix_wrap_compile + + build_ext.build_extensions(self) + + def get_ext_filename(self, ext_name): + # Get the original shared library name. For Python 3, this name will be + # suffixed with "<SOABI>.so", where <SOABI> will be something like + # cpython-37m-x86_64-linux-gnu. On Python 2, there is no such ABI name. + # The final extension, .so, would be .lib/.dll on Windows of course. + ext_filename = super(BuildExtension, self).get_ext_filename(ext_name) + # If `no_python_abi_suffix` is `True`, we omit the Python 3 ABI + # component. This makes building shared libraries with setuptools that + # aren't Python modules nicer. + if self.no_python_abi_suffix and sys.version_info >= (3, 0): + # The parts will be e.g. ["my_extension", "cpython-37m-x86_64-linux-gnu", "so"]. + ext_filename_parts = ext_filename.split('.') + # Omit the second to last element. + without_abi = ext_filename_parts[:-2] + ext_filename_parts[-1:] + ext_filename = '.'.join(without_abi) + return ext_filename + + def _check_abi(self): + # On some platforms, like Windows, compiler_cxx is not available. 
+ if hasattr(self.compiler, 'compiler_cxx'): + compiler = self.compiler.compiler_cxx[0] + elif IS_WINDOWS: + compiler = os.environ.get('CXX', 'cl') + else: + compiler = os.environ.get('CXX', 'c++') + check_compiler_abi_compatibility(compiler) + + def _add_compile_flag(self, extension, flag): + extension.extra_compile_args = copy.deepcopy(extension.extra_compile_args) + if isinstance(extension.extra_compile_args, dict): + for args in extension.extra_compile_args.values(): + args.append(flag) + else: + extension.extra_compile_args.append(flag) + + def _define_torch_extension_name(self, extension): + # pybind11 doesn't support dots in the names + # so in order to support extensions in the packages + # like torch._C, we take the last part of the string + # as the library name + names = extension.name.split('.') + name = names[-1] + define = '-DTORCH_EXTENSION_NAME={}'.format(name) + self._add_compile_flag(extension, define) + + def _add_gnu_cpp_abi_flag(self, extension): + # use the same CXX ABI as what PyTorch was compiled with + self._add_compile_flag(extension, '-D_GLIBCXX_USE_CXX11_ABI=' + str(int(torch._C._GLIBCXX_USE_CXX11_ABI)))
+
+
+
+def CppExtension(name, sources, *args, **kwargs):
+    '''
+    Creates a :class:`setuptools.Extension` for C++.
+
+    Convenience method that creates a :class:`setuptools.Extension` with the
+    bare minimum (but often sufficient) arguments to build a C++ extension.
+
+    All arguments are forwarded to the :class:`setuptools.Extension`
+    constructor.
+
+    Example:
+        >>> from setuptools import setup
+        >>> from torch.utils.cpp_extension import BuildExtension, CppExtension
+        >>> setup(
+                name='extension',
+                ext_modules=[
+                    CppExtension(
+                        name='extension',
+                        sources=['extension.cpp'],
+                        extra_compile_args=['-g']),
+                ],
+                cmdclass={
+                    'build_ext': BuildExtension
+                })
+    '''
+    include_dirs = kwargs.get('include_dirs', [])
+    include_dirs += include_paths()
+    kwargs['include_dirs'] = include_dirs
+
+    if IS_WINDOWS:
+        library_dirs = kwargs.get('library_dirs', [])
+        library_dirs += library_paths()
+        kwargs['library_dirs'] = library_dirs
+
+        libraries = kwargs.get('libraries', [])
+        libraries.append('c10')
+        libraries.append('torch')
+        libraries.append('torch_python')
+        libraries.append('_C')
+        kwargs['libraries'] = libraries
+
+    kwargs['language'] = 'c++'
+    return setuptools.Extension(name, sources, *args, **kwargs)
+
+
+
+def CUDAExtension(name, sources, *args, **kwargs):
+    '''
+    Creates a :class:`setuptools.Extension` for CUDA/C++.
+
+    Convenience method that creates a :class:`setuptools.Extension` with the
+    bare minimum (but often sufficient) arguments to build a CUDA/C++
+    extension. This includes the CUDA include path, library path and runtime
+    library.
+
+    All arguments are forwarded to the :class:`setuptools.Extension`
+    constructor.
+
+    Example:
+        >>> from setuptools import setup
+        >>> from torch.utils.cpp_extension import BuildExtension, CUDAExtension
+        >>> setup(
+                name='cuda_extension',
+                ext_modules=[
+                    CUDAExtension(
+                        name='cuda_extension',
+                        sources=['extension.cpp', 'extension_kernel.cu'],
+                        extra_compile_args={'cxx': ['-g'],
+                                            'nvcc': ['-O2']})
+                ],
+                cmdclass={
+                    'build_ext': BuildExtension
+                })
+    '''
+    library_dirs = kwargs.get('library_dirs', [])
+    library_dirs += library_paths(cuda=True)
+    kwargs['library_dirs'] = library_dirs
+
+    libraries = kwargs.get('libraries', [])
+    libraries.append('cudart')
+    if IS_WINDOWS:
+        libraries.append('c10')
+        libraries.append('c10_cuda')
+        libraries.append('torch')
+        libraries.append('torch_python')
+        libraries.append('_C')
+    kwargs['libraries'] = libraries
+
+    include_dirs = kwargs.get('include_dirs', [])
+    include_dirs += include_paths(cuda=True)
+    kwargs['include_dirs'] = include_dirs
+
+    kwargs['language'] = 'c++'
+
+    return setuptools.Extension(name, sources, *args, **kwargs)
+
+
+
+def include_paths(cuda=False):
+    '''
+    Get the include paths required to build a C++ or CUDA extension.
+
+    Args:
+        cuda: If `True`, includes CUDA-specific include paths.
+
+    Returns:
+        A list of include path strings.
+    '''
+    here = os.path.abspath(__file__)
+    torch_path = os.path.dirname(os.path.dirname(here))
+    lib_include = os.path.join(torch_path, 'include')
+    paths = [
+        lib_include,
+        # Remove this once torch/torch.h is officially no longer supported for C++ extensions.
+        os.path.join(lib_include, 'torch', 'csrc', 'api', 'include'),
+        # Some internal (old) Torch headers don't properly prefix their includes,
+        # so we need to pass -Itorch/lib/include/TH as well.
+        os.path.join(lib_include, 'TH'),
+        os.path.join(lib_include, 'THC')
+    ]
+    if cuda:
+        cuda_home_include = _join_cuda_home('include')
+        # If we have the Debian/Ubuntu packages for CUDA, we get /usr as CUDA home,
+        # but gcc doesn't like having /usr/include passed explicitly.
+        if cuda_home_include != '/usr/include':
+            paths.append(cuda_home_include)
+        if CUDNN_HOME is not None:
+            paths.append(os.path.join(CUDNN_HOME, 'include'))
+    return paths
+
+
+def library_paths(cuda=False):
+    '''
+    Get the library paths required to build a C++ or CUDA extension.
+
+    Args:
+        cuda: If `True`, includes CUDA-specific library paths.
+
+    Returns:
+        A list of library path strings.
+    '''
+    paths = []
+
+    if IS_WINDOWS:
+        here = os.path.abspath(__file__)
+        torch_path = os.path.dirname(os.path.dirname(here))
+        lib_path = os.path.join(torch_path, 'lib')
+
+        paths.append(lib_path)
+
+    if cuda:
+        if IS_WINDOWS:
+            lib_dir = 'lib/x64'
+        else:
+            lib_dir = 'lib64'
+            if (not os.path.exists(_join_cuda_home(lib_dir)) and
+                    os.path.exists(_join_cuda_home('lib'))):
+                # 64-bit CUDA may be installed in 'lib' (see e.g. gh-16955)
+                # Note that it's also possible both don't exist (see
+                # _find_cuda_home) - in that case we stay with 'lib64'.
+                lib_dir = 'lib'
+
+        paths.append(_join_cuda_home(lib_dir))
+        if CUDNN_HOME is not None:
+            paths.append(os.path.join(CUDNN_HOME, lib_dir))
+    return paths
+
+
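These two helpers can also feed a build system other than setuptools; a rough sketch (the file names demo.cpp/demo.so are hypothetical):

    from torch.utils.cpp_extension import include_paths, library_paths

    cmd = ['c++', '-shared', '-fPIC', '-std=c++11', 'demo.cpp', '-o', 'demo.so']
    cmd += ['-I{}'.format(p) for p in include_paths()]   # torch header locations
    cmd += ['-L{}'.format(p) for p in library_paths()]   # platform-specific lib dirs (may be empty)
    print(' '.join(cmd))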
+def load(name,
+         sources,
+         extra_cflags=None,
+         extra_cuda_cflags=None,
+         extra_ldflags=None,
+         extra_include_paths=None,
+         build_directory=None,
+         verbose=False,
+         with_cuda=None,
+         is_python_module=True):
+    '''
+    Loads a PyTorch C++ extension just-in-time (JIT).
+
+    To load an extension, a Ninja build file is emitted, which is used to
+    compile the given sources into a dynamic library. This library is
+    subsequently loaded into the current Python process as a module and
+    returned from this function, ready for use.
+
+    By default, the directory to which the build file is emitted and the
+    resulting library compiled to is ``<tmp>/torch_extensions/<name>``, where
+    ``<tmp>`` is the temporary folder on the current platform and ``<name>``
+    the name of the extension. This location can be overridden in two ways.
+    First, if the ``TORCH_EXTENSIONS_DIR`` environment variable is set, it
+    replaces ``<tmp>/torch_extensions`` and all extensions will be compiled
+    into subfolders of this directory. Second, if the ``build_directory``
+    argument to this function is supplied, it overrides the entire path, i.e.
+    the library will be compiled into that folder directly.
+
+    To compile the sources, the default system compiler (``c++``) is used,
+    which can be overridden by setting the ``CXX`` environment variable. To pass
+    additional arguments to the compilation process, ``extra_cflags`` or
+    ``extra_ldflags`` can be provided. For example, to compile your extension
+    with optimizations, pass ``extra_cflags=['-O3']``. You can also use
+    ``extra_cflags`` to pass further include directories.
+
+    CUDA support with mixed compilation is provided. Simply pass CUDA source
+    files (``.cu`` or ``.cuh``) along with other sources. Such files will be
+    detected and compiled with nvcc rather than the C++ compiler. This includes
+    passing the CUDA lib64 directory as a library directory, and linking
+    ``cudart``. You can pass additional flags to nvcc via
+    ``extra_cuda_cflags``, just like with ``extra_cflags`` for C++. Various
+    heuristics for finding the CUDA install directory are used, which usually
+    work fine. If not, setting the ``CUDA_HOME`` environment variable is the
+    safest option.
+
+    Args:
+        name: The name of the extension to build. This MUST be the same as the
+            name of the pybind11 module!
+        sources: A list of relative or absolute paths to C++ source files.
+        extra_cflags: optional list of compiler flags to forward to the build.
+        extra_cuda_cflags: optional list of compiler flags to forward to nvcc
+            when building CUDA sources.
+        extra_ldflags: optional list of linker flags to forward to the build.
+        extra_include_paths: optional list of include directories to forward
+            to the build.
+        build_directory: optional path to use as build workspace.
+        verbose: If ``True``, turns on verbose logging of load steps.
+        with_cuda: Determines whether CUDA headers and libraries are added to
+            the build. If set to ``None`` (default), this value is
+            automatically determined based on the existence of ``.cu`` or
+            ``.cuh`` in ``sources``. Set it to ``True`` to force CUDA headers
+            and libraries to be included.
+        is_python_module: If ``True`` (default), imports the produced shared
+            library as a Python module. If ``False``, loads it into the process
+            as a plain dynamic library.
+
+    Returns:
+        If ``is_python_module`` is ``True``, returns the loaded PyTorch
+        extension as a Python module. If ``is_python_module`` is ``False``
+        returns nothing (the shared library is loaded into the process as a side
+        effect).
+
+    Example:
+        >>> from torch.utils.cpp_extension import load
+        >>> module = load(
+                name='extension',
+                sources=['extension.cpp', 'extension_kernel.cu'],
+                extra_cflags=['-O2'],
+                verbose=True)
+    '''
+    return _jit_compile(
+        name,
+        [sources] if isinstance(sources, str) else sources,
+        extra_cflags,
+        extra_cuda_cflags,
+        extra_ldflags,
+        extra_include_paths,
+        build_directory or _get_build_directory(name, verbose),
+        verbose,
+        with_cuda,
+        is_python_module)
+
+
+
    [docs]def load_inline(name, + cpp_sources, + cuda_sources=None, + functions=None, + extra_cflags=None, + extra_cuda_cflags=None, + extra_ldflags=None, + extra_include_paths=None, + build_directory=None, + verbose=False, + with_cuda=None, + is_python_module=True): + ''' + Loads a PyTorch C++ extension just-in-time (JIT) from string sources. + + This function behaves exactly like :func:`load`, but takes its sources as + strings rather than filenames. These strings are stored to files in the + build directory, after which the behavior of :func:`load_inline` is + identical to :func:`load`. + + See `the + tests <https://github.com/pytorch/pytorch/blob/master/test/test_cpp_extensions.py>`_ + for good examples of using this function. + + Sources may omit two required parts of a typical non-inline C++ extension: + the necessary header includes, as well as the (pybind11) binding code. More + precisely, strings passed to ``cpp_sources`` are first concatenated into a + single ``.cpp`` file. This file is then prepended with ``#include + <torch/extension.h>``. + + Furthermore, if the ``functions`` argument is supplied, bindings will be + automatically generated for each function specified. ``functions`` can + either be a list of function names, or a dictionary mapping from function + names to docstrings. If a list is given, the name of each function is used + as its docstring. + + The sources in ``cuda_sources`` are concatenated into a separate ``.cu`` + file and prepended with ``torch/types.h``, ``cuda.h`` and + ``cuda_runtime.h`` includes. The ``.cpp`` and ``.cu`` files are compiled + separately, but ultimately linked into a single library. Note that no + bindings are generated for functions in ``cuda_sources`` per se. To bind + to a CUDA kernel, you must create a C++ function that calls it, and either + declare or define this C++ function in one of the ``cpp_sources`` (and + include its name in ``functions``). + + See :func:`load` for a description of arguments omitted below. + + Args: + cpp_sources: A string, or list of strings, containing C++ source code. + cuda_sources: A string, or list of strings, containing CUDA source code. + functions: A list of function names for which to generate function + bindings. If a dictionary is given, it should map function names to + docstrings (which are otherwise just the function names). + with_cuda: Determines whether CUDA headers and libraries are added to + the build. If set to ``None`` (default), this value is + automatically determined based on whether ``cuda_sources`` is + provided. Set it to `True`` to force CUDA headers + and libraries to be included. + + Example: + >>> from torch.utils.cpp_extension import load_inline + >>> source = \'\'\' + at::Tensor sin_add(at::Tensor x, at::Tensor y) { + return x.sin() + y.sin(); + } + \'\'\' + >>> module = load_inline(name='inline_extension', + cpp_sources=[source], + functions=['sin_add']) + ''' + build_directory = build_directory or _get_build_directory(name, verbose) + + if isinstance(cpp_sources, str): + cpp_sources = [cpp_sources] + cuda_sources = cuda_sources or [] + if isinstance(cuda_sources, str): + cuda_sources = [cuda_sources] + + cpp_sources.insert(0, '#include <torch/extension.h>') + + # If `functions` is supplied, we create the pybind11 bindings for the user. + # Here, `functions` is (or becomes, after some processing) a map from + # function names to function docstrings. 
+ if functions is not None: + cpp_sources.append('PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {') + if isinstance(functions, str): + functions = [functions] + if isinstance(functions, list): + # Make the function docstring the same as the function name. + functions = dict((f, f) for f in functions) + elif not isinstance(functions, dict): + raise ValueError( + "Expected 'functions' to be a list or dict, but was {}".format( + type(functions))) + for function_name, docstring in functions.items(): + cpp_sources.append('m.def("{0}", &{0}, "{1}");'.format( + function_name, docstring)) + cpp_sources.append('}') + + cpp_source_path = os.path.join(build_directory, 'main.cpp') + with open(cpp_source_path, 'w') as cpp_source_file: + cpp_source_file.write('\n'.join(cpp_sources)) + + sources = [cpp_source_path] + + if cuda_sources: + cuda_sources.insert(0, '#include <torch/types.h>') + cuda_sources.insert(1, '#include <cuda.h>') + cuda_sources.insert(2, '#include <cuda_runtime.h>') + + cuda_source_path = os.path.join(build_directory, 'cuda.cu') + with open(cuda_source_path, 'w') as cuda_source_file: + cuda_source_file.write('\n'.join(cuda_sources)) + + sources.append(cuda_source_path) + + return _jit_compile( + name, + sources, + extra_cflags, + extra_cuda_cflags, + extra_ldflags, + extra_include_paths, + build_directory, + verbose, + with_cuda, + is_python_module)
    + + +def _jit_compile(name, + sources, + extra_cflags, + extra_cuda_cflags, + extra_ldflags, + extra_include_paths, + build_directory, + verbose, + with_cuda, + is_python_module): + old_version = JIT_EXTENSION_VERSIONER.get_version(name) + version = JIT_EXTENSION_VERSIONER.bump_version_if_changed( + name, + sources, + build_arguments=[extra_cflags, extra_cuda_cflags, extra_ldflags, extra_include_paths], + build_directory=build_directory, + with_cuda=with_cuda + ) + if version > 0: + if version != old_version and verbose: + print('The input conditions for extension module {} have changed. '.format(name) + + 'Bumping to version {0} and re-building as {1}_v{0}...'.format(version, name)) + name = '{}_v{}'.format(name, version) + + if version != old_version: + baton = FileBaton(os.path.join(build_directory, 'lock')) + if baton.try_acquire(): + try: + _write_ninja_file_and_build( + name=name, + sources=sources, + extra_cflags=extra_cflags or [], + extra_cuda_cflags=extra_cuda_cflags or [], + extra_ldflags=extra_ldflags or [], + extra_include_paths=extra_include_paths or [], + build_directory=build_directory, + verbose=verbose, + with_cuda=with_cuda) + finally: + baton.release() + else: + baton.wait() + elif verbose: + print('No modifications detected for re-loaded extension ' + 'module {}, skipping build step...'.format(name)) + + if verbose: + print('Loading extension module {}...'.format(name)) + return _import_module_from_library(name, build_directory, is_python_module) + + +def _write_ninja_file_and_build(name, + sources, + extra_cflags, + extra_cuda_cflags, + extra_ldflags, + extra_include_paths, + build_directory, + verbose, + with_cuda): + verify_ninja_availability() + if IS_WINDOWS: + compiler = os.environ.get('CXX', 'cl') + else: + compiler = os.environ.get('CXX', 'c++') + check_compiler_abi_compatibility(compiler) + if with_cuda is None: + with_cuda = any(map(_is_cuda_file, sources)) + extra_ldflags = _prepare_ldflags( + extra_ldflags or [], + with_cuda, + verbose) + build_file_path = os.path.join(build_directory, 'build.ninja') + if verbose: + print( + 'Emitting ninja build file {}...'.format(build_file_path)) + # NOTE: Emitting a new ninja build file does not cause re-compilation if + # the sources did not change, so it's ok to re-emit (and it's fast). + _write_ninja_file( + path=build_file_path, + name=name, + sources=sources, + extra_cflags=extra_cflags or [], + extra_cuda_cflags=extra_cuda_cflags or [], + extra_ldflags=extra_ldflags or [], + extra_include_paths=extra_include_paths or [], + with_cuda=with_cuda) + + if verbose: + print('Building extension module {}...'.format(name)) + _build_extension_module(name, build_directory, verbose) + + +
+def verify_ninja_availability():
+    '''
+    Returns ``True`` if the `ninja <https://ninja-build.org/>`_ build system is
+    available on the system, and raises ``RuntimeError`` otherwise.
+    '''
+    with open(os.devnull, 'wb') as devnull:
+        try:
+            subprocess.check_call('ninja --version'.split(), stdout=devnull)
+        except OSError:
+            raise RuntimeError("Ninja is required to load C++ extensions")
+        else:
+            return True
    + + +def _prepare_ldflags(extra_ldflags, with_cuda, verbose): + if IS_WINDOWS: + python_path = os.path.dirname(sys.executable) + python_lib_path = os.path.join(python_path, 'libs') + + here = os.path.abspath(__file__) + torch_path = os.path.dirname(os.path.dirname(here)) + lib_path = os.path.join(torch_path, 'lib') + + extra_ldflags.append('c10.lib') + extra_ldflags.append('torch.lib') + extra_ldflags.append('torch_python.lib') + extra_ldflags.append('_C.lib') + extra_ldflags.append('/LIBPATH:{}'.format(python_lib_path)) + extra_ldflags.append('/LIBPATH:{}'.format(lib_path)) + + if with_cuda: + if verbose: + print('Detected CUDA files, patching ldflags') + if IS_WINDOWS: + extra_ldflags.append('/LIBPATH:{}'.format( + _join_cuda_home('lib/x64'))) + extra_ldflags.append('cudart.lib') + if CUDNN_HOME is not None: + extra_ldflags.append(os.path.join(CUDNN_HOME, 'lib/x64')) + else: + extra_ldflags.append('-L{}'.format(_join_cuda_home('lib64'))) + extra_ldflags.append('-lcudart') + if CUDNN_HOME is not None: + extra_ldflags.append('-L{}'.format(os.path.join(CUDNN_HOME, 'lib64'))) + + return extra_ldflags + + +def _get_build_directory(name, verbose): + root_extensions_directory = os.environ.get('TORCH_EXTENSIONS_DIR') + if root_extensions_directory is None: + root_extensions_directory = get_default_build_root() + + if verbose: + print('Using {} as PyTorch extensions root...'.format( + root_extensions_directory)) + + build_directory = os.path.join(root_extensions_directory, name) + if not os.path.exists(build_directory): + if verbose: + print('Creating extension directory {}...'.format(build_directory)) + # This is like mkdir -p, i.e. will also create parent directories. + os.makedirs(build_directory) + + return build_directory + + +def _build_extension_module(name, build_directory, verbose): + try: + sys.stdout.flush() + sys.stderr.flush() + if sys.version_info >= (3, 5): + subprocess.run( + ['ninja', '-v'], + stdout=None if verbose else subprocess.PIPE, + stderr=subprocess.STDOUT, + cwd=build_directory, + check=True) + else: + subprocess.check_output( + ['ninja', '-v'], + stderr=subprocess.STDOUT, + cwd=build_directory) + except subprocess.CalledProcessError: + # Python 2 and 3 compatible way of getting the error object. + _, error, _ = sys.exc_info() + # error.output contains the stdout and stderr of the build attempt. + message = "Error building extension '{}'".format(name) + if hasattr(error, 'output') and error.output: + message += ": {}".format(error.output.decode()) + raise RuntimeError(message) + + +def _import_module_from_library(module_name, path, is_python_module): + # https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path + file, path, description = imp.find_module(module_name, [path]) + # Close the .so file after load. + with file: + if is_python_module: + return imp.load_module(module_name, file, path, description) + else: + torch.ops.load_library(path) + + +def _write_ninja_file(path, + name, + sources, + extra_cflags, + extra_cuda_cflags, + extra_ldflags, + extra_include_paths, + with_cuda): + extra_cflags = [flag.strip() for flag in extra_cflags] + extra_cuda_cflags = [flag.strip() for flag in extra_cuda_cflags] + extra_ldflags = [flag.strip() for flag in extra_ldflags] + extra_include_paths = [flag.strip() for flag in extra_include_paths] + + if IS_WINDOWS: + compiler = os.environ.get('CXX', 'cl') + else: + compiler = os.environ.get('CXX', 'c++') + + # Version 1.3 is required for the `deps` directive. 
+ config = ['ninja_required_version = 1.3'] + config.append('cxx = {}'.format(compiler)) + if with_cuda: + config.append('nvcc = {}'.format(_join_cuda_home('bin', 'nvcc'))) + + # Turn into absolute paths so we can emit them into the ninja build + # file wherever it is. + sources = [os.path.abspath(file) for file in sources] + user_includes = [os.path.abspath(file) for file in extra_include_paths] + + # include_paths() gives us the location of torch/extension.h + system_includes = include_paths(with_cuda) + # sysconfig.get_paths()['include'] gives us the location of Python.h + system_includes.append(sysconfig.get_paths()['include']) + + # Windows does not understand `-isystem`. + if IS_WINDOWS: + user_includes += system_includes + system_includes.clear() + + common_cflags = ['-DTORCH_EXTENSION_NAME={}'.format(name)] + common_cflags.append('-DTORCH_API_INCLUDE_EXTENSION_H') + if BUILD_NAMEDTENSOR: + common_cflags.append('-DBUILD_NAMEDTENSOR') + common_cflags += ['-I{}'.format(include) for include in user_includes] + common_cflags += ['-isystem {}'.format(include) for include in system_includes] + + common_cflags += ['-D_GLIBCXX_USE_CXX11_ABI=' + str(int(torch._C._GLIBCXX_USE_CXX11_ABI))] + + if IS_WINDOWS: + cflags = common_cflags + COMMON_MSVC_FLAGS + extra_cflags + from distutils.spawn import _nt_quote_args + cflags = _nt_quote_args(cflags) + else: + cflags = common_cflags + ['-fPIC', '-std=c++11'] + extra_cflags + flags = ['cflags = {}'.format(' '.join(cflags))] + + if with_cuda: + cuda_flags = common_cflags + COMMON_NVCC_FLAGS + if IS_WINDOWS: + for flag in COMMON_MSVC_FLAGS: + cuda_flags = ['-Xcompiler', flag] + cuda_flags + cuda_flags = _nt_quote_args(cuda_flags) + cuda_flags += _nt_quote_args(extra_cuda_cflags) + else: + cuda_flags += ['--compiler-options', "'-fPIC'"] + cuda_flags += extra_cuda_cflags + if not any(flag.startswith('-std=') for flag in cuda_flags): + cuda_flags.append('-std=c++11') + + flags.append('cuda_flags = {}'.format(' '.join(cuda_flags))) + + if IS_WINDOWS: + ldflags = ['/DLL'] + extra_ldflags + else: + ldflags = ['-shared'] + extra_ldflags + # The darwin linker needs explicit consent to ignore unresolved symbols. + if sys.platform.startswith('darwin'): + ldflags.append('-undefined dynamic_lookup') + elif IS_WINDOWS: + ldflags = _nt_quote_args(ldflags) + flags.append('ldflags = {}'.format(' '.join(ldflags))) + + # See https://ninja-build.org/build.ninja.html for reference. + compile_rule = ['rule compile'] + if IS_WINDOWS: + compile_rule.append( + ' command = cl /showIncludes $cflags -c $in /Fo$out') + compile_rule.append(' deps = msvc') + else: + compile_rule.append( + ' command = $cxx -MMD -MF $out.d $cflags -c $in -o $out') + compile_rule.append(' depfile = $out.d') + compile_rule.append(' deps = gcc') + + if with_cuda: + cuda_compile_rule = ['rule cuda_compile'] + cuda_compile_rule.append( + ' command = $nvcc $cuda_flags -c $in -o $out') + + link_rule = ['rule link'] + if IS_WINDOWS: + cl_paths = subprocess.check_output(['where', + 'cl']).decode().split('\r\n') + if len(cl_paths) >= 1: + cl_path = os.path.dirname(cl_paths[0]).replace(':', '$:') + else: + raise RuntimeError("MSVC is required to load C++ extensions") + link_rule.append( + ' command = "{}/link.exe" $in /nologo $ldflags /out:$out'.format( + cl_path)) + else: + link_rule.append(' command = $cxx $in $ldflags -o $out') + + # Emit one build rule per source to enable incremental build. 
+    object_files = []
+    build = []
+    for source_file in sources:
+        # '/path/to/file.cpp' -> 'file'
+        file_name = os.path.splitext(os.path.basename(source_file))[0]
+        if _is_cuda_file(source_file) and with_cuda:
+            rule = 'cuda_compile'
+            # Use a different object filename in case a C++ and CUDA file have
+            # the same filename but different extension (.cpp vs. .cu).
+            target = '{}.cuda.o'.format(file_name)
+        else:
+            rule = 'compile'
+            target = '{}.o'.format(file_name)
+        object_files.append(target)
+        if IS_WINDOWS:
+            source_file = source_file.replace(':', '$:')
+        source_file = source_file.replace(" ", "$ ")
+        build.append('build {}: {} {}'.format(target, rule, source_file))
+
+    ext = 'pyd' if IS_WINDOWS else 'so'
+    library_target = '{}.{}'.format(name, ext)
+
+    link = ['build {}: link {}'.format(library_target, ' '.join(object_files))]
+
+    default = ['default {}'.format(library_target)]
+
+    # 'Blocks' should be separated by newlines, for visual benefit.
+    blocks = [config, flags, compile_rule]
+    if with_cuda:
+        blocks.append(cuda_compile_rule)
+    blocks += [link_rule, build, link, default]
+    with open(path, 'w') as build_file:
+        for block in blocks:
+            lines = '\n'.join(block)
+            build_file.write('{}\n\n'.format(lines))
+
+
+def _join_cuda_home(*paths):
+    '''
+    Joins paths with CUDA_HOME, or raises an error if CUDA_HOME is not set.
+
+    This is basically a lazy way of raising an error for missing $CUDA_HOME
+    only once we need to get any CUDA-specific path.
+    '''
+    if CUDA_HOME is None:
+        raise EnvironmentError('CUDA_HOME environment variable is not set. '
+                               'Please set it to your CUDA install root.')
+    return os.path.join(CUDA_HOME, *paths)
+
+
+def _is_cuda_file(path):
+    return os.path.splitext(path)[1] in ['.cu', '.cuh']
+
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/utils/data/_utils/worker.html b/docs/stable/_modules/torch/utils/data/_utils/worker.html
new file mode 100644
index 000000000000..5cf8f0d2237d
--- /dev/null
+++ b/docs/stable/_modules/torch/utils/data/_utils/worker.html
@@ -0,0 +1,713 @@
+torch.utils.data._utils.worker — PyTorch master documentation
    Source code for torch.utils.data._utils.worker

    +r""""Contains definitions of the methods used by the _DataLoaderIter workers.
    +
    +These **needs** to be in global scope since Py2 doesn't support serializing
    +static methods.
    +"""
    +
    +import torch
    +import random
    +import os
    +from collections import namedtuple
    +from torch._six import queue
    +from torch._utils import ExceptionWrapper
    +from . import signal_handling, MP_STATUS_CHECK_INTERVAL, IS_WINDOWS
    +
    +if IS_WINDOWS:
    +    import ctypes
    +    from ctypes.wintypes import DWORD, BOOL, HANDLE
    +
+    # On Windows, the parent ID of the worker process remains unchanged when the manager process
+    # is gone, and the only way to check it through the OS is to let the worker have a process handle
+    # of the manager and ask if the process status has changed.
    +    class ManagerWatchdog(object):
    +        def __init__(self):
    +            self.manager_pid = os.getppid()
    +
    +            self.kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
    +            self.kernel32.OpenProcess.argtypes = (DWORD, BOOL, DWORD)
    +            self.kernel32.OpenProcess.restype = HANDLE
    +            self.kernel32.WaitForSingleObject.argtypes = (HANDLE, DWORD)
    +            self.kernel32.WaitForSingleObject.restype = DWORD
    +
    +            # Value obtained from https://msdn.microsoft.com/en-us/library/ms684880.aspx
    +            SYNCHRONIZE = 0x00100000
    +            self.manager_handle = self.kernel32.OpenProcess(SYNCHRONIZE, 0, self.manager_pid)
    +
    +            if not self.manager_handle:
    +                raise ctypes.WinError(ctypes.get_last_error())
    +
    +            self.manager_dead = False
    +
    +        def is_alive(self):
    +            if not self.manager_dead:
    +                # Value obtained from https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032.aspx
    +                self.manager_dead = self.kernel32.WaitForSingleObject(self.manager_handle, 0) == 0
    +            return not self.manager_dead
    +else:
    +    class ManagerWatchdog(object):
    +        def __init__(self):
    +            self.manager_pid = os.getppid()
    +            self.manager_dead = False
    +
    +        def is_alive(self):
    +            if not self.manager_dead:
    +                self.manager_dead = os.getppid() != self.manager_pid
    +            return not self.manager_dead
    +
    +_worker_info = None
    +
    +
    +class WorkerInfo(object):
    +    __initialized = False
    +
    +    def __init__(self, **kwargs):
    +        for k, v in kwargs.items():
    +            setattr(self, k, v)
    +        self.__initialized = True
    +
    +    def __setattr__(self, key, val):
    +        if self.__initialized:
    +            raise RuntimeError("Cannot assign attributes to {} objects".format(self.__class__.__name__))
    +        return super(WorkerInfo, self).__setattr__(key, val)
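A small sketch of the behavior this guards: once constructed (normally by the worker loop below, not by user code), a WorkerInfo rejects attribute assignment:

    info = WorkerInfo(id=0, num_workers=2, seed=1234, dataset=None)
    print(info.id, info.num_workers)   # -> 0 2
    try:
        info.id = 1                    # frozen after __init__
    except RuntimeError as exc:
        print(exc)                     # Cannot assign attributes to WorkerInfo objects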
    +
    +
    +
+def get_worker_info():
+    r"""Returns the information about the current
+    :class:`~torch.utils.data.DataLoader` iterator worker process.
+
+    When called in a worker, this returns an object guaranteed to have the
+    following attributes:
+
+    * :attr:`id`: the current worker id.
+    * :attr:`num_workers`: the total number of workers.
+    * :attr:`seed`: the random seed set for the current worker. This value is
+      determined by main process RNG and the worker id. See
+      :class:`~torch.utils.data.DataLoader`'s documentation for more details.
+    * :attr:`dataset`: the copy of the dataset object in **this** process. Note
+      that this will be a different object in a different process than the one
+      in the main process.
+
+    When called in the main process, this returns ``None``.
+
+    .. note::
+       When used in a :attr:`worker_init_fn` passed over to
+       :class:`~torch.utils.data.DataLoader`, this method can be useful to
+       set up each worker process differently, for instance, using ``worker_id``
+       to configure the ``dataset`` object to only read a specific fraction of a
+       sharded dataset, or use ``seed`` to seed other libraries used in dataset
+       code (e.g., NumPy).
+    """
+    return _worker_info
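A sketch of the sharding pattern the note above describes, using a hypothetical IterableDataset over a range of integers:

    import math
    from torch.utils.data import IterableDataset, DataLoader, get_worker_info

    class RangeDataset(IterableDataset):
        def __init__(self, start, end):
            self.start, self.end = start, end

        def __iter__(self):
            info = get_worker_info()
            if info is None:
                # Single-process loading: yield the full range.
                lo, hi = self.start, self.end
            else:
                # Multi-process loading: claim a disjoint shard per worker.
                per_worker = int(math.ceil((self.end - self.start) / float(info.num_workers)))
                lo = self.start + info.id * per_worker
                hi = min(lo + per_worker, self.end)
            return iter(range(lo, hi))

    # With two workers, each worker iterates over a disjoint half of [0, 8).
    print(list(DataLoader(RangeDataset(0, 8), num_workers=2)))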
    + + +r"""Dummy class used to signal the end of an IterableDataset""" +_IterableDatasetStopIteration = namedtuple('_IterableDatasetStopIteration', ['worker_id']) + + +def _worker_loop(dataset_kind, dataset, index_queue, data_queue, done_event, + auto_collation, collate_fn, drop_last, seed, init_fn, worker_id, + num_workers): + # See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on the + # logic of this function. + + try: + # Intialize C side signal handlers for SIGBUS and SIGSEGV. Python signal + # module's handlers are executed after Python returns from C low-level + # handlers, likely when the same fatal signal had already happened + # again. + # https://docs.python.org/3/library/signal.html#execution-of-python-signal-handlers + signal_handling._set_worker_signal_handlers() + + torch.set_num_threads(1) + random.seed(seed) + torch.manual_seed(seed) + + global _worker_info + _worker_info = WorkerInfo(id=worker_id, num_workers=num_workers, + seed=seed, dataset=dataset) + + from torch.utils.data import _DatasetKind + + init_exception = None + + try: + if init_fn is not None: + init_fn(worker_id) + + fetcher = _DatasetKind.create_fetcher(dataset_kind, dataset, auto_collation, collate_fn, drop_last) + except Exception: + init_exception = ExceptionWrapper( + where="in DataLoader worker process {}".format(worker_id)) + + # When using Iterable mode, some worker can exit earlier than others due + # to the IterableDataset behaving differently for different workers. + # When such things happen, an `_IterableDatasetStopIteration` object is + # sent over to the main process with the ID of this worker, so that the + # main process won't send more tasks to this worker, and will send + # `None` to this worker to properly exit it. + # + # Note that we cannot set `done_event` from a worker as it is shared + # among all processes. Instead, we set the `iteration_end` flag to + # signify that the iterator is exhausted. When either `done_event` or + # `iteration_end` is set, we skip all processing step and just wait for + # `None`. + iteration_end = False + + watchdog = ManagerWatchdog() + + while watchdog.is_alive(): + try: + r = index_queue.get(timeout=MP_STATUS_CHECK_INTERVAL) + except queue.Empty: + continue + if r is None: + # Received the final signal + assert done_event.is_set() or iteration_end + break + elif done_event.is_set() or iteration_end: + # `done_event` is set. But I haven't received the final signal + # (None) yet. I will keep continuing until get it, and skip the + # processing steps. + continue + idx, index = r + if init_exception is not None: + data = init_exception + init_exception = None + else: + try: + data = fetcher.fetch(index) + except Exception as e: + if isinstance(e, StopIteration) and dataset_kind == _DatasetKind.Iterable: + data = _IterableDatasetStopIteration(worker_id) + # Set `iteration_end` + # (1) to save future `next(...)` calls, and + # (2) to avoid sending multiple `_IterableDatasetStopIteration`s. + iteration_end = True + else: + # It is important that we don't store exc_info in a variable. + # `ExceptionWrapper` does the correct thing. + # See NOTE [ Python Traceback Reference Cycle Problem ] + data = ExceptionWrapper( + where="in DataLoader worker process {}".format(worker_id)) + data_queue.put((idx, data)) + del data, idx, index, r # save memory + except KeyboardInterrupt: + # Main process will raise KeyboardInterrupt anyways. + pass + if done_event.is_set(): + data_queue.cancel_join_thread() + data_queue.close() +
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/utils/data/dataloader.html b/docs/stable/_modules/torch/utils/data/dataloader.html
new file mode 100644
index 000000000000..b00b2784d4b7
--- /dev/null
+++ b/docs/stable/_modules/torch/utils/data/dataloader.html
@@ -0,0 +1,1437 @@
+torch.utils.data.dataloader — PyTorch master documentation

    Source code for torch.utils.data.dataloader

    +r"""Definition of the DataLoader and it's iterator _DataLoaderIter classes.
    +
    +To support these two classes, in `./_utils` we define many utility methods and
    +functions to be run in multiprocessing. E.g., the data loading worker loop is
    +in `./_utils/worker.py`.
    +"""
    +
    +import torch
    +import multiprocessing as python_multiprocessing
    +import torch.multiprocessing as multiprocessing
    +from . import IterableDataset, Sampler, SequentialSampler, RandomSampler, BatchSampler
    +from . import _utils
    +from torch._utils import ExceptionWrapper
    +import threading
    +import itertools
    +from torch._six import queue, string_classes
    +
    +
    +get_worker_info = _utils.worker.get_worker_info
    +
+# This function used to be defined in this file. However, it was moved to
+# _utils/collate.py. Although it is rather hard to access this from user land
+# (one has to explicitly `import torch.utils.data.dataloader`), there
+# probably is user code out there using it. This aliasing maintains BC in this
+# aspect.
    +default_collate = _utils.collate.default_collate
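A quick sketch of what this aliased function does with a batch of (tensor, label) samples:

    import torch
    from torch.utils.data.dataloader import default_collate

    batch = [(torch.tensor([1.0, 2.0]), 0),
             (torch.tensor([3.0, 4.0]), 1)]
    inputs, targets = default_collate(batch)
    print(inputs.shape)   # torch.Size([2, 2]): samples stacked along a new dim 0
    print(targets)        # tensor([0, 1]): Python ints collated into a tensor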
    +
    +
    +class _DatasetKind(object):
    +    Map = 0
    +    Iterable = 1
    +
    +    @staticmethod
    +    def create_fetcher(kind, dataset, auto_collation, collate_fn, drop_last):
    +        if kind == _DatasetKind.Map:
    +            return _utils.fetch._MapDatasetFetcher(dataset, auto_collation, collate_fn, drop_last)
    +        else:
    +            return _utils.fetch._IterableDatasetFetcher(dataset, auto_collation, collate_fn, drop_last)
    +
    +
    +class _InfiniteConstantSampler(Sampler):
    +    r"""Analogous to ``itertools.repeat(None, None)``.
    +    Used as sampler for :class:`~torch.utils.data.IterableDataset`.
    +    """
    +
    +    def __init__(self):
    +        super(_InfiniteConstantSampler, self).__init__(None)
    +
    +    def __iter__(self):
    +        while True:
    +            yield None
    +
    +    def __len__(self):
    +        # This has to be a TypeError, otherwise, since this is used in
    +        # `len(dataloader)`, `list(dataloader)` will fail.
    +        # see NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ]
+        raise TypeError('Cannot determine the DataLoader length of an IterableDataset')
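Consequently, querying the length of a DataLoader over an iterable-style dataset raises; a self-contained sketch with a minimal hypothetical dataset:

    from torch.utils.data import DataLoader, IterableDataset

    class OneShot(IterableDataset):        # minimal iterable-style dataset
        def __iter__(self):
            return iter(range(4))

    try:
        len(DataLoader(OneShot()))         # __len__ hits _InfiniteConstantSampler
    except TypeError as exc:
        print(exc)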
    +
    +
    +
    [docs]class DataLoader(object): + r""" + Data loader. Combines a dataset and a sampler, and provides an iterable over + the given dataset. + + The :class:`~torch.utils.data.DataLoader` supports both map-style and + iterable-style datasets with single- or multi-process loading, customizing + loading order and optional automatic batching (collation) and memory pinning. + + See :py:mod:`torch.utils.data` documentation page for more details. + + Arguments: + dataset (Dataset): dataset from which to load the data. + batch_size (int, optional): how many samples per batch to load + (default: ``1``). + shuffle (bool, optional): set to ``True`` to have the data reshuffled + at every epoch (default: ``False``). + sampler (Sampler, optional): defines the strategy to draw samples from + the dataset. If specified, :attr:`shuffle` must be ``False``. + batch_sampler (Sampler, optional): like :attr:`sampler`, but returns a batch of + indices at a time. Mutually exclusive with :attr:`batch_size`, + :attr:`shuffle`, :attr:`sampler`, and :attr:`drop_last`. + num_workers (int, optional): how many subprocesses to use for data + loading. ``0`` means that the data will be loaded in the main process. + (default: ``0``) + collate_fn (callable, optional): merges a list of samples to form a + mini-batch of Tensor(s). Used when using batched loading from a + map-style dataset. + pin_memory (bool, optional): If ``True``, the data loader will copy Tensors + into CUDA pinned memory before returning them. If your data elements + are a custom type, or your :attr:`collate_fn` returns a batch that is a custom type, + see the example below. + drop_last (bool, optional): set to ``True`` to drop the last incomplete batch, + if the dataset size is not divisible by the batch size. If ``False`` and + the size of dataset is not divisible by the batch size, then the last batch + will be smaller. (default: ``False``) + timeout (numeric, optional): if positive, the timeout value for collecting a batch + from workers. Should always be non-negative. (default: ``0``) + worker_init_fn (callable, optional): If not ``None``, this will be called on each + worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as + input, after seeding and before data loading. (default: ``None``) + + + .. warning:: If the ``spawn`` start method is used, :attr:`worker_init_fn` + cannot be an unpicklable object, e.g., a lambda function. See + :ref:`multiprocessing-best-practices` on more details related + to multiprocessing in PyTorch. + + .. note:: ``len(dataloader)`` heuristic is based on the length of the sampler used. + When :attr:`dataset` is an :class:`~torch.utils.data.IterableDataset`, + an infinite sampler is used, whose :meth:`__len__` is not + implemented, because the actual length depends on both the + iterable as well as multi-process loading configurations. So one + should not query this method unless they work with a map-style + dataset. See `Dataset Types`_ for more details on these two types + of datasets. 
+ """ + + __initialized = False + + def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None, + batch_sampler=None, num_workers=0, collate_fn=None, + pin_memory=False, drop_last=False, timeout=0, + worker_init_fn=None, multiprocessing_context=None): + torch._C._log_api_usage_once("python.data_loader") + + if num_workers < 0: + raise ValueError('num_workers option should be non-negative; ' + 'use num_workers=0 to disable multiprocessing.') + + if timeout < 0: + raise ValueError('timeout option should be non-negative') + + self.dataset = dataset + self.num_workers = num_workers + self.pin_memory = pin_memory + self.timeout = timeout + self.worker_init_fn = worker_init_fn + self.multiprocessing_context = multiprocessing_context + + # Arg-check dataset related before checking samplers because we want to + # tell users that iterable-style datasets are incompatible with custom + # samplers first, so that they don't learn that this combo doesn't work + # after spending time fixing the custom sampler errors. + if isinstance(dataset, IterableDataset): + self.dataset_kind = _DatasetKind.Iterable + # NOTE [ Custom Samplers and `IterableDataset` ] + # + # `IterableDataset` does not support custom `batch_sampler` or + # `sampler` since the key is irrelevant (unless we support + # generator-style dataset one day...). + # + # For `sampler`, we always create a dummy sampler. This is an + # infinite sampler even when the dataset may have an implemented + # finite `__len__` because in multi-process data loading, naive + # settings will return duplicated data (which may be desired), and + # thus using a sampler with length matching that of dataset will + # cause data lost (you may have duplicates of the first couple + # batches, but never see anything afterwards). Therefore, + # `Iterabledataset` always uses an infinite sampler, an instance of + # `_InfiniteConstantSampler` defined above. + # + # A custom `batch_sampler` essentially only controls the batch size. + # However, it is unclear how useful it would be since an iterable-style + # dataset can handle that within itself. Moreover, it is pointless + # in multi-process data loading as the assignment order of batches + # to workers is an implementation detail so users can not control + # how to batchify each worker's iterable. Thus, we disable this + # option. If this turns out to be useful in future, we can re-enable + # this, and support custom samplers that specify the assignments to + # specific workers. 
+ if shuffle is not False: + raise ValueError( + "DataLoader with IterableDataset: expected unspecified " + "shuffle option, but got shuffle={}".format(shuffle)) + elif sampler is not None: + # See NOTE [ Custom Samplers and IterableDataset ] + raise ValueError( + "DataLoader with IterableDataset: expected unspecified " + "sampler option, but got sampler={}".format(sampler)) + elif batch_sampler is not None: + # See NOTE [ Custom Samplers and IterableDataset ] + raise ValueError( + "DataLoader with IterableDataset: expected unspecified " + "batch_sampler option, but got batch_sampler={}".format(batch_sampler)) + else: + self.dataset_kind = _DatasetKind.Map + + if sampler is not None and shuffle: + raise ValueError('sampler option is mutually exclusive with ' + 'shuffle') + + if batch_sampler is not None: + # auto_collation with custom batch_sampler + if batch_size != 1 or shuffle or sampler is not None or drop_last: + raise ValueError('batch_sampler option is mutually exclusive ' + 'with batch_size, shuffle, sampler, and ' + 'drop_last') + batch_size = None + drop_last = False + elif batch_size is None: + # no auto_collation + if shuffle or sampler is not None or drop_last: + raise ValueError('batch_size=None option disables auto-batching ' + 'and is mutually exclusive with ' + 'shuffle, sampler, and drop_last') + + if sampler is None: # give default samplers + if self.dataset_kind == _DatasetKind.Iterable: + # See NOTE [ Custom Samplers and IterableDataset ] + sampler = _InfiniteConstantSampler() + else: # map-style + if shuffle: + sampler = RandomSampler(dataset) + else: + sampler = SequentialSampler(dataset) + + if batch_size is not None and batch_sampler is None: + # auto_collation without custom batch_sampler + batch_sampler = BatchSampler(sampler, batch_size, drop_last) + + self.batch_size = batch_size + self.drop_last = drop_last + self.sampler = sampler + self.batch_sampler = batch_sampler + + if collate_fn is None: + if self._auto_collation: + collate_fn = _utils.collate.default_collate + else: + collate_fn = _utils.collate.default_convert + + self.collate_fn = collate_fn + self.__initialized = True + + @property + def multiprocessing_context(self): + return self.__multiprocessing_context + + @multiprocessing_context.setter + def multiprocessing_context(self, multiprocessing_context): + if multiprocessing_context is not None: + if self.num_workers > 0: + if not multiprocessing._supports_context: + raise ValueError('multiprocessing_context relies on Python >= 3.4, with ' + 'support for different start methods') + + if isinstance(multiprocessing_context, string_classes): + valid_start_methods = multiprocessing.get_all_start_methods() + if multiprocessing_context not in valid_start_methods: + raise ValueError( + ('multiprocessing_context option ' + 'should specify a valid start method in {}, but got ' + 'multiprocessing_context={}').format(valid_start_methods, multiprocessing_context)) + multiprocessing_context = multiprocessing.get_context(multiprocessing_context) + + if not isinstance(multiprocessing_context, python_multiprocessing.context.BaseContext): + raise ValueError(('multiprocessing_context option should be a valid context ' + 'object or a string specifying the start method, but got ' + 'multiprocessing_context={}').format(multiprocessing_context)) + else: + raise ValueError(('multiprocessing_context can only be used with ' + 'multi-process loading (num_workers > 0), but got ' + 'num_workers={}').format(self.num_workers)) + + self.__multiprocessing_context = 
multiprocessing_context + + def __setattr__(self, attr, val): + if self.__initialized and attr in ('batch_size', 'sampler', 'drop_last'): + raise ValueError('{} attribute should not be set after {} is ' + 'initialized'.format(attr, self.__class__.__name__)) + + super(DataLoader, self).__setattr__(attr, val) + + def __iter__(self): + if self.num_workers == 0: + return _SingleProcessDataLoaderIter(self) + else: + return _MultiProcessingDataLoaderIter(self) + + @property + def _auto_collation(self): + return self.batch_sampler is not None + + @property + def _index_sampler(self): + # The actual sampler used for generating indices for `_DatasetFetcher` + # (see _utils/fetch.py) to read data at each time. This would be + # `.batch_sampler` if in auto-collation mode, and `.sampler` otherwise. + # We can't change `.sampler` and `.batch_sampler` attributes for BC + # reasons. + if self._auto_collation: + return self.batch_sampler + else: + return self.sampler + + def __len__(self): + return len(self._index_sampler) # with iterable-style dataset, this will error
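+
+# NOTE [ Example: option checks in practice ]
+#
+# A minimal, illustrative sketch (not part of the original module) of how the
+# checks above behave; `map_ds` is a hypothetical map-style dataset and
+# `stream_ds` a hypothetical `IterableDataset`:
+#
+#     DataLoader(map_ds, batch_size=4, shuffle=True)   # OK: auto-batching
+#     DataLoader(map_ds, batch_size=None)              # OK: no auto-batching
+#     DataLoader(stream_ds, shuffle=True)              # ValueError: shuffle,
+#                                                      # sampler, and batch_sampler
+#                                                      # are rejected for
+#                                                      # iterable-style datasets
+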
+ + +class _BaseDataLoaderIter(object): + def __init__(self, loader): + self.dataset = loader.dataset + self.dataset_kind = loader.dataset_kind + self.auto_collation = loader._auto_collation + self.drop_last = loader.drop_last + self.index_sampler = loader._index_sampler + self.num_workers = loader.num_workers + self.pin_memory = loader.pin_memory and torch.cuda.is_available() + self.timeout = loader.timeout + self.collate_fn = loader.collate_fn + self.sampler_iter = iter(self.index_sampler) + self.base_seed = torch.empty((), dtype=torch.int64).random_().item() + + def __iter__(self): + return self + + def _next_index(self): + return next(self.sampler_iter) # may raise StopIteration + + def __next__(self): + raise NotImplementedError + + def __len__(self): + return len(self.index_sampler) + + def __getstate__(self): + # TODO: add limited pickling support for sharing an iterator + # across multiple threads for HOGWILD. + # Probably the best way to do this is by moving the sample pushing + # to a separate thread and then just sharing the data queue + # but signalling the end is tricky without a non-blocking API + raise NotImplementedError("{} cannot be pickled".format(self.__class__.__name__)) + + +class _SingleProcessDataLoaderIter(_BaseDataLoaderIter): + def __init__(self, loader): + super(_SingleProcessDataLoaderIter, self).__init__(loader) + assert self.timeout == 0 + assert self.num_workers == 0 + + self.dataset_fetcher = _DatasetKind.create_fetcher( + self.dataset_kind, self.dataset, self.auto_collation, self.collate_fn, self.drop_last) + + def __next__(self): + index = self._next_index() # may raise StopIteration + data = self.dataset_fetcher.fetch(index) # may raise StopIteration + if self.pin_memory: + data = _utils.pin_memory.pin_memory(data) + return data + + next = __next__ # Python 2 compatibility + + +class _MultiProcessingDataLoaderIter(_BaseDataLoaderIter): + r"""Iterates once over the DataLoader's dataset, as specified by the sampler""" + + # NOTE [ Data Loader Multiprocessing Shutdown Logic ] + # + # Preliminary: + # + # Our data model looks like this (queues are indicated with curly brackets): + # + # main process || + # | || + # {index_queue} || + # | || + # worker processes || DATA + # | || + # {worker_result_queue} || FLOW + # | || + # pin_memory_thread of main process || DIRECTION + # | || + # {data_queue} || + # | || + # data output \/ + # + # P.S. the `worker_result_queue` and `pin_memory_thread` parts may be omitted if + # `pin_memory=False`. + # + # + # Terminating multiprocessing logic requires very careful design. In + # particular, we need to make sure that + # + # 1. The iterator gracefully exits the workers when its last reference is + # gone or it is depleted. + # + # In this case, the workers should be gracefully exited because the + # main process may still need to continue to run, and we want cleaning + # up code in the workers to be executed (e.g., releasing GPU memory). + # Naturally, we implement the shutdown logic in `__del__` of + # DataLoaderIterator. + # + # We delay the discussion on the logic in this case until later. + # + # 2. The iterator exits the workers when the loader process and/or worker + # processes exit normally or with an error. + # + # We set all workers and `pin_memory_thread` to have `daemon=True`. + # + # You may ask, why can't we make the workers non-daemonic, and + # gracefully exit using the same logic as we have in `__del__` when the + # iterator gets deleted (see 1 above)?
+ # + # First of all, `__del__` is **not** guaranteed to be called when the + # interpreter exits. Even if it is called, by the time it executes, + # many Python core library resources may already be freed, and even + # simple things like acquiring an internal lock of a queue may hang. + # Therefore, in this case, we actually need to prevent `__del__` from + # being executed, and rely on the automatic termination of daemonic + # children. Thus, we register an `atexit` hook that sets a global flag + # `_utils.python_exit_status`. Since `atexit` hooks are executed in the + # reverse order of registration, we are guaranteed that this flag is + # set before library resources we use are freed. (Hooks freeing those + # resources are registered at importing the Python core libraries at + # the top of this file.) So in `__del__`, we check if + # `_utils.python_exit_status` is set or `None` (freed), and perform a + # no-op if so. + # + # Another problem with `__del__` is also related to the library cleanup + # calls. When a process ends, it shuts all its daemonic children + # down with a SIGTERM (instead of joining them without a timeout). + # Similarly for threads, but by a different mechanism. This fact, + # together with a few implementation details of multiprocessing, forces + # us to make workers daemonic. All of our problems arise when a + # DataLoader is used in a subprocess, and are caused by multiprocessing + # code which looks more or less like this: + # + # try: + # your_function_using_a_dataloader() + # finally: + # multiprocessing.util._exit_function() + # + # The joining/termination mentioned above happens inside + # `_exit_function()`. Now, if `your_function_using_a_dataloader()` + # throws, the stack trace stored in the exception will prevent the + # frame which uses `DataLoaderIter` from being freed. If the frame has any + # reference to the `DataLoaderIter` (e.g., in a method of the iter), + # its `__del__`, which starts the shutdown procedure, will not be + # called. That, in turn, means that workers aren't notified. Attempting + # to join in `_exit_function` will then result in a hang. + # + # For context, `_exit_function` is also registered as an `atexit` call. + # So it is unclear to me (@ssnl) why this is needed in a finally block. + # The code dates back to 2008 and there is no comment on the original + # PEP 371 or patch https://bugs.python.org/issue3050 (containing both + # the finally block and the `atexit` registration) that explains this. + # + # Another choice is to just shut down workers with the logic in 1 above + # whenever we see an error in `next`. This isn't ideal because + # a. It prevents users from using try-catch to resume data loading. + # b. It doesn't prevent hanging if users have references to the + # iterator. + # + # 3. All processes exit if any of them die unexpectedly by fatal signals. + # + # As shown above, the workers are set as daemonic children of the main + # process. However, automatic cleaning-up of such child processes only + # happens if the parent process exits gracefully (e.g., not via fatal + # signals like SIGKILL). So we must ensure that each process will exit + # even if the process that should send/receive data to/from it was + # killed, i.e., + # + # a. A process won't hang when getting from a queue. + # + # Even with carefully designed data dependencies (i.e., a `put()` + # always corresponding to a `get()`), hanging on `get()` can still + # happen when data in the queue is corrupted (e.g., due to + # `cancel_join_thread` or unexpected exit).
+ # + # For child exit, we set a timeout whenever we try to get data + # from `data_queue`, and check the workers' status on each timeout + # and error. + # See `_MultiProcessingDataLoaderIter._get_data()` and + # `_MultiProcessingDataLoaderIter._try_get_data()` for details. + # + # Additionally, for child exit on non-Windows platforms, we also + # register a SIGCHLD handler (which is not supported on Windows) on + # the main process, which checks if any of the workers fail in the + # (Python) handler. This is more efficient and faster in detecting + # worker failures, compared to only using the above mechanism. + # See `DataLoader.cpp` and `_utils/signal_handling.py` for details. + # + # For `.get()` calls where the sender(s) are not the workers, we + # guard them with timeouts, and check the status of the sender + # when a timeout happens: + # + in the workers, the `_utils.worker.ManagerWatchdog` class + # checks the status of the main process. + # + if `pin_memory=True`, when getting from `pin_memory_thread`, + # check `pin_memory_thread` status periodically until `.get()` + # returns or we see that `pin_memory_thread` died. + # + # b. A process won't hang when putting into a queue; + # + # We use `mp.Queue` which has a separate background thread to put + # objects from an unbounded buffer array. The background thread is + # daemonic and usually automatically joined when the process + # exits. + # + # However, in case the receiver has ended abruptly while + # reading from the pipe, the join will hang forever. Therefore, + # for both `worker_result_queue` (worker -> main process/pin_memory_thread) + # and each `index_queue` (main process -> worker), we use + # `q.cancel_join_thread()` in the sender process before any `q.put` to + # prevent this automatic join. + # + # Moreover, having called `cancel_join_thread` on all queues makes + # implementing graceful shutdown logic in `__del__` much easier. + # It won't need to get from any queue, which would also need to be + # guarded by periodic status checks. + # + # Nonetheless, `cancel_join_thread` must only be called when the + # queue is **not** going to be read from or written to by another + # process, because it may hold onto a lock or leave corrupted data + # in the queue, leading other readers/writers to hang. + # + # `pin_memory_thread`'s `data_queue` is a `queue.Queue` that does + # a blocking `put` if the queue is full. So the above problem does + # not arise, but we do need to wrap the `put` in a loop that breaks + # not only upon success, but also when the main process stops + # reading, i.e., is shutting down. + # + # + # Now let's get back to 1: + # how we gracefully exit the workers when the last reference to the + # iterator is gone. + # + # To achieve this, we implement the following logic along with the design + # choices mentioned above: + # + # `workers_done_event`: + # A `multiprocessing.Event` shared among the main process and all worker + # processes. This is used to signal the workers that the iterator is + # shutting down. After it is set, they will not send processed data to + # queues anymore, and only wait for the final `None` before exiting. + # `done_event` isn't strictly needed. I.e., we can just check for `None` + # from the input queue, but it allows us to skip wasting resources + # processing data if we are already shutting down. + # + # `pin_memory_thread_done_event`: + # A `threading.Event` for a similar purpose to that of + # `workers_done_event`, but for the `pin_memory_thread`.
The reason + # that separate events are needed is that `pin_memory_thread` reads from + # the output queue of the workers. But the workers, upon seeing that + # `workers_done_event` is set, only want to see the final `None`, and are + # not required to flush all data in the output queue (e.g., a worker may call + # `cancel_join_thread` on that queue if its `IterableDataset` iterator + # happens to exhaust coincidentally, which is out of the control of the + # main process). Thus, since we will exit `pin_memory_thread` before the + # workers (see below), two separate events are used. + # + # NOTE: In short, the protocol is that the main process will set these + # `done_event`s and then send the corresponding processes/threads a `None`, + # and that they may exit at any time after receiving the `None`. + # + # NOTE: Using `None` as the final signal is valid, since normal data will + # always be a 2-tuple with the 1st element being the index of the data + # transferred (different from dataset index/key), and the 2nd being + # either the dataset key or the data sample (depending on which part + # of the data model the queue is at). + # + # [ worker processes ] + # While loader process is alive: + # Get from `index_queue`. + # If we get anything else, + # Check `workers_done_event`. + # If set, continue to next iteration + # i.e., keep getting until we see the `None`, then exit. + # Otherwise, process data: + # If fetching from an `IterableDataset` and the iterator + # is exhausted, send an `_IterableDatasetStopIteration` + # object to signal iteration end. The main process, upon + # receiving such an object, will send `None` to this + # worker and not use the corresponding `index_queue` + # anymore. + # If timed out, + # Whether or not `workers_done_event` is set (we still need to + # see the `None`), we must continue to the next iteration. + # (outside loop) + # If `workers_done_event` is set, (this can be False with `IterableDataset`) + # `data_queue.cancel_join_thread()`. (Everything is ending here: + # main process won't read from it; + # other workers will also call + # `cancel_join_thread`.) + # + # [ pin_memory_thread ] + # # No need to check main thread. If this thread is alive, the main loader + # # thread must be alive, because this thread is set as daemonic. + # While `pin_memory_thread_done_event` is not set: + # Get from `worker_result_queue`. + # If timed out, continue to get in the next iteration. + # Otherwise, process data. + # While `pin_memory_thread_done_event` is not set: + # Put processed data to `data_queue` (a `queue.Queue` with blocking put) + # If timed out, continue to put in the next iteration. + # Otherwise, break, i.e., continuing to the outer loop. + # + # NOTE: we don't check the status of the main thread because + # 1. if the process is killed by a fatal signal, `pin_memory_thread` + # ends. + # 2. in other cases, either the cleaning-up in __del__ or the + # automatic exit of the daemonic thread will take care of it. + # This won't busy-wait either because `.get(timeout)` does not + # busy-wait. + # + # [ main process ] + # In the DataLoader Iter's `__del__` + # b. Exit `pin_memory_thread` + # i. Set `pin_memory_thread_done_event`. + # ii. Put `None` in `worker_result_queue`. + # iii. Join the `pin_memory_thread`. + # iv. `worker_result_queue.cancel_join_thread()`. + # + # c. Exit the workers. + # i. Set `workers_done_event`. + # ii. Put `None` in each worker's `index_queue`. + # iii. Join the workers. + # iv. Call `.cancel_join_thread()` on each worker's `index_queue`.
+ # + # NOTE: (c) is better placed after (b) because it may leave corrupted + # data in `worker_result_queue`, which `pin_memory_thread` + # reads from, in which case exiting the `pin_memory_thread` can only + # happen at timing out, which is slow. Nonetheless, the same thing + # happens if a worker is killed by a signal at unfortunate times, + # but in other cases, we are better off having a non-corrupted + # `worker_result_queue` for `pin_memory_thread`. + # + # NOTE: If `pin_memory=False`, there is no `pin_memory_thread` and (b) + # can be omitted. + # + # NB: `done_event`s aren't strictly needed. E.g., we can just check for + # `None` from `index_queue`, but it allows us to skip wasting resources + # processing indices already in `index_queue` if we are already shutting + # down. + + def __init__(self, loader): + super(_MultiProcessingDataLoaderIter, self).__init__(loader) + + assert self.num_workers > 0 + + if loader.multiprocessing_context is None: + multiprocessing_context = multiprocessing + else: + multiprocessing_context = loader.multiprocessing_context + + self.worker_init_fn = loader.worker_init_fn + self.worker_queue_idx_cycle = itertools.cycle(range(self.num_workers)) + self.worker_result_queue = multiprocessing_context.Queue() + self.worker_pids_set = False + self.shutdown = False + self.send_idx = 0 # idx of the next task to be sent to workers + self.rcvd_idx = 0 # idx of the next task to be returned in __next__ + # information about data not yet yielded, i.e., tasks w/ indices in range [rcvd_idx, send_idx). + # map: task idx => - (worker_id,) if data isn't fetched (outstanding) + # \ (worker_id, data) if data is already fetched (out-of-order) + self.task_info = {} + self.tasks_outstanding = 0 # always equal to count(v for v in task_info.values() if len(v) == 1) + self.workers_done_event = multiprocessing_context.Event() + + self.index_queues = [] + self.workers = [] + # A list of booleans representing whether each worker still has work to + # do, i.e., not having exhausted its iterable dataset object. It always + # contains all `True`s if not using an iterable-style dataset + # (i.e., if kind != Iterable). + self.workers_status = [] + for i in range(self.num_workers): + index_queue = multiprocessing_context.Queue() + # index_queue.cancel_join_thread() + w = multiprocessing_context.Process( + target=_utils.worker._worker_loop, + args=(self.dataset_kind, self.dataset, index_queue, + self.worker_result_queue, self.workers_done_event, + self.auto_collation, self.collate_fn, self.drop_last, + self.base_seed + i, self.worker_init_fn, i, self.num_workers)) + w.daemon = True + # NB: Process.start() actually takes some time as it needs to + # start a process and pass the arguments over via a pipe. + # Therefore, we only add a worker to the self.workers list after + # it has started, so that we do not call .join() if the program dies + # before the worker starts, in which case __del__ would try to join + # it and get: + # AssertionError: can only join a started process.
+ w.start() + self.index_queues.append(index_queue) + self.workers.append(w) + self.workers_status.append(True) + + if self.pin_memory: + self.pin_memory_thread_done_event = threading.Event() + self.data_queue = queue.Queue() + pin_memory_thread = threading.Thread( + target=_utils.pin_memory._pin_memory_loop, + args=(self.worker_result_queue, self.data_queue, + torch.cuda.current_device(), + self.pin_memory_thread_done_event)) + pin_memory_thread.daemon = True + pin_memory_thread.start() + # Similar to workers (see comment above), we only register + # pin_memory_thread once it is started. + self.pin_memory_thread = pin_memory_thread + else: + self.data_queue = self.worker_result_queue + + _utils.signal_handling._set_worker_pids(id(self), tuple(w.pid for w in self.workers)) + _utils.signal_handling._set_SIGCHLD_handler() + self.worker_pids_set = True + + # prime the prefetch loop + for _ in range(2 * self.num_workers): + self._try_put_index() + + def _try_get_data(self, timeout=_utils.MP_STATUS_CHECK_INTERVAL): + # Tries to fetch data from `self.data_queue` once for a given timeout. + # This can also be used as the inner loop of fetching without timeout, with + # the sender status as the loop condition. + # + # This raises a `RuntimeError` if any worker died unexpectedly. This error + # can come from either the SIGCHLD handler in `_utils/signal_handling.py` + # (only for non-Windows platforms), or the manual check below on errors + # and timeouts. + # + # Returns a 2-tuple: + # (bool: whether data was successfully fetched, any: the data if successful, else None) + try: + data = self.data_queue.get(timeout=timeout) + return (True, data) + except Exception as e: + # At timeout and error, we manually check whether any worker has + # failed. Note that this is the only mechanism for Windows to detect + # worker failures. + failed_workers = [] + for worker_id, w in enumerate(self.workers): + if self.workers_status[worker_id] and not w.is_alive(): + failed_workers.append(w) + self._shutdown_worker(worker_id) + if len(failed_workers) > 0: + pids_str = ', '.join(str(w.pid) for w in failed_workers) + raise RuntimeError('DataLoader worker (pid(s) {}) exited unexpectedly'.format(pids_str)) + if isinstance(e, queue.Empty): + return (False, None) + raise + + def _get_data(self): + # Fetches data from `self.data_queue`. + # + # We check workers' status every `MP_STATUS_CHECK_INTERVAL` seconds, + # which we achieve by running `self._try_get_data(timeout=MP_STATUS_CHECK_INTERVAL)` + # in a loop. This is the only mechanism to detect worker failures for + # Windows. For other platforms, a SIGCHLD handler is also used for + # worker failure detection. + # + # If `pin_memory=True`, we also need to check whether `pin_memory_thread` + # has died at timeouts. + if self.timeout > 0: + success, data = self._try_get_data(self.timeout) + if success: + return data + else: + raise RuntimeError('DataLoader timed out after {} seconds'.format(self.timeout)) + elif self.pin_memory: + while self.pin_memory_thread.is_alive(): + success, data = self._try_get_data() + if success: + return data + else: + # while condition is false, i.e., pin_memory_thread died. + raise RuntimeError('Pin memory thread exited unexpectedly') + # In this case, `self.data_queue` is a `queue.Queue`. But we don't + # need to call `.task_done()` because we don't use `.join()`.
+ else: + while True: + success, data = self._try_get_data() + if success: + return data + + def __next__(self): + while True: + # If the worker responsible for `self.rcvd_idx` has already ended + # and was unable to fulfill this task (due to exhausting an `IterableDataset`), + # we try to advance `self.rcvd_idx` to find the next valid index. + # + # This part needs to run in the loop because both the `self._get_data()` + # call and `_IterableDatasetStopIteration` check below can mark + # extra worker(s) as dead. + while self.rcvd_idx < self.send_idx: + info = self.task_info[self.rcvd_idx] + worker_id = info[0] + if len(info) == 2 or self.workers_status[worker_id]: # has data or is still active + break + del self.task_info[self.rcvd_idx] + self.rcvd_idx += 1 + else: + # no valid `self.rcvd_idx` is found (i.e., didn't break) + self._shutdown_workers() + raise StopIteration + + # Now `self.rcvd_idx` is the batch index we want to fetch + + # Check if the next sample has already been generated + if len(self.task_info[self.rcvd_idx]) == 2: + data = self.task_info.pop(self.rcvd_idx)[1] + return self._process_data(data) + + assert not self.shutdown and self.tasks_outstanding > 0 + idx, data = self._get_data() + self.tasks_outstanding -= 1 + + if self.dataset_kind == _DatasetKind.Iterable: + # Check for _IterableDatasetStopIteration + if isinstance(data, _utils.worker._IterableDatasetStopIteration): + self._shutdown_worker(data.worker_id) + self._try_put_index() + continue + + if idx != self.rcvd_idx: + # store out-of-order samples + self.task_info[idx] += (data,) + else: + del self.task_info[idx] + return self._process_data(data) + + next = __next__ # Python 2 compatibility + + def _try_put_index(self): + assert self.tasks_outstanding < 2 * self.num_workers + try: + index = self._next_index() + except StopIteration: + return + for _ in range(self.num_workers): # find the next active worker, if any + worker_queue_idx = next(self.worker_queue_idx_cycle) + if self.workers_status[worker_queue_idx]: + break + else: + # not found (i.e., didn't break) + return + + self.index_queues[worker_queue_idx].put((self.send_idx, index)) + self.task_info[self.send_idx] = (worker_queue_idx,) + self.tasks_outstanding += 1 + self.send_idx += 1 + + def _process_data(self, data): + self.rcvd_idx += 1 + self._try_put_index() + if isinstance(data, ExceptionWrapper): + data.reraise() + return data + + def _shutdown_worker(self, worker_id): + # Mark a worker as having finished its work and dead, e.g., due to + # exhausting an `IterableDataset`. This should be used only when this + # `_MultiProcessingDataLoaderIter` is going to continue running. + + assert self.workers_status[worker_id] + + # Signal termination to that specific worker. + q = self.index_queues[worker_id] + # Indicate that no more data will be put on this queue by the current + # process. + q.put(None) + + # Note that we don't actually join the worker here, nor do we remove the + # worker's pid from the C side struct because (1) joining may be slow, and + # (2) since we don't join, the worker may still raise an error, and we + # prefer capturing those, rather than ignoring them, even though they + # are raised after the worker has finished its job. + # Joining is deferred to `_shutdown_workers`, which is called when + # all workers finish their jobs (e.g., `IterableDataset` replicas) or + # when this iterator is garbage collected.
+ self.workers_status[worker_id] = False + + def _shutdown_workers(self): + # Called when shutting down this `_MultiProcessingDataLoaderIter`. + # See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on + # the logic of this function. + python_exit_status = _utils.python_exit_status + if python_exit_status is True or python_exit_status is None: + # See (2) of the note. If Python is shutting down, do no-op. + return + # Normal exit when last reference is gone / iterator is depleted. + # See (1) and the second half of the note. + if not self.shutdown: + self.shutdown = True + try: + # Exit `pin_memory_thread` first because exiting workers may leave + # corrupted data in `worker_result_queue` which `pin_memory_thread` + # reads from. + if hasattr(self, 'pin_memory_thread'): + # Use hasattr in case error happens before we set the attribute. + self.pin_memory_thread_done_event.set() + # Send something to pin_memory_thread in case it is waiting + # so that it can wake up and check `pin_memory_thread_done_event` + self.worker_result_queue.put((None, None)) + self.pin_memory_thread.join() + self.worker_result_queue.close() + + # Exit workers now. + self.workers_done_event.set() + for worker_id in range(self.num_workers): + if self.workers_status[worker_id]: + self._shutdown_worker(worker_id) + for w in self.workers: + w.join() + for q in self.index_queues: + q.cancel_join_thread() + q.close() + finally: + # Even though all this function does is putting into queues that + # we have called `cancel_join_thread` on, weird things can + # happen when a worker is killed by a signal, e.g., hanging in + # `Event.set()`. So we need to guard this with SIGCHLD handler, + # and remove pids from the C side data structure only at the + # end. + # + # FIXME: Unfortunately, for Windows, we are missing a worker + # error detection mechanism here in this function, as it + # doesn't provide a SIGCHLD handler. + if self.worker_pids_set: + _utils.signal_handling._remove_worker_pids(id(self)) + self.worker_pids_set = False + + def __del__(self): + self._shutdown_workers() +
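+
+# A short usage sketch for `multiprocessing_context` (illustrative; `ds` is a
+# hypothetical dataset). A string start method is resolved through
+# `multiprocessing.get_context`, so these two calls are equivalent:
+#
+#     import torch.multiprocessing as mp
+#     DataLoader(ds, num_workers=2, multiprocessing_context='spawn')
+#     DataLoader(ds, num_workers=2, multiprocessing_context=mp.get_context('spawn'))
+#
+# Passing a context together with `num_workers=0` raises a ValueError, per the
+# property setter above.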
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/utils/data/dataset.html b/docs/stable/_modules/torch/utils/data/dataset.html
new file mode 100644
index 000000000000..4868b9a48895
--- /dev/null
+++ b/docs/stable/_modules/torch/utils/data/dataset.html
@@ -0,0 +1,788 @@
    Source code for torch.utils.data.dataset

    +import bisect
    +import warnings
    +
    +from torch._utils import _accumulate
    +from torch import randperm
    +
    +
    +
[docs]class Dataset(object): + r"""An abstract class representing a :class:`Dataset`. + + All datasets that represent a map from keys to data samples should subclass + it. All subclasses should override :meth:`__getitem__`, supporting fetching a + data sample for a given key. Subclasses could also optionally override + :meth:`__len__`, which is expected to return the size of the dataset by many + :class:`~torch.utils.data.Sampler` implementations and the default options + of :class:`~torch.utils.data.DataLoader`. + + .. note:: + :class:`~torch.utils.data.DataLoader` by default constructs an index + sampler that yields integral indices. To make it work with a map-style + dataset with non-integral indices/keys, a custom sampler must be provided. + """ + + def __getitem__(self, index): + raise NotImplementedError + + def __add__(self, other): + return ConcatDataset([self, other])
    + + # No `def __len__(self)` default? + # See NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ] + + +
[docs]class IterableDataset(Dataset): + r"""An iterable Dataset. + + All datasets that represent an iterable of data samples should subclass it. + This form of dataset is particularly useful when data come from a stream. + + All subclasses should override :meth:`__iter__`, which should return an + iterator of samples in this dataset. + + When a subclass is used with :class:`~torch.utils.data.DataLoader`, each + item in the dataset will be yielded from the :class:`~torch.utils.data.DataLoader` + iterator. When :attr:`num_workers > 0`, each worker process will have a + different copy of the dataset object, so it is often desired to configure + each copy independently to avoid having duplicate data returned from the + workers. :func:`~torch.utils.data.get_worker_info`, when called in a worker + process, returns information about the worker. It can be used in either the + dataset's :meth:`__iter__` method or the :class:`~torch.utils.data.DataLoader` 's + :attr:`worker_init_fn` option to modify each copy's behavior. + + Example 1: splitting workload across all workers in :meth:`__iter__`:: + + >>> class MyIterableDataset(torch.utils.data.IterableDataset): + ... def __init__(self, start, end): + ... super(MyIterableDataset, self).__init__() + ... assert end > start, "this example code only works with end > start" + ... self.start = start + ... self.end = end + ... + ... def __iter__(self): + ... worker_info = torch.utils.data.get_worker_info() + ... if worker_info is None: # single-process data loading, return the full iterator + ... iter_start = self.start + ... iter_end = self.end + ... else: # in a worker process + ... # split workload + ... per_worker = int(math.ceil((self.end - self.start) / float(worker_info.num_workers))) + ... worker_id = worker_info.id + ... iter_start = self.start + worker_id * per_worker + ... iter_end = min(iter_start + per_worker, self.end) + ... return iter(range(iter_start, iter_end)) + ... + >>> # should give same set of data as range(3, 7), i.e., [3, 4, 5, 6]. + >>> ds = MyIterableDataset(start=3, end=7) + + >>> # Single-process loading + >>> print(list(torch.utils.data.DataLoader(ds, num_workers=0))) + [3, 4, 5, 6] + + >>> # Multi-process loading with two worker processes + >>> # Worker 0 fetched [3, 4]. Worker 1 fetched [5, 6]. + >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2))) + [3, 5, 4, 6] + + >>> # With even more workers + >>> print(list(torch.utils.data.DataLoader(ds, num_workers=20))) + [3, 4, 5, 6] + + Example 2: splitting workload across all workers using :attr:`worker_init_fn`:: + + >>> class MyIterableDataset(torch.utils.data.IterableDataset): + ... def __init__(self, start, end): + ... super(MyIterableDataset, self).__init__() + ... assert end > start, "this example code only works with end > start" + ... self.start = start + ... self.end = end + ... + ... def __iter__(self): + ... return iter(range(self.start, self.end)) + ... + >>> # should give same set of data as range(3, 7), i.e., [3, 4, 5, 6]. + >>> ds = MyIterableDataset(start=3, end=7) + + >>> # Single-process loading + >>> print(list(torch.utils.data.DataLoader(ds, num_workers=0))) + [3, 4, 5, 6] + >>> + >>> # Directly doing multi-process loading yields duplicate data + >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2))) + [3, 3, 4, 4, 5, 5, 6, 6] + + >>> # Define a `worker_init_fn` that configures each dataset copy differently + >>> def worker_init_fn(worker_id): + ... worker_info = torch.utils.data.get_worker_info() + ...
dataset = worker_info.dataset # the dataset copy in this worker process + ... overall_start = dataset.start + ... overall_end = dataset.end + ... # configure the dataset to only process the split workload + ... per_worker = int(math.ceil((overall_end - overall_start) / float(worker_info.num_workers))) + ... worker_id = worker_info.id + ... dataset.start = overall_start + worker_id * per_worker + ... dataset.end = min(dataset.start + per_worker, overall_end) + ... + + >>> # Multi-process loading with the custom `worker_init_fn` + >>> # Worker 0 fetched [3, 4]. Worker 1 fetched [5, 6]. + >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2, worker_init_fn=worker_init_fn))) + [3, 5, 4, 6] + + >>> # With even more workers + >>> print(list(torch.utils.data.DataLoader(ds, num_workers=20, worker_init_fn=worker_init_fn))) + [3, 4, 5, 6] + """ + + def __iter__(self): + raise NotImplementedError + + def __add__(self, other): + return ChainDataset([self, other])
    + + # No `def __len__(self)` default? + # See NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ] + + +
[docs]class TensorDataset(Dataset): + r"""Dataset wrapping tensors. + + Each sample will be retrieved by indexing tensors along the first dimension. + + Arguments: + *tensors (Tensor): tensors that have the same size in the first dimension. + """ + + def __init__(self, *tensors): + assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors) + self.tensors = tensors + + def __getitem__(self, index): + return tuple(tensor[index] for tensor in self.tensors) + + def __len__(self): + return self.tensors[0].size(0)
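+
+# A minimal usage sketch (illustrative):
+#
+#     >>> features, labels = torch.randn(8, 3), torch.arange(8)
+#     >>> ds = TensorDataset(features, labels)
+#     >>> x, y = ds[0]   # first row of `features`, first element of `labels`
+#     >>> len(ds)
+#     8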
    + + +
    [docs]class ConcatDataset(Dataset): + r"""Dataset as a concatenation of multiple datasets. + + This class is useful to assemble different existing datasets. + + Arguments: + datasets (sequence): List of datasets to be concatenated + """ + + @staticmethod + def cumsum(sequence): + r, s = [], 0 + for e in sequence: + l = len(e) + r.append(l + s) + s += l + return r + + def __init__(self, datasets): + super(ConcatDataset, self).__init__() + assert len(datasets) > 0, 'datasets should not be an empty iterable' + self.datasets = list(datasets) + for d in self.datasets: + assert not isinstance(d, IterableDataset), "ConcatDataset does not support IterableDataset" + self.cumulative_sizes = self.cumsum(self.datasets) + + def __len__(self): + return self.cumulative_sizes[-1] + + def __getitem__(self, idx): + if idx < 0: + if -idx > len(self): + raise ValueError("absolute value of index should not exceed dataset length") + idx = len(self) + idx + dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) + if dataset_idx == 0: + sample_idx = idx + else: + sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] + return self.datasets[dataset_idx][sample_idx] + + @property + def cummulative_sizes(self): + warnings.warn("cummulative_sizes attribute is renamed to " + "cumulative_sizes", DeprecationWarning, stacklevel=2) + return self.cumulative_sizes
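+
+# Index lookup walks `cumulative_sizes` with `bisect_right`. A small sketch
+# (illustrative):
+#
+#     >>> a = TensorDataset(torch.arange(3))
+#     >>> b = TensorDataset(torch.arange(5))
+#     >>> cat = a + b                # `Dataset.__add__` builds a ConcatDataset
+#     >>> cat.cumulative_sizes
+#     [3, 8]
+#     >>> cat[4]                     # bisect maps global index 4 to b[1]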
    + + +
[docs]class ChainDataset(IterableDataset): + r"""Dataset for chaining multiple :class:`IterableDataset` s. + + This class is useful to assemble different existing dataset streams. The + chaining operation is done on-the-fly, so concatenating large-scale + datasets with this class is efficient. + + Arguments: + datasets (iterable of IterableDataset): datasets to be chained together + """ + def __init__(self, datasets): + super(ChainDataset, self).__init__() + self.datasets = datasets + + def __iter__(self): + for d in self.datasets: + assert isinstance(d, IterableDataset), "ChainDataset only supports IterableDataset" + for x in d: + yield x + + def __len__(self): + total = 0 + for d in self.datasets: + assert isinstance(d, IterableDataset), "ChainDataset only supports IterableDataset" + total += len(d) + return total
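+
+# A minimal sketch (illustrative; `StreamA` and `StreamB` are hypothetical
+# IterableDataset subclasses):
+#
+#     >>> for sample in ChainDataset([StreamA(), StreamB()]):
+#     ...     pass   # yields all of StreamA lazily, then all of StreamB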
    + + +
    [docs]class Subset(Dataset): + r""" + Subset of a dataset at specified indices. + + Arguments: + dataset (Dataset): The whole Dataset + indices (sequence): Indices in the whole set selected for subset + """ + def __init__(self, dataset, indices): + self.dataset = dataset + self.indices = indices + + def __getitem__(self, idx): + return self.dataset[self.indices[idx]] + + def __len__(self): + return len(self.indices)
    + + +
    [docs]def random_split(dataset, lengths): + r""" + Randomly split a dataset into non-overlapping new datasets of given lengths. + + Arguments: + dataset (Dataset): Dataset to be split + lengths (sequence): lengths of splits to be produced + """ + if sum(lengths) != len(dataset): + raise ValueError("Sum of input lengths does not equal the length of the input dataset!") + + indices = randperm(sum(lengths)).tolist() + return [Subset(dataset, indices[offset - length:offset]) for offset, length in zip(_accumulate(lengths), lengths)]
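+
+# A minimal usage sketch (illustrative):
+#
+#     >>> ds = TensorDataset(torch.arange(100))
+#     >>> train, val = random_split(ds, [80, 20])   # two disjoint Subsets
+#     >>> len(train), len(val)
+#     (80, 20)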
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/utils/data/distributed.html b/docs/stable/_modules/torch/utils/data/distributed.html
new file mode 100644
index 000000000000..8c1f7def5e3f
--- /dev/null
+++ b/docs/stable/_modules/torch/utils/data/distributed.html
@@ -0,0 +1,581 @@

    Source code for torch.utils.data.distributed

    +import math
    +import torch
    +from . import Sampler
    +import torch.distributed as dist
    +
    +
    +
[docs]class DistributedSampler(Sampler): + """Sampler that restricts data loading to a subset of the dataset. + + It is especially useful in conjunction with + :class:`torch.nn.parallel.DistributedDataParallel`. In such a case, each + process can pass a DistributedSampler instance as a DataLoader sampler, + and load a subset of the original dataset that is exclusive to it. + + .. note:: + The dataset is assumed to be of constant size. + + Arguments: + dataset: Dataset used for sampling. + num_replicas (optional): Number of processes participating in + distributed training. + rank (optional): Rank of the current process within num_replicas. + shuffle (optional): If ``True`` (default), the sampler will shuffle the indices. + """ + + def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True): + if num_replicas is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + num_replicas = dist.get_world_size() + if rank is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + rank = dist.get_rank() + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.epoch = 0 + self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) + self.total_size = self.num_samples * self.num_replicas + self.shuffle = shuffle + + def __iter__(self): + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + if self.shuffle: + indices = torch.randperm(len(self.dataset), generator=g).tolist() + else: + indices = list(range(len(self.dataset))) + + # add extra samples to make it evenly divisible + indices += indices[:(self.total_size - len(indices))] + assert len(indices) == self.total_size + + # subsample + indices = indices[self.rank:self.total_size:self.num_replicas] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self): + return self.num_samples + + def set_epoch(self, epoch): + self.epoch = epoch
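+
+# A typical usage sketch (illustrative; assumes the default process group has
+# been initialized and `ds` is a map-style dataset): call `set_epoch` at the
+# start of each epoch so the epoch-seeded shuffle differs between epochs while
+# staying consistent across ranks.
+#
+#     sampler = DistributedSampler(ds)
+#     loader = torch.utils.data.DataLoader(ds, sampler=sampler)
+#     for epoch in range(num_epochs):
+#         sampler.set_epoch(epoch)
+#         for batch in loader:
+#             ...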
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/utils/data/sampler.html b/docs/stable/_modules/torch/utils/data/sampler.html
new file mode 100644
index 000000000000..f29341048634
--- /dev/null
+++ b/docs/stable/_modules/torch/utils/data/sampler.html
@@ -0,0 +1,726 @@

    Source code for torch.utils.data.sampler

    +import torch
    +from torch._six import int_classes as _int_classes
    +
    +
    +
[docs]class Sampler(object): + r"""Base class for all Samplers. + + Every Sampler subclass has to provide an :meth:`__iter__` method, providing a + way to iterate over indices of dataset elements, and a :meth:`__len__` method + that returns the length of the returned iterator. + + .. note:: The :meth:`__len__` method isn't strictly required by + :class:`~torch.utils.data.DataLoader`, but is expected in any + calculation involving the length of a :class:`~torch.utils.data.DataLoader`. + """ + + def __init__(self, data_source): + pass + + def __iter__(self): + raise NotImplementedError
+ + # NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ] + # + # Many times we have an abstract class representing a collection/iterable of + # data, e.g., `torch.utils.data.Sampler`, with its subclasses optionally + # implementing a `__len__` method. In such cases, we must make sure to not + # provide a default implementation, because both straightforward default + # implementations have their issues: + # + # + `return NotImplemented`: + # Calling `len(subclass_instance)` raises: + # TypeError: 'NotImplementedType' object cannot be interpreted as an integer + # + # + `raise NotImplementedError()`: + # This prevents triggering some fallback behavior. E.g., the built-in + # `list(X)` tries to call `len(X)` first, and executes a different code + # path if the method is not found or `NotImplemented` is returned, while + # raising a `NotImplementedError` will propagate and make the call + # fail where it could have used `__iter__` to complete the call. + # + # Thus, the only two sensible things to do are + # + # + **not** provide a default `__len__`. + # + # + raise a `TypeError` instead, which is what Python uses when users call + # a method that is not defined on an object. + # (@ssnl verifies that this works on at least Python 3.7.) + + +
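+
+# A minimal custom sampler sketch (illustrative): a Sampler only needs
+# `__iter__` (and usually `__len__`); this one yields every other index of
+# its data source.
+#
+#     class EvenIndexSampler(Sampler):
+#         def __init__(self, data_source):
+#             self.data_source = data_source
+#
+#         def __iter__(self):
+#             return iter(range(0, len(self.data_source), 2))
+#
+#         def __len__(self):
+#             return (len(self.data_source) + 1) // 2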
    [docs]class SequentialSampler(Sampler): + r"""Samples elements sequentially, always in the same order. + + Arguments: + data_source (Dataset): dataset to sample from + """ + + def __init__(self, data_source): + self.data_source = data_source + + def __iter__(self): + return iter(range(len(self.data_source))) + + def __len__(self): + return len(self.data_source)
    + + +
[docs]class RandomSampler(Sampler): + r"""Samples elements randomly. If without replacement, then sample from a shuffled dataset. + If with replacement, then the user can specify :attr:`num_samples` to draw. + + Arguments: + data_source (Dataset): dataset to sample from + replacement (bool): samples are drawn with replacement if ``True``, default=``False`` + num_samples (int): number of samples to draw, default=`len(dataset)`. This argument + is supposed to be specified only when `replacement` is ``True``. + """ + + def __init__(self, data_source, replacement=False, num_samples=None): + self.data_source = data_source + self.replacement = replacement + self._num_samples = num_samples + + if not isinstance(self.replacement, bool): + raise ValueError("replacement should be a boolean value, but got " + "replacement={}".format(self.replacement)) + + if self._num_samples is not None and not replacement: + raise ValueError("With replacement=False, num_samples should not be specified, " + "since a random permutation will be performed.") + + if not isinstance(self.num_samples, int) or self.num_samples <= 0: + raise ValueError("num_samples should be a positive integer " + "value, but got num_samples={}".format(self.num_samples)) + + @property + def num_samples(self): + # dataset size might change at runtime + if self._num_samples is None: + return len(self.data_source) + return self._num_samples + + def __iter__(self): + n = len(self.data_source) + if self.replacement: + return iter(torch.randint(high=n, size=(self.num_samples,), dtype=torch.int64).tolist()) + return iter(torch.randperm(n).tolist()) + + def __len__(self): + return self.num_samples
    + + +
    [docs]class SubsetRandomSampler(Sampler): + r"""Samples elements randomly from a given list of indices, without replacement. + + Arguments: + indices (sequence): a sequence of indices + """ + + def __init__(self, indices): + self.indices = indices + + def __iter__(self): + return (self.indices[i] for i in torch.randperm(len(self.indices))) + + def __len__(self): + return len(self.indices)
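+
+# A minimal usage sketch (illustrative): restrict a DataLoader to fixed,
+# randomly ordered indices, e.g. a validation split.
+#
+#     >>> loader = torch.utils.data.DataLoader(ds, sampler=SubsetRandomSampler([0, 2, 5, 7]))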
    + + +
[docs]class WeightedRandomSampler(Sampler): + r"""Samples elements from ``[0,..,len(weights)-1]`` with given probabilities (weights). + + Args: + weights (sequence): a sequence of weights, not necessarily summing up to one + num_samples (int): number of samples to draw + replacement (bool): if ``True``, samples are drawn with replacement. + If not, they are drawn without replacement, which means that when a + sample index is drawn for a row, it cannot be drawn again for that row. + + Example: + >>> list(WeightedRandomSampler([0.1, 0.9, 0.4, 0.7, 3.0, 0.6], 5, replacement=True)) + [0, 0, 0, 1, 0] + >>> list(WeightedRandomSampler([0.9, 0.4, 0.05, 0.2, 0.3, 0.1], 5, replacement=False)) + [0, 1, 4, 3, 2] + """ + + def __init__(self, weights, num_samples, replacement=True): + if not isinstance(num_samples, _int_classes) or isinstance(num_samples, bool) or \ + num_samples <= 0: + raise ValueError("num_samples should be a positive integer " + "value, but got num_samples={}".format(num_samples)) + if not isinstance(replacement, bool): + raise ValueError("replacement should be a boolean value, but got " + "replacement={}".format(replacement)) + self.weights = torch.as_tensor(weights, dtype=torch.double) + self.num_samples = num_samples + self.replacement = replacement + + def __iter__(self): + return iter(torch.multinomial(self.weights, self.num_samples, self.replacement).tolist()) + + def __len__(self): + return self.num_samples
    + + +
    [docs]class BatchSampler(Sampler): + r"""Wraps another sampler to yield a mini-batch of indices. + + Args: + sampler (Sampler): Base sampler. + batch_size (int): Size of mini-batch. + drop_last (bool): If ``True``, the sampler will drop the last batch if + its size would be less than ``batch_size`` + + Example: + >>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=False)) + [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] + >>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=True)) + [[0, 1, 2], [3, 4, 5], [6, 7, 8]] + """ + + def __init__(self, sampler, batch_size, drop_last): + if not isinstance(sampler, Sampler): + raise ValueError("sampler should be an instance of " + "torch.utils.data.Sampler, but got sampler={}" + .format(sampler)) + if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \ + batch_size <= 0: + raise ValueError("batch_size should be a positive integer value, " + "but got batch_size={}".format(batch_size)) + if not isinstance(drop_last, bool): + raise ValueError("drop_last should be a boolean value, but got " + "drop_last={}".format(drop_last)) + self.sampler = sampler + self.batch_size = batch_size + self.drop_last = drop_last + + def __iter__(self): + batch = [] + for idx in self.sampler: + batch.append(idx) + if len(batch) == self.batch_size: + yield batch + batch = [] + if len(batch) > 0 and not self.drop_last: + yield batch + + def __len__(self): + if self.drop_last: + return len(self.sampler) // self.batch_size + else: + return (len(self.sampler) + self.batch_size - 1) // self.batch_size
\ No newline at end of file
diff --git a/docs/stable/_modules/torch/utils/tensorboard/writer.html b/docs/stable/_modules/torch/utils/tensorboard/writer.html
new file mode 100644
index 000000000000..8e8ee3cb621f
--- /dev/null
+++ b/docs/stable/_modules/torch/utils/tensorboard/writer.html
@@ -0,0 +1,1452 @@

    Source code for torch.utils.tensorboard.writer

    +"""Provides an API for writing protocol buffers to event files to be
    +consumed by TensorBoard for visualization."""
    +
    +from __future__ import absolute_import
    +from __future__ import division
    +from __future__ import print_function
    +
    +import os
    +import six
    +import time
    +import torch
    +
    +from tensorboard.compat.proto.event_pb2 import SessionLog
    +from tensorboard.compat.proto.event_pb2 import Event
    +from tensorboard.compat.proto import event_pb2
    +from tensorboard.summary.writer.event_file_writer import EventFileWriter
    +
    +from ._convert_np import make_np
    +from ._embedding import make_mat, make_sprite, make_tsv, append_pbtxt
    +from ._onnx_graph import load_onnx_graph
    +from ._pytorch_graph import graph
    +from ._utils import figure_to_image
    +from .summary import (
    +    scalar, histogram, histogram_raw, image, audio, text,
    +    pr_curve, pr_curve_raw, video, custom_scalars, image_boxes, mesh
    +)
    +
    +
    +class FileWriter(object):
    +    """Writes protocol buffers to event files to be consumed by TensorBoard.
    +
    +    The `FileWriter` class provides a mechanism to create an event file in a
    +    given directory and add summaries and events to it. The class updates the
    +    file contents asynchronously. This allows a training program to call methods
    +    to add data to the file directly from the training loop, without slowing down
    +    training.
    +    """
    +
    +    def __init__(self, log_dir, max_queue=10, flush_secs=120, filename_suffix=''):
    +        """Creates a `FileWriter` and an event file.
    +        On construction the writer creates a new event file in `log_dir`.
    +        The other arguments to the constructor control the asynchronous writes to
    +        the event file.
    +
    +        Args:
    +          log_dir: A string. Directory where event file will be written.
    +          max_queue: Integer. Size of the queue for pending events and
    +            summaries before one of the 'add' calls forces a flush to disk.
    +            Default is ten items.
    +          flush_secs: Number. How often, in seconds, to flush the
    +            pending events and summaries to disk. Default is every two minutes.
    +          filename_suffix: A string. Suffix added to all event filenames
    +            in the log_dir directory. More details on filename construction in
    +            tensorboard.summary.writer.event_file_writer.EventFileWriter.
    +        """
    +        # Sometimes PosixPath is passed in and we need to coerce it to
    +        # a string in all cases
    +        # TODO: See if we can remove this in the future if we are
    +        # actually the ones passing in a PosixPath
    +        log_dir = str(log_dir)
    +        self.event_writer = EventFileWriter(
    +            log_dir, max_queue, flush_secs, filename_suffix)
    +
    +    def get_logdir(self):
    +        """Returns the directory where event file will be written."""
    +        return self.event_writer.get_logdir()
    +
    +    def add_event(self, event, step=None, walltime=None):
    +        """Adds an event to the event file.
    +        Args:
    +          event: An `Event` protocol buffer.
    +          step: Number. Optional global step value for training process
    +            to record with the event.
    +          walltime: float. Optional walltime to override the default (current)
    +            walltime (from time.time()) seconds after epoch
    +        """
    +        event.wall_time = time.time() if walltime is None else walltime
    +        if step is not None:
    +            # Make sure step is converted from numpy or other formats
    +            # since protobuf might not convert depending on version
    +            event.step = int(step)
    +        self.event_writer.add_event(event)
    +
    +    def add_summary(self, summary, global_step=None, walltime=None):
    +        """Adds a `Summary` protocol buffer to the event file.
    +        This method wraps the provided summary in an `Event` protocol buffer
    +        and adds it to the event file.
    +
    +        Args:
    +          summary: A `Summary` protocol buffer.
    +          global_step: Number. Optional global step value for training process
    +            to record with the summary.
    +          walltime: float. Optional walltime to override the default (current)
    +            walltime (from time.time()) seconds after epoch
    +        """
    +        event = event_pb2.Event(summary=summary)
    +        self.add_event(event, global_step, walltime)
    +
    +    def add_graph(self, graph_profile, walltime=None):
    +        """Adds a `Graph` and step stats protocol buffer to the event file.
    +
    +        Args:
    +          graph_profile: A `Graph` and step stats protocol buffer.
    +          walltime: float. Optional walltime to override the default (current)
    +            walltime (from time.time()) seconds after epoch
    +        """
    +        graph = graph_profile[0]
    +        stepstats = graph_profile[1]
    +        event = event_pb2.Event(graph_def=graph.SerializeToString())
    +        self.add_event(event, None, walltime)
    +
    +        trm = event_pb2.TaggedRunMetadata(
    +            tag='step1', run_metadata=stepstats.SerializeToString())
    +        event = event_pb2.Event(tagged_run_metadata=trm)
    +        self.add_event(event, None, walltime)
    +
    +    def add_onnx_graph(self, graph, walltime=None):
    +        """Adds a `Graph` protocol buffer to the event file.
    +
    +        Args:
    +          graph: A `Graph` protocol buffer.
    +          walltime: float. Optional walltime to override the default (current)
+            walltime (from time.time()) seconds after epoch
    +        """
    +        event = event_pb2.Event(graph_def=graph.SerializeToString())
    +        self.add_event(event, None, walltime)
    +
    +    def flush(self):
    +        """Flushes the event file to disk.
    +        Call this method to make sure that all pending events have been written to
    +        disk.
    +        """
    +        self.event_writer.flush()
    +
    +    def close(self):
+        """Flushes the event file to disk and closes the file.
    +        Call this method when you do not need the summary writer anymore.
    +        """
    +        self.event_writer.close()
    +
    +    def reopen(self):
    +        """Reopens the EventFileWriter.
    +        Can be called after `close()` to add more events in the same directory.
    +        The events will go into a new events file.
    +        Does nothing if the EventFileWriter was not closed.
    +        """
    +        self.event_writer.reopen()
    +
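For orientation, a minimal usage sketch of `FileWriter` on its own (most code goes through `SummaryWriter` below); the `demo_logs` directory is an arbitrary example and the proto import path is an assumption about where TensorBoard keeps its `Event` class::

    from torch.utils.tensorboard.writer import FileWriter
    from tensorboard.compat.proto.event_pb2 import Event  # assumed proto location

    writer = FileWriter('demo_logs')  # creates an event file in demo_logs/
    writer.add_event(Event(file_version='brain.Event:2'), step=0)
    writer.flush()   # make sure pending events hit the disk
    writer.close()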
    +
    +
    [docs]class SummaryWriter(object): + """Writes entries directly to event files in the log_dir to be + consumed by TensorBoard. + + The `SummaryWriter` class provides a high-level API to create an event file + in a given directory and add summaries and events to it. The class updates the + file contents asynchronously. This allows a training program to call methods + to add data to the file directly from the training loop, without slowing down + training. + """ + +
[docs] def __init__(self, log_dir=None, comment='', purge_step=None, max_queue=10, + flush_secs=120, filename_suffix=''): + """Creates a `SummaryWriter` that will write out events and summaries + to the event file. + + Args: + log_dir (string): Save directory location. Default is + runs/**CURRENT_DATETIME_HOSTNAME**, which changes after each run. + Use a hierarchical folder structure to compare + between runs easily, e.g. pass in 'runs/exp1', 'runs/exp2', etc. + for each new experiment to compare across them. + comment (string): Comment log_dir suffix appended to the default + ``log_dir``. If ``log_dir`` is assigned, this argument has no effect. + purge_step (int): + When logging crashes at step :math:`T+X` and restarts at step :math:`T`, + any events whose global_step is larger than or equal to :math:`T` will be + purged and hidden from TensorBoard. + Note that crashed and resumed experiments should have the same ``log_dir``. + max_queue (int): Size of the queue for pending events and + summaries before one of the 'add' calls forces a flush to disk. + Default is ten items. + flush_secs (int): How often, in seconds, to flush the + pending events and summaries to disk. Default is every two minutes. + filename_suffix (string): Suffix added to all event filenames in + the log_dir directory. More details on filename construction in + tensorboard.summary.writer.event_file_writer.EventFileWriter. + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + + # create a summary writer with automatically generated folder name. + writer = SummaryWriter() + # folder location: runs/May04_22-14-54_s-MacBook-Pro.local/ + + # create a summary writer using the specified folder name. + writer = SummaryWriter("my_experiment") + # folder location: my_experiment + + # create a summary writer with comment appended. + writer = SummaryWriter(comment="LR_0.1_BATCH_16") + # folder location: runs/May04_22-14-54_s-MacBook-Pro.localLR_0.1_BATCH_16/ + + """ + torch._C._log_api_usage_once("tensorboard.create.summarywriter") + if not log_dir: + import socket + from datetime import datetime + current_time = datetime.now().strftime('%b%d_%H-%M-%S') + log_dir = os.path.join( + 'runs', current_time + '_' + socket.gethostname() + comment) + self.log_dir = log_dir + self.purge_step = purge_step + self.max_queue = max_queue + self.flush_secs = flush_secs + self.filename_suffix = filename_suffix + + # Initialize the file writers, but they can be cleared out on close + # and recreated later as needed. + self.file_writer = self.all_writers = None + self._get_file_writer() + + # Create default bins for histograms, see generate_testdata.py in tensorflow/tensorboard + v = 1E-12 + buckets = [] + neg_buckets = [] + while v < 1E20: + buckets.append(v) + neg_buckets.append(-v) + v *= 1.1 + self.default_bins = neg_buckets[::-1] + [0] + buckets
    + + def _check_caffe2_blob(self, item): + """ + Caffe2 users have the option of passing a string representing the name of + a blob in the workspace instead of passing the actual Tensor/array containing + the numeric values. Thus, we need to check if we received a string as input + instead of an actual Tensor/array, and if so, we need to fetch the Blob + from the workspace corresponding to that name. Fetching can be done with the + following: + + from caffe2.python import workspace (if not already imported) + workspace.FetchBlob(blob_name) + workspace.FetchBlobs([blob_name1, blob_name2, ...]) + """ + return isinstance(item, six.string_types) + + def _get_file_writer(self): + """Returns the default FileWriter instance. Recreates it if closed.""" + if self.all_writers is None or self.file_writer is None: + self.file_writer = FileWriter(self.log_dir, self.max_queue, + self.flush_secs, self.filename_suffix) + self.all_writers = {self.file_writer.get_logdir(): self.file_writer} + if self.purge_step is not None: + most_recent_step = self.purge_step + self.file_writer.add_event( + Event(step=most_recent_step, file_version='brain.Event:2')) + self.file_writer.add_event( + Event(step=most_recent_step, session_log=SessionLog(status=SessionLog.START))) + self.purge_step = None + return self.file_writer + + def get_logdir(self): + """Returns the directory where event files will be written.""" + return self.log_dir + +
    [docs] def add_scalar(self, tag, scalar_value, global_step=None, walltime=None): + """Add scalar data to summary. + + Args: + tag (string): Data identifier + scalar_value (float or string/blobname): Value to save + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + with seconds after epoch of event + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + writer = SummaryWriter() + x = range(100) + for i in x: + writer.add_scalar('y=2x', i * 2, i) + writer.close() + + Expected result: + + .. image:: _static/img/tensorboard/add_scalar.png + :scale: 50 % + + """ + if self._check_caffe2_blob(scalar_value): + scalar_value = workspace.FetchBlob(scalar_value) + self._get_file_writer().add_summary( + scalar(tag, scalar_value), global_step, walltime)
    + +
[docs] def add_scalars(self, main_tag, tag_scalar_dict, global_step=None, walltime=None): + """Add multiple scalar values to summary. + + Note that this function also keeps logged scalars in memory; in extreme cases this can exhaust your RAM. + + Args: + main_tag (string): The parent name for the tags + tag_scalar_dict (dict): Key-value pair storing the tag and corresponding values + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + writer = SummaryWriter() + r = 5 + for i in range(100): + writer.add_scalars('run_14h', {'xsinx':i*np.sin(i/r), + 'xcosx':i*np.cos(i/r), + 'tanx': np.tan(i/r)}, i) + writer.close() + # This call adds three values to the same scalar plot with the tag + # 'run_14h' in TensorBoard's scalar section. + + Expected result: + + .. image:: _static/img/tensorboard/add_scalars.png + :scale: 50 % + + """ + walltime = time.time() if walltime is None else walltime + fw_logdir = self._get_file_writer().get_logdir() + for tag, scalar_value in tag_scalar_dict.items(): + fw_tag = fw_logdir + "/" + main_tag.replace("/", "_") + "_" + tag + if fw_tag in self.all_writers.keys(): + fw = self.all_writers[fw_tag] + else: + fw = FileWriter(fw_tag, self.max_queue, self.flush_secs, + self.filename_suffix) + self.all_writers[fw_tag] = fw + if self._check_caffe2_blob(scalar_value): + scalar_value = workspace.FetchBlob(scalar_value) + fw.add_summary(scalar(main_tag, scalar_value), + global_step, walltime)
    + +
    [docs] def add_histogram(self, tag, values, global_step=None, bins='tensorflow', walltime=None, max_bins=None): + """Add histogram to summary. + + Args: + tag (string): Data identifier + values (torch.Tensor, numpy.array, or string/blobname): Values to build histogram + global_step (int): Global step value to record + bins (string): One of {'tensorflow','auto', 'fd', ...}. This determines how the bins are made. You can find + other options in: https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + import numpy as np + writer = SummaryWriter() + for i in range(10): + x = np.random.random(1000) + writer.add_histogram('distribution centers', x + i, i) + writer.close() + + Expected result: + + .. image:: _static/img/tensorboard/add_histogram.png + :scale: 50 % + + """ + if self._check_caffe2_blob(values): + values = workspace.FetchBlob(values) + if isinstance(bins, six.string_types) and bins == 'tensorflow': + bins = self.default_bins + self._get_file_writer().add_summary( + histogram(tag, values, bins, max_bins=max_bins), global_step, walltime)
    + + def add_histogram_raw(self, tag, min, max, num, sum, sum_squares, + bucket_limits, bucket_counts, global_step=None, + walltime=None): + """Adds histogram with raw data. + + Args: + tag (string): Data identifier + min (float or int): Min value + max (float or int): Max value + num (int): Number of values + sum (float or int): Sum of all values + sum_squares (float or int): Sum of squares for all values + bucket_limits (torch.Tensor, numpy.array): Upper value per bucket. + The number of elements of it should be the same as `bucket_counts`. + bucket_counts (torch.Tensor, numpy.array): Number of values per bucket + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + see: https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/histogram/README.md + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + import numpy as np + writer = SummaryWriter() + dummy_data = [] + for idx, value in enumerate(range(50)): + dummy_data += [idx + 0.001] * value + + bins = list(range(50+2)) + bins = np.array(bins) + values = np.array(dummy_data).astype(float).reshape(-1) + counts, limits = np.histogram(values, bins=bins) + sum_sq = values.dot(values) + writer.add_histogram_raw( + tag='histogram_with_raw_data', + min=values.min(), + max=values.max(), + num=len(values), + sum=values.sum(), + sum_squares=sum_sq, + bucket_limits=limits[1:].tolist(), + bucket_counts=counts.tolist(), + global_step=0) + writer.close() + + Expected result: + + .. image:: _static/img/tensorboard/add_histogram_raw.png + :scale: 50 % + + """ + if len(bucket_limits) != len(bucket_counts): + raise ValueError('len(bucket_limits) != len(bucket_counts), see the document.') + self._get_file_writer().add_summary( + histogram_raw(tag, + min, + max, + num, + sum, + sum_squares, + bucket_limits, + bucket_counts), + global_step, + walltime) + +
[docs] def add_image(self, tag, img_tensor, global_step=None, walltime=None, dataformats='CHW'): + """Add image data to summary. + + Note that this requires the ``pillow`` package. + + Args: + tag (string): Data identifier + img_tensor (torch.Tensor, numpy.array, or string/blobname): Image data + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + Shape: + img_tensor: Default is :math:`(3, H, W)`. You can use ``torchvision.utils.make_grid()`` to + convert a batch of tensors into 3xHxW format or call ``add_images`` and let us do the job. + Tensors with :math:`(1, H, W)`, :math:`(H, W)`, or :math:`(H, W, 3)` shapes are also suitable as long as + the corresponding ``dataformats`` argument is passed, e.g. CHW, HWC, HW. + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + import numpy as np + img = np.zeros((3, 100, 100)) + img[0] = np.arange(0, 10000).reshape(100, 100) / 10000 + img[1] = 1 - np.arange(0, 10000).reshape(100, 100) / 10000 + + img_HWC = np.zeros((100, 100, 3)) + img_HWC[:, :, 0] = np.arange(0, 10000).reshape(100, 100) / 10000 + img_HWC[:, :, 1] = 1 - np.arange(0, 10000).reshape(100, 100) / 10000 + + writer = SummaryWriter() + writer.add_image('my_image', img, 0) + + # If you have non-default dimension setting, set the dataformats argument. + writer.add_image('my_image_HWC', img_HWC, 0, dataformats='HWC') + writer.close() + + Expected result: + + .. image:: _static/img/tensorboard/add_image.png + :scale: 50 % + + """ + if self._check_caffe2_blob(img_tensor): + img_tensor = workspace.FetchBlob(img_tensor) + self._get_file_writer().add_summary( + image(tag, img_tensor, dataformats=dataformats), global_step, walltime)
    + +
    [docs] def add_images(self, tag, img_tensor, global_step=None, walltime=None, dataformats='NCHW'): + """Add batched image data to summary. + + Note that this requires the ``pillow`` package. + + Args: + tag (string): Data identifier + img_tensor (torch.Tensor, numpy.array, or string/blobname): Image data + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + dataformats (string): Image data format specification of the form + NCHW, NHWC, CHW, HWC, HW, WH, etc. + Shape: + img_tensor: Default is :math:`(N, 3, H, W)`. If ``dataformats`` is specified, other shape will be + accepted. e.g. NCHW or NHWC. + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + import numpy as np + + img_batch = np.zeros((16, 3, 100, 100)) + for i in range(16): + img_batch[i, 0] = np.arange(0, 10000).reshape(100, 100) / 10000 / 16 * i + img_batch[i, 1] = (1 - np.arange(0, 10000).reshape(100, 100) / 10000) / 16 * i + + writer = SummaryWriter() + writer.add_images('my_image_batch', img_batch, 0) + writer.close() + + Expected result: + + .. image:: _static/img/tensorboard/add_images.png + :scale: 30 % + + """ + if self._check_caffe2_blob(img_tensor): + img_tensor = workspace.FetchBlob(img_tensor) + self._get_file_writer().add_summary( + image(tag, img_tensor, dataformats=dataformats), global_step, walltime)
+ + def add_image_with_boxes(self, tag, img_tensor, box_tensor, global_step=None, + walltime=None, rescale=1, dataformats='CHW'): + """Add image and draw bounding boxes on the image. + + Args: + tag (string): Data identifier + img_tensor (torch.Tensor, numpy.array, or string/blobname): Image data + box_tensor (torch.Tensor, numpy.array, or string/blobname): Box data (for detected objects) + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + rescale (float): Optional scale override + dataformats (string): Image data format specification of the form + NCHW, NHWC, CHW, HWC, HW, WH, etc. + Shape: + img_tensor: Default is :math:`(3, H, W)`. It can be specified with the ``dataformats`` argument, + e.g. CHW or HWC + + box_tensor: (torch.Tensor, numpy.array, or string/blobname): Nx4, where N is the number of + boxes and each 4 elements in a row represent (xmin, ymin, xmax, ymax). + """ + if self._check_caffe2_blob(img_tensor): + img_tensor = workspace.FetchBlob(img_tensor) + if self._check_caffe2_blob(box_tensor): + box_tensor = workspace.FetchBlob(box_tensor) + self._get_file_writer().add_summary(image_boxes( + tag, img_tensor, box_tensor, rescale=rescale, dataformats=dataformats), global_step, walltime) + +
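Since ``add_image_with_boxes`` ships no example above, here is a minimal sketch with made-up image and box values::

    from torch.utils.tensorboard import SummaryWriter
    import numpy as np

    img = np.random.rand(3, 100, 100)                        # CHW image, values in [0, 1]
    boxes = np.array([[10, 10, 40, 40], [50, 60, 90, 95]])   # Nx4 (xmin, ymin, xmax, ymax)

    writer = SummaryWriter()
    writer.add_image_with_boxes('detections', img, boxes, 0)
    writer.close()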
    [docs] def add_figure(self, tag, figure, global_step=None, close=True, walltime=None): + """Render matplotlib figure into an image and add it to summary. + + Note that this requires the ``matplotlib`` package. + + Args: + tag (string): Data identifier + figure (matplotlib.pyplot.figure) or list of figures: Figure or a list of figures + global_step (int): Global step value to record + close (bool): Flag to automatically close the figure + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + """ + if isinstance(figure, list): + self.add_image(tag, figure_to_image(figure, close), global_step, walltime, dataformats='NCHW') + else: + self.add_image(tag, figure_to_image(figure, close), global_step, walltime, dataformats='CHW')
    + +
    [docs] def add_video(self, tag, vid_tensor, global_step=None, fps=4, walltime=None): + """Add video data to summary. + + Note that this requires the ``moviepy`` package. + + Args: + tag (string): Data identifier + vid_tensor (torch.Tensor): Video data + global_step (int): Global step value to record + fps (float or int): Frames per second + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + Shape: + vid_tensor: :math:`(N, T, C, H, W)`. The values should lie in [0, 255] for type `uint8` or [0, 1] for type `float`. + """ + self._get_file_writer().add_summary( + video(tag, vid_tensor, fps), global_step, walltime)
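A minimal sketch for ``add_video`` (requires the ``moviepy`` package, as noted above); the random clip is made up for illustration::

    from torch.utils.tensorboard import SummaryWriter
    import torch

    vid = torch.rand(1, 16, 3, 32, 32)   # (N, T, C, H, W), float values in [0, 1]

    writer = SummaryWriter()
    writer.add_video('random_clip', vid, global_step=0, fps=4)
    writer.close()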
    + +
    [docs] def add_audio(self, tag, snd_tensor, global_step=None, sample_rate=44100, walltime=None): + """Add audio data to summary. + + Args: + tag (string): Data identifier + snd_tensor (torch.Tensor): Sound data + global_step (int): Global step value to record + sample_rate (int): sample rate in Hz + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + Shape: + snd_tensor: :math:`(1, L)`. The values should lie between [-1, 1]. + """ + if self._check_caffe2_blob(snd_tensor): + snd_tensor = workspace.FetchBlob(snd_tensor) + self._get_file_writer().add_summary( + audio(tag, snd_tensor, sample_rate=sample_rate), global_step, walltime)
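A minimal sketch for ``add_audio``; the 440 Hz sine wave is a made-up signal that satisfies the :math:`(1, L)`, [-1, 1] shape contract::

    from torch.utils.tensorboard import SummaryWriter
    import math
    import torch

    t = torch.linspace(0, 1, 44100)                       # one second of samples
    snd = torch.sin(2 * math.pi * 440 * t).unsqueeze(0)   # (1, L), values in [-1, 1]

    writer = SummaryWriter()
    writer.add_audio('sine_440hz', snd, global_step=0, sample_rate=44100)
    writer.close()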
    + +
    [docs] def add_text(self, tag, text_string, global_step=None, walltime=None): + """Add text data to summary. + + Args: + tag (string): Data identifier + text_string (string): String to save + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + Examples:: + + writer.add_text('lstm', 'This is an lstm', 0) + writer.add_text('rnn', 'This is an rnn', 10) + """ + self._get_file_writer().add_summary( + text(tag, text_string), global_step, walltime)
    + + def add_onnx_graph(self, prototxt): + self._get_file_writer().add_onnx_graph(load_onnx_graph(prototxt)) + +
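``add_onnx_graph`` carries no docstring; judging from ``load_onnx_graph(prototxt)`` it appears to take a path to a serialized ONNX model on disk, so the following is only a hedged sketch ('model.onnx' is a hypothetical file)::

    from torch.utils.tensorboard import SummaryWriter

    writer = SummaryWriter()
    writer.add_onnx_graph('model.onnx')  # assumption: path to a serialized ONNX graph
    writer.close()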
    [docs] def add_graph(self, model, input_to_model=None, verbose=False): + # prohibit second call? + # no, let tensorboard handle it and show its warning message. + """Add graph data to summary. + + Args: + model (torch.nn.Module): Model to draw. + input_to_model (torch.Tensor or list of torch.Tensor): A variable or a tuple of + variables to be fed. + verbose (bool): Whether to print graph structure in console. + """ + if hasattr(model, 'forward'): + # A valid PyTorch model should have a 'forward' method + self._get_file_writer().add_graph(graph(model, input_to_model, verbose)) + else: + # Caffe2 models do not have the 'forward' method + from caffe2.proto import caffe2_pb2 + from caffe2.python import core + from ._caffe2_graph import ( + model_to_graph_def, nets_to_graph_def, protos_to_graph_def + ) + if isinstance(model, list): + if isinstance(model[0], core.Net): + current_graph = nets_to_graph_def(model) + elif isinstance(model[0], caffe2_pb2.NetDef): + current_graph = protos_to_graph_def(model) + else: + # Handles cnn.CNNModelHelper, model_helper.ModelHelper + current_graph = model_to_graph_def(model) + event = event_pb2.Event( + graph_def=current_graph.SerializeToString()) + self._get_file_writer().add_event(event)
    + + @staticmethod + def _encode(rawstr): + # I'd use urllib but, I'm unsure about the differences from python3 to python2, etc. + retval = rawstr + retval = retval.replace("%", "%%%02x" % (ord("%"))) + retval = retval.replace("/", "%%%02x" % (ord("/"))) + retval = retval.replace("\\", "%%%02x" % (ord("\\"))) + return retval + +
[docs] def add_embedding(self, mat, metadata=None, label_img=None, global_step=None, tag='default', metadata_header=None): + """Add embedding projector data to summary. + + Args: + mat (torch.Tensor or numpy.array): A matrix where each row is the feature vector of a data point + metadata (list): A list of labels, each element will be converted to string + label_img (torch.Tensor): Images corresponding to each data point + global_step (int): Global step value to record + tag (string): Name for the embedding + Shape: + mat: :math:`(N, D)`, where N is the number of data points and D is the feature dimension + + label_img: :math:`(N, C, H, W)` + + Examples:: + + import keyword + import torch + meta = [] + while len(meta)<100: + meta = meta+keyword.kwlist # get some strings + meta = meta[:100] + + for i, v in enumerate(meta): + meta[i] = v+str(i) + + label_img = torch.rand(100, 3, 10, 32) + for i in range(100): + label_img[i]*=i/100.0 + + writer.add_embedding(torch.randn(100, 5), metadata=meta, label_img=label_img) + writer.add_embedding(torch.randn(100, 5), label_img=label_img) + writer.add_embedding(torch.randn(100, 5), metadata=meta) + """ + mat = make_np(mat) + if global_step is None: + global_step = 0 + # clear pbtxt? + # Maybe we should encode the tag so slashes don't trip us up? + # I don't think this will mess us up, but better safe than sorry. + subdir = "%s/%s" % (str(global_step).zfill(5), self._encode(tag)) + save_path = os.path.join(self._get_file_writer().get_logdir(), subdir) + try: + os.makedirs(save_path) + except OSError: + print( + 'warning: Embedding dir exists, did you set global_step for add_embedding()?') + if metadata is not None: + assert mat.shape[0] == len( + metadata), '#labels should equal #data points' + make_tsv(metadata, save_path, metadata_header=metadata_header) + if label_img is not None: + assert mat.shape[0] == label_img.shape[0], '#images should equal #data points' + make_sprite(label_img, save_path) + assert mat.ndim == 2, 'mat should be 2D, where mat.size(0) is the number of data points' + make_mat(mat, save_path) + # new function to append a new embedding to the config file + append_pbtxt(metadata, label_img, + self._get_file_writer().get_logdir(), subdir, global_step, tag)
    + +
[docs] def add_pr_curve(self, tag, labels, predictions, global_step=None, + num_thresholds=127, weights=None, walltime=None): + """Add precision-recall curve. + Plotting a precision-recall curve lets you understand your model's + performance under different threshold settings. With this function, + you provide the ground truth labeling (T/F) and prediction confidence + (usually the output of your model) for each target. The TensorBoard UI + will let you choose the threshold interactively. + + Args: + tag (string): Data identifier + labels (torch.Tensor, numpy.array, or string/blobname): + Ground truth data. Binary label for each element. + predictions (torch.Tensor, numpy.array, or string/blobname): + The probability that an element is classified as true. + Values should be in [0, 1] + global_step (int): Global step value to record + num_thresholds (int): Number of thresholds used to draw the curve. + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + import numpy as np + labels = np.random.randint(2, size=100) # binary label + predictions = np.random.rand(100) + writer = SummaryWriter() + writer.add_pr_curve('pr_curve', labels, predictions, 0) + writer.close() + + """ + labels, predictions = make_np(labels), make_np(predictions) + self._get_file_writer().add_summary( + pr_curve(tag, labels, predictions, num_thresholds, weights), + global_step, walltime)
    + + def add_pr_curve_raw(self, tag, true_positive_counts, + false_positive_counts, + true_negative_counts, + false_negative_counts, + precision, + recall, + global_step=None, + num_thresholds=127, + weights=None, + walltime=None): + """Adds precision recall curve with raw data. + + Args: + tag (string): Data identifier + true_positive_counts (torch.Tensor, numpy.array, or string/blobname): true positive counts + false_positive_counts (torch.Tensor, numpy.array, or string/blobname): false positive counts + true_negative_counts (torch.Tensor, numpy.array, or string/blobname): true negative counts + false_negative_counts (torch.Tensor, numpy.array, or string/blobname): false negative counts + precision (torch.Tensor, numpy.array, or string/blobname): precision + recall (torch.Tensor, numpy.array, or string/blobname): recall + global_step (int): Global step value to record + num_thresholds (int): Number of thresholds used to draw the curve. + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + see: https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/pr_curve/README.md + """ + self._get_file_writer().add_summary( + pr_curve_raw(tag, + true_positive_counts, + false_positive_counts, + true_negative_counts, + false_negative_counts, + precision, + recall, + num_thresholds, + weights), + global_step, + walltime) + + def add_custom_scalars_multilinechart(self, tags, category='default', title='untitled'): + """Shorthand for creating multilinechart. Similar to ``add_custom_scalars()``, but the only necessary argument + is *tags*. + + Args: + tags (list): list of tags that have been used in ``add_scalar()`` + + Examples:: + + writer.add_custom_scalars_multilinechart(['twse/0050', 'twse/2330']) + """ + layout = {category: {title: ['Multiline', tags]}} + self._get_file_writer().add_summary(custom_scalars(layout)) + + def add_custom_scalars_marginchart(self, tags, category='default', title='untitled'): + """Shorthand for creating marginchart. Similar to ``add_custom_scalars()``, but the only necessary argument + is *tags*, which should have exactly 3 elements. + + Args: + tags (list): list of tags that have been used in ``add_scalar()`` + + Examples:: + + writer.add_custom_scalars_marginchart(['twse/0050', 'twse/2330', 'twse/2006']) + """ + assert len(tags) == 3 + layout = {category: {title: ['Margin', tags]}} + self._get_file_writer().add_summary(custom_scalars(layout)) + +
[docs] def add_custom_scalars(self, layout): + """Create a special chart by collecting chart tags in 'scalars'. Note that this function can only be called once + for each SummaryWriter() object. Because it only provides metadata to TensorBoard, the function can be called + before or after the training loop. + + Args: + layout (dict): {categoryName: *charts*}, where *charts* is also a dictionary + {chartName: *ListOfProperties*}. The first element in *ListOfProperties* is the chart's type + (one of **Multiline** or **Margin**) and the second element should be a list containing the tags + you have used in the add_scalar function, which will be collected into the new chart. + + Examples:: + + layout = {'Taiwan':{'twse':['Multiline',['twse/0050', 'twse/2330']]}, + 'USA':{ 'dow':['Margin', ['dow/aaa', 'dow/bbb', 'dow/ccc']], + 'nasdaq':['Margin', ['nasdaq/aaa', 'nasdaq/bbb', 'nasdaq/ccc']]}} + + writer.add_custom_scalars(layout) + """ + self._get_file_writer().add_summary(custom_scalars(layout))
    + +
    [docs] def add_mesh(self, tag, vertices, colors=None, faces=None, config_dict=None, global_step=None, walltime=None): + """Add meshes or 3D point clouds to TensorBoard. The visualization is based on Three.js, + so it allows users to interact with the rendered object. Besides the basic definitions + such as vertices, faces, users can further provide camera parameter, lighting condition, etc. + Please check https://threejs.org/docs/index.html#manual/en/introduction/Creating-a-scene for + advanced usage. Note that currently this depends on tb-nightly to show. + + Args: + tag (string): Data identifier + vertices (torch.Tensor): List of the 3D coordinates of vertices. + colors (torch.Tensor): Colors for each vertex + faces (torch.Tensor): Indices of vertices within each triangle. (Optional) + config_dict: Dictionary with ThreeJS classes names and configuration. + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + + Shape: + vertices: :math:`(B, N, 3)`. (batch, number_of_vertices, channels) + + colors: :math:`(B, N, 3)`. The values should lie in [0, 255] for type `uint8` or [0, 1] for type `float`. + + faces: :math:`(B, N, 3)`. The values should lie in [0, number_of_vertices] for type `uint8`. + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + vertices_tensor = torch.as_tensor([ + [1, 1, 1], + [-1, -1, 1], + [1, -1, -1], + [-1, 1, -1], + ], dtype=torch.float).unsqueeze(0) + colors_tensor = torch.as_tensor([ + [255, 0, 0], + [0, 255, 0], + [0, 0, 255], + [255, 0, 255], + ], dtype=torch.int).unsqueeze(0) + faces_tensor = torch.as_tensor([ + [0, 2, 3], + [0, 3, 1], + [0, 1, 2], + [1, 3, 2], + ], dtype=torch.int).unsqueeze(0) + + writer = SummaryWriter() + writer.add_mesh('my_mesh', vertices=vertices_tensor, colors=colors_tensor, faces=faces_tensor) + + writer.close() + """ + self._get_file_writer().add_summary(mesh(tag, vertices, colors, faces, config_dict), global_step, walltime)
    + +
    [docs] def flush(self): + """Flushes the event file to disk. + Call this method to make sure that all pending events have been written to + disk. + """ + if self.all_writers is None: + return + for writer in self.all_writers.values(): + writer.flush()
    + +
    [docs] def close(self): + if self.all_writers is None: + return # ignore double close + for writer in self.all_writers.values(): + writer.flush() + writer.close() + self.file_writer = self.all_writers = None
    + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close()
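Because ``__enter__``/``__exit__`` are defined above, the writer also works as a context manager, so the final flush-and-close happens automatically; a small sketch ('runs/ctx_demo' is an arbitrary log_dir)::

    from torch.utils.tensorboard import SummaryWriter

    with SummaryWriter('runs/ctx_demo') as writer:
        for step in range(10):
            writer.add_scalar('loss', 1.0 / (step + 1), step)
    # __exit__ has called writer.close() at this point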
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision.html b/docs/stable/_modules/torchvision.html
new file mode 100644
index 000000000000..e37569057c96
--- /dev/null
+++ b/docs/stable/_modules/torchvision.html
@@ -0,0 +1,548 @@
+torchvision — PyTorch master documentation

    Source code for torchvision

    +from torchvision import models
    +from torchvision import datasets
    +from torchvision import ops
    +from torchvision import transforms
    +from torchvision import utils
    +from torchvision import io
    +
    +try:
    +    from .version import __version__  # noqa: F401
    +except ImportError:
    +    pass
    +
    +_image_backend = 'PIL'
    +
    +
    +
[docs]def set_image_backend(backend): + """ + Specifies the package used to load images. + + Args: + backend (string): Name of the image backend. One of {'PIL', 'accimage'}. + The :mod:`accimage` package uses the Intel IPP library. It is + generally faster than PIL, but does not support as many operations. + """ + global _image_backend + if backend not in ['PIL', 'accimage']: + raise ValueError("Invalid backend '{}'. Options are 'PIL' and 'accimage'" + .format(backend)) + _image_backend = backend
    + + +
[docs]def get_image_backend(): + """ + Gets the name of the package used to load images. + """ + return _image_backend
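A short sketch of toggling the backend (``accimage`` must be installed separately for loading to actually use it)::

    import torchvision

    torchvision.set_image_backend('accimage')  # anything else raises ValueError
    print(torchvision.get_image_backend())     # -> 'accimage'
    torchvision.set_image_backend('PIL')       # restore the default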
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/datasets/cifar.html b/docs/stable/_modules/torchvision/datasets/cifar.html
new file mode 100644
index 000000000000..0d18d5c0cfaa
--- /dev/null
+++ b/docs/stable/_modules/torchvision/datasets/cifar.html
@@ -0,0 +1,688 @@
+torchvision.datasets.cifar — PyTorch master documentation

    Source code for torchvision.datasets.cifar

    +from __future__ import print_function
    +from PIL import Image
    +import os
    +import os.path
    +import numpy as np
    +import sys
    +
    +if sys.version_info[0] == 2:
    +    import cPickle as pickle
    +else:
    +    import pickle
    +
    +from .vision import VisionDataset
    +from .utils import check_integrity, download_and_extract_archive
    +
    +
    +
    [docs]class CIFAR10(VisionDataset): + """`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset. + + Args: + root (string): Root directory of dataset where directory + ``cifar-10-batches-py`` exists or will be saved to if download is set to True. + train (bool, optional): If True, creates dataset from training set, otherwise + creates from test set. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + + """ + base_folder = 'cifar-10-batches-py' + url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz" + filename = "cifar-10-python.tar.gz" + tgz_md5 = 'c58f30108f718f92721af3b95e74349a' + train_list = [ + ['data_batch_1', 'c99cafc152244af753f735de768cd75f'], + ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'], + ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'], + ['data_batch_4', '634d18415352ddfa80567beed471001a'], + ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'], + ] + + test_list = [ + ['test_batch', '40351d587109b95175f43aff81a1287e'], + ] + meta = { + 'filename': 'batches.meta', + 'key': 'label_names', + 'md5': '5ff9c542aee3614f3951f8cda6e48888', + } + + def __init__(self, root, train=True, transform=None, target_transform=None, + download=False): + + super(CIFAR10, self).__init__(root, transform=transform, + target_transform=target_transform) + + self.train = train # training set or test set + + if download: + self.download() + + if not self._check_integrity(): + raise RuntimeError('Dataset not found or corrupted.' + + ' You can use download=True to download it') + + if self.train: + downloaded_list = self.train_list + else: + downloaded_list = self.test_list + + self.data = [] + self.targets = [] + + # now load the picked numpy arrays + for file_name, checksum in downloaded_list: + file_path = os.path.join(self.root, self.base_folder, file_name) + with open(file_path, 'rb') as f: + if sys.version_info[0] == 2: + entry = pickle.load(f) + else: + entry = pickle.load(f, encoding='latin1') + self.data.append(entry['data']) + if 'labels' in entry: + self.targets.extend(entry['labels']) + else: + self.targets.extend(entry['fine_labels']) + + self.data = np.vstack(self.data).reshape(-1, 3, 32, 32) + self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC + + self._load_meta() + + def _load_meta(self): + path = os.path.join(self.root, self.base_folder, self.meta['filename']) + if not check_integrity(path, self.meta['md5']): + raise RuntimeError('Dataset metadata file not found or corrupted.' + + ' You can use download=True to download it') + with open(path, 'rb') as infile: + if sys.version_info[0] == 2: + data = pickle.load(infile) + else: + data = pickle.load(infile, encoding='latin1') + self.classes = data[self.meta['key']] + self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)} + +
    [docs] def __getitem__(self, index): + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is index of the target class. + """ + img, target = self.data[index], self.targets[index] + + # doing this so that it is consistent with all other datasets + # to return a PIL Image + img = Image.fromarray(img) + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target
    + + def __len__(self): + return len(self.data) + + def _check_integrity(self): + root = self.root + for fentry in (self.train_list + self.test_list): + filename, md5 = fentry[0], fentry[1] + fpath = os.path.join(root, self.base_folder, filename) + if not check_integrity(fpath, md5): + return False + return True + + def download(self): + if self._check_integrity(): + print('Files already downloaded and verified') + return + download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5) + + def extra_repr(self): + return "Split: {}".format("Train" if self.train is True else "Test")
    + + +
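A minimal end-to-end sketch for ``CIFAR10`` ('./data' is an arbitrary download location)::

    import torch
    import torchvision.transforms as transforms
    from torchvision.datasets import CIFAR10

    train_set = CIFAR10(root='./data', train=True, download=True,
                        transform=transforms.ToTensor())
    loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True)

    images, labels = next(iter(loader))   # images: (32, 3, 32, 32) after ToTensor
    print(train_set.classes[labels[0]])   # human-readable class name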
    [docs]class CIFAR100(CIFAR10): + """`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset. + + This is a subclass of the `CIFAR10` Dataset. + """ + base_folder = 'cifar-100-python' + url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz" + filename = "cifar-100-python.tar.gz" + tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85' + train_list = [ + ['train', '16019d7e3df5f24257cddd939b257f8d'], + ] + + test_list = [ + ['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'], + ] + meta = { + 'filename': 'meta', + 'key': 'fine_label_names', + 'md5': '7973b15100ade9c7d40fb424638fde48', + }
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/datasets/cityscapes.html b/docs/stable/_modules/torchvision/datasets/cityscapes.html
new file mode 100644
index 000000000000..69312efd6b2a
--- /dev/null
+++ b/docs/stable/_modules/torchvision/datasets/cityscapes.html
@@ -0,0 +1,721 @@
+torchvision.datasets.cityscapes — PyTorch master documentation

    Source code for torchvision.datasets.cityscapes

    +import json
    +import os
    +from collections import namedtuple
    +import zipfile
    +
    +from .utils import extract_archive, verify_str_arg, iterable_to_str
    +from .vision import VisionDataset
    +from PIL import Image
    +
    +
    +
    [docs]class Cityscapes(VisionDataset): + """`Cityscapes <http://www.cityscapes-dataset.com/>`_ Dataset. + + Args: + root (string): Root directory of dataset where directory ``leftImg8bit`` + and ``gtFine`` or ``gtCoarse`` are located. + split (string, optional): The image split to use, ``train``, ``test`` or ``val`` if mode="gtFine" + otherwise ``train``, ``train_extra`` or ``val`` + mode (string, optional): The quality mode to use, ``gtFine`` or ``gtCoarse`` + target_type (string or list, optional): Type of target to use, ``instance``, ``semantic``, ``polygon`` + or ``color``. Can also be a list to output a tuple with all specified target types. + transform (callable, optional): A function/transform that takes in a PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + transforms (callable, optional): A function/transform that takes input sample and its target as entry + and returns a transformed version. + + Examples: + + Get semantic segmentation target + + .. code-block:: python + + dataset = Cityscapes('./data/cityscapes', split='train', mode='fine', + target_type='semantic') + + img, smnt = dataset[0] + + Get multiple targets + + .. code-block:: python + + dataset = Cityscapes('./data/cityscapes', split='train', mode='fine', + target_type=['instance', 'color', 'polygon']) + + img, (inst, col, poly) = dataset[0] + + Validate on the "coarse" set + + .. code-block:: python + + dataset = Cityscapes('./data/cityscapes', split='val', mode='coarse', + target_type='semantic') + + img, smnt = dataset[0] + """ + + # Based on https://github.com/mcordts/cityscapesScripts + CityscapesClass = namedtuple('CityscapesClass', ['name', 'id', 'train_id', 'category', 'category_id', + 'has_instances', 'ignore_in_eval', 'color']) + + classes = [ + CityscapesClass('unlabeled', 0, 255, 'void', 0, False, True, (0, 0, 0)), + CityscapesClass('ego vehicle', 1, 255, 'void', 0, False, True, (0, 0, 0)), + CityscapesClass('rectification border', 2, 255, 'void', 0, False, True, (0, 0, 0)), + CityscapesClass('out of roi', 3, 255, 'void', 0, False, True, (0, 0, 0)), + CityscapesClass('static', 4, 255, 'void', 0, False, True, (0, 0, 0)), + CityscapesClass('dynamic', 5, 255, 'void', 0, False, True, (111, 74, 0)), + CityscapesClass('ground', 6, 255, 'void', 0, False, True, (81, 0, 81)), + CityscapesClass('road', 7, 0, 'flat', 1, False, False, (128, 64, 128)), + CityscapesClass('sidewalk', 8, 1, 'flat', 1, False, False, (244, 35, 232)), + CityscapesClass('parking', 9, 255, 'flat', 1, False, True, (250, 170, 160)), + CityscapesClass('rail track', 10, 255, 'flat', 1, False, True, (230, 150, 140)), + CityscapesClass('building', 11, 2, 'construction', 2, False, False, (70, 70, 70)), + CityscapesClass('wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)), + CityscapesClass('fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)), + CityscapesClass('guard rail', 14, 255, 'construction', 2, False, True, (180, 165, 180)), + CityscapesClass('bridge', 15, 255, 'construction', 2, False, True, (150, 100, 100)), + CityscapesClass('tunnel', 16, 255, 'construction', 2, False, True, (150, 120, 90)), + CityscapesClass('pole', 17, 5, 'object', 3, False, False, (153, 153, 153)), + CityscapesClass('polegroup', 18, 255, 'object', 3, False, True, (153, 153, 153)), + CityscapesClass('traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)), + CityscapesClass('traffic sign', 20, 7, 
'object', 3, False, False, (220, 220, 0)), + CityscapesClass('vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)), + CityscapesClass('terrain', 22, 9, 'nature', 4, False, False, (152, 251, 152)), + CityscapesClass('sky', 23, 10, 'sky', 5, False, False, (70, 130, 180)), + CityscapesClass('person', 24, 11, 'human', 6, True, False, (220, 20, 60)), + CityscapesClass('rider', 25, 12, 'human', 6, True, False, (255, 0, 0)), + CityscapesClass('car', 26, 13, 'vehicle', 7, True, False, (0, 0, 142)), + CityscapesClass('truck', 27, 14, 'vehicle', 7, True, False, (0, 0, 70)), + CityscapesClass('bus', 28, 15, 'vehicle', 7, True, False, (0, 60, 100)), + CityscapesClass('caravan', 29, 255, 'vehicle', 7, True, True, (0, 0, 90)), + CityscapesClass('trailer', 30, 255, 'vehicle', 7, True, True, (0, 0, 110)), + CityscapesClass('train', 31, 16, 'vehicle', 7, True, False, (0, 80, 100)), + CityscapesClass('motorcycle', 32, 17, 'vehicle', 7, True, False, (0, 0, 230)), + CityscapesClass('bicycle', 33, 18, 'vehicle', 7, True, False, (119, 11, 32)), + CityscapesClass('license plate', -1, -1, 'vehicle', 7, False, True, (0, 0, 142)), + ] + + def __init__(self, root, split='train', mode='fine', target_type='instance', + transform=None, target_transform=None, transforms=None): + super(Cityscapes, self).__init__(root, transforms, transform, target_transform) + self.mode = 'gtFine' if mode == 'fine' else 'gtCoarse' + self.images_dir = os.path.join(self.root, 'leftImg8bit', split) + self.targets_dir = os.path.join(self.root, self.mode, split) + self.target_type = target_type + self.split = split + self.images = [] + self.targets = [] + + verify_str_arg(mode, "mode", ("fine", "coarse")) + if mode == "fine": + valid_modes = ("train", "test", "val") + else: + valid_modes = ("train", "train_extra", "val") + msg = ("Unknown value '{}' for argument split if mode is '{}'. " + "Valid values are {{{}}}.") + msg = msg.format(split, mode, iterable_to_str(valid_modes)) + verify_str_arg(split, "split", valid_modes, msg) + + if not isinstance(target_type, list): + self.target_type = [target_type] + [verify_str_arg(value, "target_type", + ("instance", "semantic", "polygon", "color")) + for value in self.target_type] + + if not os.path.isdir(self.images_dir) or not os.path.isdir(self.targets_dir): + + if split == 'train_extra': + image_dir_zip = os.path.join(self.root, 'leftImg8bit{}'.format('_trainextra.zip')) + else: + image_dir_zip = os.path.join(self.root, 'leftImg8bit{}'.format('_trainvaltest.zip')) + + if self.mode == 'gtFine': + target_dir_zip = os.path.join(self.root, '{}{}'.format(self.mode, '_trainvaltest.zip')) + elif self.mode == 'gtCoarse': + target_dir_zip = os.path.join(self.root, '{}{}'.format(self.mode, '.zip')) + + if os.path.isfile(image_dir_zip) and os.path.isfile(target_dir_zip): + extract_archive(from_path=image_dir_zip, to_path=self.root) + extract_archive(from_path=target_dir_zip, to_path=self.root) + else: + raise RuntimeError('Dataset not found or incomplete. 
Please make sure all required folders for the' + ' specified "split" and "mode" are inside the "root" directory') + + for city in os.listdir(self.images_dir): + img_dir = os.path.join(self.images_dir, city) + target_dir = os.path.join(self.targets_dir, city) + for file_name in os.listdir(img_dir): + target_types = [] + for t in self.target_type: + target_name = '{}_{}'.format(file_name.split('_leftImg8bit')[0], + self._get_target_suffix(self.mode, t)) + target_types.append(os.path.join(target_dir, target_name)) + + self.images.append(os.path.join(img_dir, file_name)) + self.targets.append(target_types) + +
    [docs] def __getitem__(self, index): + """ + Args: + index (int): Index + Returns: + tuple: (image, target) where target is a tuple of all target types if target_type is a list with more + than one item. Otherwise target is a json object if target_type="polygon", else the image segmentation. + """ + + image = Image.open(self.images[index]).convert('RGB') + + targets = [] + for i, t in enumerate(self.target_type): + if t == 'polygon': + target = self._load_json(self.targets[index][i]) + else: + target = Image.open(self.targets[index][i]) + + targets.append(target) + + target = tuple(targets) if len(targets) > 1 else targets[0] + + if self.transforms is not None: + image, target = self.transforms(image, target) + + return image, target
    + + def __len__(self): + return len(self.images) + + def extra_repr(self): + lines = ["Split: {split}", "Mode: {mode}", "Type: {target_type}"] + return '\n'.join(lines).format(**self.__dict__) + + def _load_json(self, path): + with open(path, 'r') as file: + data = json.load(file) + return data + + def _get_target_suffix(self, mode, target_type): + if target_type == 'instance': + return '{}_instanceIds.png'.format(mode) + elif target_type == 'semantic': + return '{}_labelIds.png'.format(mode) + elif target_type == 'color': + return '{}_color.png'.format(mode) + else: + return '{}_polygons.json'.format(mode)
    +
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/datasets/coco.html b/docs/stable/_modules/torchvision/datasets/coco.html
new file mode 100644
index 000000000000..ebd65f916bd1
--- /dev/null
+++ b/docs/stable/_modules/torchvision/datasets/coco.html
@@ -0,0 +1,637 @@
+torchvision.datasets.coco — PyTorch master documentation

    Source code for torchvision.datasets.coco

    +from .vision import VisionDataset
    +from PIL import Image
    +import os
    +import os.path
    +
    +
    +
    [docs]class CocoCaptions(VisionDataset): + """`MS Coco Captions <http://mscoco.org/dataset/#captions-challenge2015>`_ Dataset. + + Args: + root (string): Root directory where images are downloaded to. + annFile (string): Path to json annotation file. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.ToTensor`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + transforms (callable, optional): A function/transform that takes input sample and its target as entry + and returns a transformed version. + + Example: + + .. code:: python + + import torchvision.datasets as dset + import torchvision.transforms as transforms + cap = dset.CocoCaptions(root = 'dir where images are', + annFile = 'json annotation file', + transform=transforms.ToTensor()) + + print('Number of samples: ', len(cap)) + img, target = cap[3] # load 4th sample + + print("Image Size: ", img.size()) + print(target) + + Output: :: + + Number of samples: 82783 + Image Size: (3L, 427L, 640L) + [u'A plane emitting smoke stream flying over a mountain.', + u'A plane darts across a bright blue sky behind a mountain covered in snow', + u'A plane leaves a contrail above the snowy mountain top.', + u'A mountain that has a plane flying overheard in the distance.', + u'A mountain view with a plume of smoke in the background'] + + """ + + def __init__(self, root, annFile, transform=None, target_transform=None, transforms=None): + super(CocoCaptions, self).__init__(root, transforms, transform, target_transform) + from pycocotools.coco import COCO + self.coco = COCO(annFile) + self.ids = list(sorted(self.coco.imgs.keys())) + +
    [docs] def __getitem__(self, index): + """ + Args: + index (int): Index + + Returns: + tuple: Tuple (image, target). target is a list of captions for the image. + """ + coco = self.coco + img_id = self.ids[index] + ann_ids = coco.getAnnIds(imgIds=img_id) + anns = coco.loadAnns(ann_ids) + target = [ann['caption'] for ann in anns] + + path = coco.loadImgs(img_id)[0]['file_name'] + + img = Image.open(os.path.join(self.root, path)).convert('RGB') + + if self.transforms is not None: + img, target = self.transforms(img, target) + + return img, target
    + + def __len__(self): + return len(self.ids)
    + + +
    [docs]class CocoDetection(VisionDataset): + """`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset. + + Args: + root (string): Root directory where images are downloaded to. + annFile (string): Path to json annotation file. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.ToTensor`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + transforms (callable, optional): A function/transform that takes input sample and its target as entry + and returns a transformed version. + """ + + def __init__(self, root, annFile, transform=None, target_transform=None, transforms=None): + super(CocoDetection, self).__init__(root, transforms, transform, target_transform) + from pycocotools.coco import COCO + self.coco = COCO(annFile) + self.ids = list(sorted(self.coco.imgs.keys())) + +
    [docs] def __getitem__(self, index): + """ + Args: + index (int): Index + + Returns: + tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``. + """ + coco = self.coco + img_id = self.ids[index] + ann_ids = coco.getAnnIds(imgIds=img_id) + target = coco.loadAnns(ann_ids) + + path = coco.loadImgs(img_id)[0]['file_name'] + + img = Image.open(os.path.join(self.root, path)).convert('RGB') + if self.transforms is not None: + img, target = self.transforms(img, target) + + return img, target
    + + def __len__(self): + return len(self.ids)
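``CocoCaptions`` above ships a usage example but ``CocoDetection`` does not; a hedged sketch along the same lines (the paths are placeholders, and ``pycocotools`` must be installed)::

    import torchvision.datasets as dset
    import torchvision.transforms as transforms

    det = dset.CocoDetection(root='dir where images are',
                             annFile='json annotation file',
                             transform=transforms.ToTensor())

    img, target = det[0]          # target: list of annotation dicts from coco.loadAnns
    print(len(det), len(target))  # dataset size, objects in the first image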
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/datasets/fakedata.html b/docs/stable/_modules/torchvision/datasets/fakedata.html
new file mode 100644
index 000000000000..8b50bb3491b3
--- /dev/null
+++ b/docs/stable/_modules/torchvision/datasets/fakedata.html
@@ -0,0 +1,572 @@
+torchvision.datasets.fakedata — PyTorch master documentation

    Source code for torchvision.datasets.fakedata

    +import torch
    +from .vision import VisionDataset
    +from .. import transforms
    +
    +
    +
+class FakeData(VisionDataset):
+    """A fake dataset that returns randomly generated images as PIL images.
+
+    Args:
+        size (int, optional): Size of the dataset. Default: 1000 images
+        image_size (tuple, optional): Size of the returned images. Default: (3, 224, 224)
+        num_classes (int, optional): Number of classes in the dataset. Default: 10
+        transform (callable, optional): A function/transform that takes in a PIL image
+            and returns a transformed version. E.g., ``transforms.RandomCrop``
+        target_transform (callable, optional): A function/transform that takes in the
+            target and transforms it.
+        random_offset (int): Offsets the index-based random seed used to
+            generate each image. Default: 0
+
+    """
+
+    def __init__(self, size=1000, image_size=(3, 224, 224), num_classes=10,
+                 transform=None, target_transform=None, random_offset=0):
+        super(FakeData, self).__init__(None, transform=transform,
+                                       target_transform=target_transform)
+        self.size = size
+        self.num_classes = num_classes
+        self.image_size = image_size
+        self.random_offset = random_offset
+
+    def __getitem__(self, index):
+        """
+        Args:
+            index (int): Index
+
+        Returns:
+            tuple: (image, target) where target is class_index of the target class.
+        """
+        # create random image that is consistent with the index id
+        if index >= len(self):
+            raise IndexError("{} index out of range".format(self.__class__.__name__))
+        rng_state = torch.get_rng_state()
+        torch.manual_seed(index + self.random_offset)
+        img = torch.randn(*self.image_size)
+        target = torch.randint(0, self.num_classes, size=(1,), dtype=torch.long)[0]
+        torch.set_rng_state(rng_state)
+
+        # convert to PIL Image
+        img = transforms.ToPILImage()(img)
+        if self.transform is not None:
+            img = self.transform(img)
+        if self.target_transform is not None:
+            target = self.target_transform(target)
+
+        return img, target
+
+    def __len__(self):
+        return self.size
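``FakeData`` needs no downloads, which makes it convenient for smoke-testing an input pipeline. A minimal sketch (all sizes are illustrative):

.. code:: python

    import torchvision.datasets as dset
    import torchvision.transforms as transforms

    fake = dset.FakeData(size=100, image_size=(3, 32, 32), num_classes=10,
                         transform=transforms.ToTensor())
    img, target = fake[0]      # deterministic for a given index
    print(img.shape)           # torch.Size([3, 32, 32])
    print(int(target))         # class index in [0, 10)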
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/datasets/flickr.html b/docs/stable/_modules/torchvision/datasets/flickr.html
new file mode 100644
index 000000000000..3dd708501085
--- /dev/null
+++ b/docs/stable/_modules/torchvision/datasets/flickr.html
@@ -0,0 +1,668 @@
+torchvision.datasets.flickr — PyTorch master documentation

    Source code for torchvision.datasets.flickr

    +from collections import defaultdict
    +from PIL import Image
    +from six.moves import html_parser
    +
    +import glob
    +import os
    +from .vision import VisionDataset
    +
    +
    +class Flickr8kParser(html_parser.HTMLParser):
    +    """Parser for extracting captions from the Flickr8k dataset web page."""
    +
    +    def __init__(self, root):
    +        super(Flickr8kParser, self).__init__()
    +
    +        self.root = root
    +
    +        # Data structure to store captions
    +        self.annotations = {}
    +
    +        # State variables
    +        self.in_table = False
    +        self.current_tag = None
    +        self.current_img = None
    +
    +    def handle_starttag(self, tag, attrs):
    +        self.current_tag = tag
    +
    +        if tag == 'table':
    +            self.in_table = True
    +
    +    def handle_endtag(self, tag):
    +        self.current_tag = None
    +
    +        if tag == 'table':
    +            self.in_table = False
    +
    +    def handle_data(self, data):
    +        if self.in_table:
    +            if data == 'Image Not Found':
    +                self.current_img = None
    +            elif self.current_tag == 'a':
    +                img_id = data.split('/')[-2]
    +                img_id = os.path.join(self.root, img_id + '_*.jpg')
    +                img_id = glob.glob(img_id)[0]
    +                self.current_img = img_id
    +                self.annotations[img_id] = []
    +            elif self.current_tag == 'li' and self.current_img:
    +                img_id = self.current_img
    +                self.annotations[img_id].append(data.strip())
    +
    +
    +
+class Flickr8k(VisionDataset):
+    """`Flickr8k Entities <http://nlp.cs.illinois.edu/HockenmaierGroup/8k-pictures.html>`_ Dataset.
+
+    Args:
+        root (string): Root directory where images are downloaded to.
+        ann_file (string): Path to annotation file.
+        transform (callable, optional): A function/transform that takes in a PIL image
+            and returns a transformed version. E.g., ``transforms.ToTensor``
+        target_transform (callable, optional): A function/transform that takes in the
+            target and transforms it.
+    """
+
+    def __init__(self, root, ann_file, transform=None, target_transform=None):
+        super(Flickr8k, self).__init__(root, transform=transform,
+                                       target_transform=target_transform)
+        self.ann_file = os.path.expanduser(ann_file)
+
+        # Read annotations and store in a dict
+        parser = Flickr8kParser(self.root)
+        with open(self.ann_file) as fh:
+            parser.feed(fh.read())
+        self.annotations = parser.annotations
+
+        self.ids = list(sorted(self.annotations.keys()))
+
+    def __getitem__(self, index):
+        """
+        Args:
+            index (int): Index
+
+        Returns:
+            tuple: Tuple (image, target). target is a list of captions for the image.
+        """
+        img_id = self.ids[index]
+
+        # Image
+        img = Image.open(img_id).convert('RGB')
+        if self.transform is not None:
+            img = self.transform(img)
+
+        # Captions
+        target = self.annotations[img_id]
+        if self.target_transform is not None:
+            target = self.target_transform(target)
+
+        return img, target
+
+    def __len__(self):
+        return len(self.ids)
+
+
+class Flickr30k(VisionDataset):
+    """`Flickr30k Entities <http://web.engr.illinois.edu/~bplumme2/Flickr30kEntities/>`_ Dataset.
+
+    Args:
+        root (string): Root directory where images are downloaded to.
+        ann_file (string): Path to annotation file.
+        transform (callable, optional): A function/transform that takes in a PIL image
+            and returns a transformed version. E.g., ``transforms.ToTensor``
+        target_transform (callable, optional): A function/transform that takes in the
+            target and transforms it.
+    """
+
+    def __init__(self, root, ann_file, transform=None, target_transform=None):
+        super(Flickr30k, self).__init__(root, transform=transform,
+                                        target_transform=target_transform)
+        self.ann_file = os.path.expanduser(ann_file)
+
+        # Read annotations and store in a dict
+        self.annotations = defaultdict(list)
+        with open(self.ann_file) as fh:
+            for line in fh:
+                img_id, caption = line.strip().split('\t')
+                self.annotations[img_id[:-2]].append(caption)
+
+        self.ids = list(sorted(self.annotations.keys()))
+
+    def __getitem__(self, index):
+        """
+        Args:
+            index (int): Index
+
+        Returns:
+            tuple: Tuple (image, target). target is a list of captions for the image.
+        """
+        img_id = self.ids[index]
+
+        # Image
+        filename = os.path.join(self.root, img_id)
+        img = Image.open(filename).convert('RGB')
+        if self.transform is not None:
+            img = self.transform(img)
+
+        # Captions
+        target = self.annotations[img_id]
+        if self.target_transform is not None:
+            target = self.target_transform(target)
+
+        return img, target
+
+    def __len__(self):
+        return len(self.ids)
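A minimal usage sketch for ``Flickr30k``, assuming the images and the caption file have been obtained separately (paths are placeholders; as parsed above, the annotation file is expected to hold one ``<image>#<n><TAB><caption>`` line per caption):

.. code:: python

    import torchvision.datasets as dset
    import torchvision.transforms as transforms

    # Placeholder paths shown for illustration only.
    flickr = dset.Flickr30k(root='flickr30k-images',
                            ann_file='results_20130124.token',
                            transform=transforms.ToTensor())
    img, captions = flickr[0]   # captions is a list of strings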
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/datasets/folder.html b/docs/stable/_modules/torchvision/datasets/folder.html
new file mode 100644
index 000000000000..e272c0d69ae6
--- /dev/null
+++ b/docs/stable/_modules/torchvision/datasets/folder.html
@@ -0,0 +1,724 @@
+torchvision.datasets.folder — PyTorch master documentation

    Source code for torchvision.datasets.folder

    +from .vision import VisionDataset
    +
    +from PIL import Image
    +
    +import os
    +import os.path
    +import sys
    +
    +
    +def has_file_allowed_extension(filename, extensions):
    +    """Checks if a file is an allowed extension.
    +
    +    Args:
    +        filename (string): path to a file
    +        extensions (tuple of strings): extensions to consider (lowercase)
    +
    +    Returns:
    +        bool: True if the filename ends with one of given extensions
    +    """
    +    return filename.lower().endswith(extensions)
    +
    +
    +def is_image_file(filename):
    +    """Checks if a file is an allowed image extension.
    +
    +    Args:
    +        filename (string): path to a file
    +
    +    Returns:
    +        bool: True if the filename ends with a known image extension
    +    """
    +    return has_file_allowed_extension(filename, IMG_EXTENSIONS)
    +
    +
    +def make_dataset(dir, class_to_idx, extensions=None, is_valid_file=None):
    +    images = []
    +    dir = os.path.expanduser(dir)
    +    if not ((extensions is None) ^ (is_valid_file is None)):
    +        raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
    +    if extensions is not None:
    +        def is_valid_file(x):
    +            return has_file_allowed_extension(x, extensions)
    +    for target in sorted(class_to_idx.keys()):
    +        d = os.path.join(dir, target)
    +        if not os.path.isdir(d):
    +            continue
    +        for root, _, fnames in sorted(os.walk(d)):
    +            for fname in sorted(fnames):
    +                path = os.path.join(root, fname)
    +                if is_valid_file(path):
    +                    item = (path, class_to_idx[target])
    +                    images.append(item)
    +
    +    return images
    +
    +
    +
+class DatasetFolder(VisionDataset):
+    """A generic data loader where the samples are arranged in this way: ::
+
+        root/class_x/xxx.ext
+        root/class_x/xxy.ext
+        root/class_x/xxz.ext
+
+        root/class_y/123.ext
+        root/class_y/nsdf3.ext
+        root/class_y/asd932_.ext
+
+    Args:
+        root (string): Root directory path.
+        loader (callable): A function to load a sample given its path.
+        extensions (tuple[string]): A list of allowed extensions.
+            Both extensions and is_valid_file should not be passed.
+        transform (callable, optional): A function/transform that takes in
+            a sample and returns a transformed version.
+            E.g., ``transforms.RandomCrop`` for images.
+        target_transform (callable, optional): A function/transform that takes
+            in the target and transforms it.
+        is_valid_file (callable, optional): A function that takes the path of an Image file
+            and checks if the file is valid (used to filter out corrupt files).
+            Both extensions and is_valid_file should not be passed.
+
+    Attributes:
+        classes (list): List of the class names.
+        class_to_idx (dict): Dict with items (class_name, class_index).
+        samples (list): List of (sample path, class_index) tuples
+        targets (list): The class_index value for each image in the dataset
+    """
+
+    def __init__(self, root, loader, extensions=None, transform=None,
+                 target_transform=None, is_valid_file=None):
+        super(DatasetFolder, self).__init__(root, transform=transform,
+                                            target_transform=target_transform)
+        classes, class_to_idx = self._find_classes(self.root)
+        samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file)
+        if len(samples) == 0:
+            raise (RuntimeError("Found 0 files in subfolders of: " + self.root + "\n"
+                                "Supported extensions are: " + ",".join(extensions)))
+
+        self.loader = loader
+        self.extensions = extensions
+
+        self.classes = classes
+        self.class_to_idx = class_to_idx
+        self.samples = samples
+        self.targets = [s[1] for s in samples]
+
+    def _find_classes(self, dir):
+        """
+        Finds the class folders in a dataset.
+
+        Args:
+            dir (string): Root directory path.
+
+        Returns:
+            tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.
+
+        Ensures:
+            No class is a subdirectory of another.
+        """
+        if sys.version_info >= (3, 5):
+            # Faster and available in Python 3.5 and above
+            classes = [d.name for d in os.scandir(dir) if d.is_dir()]
+        else:
+            classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
+        classes.sort()
+        class_to_idx = {classes[i]: i for i in range(len(classes))}
+        return classes, class_to_idx
+
+    def __getitem__(self, index):
+        """
+        Args:
+            index (int): Index
+
+        Returns:
+            tuple: (sample, target) where target is class_index of the target class.
+        """
+        path, target = self.samples[index]
+        sample = self.loader(path)
+        if self.transform is not None:
+            sample = self.transform(sample)
+        if self.target_transform is not None:
+            target = self.target_transform(target)
+
+        return sample, target
+
+    def __len__(self):
+        return len(self.samples)
+
+
+IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')
+
+
+def pil_loader(path):
+    # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
+    with open(path, 'rb') as f:
+        img = Image.open(f)
+        return img.convert('RGB')
+
+
+def accimage_loader(path):
+    import accimage
+    try:
+        return accimage.Image(path)
+    except IOError:
+        # Potentially a decoding problem, fall back to PIL.Image
+        return pil_loader(path)
+
+
+def default_loader(path):
+    from torchvision import get_image_backend
+    if get_image_backend() == 'accimage':
+        return accimage_loader(path)
+    else:
+        return pil_loader(path)
+
+
+class ImageFolder(DatasetFolder):
+    """A generic data loader where the images are arranged in this way: ::
+
+        root/dog/xxx.png
+        root/dog/xxy.png
+        root/dog/xxz.png
+
+        root/cat/123.png
+        root/cat/nsdf3.png
+        root/cat/asd932_.png
+
+    Args:
+        root (string): Root directory path.
+        transform (callable, optional): A function/transform that takes in a PIL image
+            and returns a transformed version. E.g., ``transforms.RandomCrop``
+        target_transform (callable, optional): A function/transform that takes in the
+            target and transforms it.
+        loader (callable, optional): A function to load an image given its path.
+        is_valid_file (callable, optional): A function that takes the path of an Image file
+            and checks if the file is valid (used to filter out corrupt files).
+
+    Attributes:
+        classes (list): List of the class names.
+        class_to_idx (dict): Dict with items (class_name, class_index).
+        imgs (list): List of (image path, class_index) tuples
+    """
+
+    def __init__(self, root, transform=None, target_transform=None,
+                 loader=default_loader, is_valid_file=None):
+        super(ImageFolder, self).__init__(root, loader, IMG_EXTENSIONS if is_valid_file is None else None,
+                                          transform=transform,
+                                          target_transform=target_transform,
+                                          is_valid_file=is_valid_file)
+        self.imgs = self.samples
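A minimal sketch of pointing ``ImageFolder`` at a directory tree like the one above (the path and transforms are illustrative):

.. code:: python

    import torchvision.datasets as dset
    import torchvision.transforms as transforms

    data = dset.ImageFolder(root='path/to/root',
                            transform=transforms.Compose([
                                transforms.Resize(256),
                                transforms.CenterCrop(224),
                                transforms.ToTensor(),
                            ]))
    print(data.classes)       # sorted folder names, e.g. ['cat', 'dog']
    img, target = data[0]     # target == data.class_to_idx[folder name]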
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/datasets/hmdb51.html b/docs/stable/_modules/torchvision/datasets/hmdb51.html
new file mode 100644
index 000000000000..3cd20e72b9a5
--- /dev/null
+++ b/docs/stable/_modules/torchvision/datasets/hmdb51.html
@@ -0,0 +1,611 @@
+torchvision.datasets.hmdb51 — PyTorch master documentation

    Source code for torchvision.datasets.hmdb51

    +import glob
    +import os
    +
    +from .video_utils import VideoClips
    +from .utils import list_dir
    +from .folder import make_dataset
    +from .vision import VisionDataset
    +
    +
    +
+class HMDB51(VisionDataset):
+    """
+    `HMDB51 <http://serre-lab.clps.brown.edu/resource/hmdb-a-large-human-motion-database/>`_
+    dataset.
+
+    HMDB51 is an action recognition video dataset.
+    This dataset considers every video as a collection of video clips of fixed size, specified
+    by ``frames_per_clip``, where the step in frames between each clip is given by
+    ``step_between_clips``.
+
+    To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``
+    and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two
+    elements will come from video 1, and the next three elements from video 2.
+    Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all
+    frames in a video might be present.
+
+    Internally, it uses a VideoClips object to handle clip creation.
+
+    Args:
+        root (string): Root directory of the HMDB51 Dataset.
+        annotation_path (str): Path to the folder containing the split files.
+        frames_per_clip (int): Number of frames in a clip.
+        step_between_clips (int): Number of frames between each clip.
+        fold (int, optional): Which fold to use. Should be between 1 and 3.
+        train (bool, optional): If ``True``, creates a dataset from the train split,
+            otherwise from the test split.
+        transform (callable, optional): A function/transform that takes in a TxHxWxC video
+            and returns a transformed version.
+
+    Returns:
+        video (Tensor[T, H, W, C]): the `T` video frames
+        audio (Tensor[K, L]): the audio frames, where `K` is the number of channels
+            and `L` is the number of points
+        label (int): class of the video clip
+    """
+
+    data_url = "http://serre-lab.clps.brown.edu/wp-content/uploads/2013/10/hmdb51_org.rar"
+    splits = {
+        "url": "http://serre-lab.clps.brown.edu/wp-content/uploads/2013/10/test_train_splits.rar",
+        "md5": "15e67781e70dcfbdce2d7dbb9b3344b5"
+    }
+
+    def __init__(self, root, annotation_path, frames_per_clip, step_between_clips=1,
+                 fold=1, train=True, transform=None):
+        super(HMDB51, self).__init__(root)
+        if not 1 <= fold <= 3:
+            raise ValueError("fold should be between 1 and 3, got {}".format(fold))
+
+        extensions = ('avi',)
+        self.fold = fold
+        self.train = train
+
+        classes = list(sorted(list_dir(root)))
+        class_to_idx = {classes[i]: i for i in range(len(classes))}
+        self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None)
+        self.classes = classes
+        video_list = [x[0] for x in self.samples]
+        video_clips = VideoClips(video_list, frames_per_clip, step_between_clips)
+        indices = self._select_fold(video_list, annotation_path, fold, train)
+        self.video_clips = video_clips.subset(indices)
+        self.transform = transform
+
+    def _select_fold(self, video_list, annotation_path, fold, train):
+        target_tag = 1 if train else 2
+        name = "*test_split{}.txt".format(fold)
+        files = glob.glob(os.path.join(annotation_path, name))
+        selected_files = []
+        for f in files:
+            with open(f, "r") as fid:
+                data = fid.readlines()
+                data = [x.strip().split(" ") for x in data]
+                data = [x[0] for x in data if int(x[1]) == target_tag]
+                selected_files.extend(data)
+        selected_files = set(selected_files)
+        indices = [i for i in range(len(video_list)) if os.path.basename(video_list[i]) in selected_files]
+        return indices
+
+    def __len__(self):
+        return self.video_clips.num_clips()
+
+    def __getitem__(self, idx):
+        video, audio, info, video_idx = self.video_clips.get_clip(idx)
+        label = self.samples[video_idx][1]
+
+        if self.transform is not None:
+            video = self.transform(video)
+
+        return video, audio, label
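A minimal usage sketch, assuming the videos are extracted under ``root/<class>/<video>.avi`` and the official split files are available (paths and clip sizes are placeholders; decoding the clips requires a working video backend):

.. code:: python

    import torchvision.datasets as dset

    # Placeholder paths; the annotation folder holds the official
    # *_test_split{1,2,3}.txt files.
    hmdb = dset.HMDB51(root='hmdb51/videos',
                       annotation_path='hmdb51/test_train_splits',
                       frames_per_clip=16, step_between_clips=16,
                       fold=1, train=True)
    video, audio, label = hmdb[0]   # video: Tensor[16, H, W, C]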
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/datasets/imagenet.html b/docs/stable/_modules/torchvision/datasets/imagenet.html
new file mode 100644
index 000000000000..209564612c02
--- /dev/null
+++ b/docs/stable/_modules/torchvision/datasets/imagenet.html
@@ -0,0 +1,685 @@
+torchvision.datasets.imagenet — PyTorch master documentation

    Source code for torchvision.datasets.imagenet

    +from __future__ import print_function
    +import os
    +import shutil
    +import tempfile
    +import torch
    +from .folder import ImageFolder
    +from .utils import check_integrity, download_and_extract_archive, extract_archive, \
    +    verify_str_arg
    +
    +ARCHIVE_DICT = {
    +    'train': {
    +        'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_train.tar',
    +        'md5': '1d675b47d978889d74fa0da5fadfb00e',
    +    },
    +    'val': {
    +        'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_val.tar',
    +        'md5': '29b22e2961454d5413ddabcf34fc5622',
    +    },
    +    'devkit': {
    +        'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_devkit_t12.tar.gz',
    +        'md5': 'fa75699e90414af021442c21a62c3abf',
    +    }
    +}
    +
    +
    +
+class ImageNet(ImageFolder):
+    """`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset.
+
+    Args:
+        root (string): Root directory of the ImageNet Dataset.
+        split (string, optional): The dataset split, supports ``train``, or ``val``.
+        download (bool, optional): If true, downloads the dataset from the internet and
+            puts it in root directory. If dataset is already downloaded, it is not
+            downloaded again.
+        transform (callable, optional): A function/transform that takes in a PIL image
+            and returns a transformed version. E.g., ``transforms.RandomCrop``
+        target_transform (callable, optional): A function/transform that takes in the
+            target and transforms it.
+        loader (callable, optional): A function to load an image given its path.
+
+    Attributes:
+        classes (list): List of the class name tuples.
+        class_to_idx (dict): Dict with items (class_name, class_index).
+        wnids (list): List of the WordNet IDs.
+        wnid_to_idx (dict): Dict with items (wordnet_id, class_index).
+        imgs (list): List of (image path, class_index) tuples
+        targets (list): The class_index value for each image in the dataset
+    """
+
+    def __init__(self, root, split='train', download=False, **kwargs):
+        root = self.root = os.path.expanduser(root)
+        self.split = verify_str_arg(split, "split", ("train", "val"))
+
+        if download:
+            self.download()
+        wnid_to_classes = self._load_meta_file()[0]
+
+        super(ImageNet, self).__init__(self.split_folder, **kwargs)
+        self.root = root
+
+        self.wnids = self.classes
+        self.wnid_to_idx = self.class_to_idx
+        self.classes = [wnid_to_classes[wnid] for wnid in self.wnids]
+        self.class_to_idx = {cls: idx
+                             for idx, clss in enumerate(self.classes)
+                             for cls in clss}
+
+    def download(self):
+        if not check_integrity(self.meta_file):
+            tmp_dir = tempfile.mkdtemp()
+
+            archive_dict = ARCHIVE_DICT['devkit']
+            download_and_extract_archive(archive_dict['url'], self.root,
+                                         extract_root=tmp_dir,
+                                         md5=archive_dict['md5'])
+            devkit_folder = _splitexts(os.path.basename(archive_dict['url']))[0]
+            meta = parse_devkit(os.path.join(tmp_dir, devkit_folder))
+            self._save_meta_file(*meta)
+
+            shutil.rmtree(tmp_dir)
+
+        if not os.path.isdir(self.split_folder):
+            archive_dict = ARCHIVE_DICT[self.split]
+            download_and_extract_archive(archive_dict['url'], self.root,
+                                         extract_root=self.split_folder,
+                                         md5=archive_dict['md5'])
+
+            if self.split == 'train':
+                prepare_train_folder(self.split_folder)
+            elif self.split == 'val':
+                val_wnids = self._load_meta_file()[1]
+                prepare_val_folder(self.split_folder, val_wnids)
+        else:
+            msg = ("You set download=True, but a folder '{}' already exists in "
+                   "the root directory. If you want to re-download or re-extract the "
+                   "archive, delete the folder.")
+            print(msg.format(self.split))
+
+    @property
+    def meta_file(self):
+        return os.path.join(self.root, 'meta.bin')
+
+    def _load_meta_file(self):
+        if check_integrity(self.meta_file):
+            return torch.load(self.meta_file)
+        else:
+            raise RuntimeError("Meta file not found or corrupted.",
+                               "You can use download=True to create it.")
+
+    def _save_meta_file(self, wnid_to_class, val_wnids):
+        torch.save((wnid_to_class, val_wnids), self.meta_file)
+
+    @property
+    def split_folder(self):
+        return os.path.join(self.root, self.split)
+
+    def extra_repr(self):
+        return "Split: {split}".format(**self.__dict__)
+
+
+def parse_devkit(root):
+    idx_to_wnid, wnid_to_classes = parse_meta(root)
+    val_idcs = parse_val_groundtruth(root)
+    val_wnids = [idx_to_wnid[idx] for idx in val_idcs]
+    return wnid_to_classes, val_wnids
+
+
+def parse_meta(devkit_root, path='data', filename='meta.mat'):
+    import scipy.io as sio
+
+    metafile = os.path.join(devkit_root, path, filename)
+    meta = sio.loadmat(metafile, squeeze_me=True)['synsets']
+    nums_children = list(zip(*meta))[4]
+    meta = [meta[idx] for idx, num_children in enumerate(nums_children)
+            if num_children == 0]
+    idcs, wnids, classes = list(zip(*meta))[:3]
+    classes = [tuple(clss.split(', ')) for clss in classes]
+    idx_to_wnid = {idx: wnid for idx, wnid in zip(idcs, wnids)}
+    wnid_to_classes = {wnid: clss for wnid, clss in zip(wnids, classes)}
+    return idx_to_wnid, wnid_to_classes
+
+
+def parse_val_groundtruth(devkit_root, path='data',
+                          filename='ILSVRC2012_validation_ground_truth.txt'):
+    with open(os.path.join(devkit_root, path, filename), 'r') as txtfh:
+        val_idcs = txtfh.readlines()
+    return [int(val_idx) for val_idx in val_idcs]
+
+
+def prepare_train_folder(folder):
+    for archive in [os.path.join(folder, archive) for archive in os.listdir(folder)]:
+        extract_archive(archive, os.path.splitext(archive)[0], remove_finished=True)
+
+
+def prepare_val_folder(folder, wnids):
+    img_files = sorted([os.path.join(folder, file) for file in os.listdir(folder)])
+
+    for wnid in set(wnids):
+        os.mkdir(os.path.join(folder, wnid))
+
+    for wnid, img_file in zip(wnids, img_files):
+        shutil.move(img_file, os.path.join(folder, wnid, os.path.basename(img_file)))
+
+
+def _splitexts(root):
+    exts = []
+    ext = '.'
+    while ext:
+        root, ext = os.path.splitext(root)
+        exts.append(ext)
+    return root, ''.join(reversed(exts))
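A minimal usage sketch, assuming the ILSVRC2012 archives are still retrievable from the URLs in ``ARCHIVE_DICT`` above (the ``root`` path is a placeholder):

.. code:: python

    import torchvision.datasets as dset
    import torchvision.transforms as transforms

    imagenet = dset.ImageNet(root='path/to/imagenet', split='val', download=True,
                             transform=transforms.Compose([
                                 transforms.Resize(256),
                                 transforms.CenterCrop(224),
                                 transforms.ToTensor(),
                             ]))
    img, target = imagenet[0]
    print(imagenet.classes[target])   # tuple of human-readable class names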
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/datasets/kinetics.html b/docs/stable/_modules/torchvision/datasets/kinetics.html
new file mode 100644
index 000000000000..afa4d8e433c2
--- /dev/null
+++ b/docs/stable/_modules/torchvision/datasets/kinetics.html
@@ -0,0 +1,575 @@
+torchvision.datasets.kinetics — PyTorch master documentation

    Source code for torchvision.datasets.kinetics

    +from .video_utils import VideoClips
    +from .utils import list_dir
    +from .folder import make_dataset
    +from .vision import VisionDataset
    +
    +
    +
+class Kinetics400(VisionDataset):
+    """
+    `Kinetics-400 <https://deepmind.com/research/open-source/open-source-datasets/kinetics/>`_
+    dataset.
+
+    Kinetics-400 is an action recognition video dataset.
+    This dataset considers every video as a collection of video clips of fixed size, specified
+    by ``frames_per_clip``, where the step in frames between each clip is given by
+    ``step_between_clips``.
+
+    To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``
+    and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two
+    elements will come from video 1, and the next three elements from video 2.
+    Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all
+    frames in a video might be present.
+
+    Internally, it uses a VideoClips object to handle clip creation.
+
+    Args:
+        root (string): Root directory of the Kinetics-400 Dataset.
+        frames_per_clip (int): Number of frames in a clip.
+        step_between_clips (int): Number of frames between each clip.
+        transform (callable, optional): A function/transform that takes in a TxHxWxC video
+            and returns a transformed version.
+
+    Returns:
+        video (Tensor[T, H, W, C]): the `T` video frames
+        audio (Tensor[K, L]): the audio frames, where `K` is the number of channels
+            and `L` is the number of points
+        label (int): class of the video clip
+    """
+
+    def __init__(self, root, frames_per_clip, step_between_clips=1, transform=None):
+        super(Kinetics400, self).__init__(root)
+        extensions = ('avi',)
+
+        classes = list(sorted(list_dir(root)))
+        class_to_idx = {classes[i]: i for i in range(len(classes))}
+        self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None)
+        self.classes = classes
+        video_list = [x[0] for x in self.samples]
+        self.video_clips = VideoClips(video_list, frames_per_clip, step_between_clips)
+        self.transform = transform
+
+    def __len__(self):
+        return self.video_clips.num_clips()
+
+    def __getitem__(self, idx):
+        video, audio, info, video_idx = self.video_clips.get_clip(idx)
+        label = self.samples[video_idx][1]
+
+        if self.transform is not None:
+            video = self.transform(video)
+
+        return video, audio, label
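A minimal usage sketch, assuming the videos have been downloaded and arranged as ``root/<class_name>/<video>.avi`` (the path and clip sizes are placeholders; decoding requires a working video backend):

.. code:: python

    import torchvision.datasets as dset

    kinetics = dset.Kinetics400(root='kinetics400/train',
                                frames_per_clip=32, step_between_clips=32)
    video, audio, label = kinetics[0]   # video: Tensor[32, H, W, C]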
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/datasets/lsun.html b/docs/stable/_modules/torchvision/datasets/lsun.html
new file mode 100644
index 000000000000..150d51847ebb
--- /dev/null
+++ b/docs/stable/_modules/torchvision/datasets/lsun.html
@@ -0,0 +1,672 @@
+torchvision.datasets.lsun — PyTorch master documentation

    Source code for torchvision.datasets.lsun

    +from .vision import VisionDataset
    +from PIL import Image
    +import os
    +import os.path
    +import six
    +import string
    +import sys
    +from collections import Iterable
    +
    +if sys.version_info[0] == 2:
    +    import cPickle as pickle
    +else:
    +    import pickle
    +
    +from .utils import verify_str_arg, iterable_to_str
    +
    +
    +class LSUNClass(VisionDataset):
    +    def __init__(self, root, transform=None, target_transform=None):
    +        import lmdb
    +        super(LSUNClass, self).__init__(root, transform=transform,
    +                                        target_transform=target_transform)
    +
    +        self.env = lmdb.open(root, max_readers=1, readonly=True, lock=False,
    +                             readahead=False, meminit=False)
    +        with self.env.begin(write=False) as txn:
    +            self.length = txn.stat()['entries']
    +        cache_file = '_cache_' + ''.join(c for c in root if c in string.ascii_letters)
    +        if os.path.isfile(cache_file):
    +            self.keys = pickle.load(open(cache_file, "rb"))
    +        else:
    +            with self.env.begin(write=False) as txn:
    +                self.keys = [key for key, _ in txn.cursor()]
    +            pickle.dump(self.keys, open(cache_file, "wb"))
    +
    +    def __getitem__(self, index):
    +        img, target = None, None
    +        env = self.env
    +        with env.begin(write=False) as txn:
    +            imgbuf = txn.get(self.keys[index])
    +
    +        buf = six.BytesIO()
    +        buf.write(imgbuf)
    +        buf.seek(0)
    +        img = Image.open(buf).convert('RGB')
    +
    +        if self.transform is not None:
    +            img = self.transform(img)
    +
    +        if self.target_transform is not None:
    +            target = self.target_transform(target)
    +
    +        return img, target
    +
    +    def __len__(self):
    +        return self.length
    +
    +
    +
+class LSUN(VisionDataset):
+    """
+    `LSUN <http://lsun.cs.princeton.edu>`_ dataset.
+
+    Args:
+        root (string): Root directory for the database files.
+        classes (string or list): One of {'train', 'val', 'test'} or a list of
+            categories to load, e.g. ['bedroom_train', 'church_outdoor_train'].
+        transform (callable, optional): A function/transform that takes in a PIL image
+            and returns a transformed version. E.g., ``transforms.RandomCrop``
+        target_transform (callable, optional): A function/transform that takes in the
+            target and transforms it.
+    """
+
+    def __init__(self, root, classes='train', transform=None, target_transform=None):
+        super(LSUN, self).__init__(root, transform=transform,
+                                   target_transform=target_transform)
+        self.classes = self._verify_classes(classes)
+
+        # for each class, create an LSUNClassDataset
+        self.dbs = []
+        for c in self.classes:
+            self.dbs.append(LSUNClass(
+                root=root + '/' + c + '_lmdb',
+                transform=transform))
+
+        self.indices = []
+        count = 0
+        for db in self.dbs:
+            count += len(db)
+            self.indices.append(count)
+
+        self.length = count
+
+    def _verify_classes(self, classes):
+        categories = ['bedroom', 'bridge', 'church_outdoor', 'classroom',
+                      'conference_room', 'dining_room', 'kitchen',
+                      'living_room', 'restaurant', 'tower']
+        dset_opts = ['train', 'val', 'test']
+
+        try:
+            verify_str_arg(classes, "classes", dset_opts)
+            if classes == 'test':
+                classes = [classes]
+            else:
+                classes = [c + '_' + classes for c in categories]
+        except ValueError:
+            if not isinstance(classes, Iterable):
+                msg = ("Expected type str or Iterable for argument classes, "
+                       "but got type {}.")
+                raise ValueError(msg.format(type(classes)))
+
+            classes = list(classes)
+            msg_fmtstr = ("Expected type str for elements in argument classes, "
+                          "but got type {}.")
+            for c in classes:
+                verify_str_arg(c, custom_msg=msg_fmtstr.format(type(c)))
+                c_short = c.split('_')
+                category, dset_opt = '_'.join(c_short[:-1]), c_short[-1]
+
+                msg_fmtstr = "Unknown value '{}' for {}. Valid values are {{{}}}."
+                msg = msg_fmtstr.format(category, "LSUN class",
+                                        iterable_to_str(categories))
+                verify_str_arg(category, valid_values=categories, custom_msg=msg)
+
+                msg = msg_fmtstr.format(dset_opt, "postfix", iterable_to_str(dset_opts))
+                verify_str_arg(dset_opt, valid_values=dset_opts, custom_msg=msg)
+
+        return classes
+
+    def __getitem__(self, index):
+        """
+        Args:
+            index (int): Index
+
+        Returns:
+            tuple: Tuple (image, target) where target is the index of the target category.
+        """
+        target = 0
+        sub = 0
+        for ind in self.indices:
+            if index < ind:
+                break
+            target += 1
+            sub = ind
+
+        db = self.dbs[target]
+        index = index - sub
+
+        if self.target_transform is not None:
+            target = self.target_transform(target)
+
+        img, _ = db[index]
+        return img, target
+
+    def __len__(self):
+        return self.length
+
+    def extra_repr(self):
+        return "Classes: {classes}".format(**self.__dict__)
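A minimal usage sketch, assuming the requested LMDB databases (e.g. ``bedroom_train_lmdb``) have been downloaded under ``root`` and the ``lmdb`` package is installed (the path is a placeholder):

.. code:: python

    import torchvision.datasets as dset
    import torchvision.transforms as transforms

    lsun = dset.LSUN(root='path/to/lsun', classes=['bedroom_train'],
                     transform=transforms.ToTensor())
    img, target = lsun[0]   # target indexes into the requested classes list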
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/datasets/mnist.html b/docs/stable/_modules/torchvision/datasets/mnist.html
new file mode 100644
index 000000000000..f85556e654a7
--- /dev/null
+++ b/docs/stable/_modules/torchvision/datasets/mnist.html
@@ -0,0 +1,969 @@
+torchvision.datasets.mnist — PyTorch master documentation

    Source code for torchvision.datasets.mnist

    +from __future__ import print_function
    +from .vision import VisionDataset
    +import warnings
    +from PIL import Image
    +import os
    +import os.path
    +import numpy as np
    +import torch
    +import codecs
    +from .utils import download_url, download_and_extract_archive, extract_archive, \
    +    makedir_exist_ok, verify_str_arg
    +
    +
    +
    [docs]class MNIST(VisionDataset): + """`MNIST <http://yann.lecun.com/exdb/mnist/>`_ Dataset. + + Args: + root (string): Root directory of dataset where ``MNIST/processed/training.pt`` + and ``MNIST/processed/test.pt`` exist. + train (bool, optional): If True, creates dataset from ``training.pt``, + otherwise from ``test.pt``. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + """ + urls = [ + 'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', + 'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz', + 'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', + 'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', + ] + training_file = 'training.pt' + test_file = 'test.pt' + classes = ['0 - zero', '1 - one', '2 - two', '3 - three', '4 - four', + '5 - five', '6 - six', '7 - seven', '8 - eight', '9 - nine'] + + @property + def train_labels(self): + warnings.warn("train_labels has been renamed targets") + return self.targets + + @property + def test_labels(self): + warnings.warn("test_labels has been renamed targets") + return self.targets + + @property + def train_data(self): + warnings.warn("train_data has been renamed data") + return self.data + + @property + def test_data(self): + warnings.warn("test_data has been renamed data") + return self.data + + def __init__(self, root, train=True, transform=None, target_transform=None, + download=False): + super(MNIST, self).__init__(root, transform=transform, + target_transform=target_transform) + self.train = train # training set or test set + + if download: + self.download() + + if not self._check_exists(): + raise RuntimeError('Dataset not found.' + + ' You can use download=True to download it') + + if self.train: + data_file = self.training_file + else: + data_file = self.test_file + self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file)) + + def __getitem__(self, index): + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is index of the target class. 
+ """ + img, target = self.data[index], int(self.targets[index]) + + # doing this so that it is consistent with all other datasets + # to return a PIL Image + img = Image.fromarray(img.numpy(), mode='L') + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self): + return len(self.data) + + @property + def raw_folder(self): + return os.path.join(self.root, self.__class__.__name__, 'raw') + + @property + def processed_folder(self): + return os.path.join(self.root, self.__class__.__name__, 'processed') + + @property + def class_to_idx(self): + return {_class: i for i, _class in enumerate(self.classes)} + + def _check_exists(self): + return (os.path.exists(os.path.join(self.processed_folder, + self.training_file)) and + os.path.exists(os.path.join(self.processed_folder, + self.test_file))) + + def download(self): + """Download the MNIST data if it doesn't exist in processed_folder already.""" + + if self._check_exists(): + return + + makedir_exist_ok(self.raw_folder) + makedir_exist_ok(self.processed_folder) + + # download files + for url in self.urls: + filename = url.rpartition('/')[2] + download_and_extract_archive(url, download_root=self.raw_folder, filename=filename) + + # process and save as torch files + print('Processing...') + + training_set = ( + read_image_file(os.path.join(self.raw_folder, 'train-images-idx3-ubyte')), + read_label_file(os.path.join(self.raw_folder, 'train-labels-idx1-ubyte')) + ) + test_set = ( + read_image_file(os.path.join(self.raw_folder, 't10k-images-idx3-ubyte')), + read_label_file(os.path.join(self.raw_folder, 't10k-labels-idx1-ubyte')) + ) + with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f: + torch.save(training_set, f) + with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f: + torch.save(test_set, f) + + print('Done!') + + def extra_repr(self): + return "Split: {}".format("Train" if self.train is True else "Test")
    + + +
    [docs]class FashionMNIST(MNIST): + """`Fashion-MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset. + + Args: + root (string): Root directory of dataset where ``Fashion-MNIST/processed/training.pt`` + and ``Fashion-MNIST/processed/test.pt`` exist. + train (bool, optional): If True, creates dataset from ``training.pt``, + otherwise from ``test.pt``. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + """ + urls = [ + 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz', + 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz', + 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz', + 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz', + ] + classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', + 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
    + + +
    [docs]class KMNIST(MNIST): + """`Kuzushiji-MNIST <https://github.com/rois-codh/kmnist>`_ Dataset. + + Args: + root (string): Root directory of dataset where ``KMNIST/processed/training.pt`` + and ``KMNIST/processed/test.pt`` exist. + train (bool, optional): If True, creates dataset from ``training.pt``, + otherwise from ``test.pt``. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + """ + urls = [ + 'http://codh.rois.ac.jp/kmnist/dataset/kmnist/train-images-idx3-ubyte.gz', + 'http://codh.rois.ac.jp/kmnist/dataset/kmnist/train-labels-idx1-ubyte.gz', + 'http://codh.rois.ac.jp/kmnist/dataset/kmnist/t10k-images-idx3-ubyte.gz', + 'http://codh.rois.ac.jp/kmnist/dataset/kmnist/t10k-labels-idx1-ubyte.gz', + ] + classes = ['o', 'ki', 'su', 'tsu', 'na', 'ha', 'ma', 'ya', 're', 'wo']
    + + +
    [docs]class EMNIST(MNIST): + """`EMNIST <https://www.westernsydney.edu.au/bens/home/reproducible_research/emnist>`_ Dataset. + + Args: + root (string): Root directory of dataset where ``EMNIST/processed/training.pt`` + and ``EMNIST/processed/test.pt`` exist. + split (string): The dataset has 6 different splits: ``byclass``, ``bymerge``, + ``balanced``, ``letters``, ``digits`` and ``mnist``. This argument specifies + which one to use. + train (bool, optional): If True, creates dataset from ``training.pt``, + otherwise from ``test.pt``. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + """ + # Updated URL from https://www.westernsydney.edu.au/bens/home/reproducible_research/emnist + url = 'https://cloudstor.aarnet.edu.au/plus/index.php/s/54h3OuGJhFLwAlQ/download' + splits = ('byclass', 'bymerge', 'balanced', 'letters', 'digits', 'mnist') + + def __init__(self, root, split, **kwargs): + self.split = verify_str_arg(split, "split", self.splits) + self.training_file = self._training_file(split) + self.test_file = self._test_file(split) + super(EMNIST, self).__init__(root, **kwargs) + + @staticmethod + def _training_file(split): + return 'training_{}.pt'.format(split) + + @staticmethod + def _test_file(split): + return 'test_{}.pt'.format(split) + + def download(self): + """Download the EMNIST data if it doesn't exist in processed_folder already.""" + import shutil + + if self._check_exists(): + return + + makedir_exist_ok(self.raw_folder) + makedir_exist_ok(self.processed_folder) + + # download files + print('Downloading and extracting zip archive') + download_and_extract_archive(self.url, download_root=self.raw_folder, filename="emnist.zip", + remove_finished=True) + gzip_folder = os.path.join(self.raw_folder, 'gzip') + for gzip_file in os.listdir(gzip_folder): + if gzip_file.endswith('.gz'): + extract_archive(os.path.join(gzip_folder, gzip_file), gzip_folder) + + # process and save as torch files + for split in self.splits: + print('Processing ' + split) + training_set = ( + read_image_file(os.path.join(gzip_folder, 'emnist-{}-train-images-idx3-ubyte'.format(split))), + read_label_file(os.path.join(gzip_folder, 'emnist-{}-train-labels-idx1-ubyte'.format(split))) + ) + test_set = ( + read_image_file(os.path.join(gzip_folder, 'emnist-{}-test-images-idx3-ubyte'.format(split))), + read_label_file(os.path.join(gzip_folder, 'emnist-{}-test-labels-idx1-ubyte'.format(split))) + ) + with open(os.path.join(self.processed_folder, self._training_file(split)), 'wb') as f: + torch.save(training_set, f) + with open(os.path.join(self.processed_folder, self._test_file(split)), 'wb') as f: + torch.save(test_set, f) + shutil.rmtree(gzip_folder) + + print('Done!')
    + + +
    [docs]class QMNIST(MNIST): + """`QMNIST <https://github.com/facebookresearch/qmnist>`_ Dataset. + + Args: + root (string): Root directory of dataset whose ``processed'' + subdir contains torch binary files with the datasets. + what (string,optional): Can be 'train', 'test', 'test10k', + 'test50k', or 'nist' for respectively the mnist compatible + training set, the 60k qmnist testing set, the 10k qmnist + examples that match the mnist testing set, the 50k + remaining qmnist testing examples, or all the nist + digits. The default is to select 'train' or 'test' + according to the compatibility argument 'train'. + compat (bool,optional): A boolean that says whether the target + for each example is class number (for compatibility with + the MNIST dataloader) or a torch vector containing the + full qmnist information. Default=True. + download (bool, optional): If true, downloads the dataset from + the internet and puts it in root directory. If dataset is + already downloaded, it is not downloaded again. + transform (callable, optional): A function/transform that + takes in an PIL image and returns a transformed + version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform + that takes in the target and transforms it. + train (bool,optional,compatibility): When argument 'what' is + not specified, this boolean decides whether to load the + training set ot the testing set. Default: True. + + """ + + subsets = { + 'train': 'train', + 'test': 'test', 'test10k': 'test', 'test50k': 'test', + 'nist': 'nist' + } + urls = { + 'train': ['https://raw.githubusercontent.com/facebookresearch/qmnist/master/qmnist-train-images-idx3-ubyte.gz', + 'https://raw.githubusercontent.com/facebookresearch/qmnist/master/qmnist-train-labels-idx2-int.gz'], + 'test': ['https://raw.githubusercontent.com/facebookresearch/qmnist/master/qmnist-test-images-idx3-ubyte.gz', + 'https://raw.githubusercontent.com/facebookresearch/qmnist/master/qmnist-test-labels-idx2-int.gz'], + 'nist': ['https://raw.githubusercontent.com/facebookresearch/qmnist/master/xnist-images-idx3-ubyte.xz', + 'https://raw.githubusercontent.com/facebookresearch/qmnist/master/xnist-labels-idx2-int.xz'] + } + classes = ['0 - zero', '1 - one', '2 - two', '3 - three', '4 - four', + '5 - five', '6 - six', '7 - seven', '8 - eight', '9 - nine'] + + def __init__(self, root, what=None, compat=True, train=True, **kwargs): + if what is None: + what = 'train' if train else 'test' + self.what = verify_str_arg(what, "what", tuple(self.subsets.keys())) + self.compat = compat + self.data_file = what + '.pt' + self.training_file = self.data_file + self.test_file = self.data_file + super(QMNIST, self).__init__(root, train, **kwargs) + + def download(self): + """Download the QMNIST data if it doesn't exist in processed_folder already. + Note that we only download what has been asked for (argument 'what'). 
+ """ + if self._check_exists(): + return + makedir_exist_ok(self.raw_folder) + makedir_exist_ok(self.processed_folder) + urls = self.urls[self.subsets[self.what]] + files = [] + + # download data files if not already there + for url in urls: + filename = url.rpartition('/')[2] + file_path = os.path.join(self.raw_folder, filename) + if not os.path.isfile(file_path): + download_url(url, root=self.raw_folder, filename=filename, md5=None) + files.append(file_path) + + # process and save as torch files + print('Processing...') + data = read_sn3_pascalvincent_tensor(files[0]) + assert(data.dtype == torch.uint8) + assert(data.ndimension() == 3) + targets = read_sn3_pascalvincent_tensor(files[1]).long() + assert(targets.ndimension() == 2) + if self.what == 'test10k': + data = data[0:10000, :, :].clone() + targets = targets[0:10000, :].clone() + if self.what == 'test50k': + data = data[10000:, :, :].clone() + targets = targets[10000:, :].clone() + with open(os.path.join(self.processed_folder, self.data_file), 'wb') as f: + torch.save((data, targets), f) + + def __getitem__(self, index): + # redefined to handle the compat flag + img, target = self.data[index], self.targets[index] + img = Image.fromarray(img.numpy(), mode='L') + if self.transform is not None: + img = self.transform(img) + if self.compat: + target = int(target[0]) + if self.target_transform is not None: + target = self.target_transform(target) + return img, target + + def extra_repr(self): + return "Split: {}".format(self.what)
+ + +def get_int(b): + return int(codecs.encode(b, 'hex'), 16) + + +def open_maybe_compressed_file(path): + """Return a file object that possibly decompresses 'path' on the fly. + Decompression occurs when argument `path` is a string and ends with '.gz' or '.xz'. + """ + if not isinstance(path, torch._six.string_classes): + return path + if path.endswith('.gz'): + import gzip + return gzip.open(path, 'rb') + if path.endswith('.xz'): + import lzma + return lzma.open(path, 'rb') + return open(path, 'rb') + + +def read_sn3_pascalvincent_tensor(path, strict=True): + """Read an SN3 file in "Pascal Vincent" format (Lush file 'libidx/idx-io.lsh'). + Argument may be a filename, compressed filename, or file object. + """ + # typemap + if not hasattr(read_sn3_pascalvincent_tensor, 'typemap'): + read_sn3_pascalvincent_tensor.typemap = { + 8: (torch.uint8, np.uint8, np.uint8), + 9: (torch.int8, np.int8, np.int8), + 11: (torch.int16, np.dtype('>i2'), 'i2'), + 12: (torch.int32, np.dtype('>i4'), 'i4'), + 13: (torch.float32, np.dtype('>f4'), 'f4'), + 14: (torch.float64, np.dtype('>f8'), 'f8')} + # read + with open_maybe_compressed_file(path) as f: + data = f.read() + # parse the magic number: the low byte is the number of dimensions, + # the next byte selects the element type from the typemap above + magic = get_int(data[0:4]) + nd = magic % 256 + ty = magic // 256 + assert 1 <= nd <= 3 + assert 8 <= ty <= 14 + m = read_sn3_pascalvincent_tensor.typemap[ty] + s = [get_int(data[4 * (i + 1): 4 * (i + 2)]) for i in range(nd)] + parsed = np.frombuffer(data, dtype=m[1], offset=(4 * (nd + 1))) + assert parsed.shape[0] == np.prod(s) or not strict + return torch.from_numpy(parsed.astype(m[2], copy=False)).view(*s) + + +def read_label_file(path): + with open(path, 'rb') as f: + x = read_sn3_pascalvincent_tensor(f, strict=False) + assert(x.dtype == torch.uint8) + assert(x.ndimension() == 1) + return x.long() + + +def read_image_file(path): + with open(path, 'rb') as f: + x = read_sn3_pascalvincent_tensor(f, strict=False) + assert(x.dtype == torch.uint8) + assert(x.ndimension() == 3) + return x +
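For reference, a minimal usage sketch of the class above (paths are hypothetical; ``download=True`` needs network access). With ``compat=True`` each target is a plain class index, as with MNIST; with ``compat=False`` it is the full QMNIST metadata vector for that example:

    from torchvision import datasets, transforms

    # MNIST-compatible targets: label is an int in [0, 9]
    train_set = datasets.QMNIST('data', what='train', compat=True,
                                download=True, transform=transforms.ToTensor())
    img, label = train_set[0]

    # full QMNIST targets: label is a torch.LongTensor of extra metadata
    test_set = datasets.QMNIST('data', what='test50k', compat=False, download=True)
    img, meta = test_set[0]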
diff --git a/docs/stable/_modules/torchvision/datasets/phototour.html b/docs/stable/_modules/torchvision/datasets/phototour.html
new file mode 100644
index 000000000000..209112c56ef0
--- /dev/null
+++ b/docs/stable/_modules/torchvision/datasets/phototour.html
@@ -0,0 +1,723 @@

    Source code for torchvision.datasets.phototour

    +import os
    +import numpy as np
    +from PIL import Image
    +
    +import torch
    +from .vision import VisionDataset
    +
    +from .utils import download_url
    +
    +
    +
[docs]class PhotoTour(VisionDataset): + """`Learning Local Image Descriptors Data <http://phototour.cs.washington.edu/patches/default.htm>`_ Dataset. + + Args: + root (string): Root directory where images are. + name (string): Name of the dataset to load. + train (bool, optional): If True, yields single patches; otherwise yields the + ground-truth match pairs used for evaluation. + transform (callable, optional): A function/transform that takes in a patch + (a 64x64 ``torch.ByteTensor``) and returns a transformed version. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + + """ + urls = { + 'notredame_harris': [ + 'http://matthewalunbrown.com/patchdata/notredame_harris.zip', + 'notredame_harris.zip', + '69f8c90f78e171349abdf0307afefe4d' + ], + 'yosemite_harris': [ + 'http://matthewalunbrown.com/patchdata/yosemite_harris.zip', + 'yosemite_harris.zip', + 'a73253d1c6fbd3ba2613c45065c00d46' + ], + 'liberty_harris': [ + 'http://matthewalunbrown.com/patchdata/liberty_harris.zip', + 'liberty_harris.zip', + 'c731fcfb3abb4091110d0ae8c7ba182c' + ], + 'notredame': [ + 'http://icvl.ee.ic.ac.uk/vbalnt/notredame.zip', + 'notredame.zip', + '509eda8535847b8c0a90bbb210c83484' + ], + 'yosemite': [ + 'http://icvl.ee.ic.ac.uk/vbalnt/yosemite.zip', + 'yosemite.zip', + '533b2e8eb7ede31be40abc317b2fd4f0' + ], + 'liberty': [ + 'http://icvl.ee.ic.ac.uk/vbalnt/liberty.zip', + 'liberty.zip', + 'fdd9152f138ea5ef2091746689176414' + ], + } + mean = {'notredame': 0.4854, 'yosemite': 0.4844, 'liberty': 0.4437, + 'notredame_harris': 0.4854, 'yosemite_harris': 0.4844, 'liberty_harris': 0.4437} + std = {'notredame': 0.1864, 'yosemite': 0.1818, 'liberty': 0.2019, + 'notredame_harris': 0.1864, 'yosemite_harris': 0.1818, 'liberty_harris': 0.2019} + lens = {'notredame': 468159, 'yosemite': 633587, 'liberty': 450092, + 'liberty_harris': 379587, 'yosemite_harris': 450912, 'notredame_harris': 325295} + image_ext = 'bmp' + info_file = 'info.txt' + matches_files = 'm50_100000_100000_0.txt' + + def __init__(self, root, name, train=True, transform=None, download=False): + super(PhotoTour, self).__init__(root, transform=transform) + self.name = name + self.data_dir = os.path.join(self.root, name) + self.data_down = os.path.join(self.root, '{}.zip'.format(name)) + self.data_file = os.path.join(self.root, '{}.pt'.format(name)) + + self.train = train + self.mean = self.mean[name] + self.std = self.std[name] + + if download: + self.download() + + if not self._check_datafile_exists(): + raise RuntimeError('Dataset not found.' + + ' You can use download=True to download it') + + # load the serialized data + self.data, self.labels, self.matches = torch.load(self.data_file) + +
[docs] def __getitem__(self, index): + """ + Args: + index (int): Index + + Returns: + the patch tensor if ``self.train`` is True, otherwise the + tuple (data1, data2, matches) + """ + if self.train: + data = self.data[index] + if self.transform is not None: + data = self.transform(data) + return data + m = self.matches[index] + data1, data2 = self.data[m[0]], self.data[m[1]] + if self.transform is not None: + data1 = self.transform(data1) + data2 = self.transform(data2) + return data1, data2, m[2]
    + + def __len__(self): + if self.train: + return self.lens[self.name] + return len(self.matches) + + def _check_datafile_exists(self): + return os.path.exists(self.data_file) + + def _check_downloaded(self): + return os.path.exists(self.data_dir) + + def download(self): + if self._check_datafile_exists(): + print('# Found cached data {}'.format(self.data_file)) + return + + if not self._check_downloaded(): + # download files + url = self.urls[self.name][0] + filename = self.urls[self.name][1] + md5 = self.urls[self.name][2] + fpath = os.path.join(self.root, filename) + + download_url(url, self.root, filename, md5) + + print('# Extracting data {}\n'.format(self.data_down)) + + import zipfile + with zipfile.ZipFile(fpath, 'r') as z: + z.extractall(self.data_dir) + + os.unlink(fpath) + + # process and save as torch files + print('# Caching data {}'.format(self.data_file)) + + dataset = ( + read_image_file(self.data_dir, self.image_ext, self.lens[self.name]), + read_info_file(self.data_dir, self.info_file), + read_matches_files(self.data_dir, self.matches_files) + ) + + with open(self.data_file, 'wb') as f: + torch.save(dataset, f) + + def extra_repr(self): + return "Split: {}".format("Train" if self.train is True else "Test")
+ + +def read_image_file(data_dir, image_ext, n): + """Return a Tensor containing the patches. + """ + + def PIL2array(_img): + """Convert a PIL image to a 2D numpy array. + """ + return np.array(_img.getdata(), dtype=np.uint8).reshape(64, 64) + + def find_files(_data_dir, _image_ext): + """Return a list with the file names of the images containing the patches. + """ + files = [] + # find those files with the specified extension + for file_dir in os.listdir(_data_dir): + if file_dir.endswith(_image_ext): + files.append(os.path.join(_data_dir, file_dir)) + return sorted(files) # sort files in ascending order to keep relations + + patches = [] + list_files = find_files(data_dir, image_ext) + + for fpath in list_files: + img = Image.open(fpath) + # each 1024x1024 bitmap holds a 16x16 grid of 64x64 patches + for y in range(0, 1024, 64): + for x in range(0, 1024, 64): + patch = img.crop((x, y, x + 64, y + 64)) + patches.append(PIL2array(patch)) + return torch.ByteTensor(np.array(patches[:n])) + + +def read_info_file(data_dir, info_file): + """Return a Tensor containing the list of labels. + Read the file and keep only the ID of the 3D point. + """ + with open(os.path.join(data_dir, info_file), 'r') as f: + labels = [int(line.split()[0]) for line in f] + return torch.LongTensor(labels) + + +def read_matches_files(data_dir, matches_file): + """Return a Tensor containing the ground-truth matches. + Read the file and keep only the 3D point IDs. + Matches are represented with a 1, non-matches with a 0. + """ + matches = [] + with open(os.path.join(data_dir, matches_file), 'r') as f: + for line in f: + line_split = line.split() + matches.append([int(line_split[0]), int(line_split[3]), + int(line_split[1] == line_split[4])]) + return torch.LongTensor(matches) +
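A short usage sketch (hypothetical root path): in training mode ``__getitem__`` yields single 64x64 patch tensors, while in test mode it yields ``(patch1, patch2, is_match)`` triplets drawn from the matches file:

    from torchvision import datasets

    train_set = datasets.PhotoTour('data/phototour', name='notredame',
                                   train=True, download=True)
    patch = train_set[0]                    # a 64x64 torch.ByteTensor

    test_set = datasets.PhotoTour('data/phototour', name='liberty',
                                  train=False, download=True)
    patch1, patch2, is_match = test_set[0]  # is_match is 1 or 0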
diff --git a/docs/stable/_modules/torchvision/datasets/sbd.html b/docs/stable/_modules/torchvision/datasets/sbd.html
new file mode 100644
index 000000000000..22f20ded3118
--- /dev/null
+++ b/docs/stable/_modules/torchvision/datasets/sbd.html
@@ -0,0 +1,638 @@

    Source code for torchvision.datasets.sbd

    +import os
    +import shutil
    +from .vision import VisionDataset
    +
    +import numpy as np
    +
    +from PIL import Image
    +from .utils import download_url, verify_str_arg
    +from .voc import download_extract
    +
    +
    +
    [docs]class SBDataset(VisionDataset): + """`Semantic Boundaries Dataset <http://home.bharathh.info/pubs/codes/SBD/download.html>`_ + + The SBD currently contains annotations from 11355 images taken from the PASCAL VOC 2011 dataset. + + .. note :: + + Please note that the train and val splits included with this dataset are different from + the splits in the PASCAL VOC dataset. In particular some "train" images might be part of + VOC2012 val. + If you are interested in testing on VOC 2012 val, then use `image_set='train_noval'`, + which excludes all val images. + + .. warning:: + + This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format. + + Args: + root (string): Root directory of the Semantic Boundaries Dataset + image_set (string, optional): Select the image_set to use, ``train``, ``val`` or ``train_noval``. + Image set ``train_noval`` excludes VOC 2012 val images. + mode (string, optional): Select target type. Possible values 'boundaries' or 'segmentation'. + In case of 'boundaries', the target is an array of shape `[num_classes, H, W]`, + where `num_classes=20`. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + transforms (callable, optional): A function/transform that takes input sample and its target as entry + and returns a transformed version. Input sample is PIL image and target is a numpy array + if `mode='boundaries'` or PIL image if `mode='segmentation'`. + """ + + url = "http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz" + md5 = "82b4d87ceb2ed10f6038a1cba92111cb" + filename = "benchmark.tgz" + + voc_train_url = "http://home.bharathh.info/pubs/codes/SBD/train_noval.txt" + voc_split_filename = "train_noval.txt" + voc_split_md5 = "79bff800c5f0b1ec6b21080a3c066722" + + def __init__(self, + root, + image_set='train', + mode='boundaries', + download=False, + transforms=None): + + try: + from scipy.io import loadmat + self._loadmat = loadmat + except ImportError: + raise RuntimeError("Scipy is not found. This dataset needs to have scipy installed: " + "pip install scipy") + + super(SBDataset, self).__init__(root, transforms) + self.image_set = verify_str_arg(image_set, "image_set", + ("train", "val", "train_noval")) + self.mode = verify_str_arg(mode, "mode", ("segmentation", "boundaries")) + self.num_classes = 20 + + sbd_root = self.root + image_dir = os.path.join(sbd_root, 'img') + mask_dir = os.path.join(sbd_root, 'cls') + + if download: + download_extract(self.url, self.root, self.filename, self.md5) + extracted_ds_root = os.path.join(self.root, "benchmark_RELEASE", "dataset") + for f in ["cls", "img", "inst", "train.txt", "val.txt"]: + old_path = os.path.join(extracted_ds_root, f) + shutil.move(old_path, sbd_root) + download_url(self.voc_train_url, sbd_root, self.voc_split_filename, + self.voc_split_md5) + + if not os.path.isdir(sbd_root): + raise RuntimeError('Dataset not found or corrupted.' 
+ + ' You can use download=True to download it') + + split_f = os.path.join(sbd_root, image_set.rstrip('\n') + '.txt') + + with open(os.path.join(split_f), "r") as f: + file_names = [x.strip() for x in f.readlines()] + + self.images = [os.path.join(image_dir, x + ".jpg") for x in file_names] + self.masks = [os.path.join(mask_dir, x + ".mat") for x in file_names] + assert (len(self.images) == len(self.masks)) + + self._get_target = self._get_segmentation_target \ + if self.mode == "segmentation" else self._get_boundaries_target + + def _get_segmentation_target(self, filepath): + mat = self._loadmat(filepath) + return Image.fromarray(mat['GTcls'][0]['Segmentation'][0]) + + def _get_boundaries_target(self, filepath): + mat = self._loadmat(filepath) + return np.concatenate([np.expand_dims(mat['GTcls'][0]['Boundaries'][0][i][0].toarray(), axis=0) + for i in range(self.num_classes)], axis=0) + + def __getitem__(self, index): + img = Image.open(self.images[index]).convert('RGB') + target = self._get_target(self.masks[index]) + + if self.transforms is not None: + img, target = self.transforms(img, target) + + return img, target + + def __len__(self): + return len(self.images) + + def extra_repr(self): + lines = ["Image set: {image_set}", "Mode: {mode}"] + return '\n'.join(lines).format(**self.__dict__)
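A minimal usage sketch (hypothetical root; scipy must be installed). With ``mode='segmentation'`` the target is a PIL image; with ``mode='boundaries'`` it is a ``[20, H, W]`` numpy array of per-class boundary masks:

    from torchvision import datasets

    sbd = datasets.SBDataset('data/sbd', image_set='train_noval',
                             mode='boundaries', download=True)
    img, target = sbd[0]   # img: PIL RGB image, target: numpy array [20, H, W]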
diff --git a/docs/stable/_modules/torchvision/datasets/sbu.html b/docs/stable/_modules/torchvision/datasets/sbu.html
new file mode 100644
index 000000000000..b7a824173385
--- /dev/null
+++ b/docs/stable/_modules/torchvision/datasets/sbu.html
@@ -0,0 +1,622 @@

    Source code for torchvision.datasets.sbu

    +from PIL import Image
    +from six.moves import zip
    +from .utils import download_url, check_integrity
    +
    +import os
    +from .vision import VisionDataset
    +
    +
    +
[docs]class SBU(VisionDataset): + """`SBU Captioned Photo <http://www.cs.virginia.edu/~vicente/sbucaptions/>`_ Dataset. + + Args: + root (string): Root directory of dataset where tarball + ``SBUCaptionedPhotoDataset.tar.gz`` exists. + transform (callable, optional): A function/transform that takes in a PIL image + and returns a transformed version. E.g., ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If True, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + """ + url = "http://www.cs.virginia.edu/~vicente/sbucaptions/SBUCaptionedPhotoDataset.tar.gz" + filename = "SBUCaptionedPhotoDataset.tar.gz" + md5_checksum = '9aec147b3488753cf758b4d493422285' + + def __init__(self, root, transform=None, target_transform=None, download=True): + super(SBU, self).__init__(root, transform=transform, + target_transform=target_transform) + + if download: + self.download() + + if not self._check_integrity(): + raise RuntimeError('Dataset not found or corrupted.' + + ' You can use download=True to download it') + + # Read the caption for each photo + self.photos = [] + self.captions = [] + + file1 = os.path.join(self.root, 'dataset', 'SBU_captioned_photo_dataset_urls.txt') + file2 = os.path.join(self.root, 'dataset', 'SBU_captioned_photo_dataset_captions.txt') + + for line1, line2 in zip(open(file1), open(file2)): + url = line1.rstrip() + photo = os.path.basename(url) + filename = os.path.join(self.root, 'dataset', photo) + if os.path.exists(filename): + caption = line2.rstrip() + self.photos.append(photo) + self.captions.append(caption) + +
    [docs] def __getitem__(self, index): + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is a caption for the photo. + """ + filename = os.path.join(self.root, 'dataset', self.photos[index]) + img = Image.open(filename).convert('RGB') + if self.transform is not None: + img = self.transform(img) + + target = self.captions[index] + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target
    + + def __len__(self): + """The number of photos in the dataset.""" + return len(self.photos) + + def _check_integrity(self): + """Check the md5 checksum of the downloaded tarball.""" + root = self.root + fpath = os.path.join(root, self.filename) + if not check_integrity(fpath, self.md5_checksum): + return False + return True + + def download(self): + """Download and extract the tarball, and download each individual photo.""" + import tarfile + + if self._check_integrity(): + print('Files already downloaded and verified') + return + + download_url(self.url, self.root, self.filename, self.md5_checksum) + + # Extract file + with tarfile.open(os.path.join(self.root, self.filename), 'r:gz') as tar: + tar.extractall(path=self.root) + + # Download individual photos + with open(os.path.join(self.root, 'dataset', 'SBU_captioned_photo_dataset_urls.txt')) as fh: + for line in fh: + url = line.rstrip() + try: + download_url(url, os.path.join(self.root, 'dataset')) + except OSError: + # The images point to public images on Flickr. + # Note: Images might be removed by users at anytime. + pass
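A brief usage sketch (hypothetical root). Note that the initial download fetches the tarball and then each photo individually from Flickr, so it is slow and some photos may no longer be available:

    from torchvision import datasets

    sbu = datasets.SBU('data/sbu', download=True)
    img, caption = sbu[0]   # PIL RGB image and its caption string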
diff --git a/docs/stable/_modules/torchvision/datasets/stl10.html b/docs/stable/_modules/torchvision/datasets/stl10.html
new file mode 100644
index 000000000000..31c2deab371b
--- /dev/null
+++ b/docs/stable/_modules/torchvision/datasets/stl10.html
@@ -0,0 +1,691 @@

    Source code for torchvision.datasets.stl10

    +from __future__ import print_function
    +from PIL import Image
    +import os
    +import os.path
    +import numpy as np
    +
    +from .vision import VisionDataset
    +from .utils import check_integrity, download_and_extract_archive, verify_str_arg
    +
    +
    +
[docs]class STL10(VisionDataset): + """`STL10 <https://cs.stanford.edu/~acoates/stl10/>`_ Dataset. + + Args: + root (string): Root directory of dataset where directory + ``stl10_binary`` exists. + split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}. + Selects the corresponding subset of the dataset. + folds (int, optional): One of {0-9} or None. + For training, loads one of the 10 pre-defined folds of 1k samples for the + standard evaluation procedure. If no value is passed, loads the 5k samples. + transform (callable, optional): A function/transform that takes in a PIL image + and returns a transformed version. E.g., ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + + """ + base_folder = 'stl10_binary' + url = "http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz" + filename = "stl10_binary.tar.gz" + tgz_md5 = '91f7769df0f17e558f3565bffb0c7dfb' + class_names_file = 'class_names.txt' + folds_list_file = 'fold_indices.txt' + train_list = [ + ['train_X.bin', '918c2871b30a85fa023e0c44e0bee87f'], + ['train_y.bin', '5a34089d4802c674881badbb80307741'], + ['unlabeled_X.bin', '5242ba1fed5e4be9e1e742405eb56ca4'] + ] + + test_list = [ + ['test_X.bin', '7f263ba9f9e0b06b93213547f721ac82'], + ['test_y.bin', '36f9794fa4beb8a2c72628de14fa638e'] + ] + splits = ('train', 'train+unlabeled', 'unlabeled', 'test') + + def __init__(self, root, split='train', folds=None, transform=None, + target_transform=None, download=False): + super(STL10, self).__init__(root, transform=transform, + target_transform=target_transform) + self.split = verify_str_arg(split, "split", self.splits) + self.folds = self._verify_folds(folds) + + if download: + self.download() + + if not self._check_integrity(): + raise RuntimeError( + 'Dataset not found or corrupted. ' + 'You can use download=True to download it') + + # now load the numpy arrays from the binary files + if self.split == 'train': + self.data, self.labels = self.__loadfile( + self.train_list[0][0], self.train_list[1][0]) + self.__load_folds(folds) + + elif self.split == 'train+unlabeled': + self.data, self.labels = self.__loadfile( + self.train_list[0][0], self.train_list[1][0]) + self.__load_folds(folds) + unlabeled_data, _ = self.__loadfile(self.train_list[2][0]) + self.data = np.concatenate((self.data, unlabeled_data)) + self.labels = np.concatenate( + (self.labels, np.asarray([-1] * unlabeled_data.shape[0]))) + + elif self.split == 'unlabeled': + self.data, _ = self.__loadfile(self.train_list[2][0]) + self.labels = np.asarray([-1] * self.data.shape[0]) + else: # self.split == 'test' + self.data, self.labels = self.__loadfile( + self.test_list[0][0], self.test_list[1][0]) + + class_file = os.path.join( + self.root, self.base_folder, self.class_names_file) + if os.path.isfile(class_file): + with open(class_file) as f: + self.classes = f.read().splitlines() + + def _verify_folds(self, folds): + if folds is None: + return folds + elif isinstance(folds, int): + if folds in range(10): + return folds + msg = ("Value for argument folds should be in the range [0, 10), " + "but got {}.") + raise ValueError(msg.format(folds)) + else: + msg = "Expected type None or int for argument folds, but got type {}." + raise ValueError(msg.format(type(folds))) +
    [docs] def __getitem__(self, index): + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is index of the target class. + """ + if self.labels is not None: + img, target = self.data[index], int(self.labels[index]) + else: + img, target = self.data[index], None + + # doing this so that it is consistent with all other datasets + # to return a PIL Image + img = Image.fromarray(np.transpose(img, (1, 2, 0))) + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target
    + + def __len__(self): + return self.data.shape[0] + + def __loadfile(self, data_file, labels_file=None): + labels = None + if labels_file: + path_to_labels = os.path.join( + self.root, self.base_folder, labels_file) + with open(path_to_labels, 'rb') as f: + labels = np.fromfile(f, dtype=np.uint8) - 1 # 0-based + + path_to_data = os.path.join(self.root, self.base_folder, data_file) + with open(path_to_data, 'rb') as f: + # read whole file in uint8 chunks + everything = np.fromfile(f, dtype=np.uint8) + images = np.reshape(everything, (-1, 3, 96, 96)) + images = np.transpose(images, (0, 1, 3, 2)) + + return images, labels + + def _check_integrity(self): + root = self.root + for fentry in (self.train_list + self.test_list): + filename, md5 = fentry[0], fentry[1] + fpath = os.path.join(root, self.base_folder, filename) + if not check_integrity(fpath, md5): + return False + return True + + def download(self): + if self._check_integrity(): + print('Files already downloaded and verified') + return + download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5) + + def extra_repr(self): + return "Split: {split}".format(**self.__dict__) + + def __load_folds(self, folds): + # loads one of the folds if specified + if folds is None: + return + path_to_folds = os.path.join( + self.root, self.base_folder, self.folds_list_file) + with open(path_to_folds, 'r') as f: + str_idx = f.read().splitlines()[folds] + list_idx = np.fromstring(str_idx, dtype=np.uint8, sep=' ') + self.data, self.labels = self.data[list_idx, :, :, :], self.labels[list_idx]
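A usage sketch (hypothetical root): unlabeled examples carry the placeholder target -1, and passing ``folds`` restricts training to one of the ten predefined 1k-sample folds:

    from torchvision import datasets

    # labeled training data, restricted to predefined fold 0 (1000 samples)
    train_set = datasets.STL10('data', split='train', folds=0, download=True)

    # unlabeled split: every target is -1
    unlabeled = datasets.STL10('data', split='unlabeled', download=True)
    img, target = unlabeled[0]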
diff --git a/docs/stable/_modules/torchvision/datasets/svhn.html b/docs/stable/_modules/torchvision/datasets/svhn.html
new file mode 100644
index 000000000000..aef2a3cd52cd
--- /dev/null
+++ b/docs/stable/_modules/torchvision/datasets/svhn.html
@@ -0,0 +1,628 @@

    Source code for torchvision.datasets.svhn

    +from __future__ import print_function
    +from .vision import VisionDataset
    +from PIL import Image
    +import os
    +import os.path
    +import numpy as np
    +from .utils import download_url, check_integrity, verify_str_arg
    +
    +
    +
[docs]class SVHN(VisionDataset): + """`SVHN <http://ufldl.stanford.edu/housenumbers/>`_ Dataset. + + Note: The SVHN dataset assigns the label `10` to the digit `0`. However, in this Dataset, + we assign the label `0` to the digit `0` to be compatible with PyTorch loss functions, which + expect the class labels to be in the range `[0, C-1]`. + + Args: + root (string): Root directory of dataset where directory + ``SVHN`` exists. + split (string): One of {'train', 'test', 'extra'}. + Selects the corresponding subset of the dataset; 'extra' is the extra training set. + transform (callable, optional): A function/transform that takes in a PIL image + and returns a transformed version. E.g., ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + + """ + url = "" + filename = "" + file_md5 = "" + + split_list = { + 'train': ["http://ufldl.stanford.edu/housenumbers/train_32x32.mat", + "train_32x32.mat", "e26dedcc434d2e4c54c9b2d4a06d8373"], + 'test': ["http://ufldl.stanford.edu/housenumbers/test_32x32.mat", + "test_32x32.mat", "eb5a983be6a315427106f1b164d9cef3"], + 'extra': ["http://ufldl.stanford.edu/housenumbers/extra_32x32.mat", + "extra_32x32.mat", "a93ce644f1a588dc4d68dda5feec44a7"]} + + def __init__(self, root, split='train', transform=None, target_transform=None, + download=False): + super(SVHN, self).__init__(root, transform=transform, + target_transform=target_transform) + self.split = verify_str_arg(split, "split", tuple(self.split_list.keys())) + self.url = self.split_list[split][0] + self.filename = self.split_list[split][1] + self.file_md5 = self.split_list[split][2] + + if download: + self.download() + + if not self._check_integrity(): + raise RuntimeError('Dataset not found or corrupted.' + + ' You can use download=True to download it') + + # import here rather than at top of file because this is + # an optional dependency for torchvision + import scipy.io as sio + + # read (load) the .mat file as an array + loaded_mat = sio.loadmat(os.path.join(self.root, self.filename)) + + self.data = loaded_mat['X'] + # loading from the .mat file gives an np array of type np.uint8 + # converting to np.int64, so that we have a LongTensor after + # the conversion from the numpy array + # the squeeze is needed to obtain a 1D tensor + self.labels = loaded_mat['y'].astype(np.int64).squeeze() + + # the svhn dataset assigns the class label "10" to the digit 0 + # this makes it inconsistent with several loss functions + # which expect the class labels to be in the range [0, C-1] + np.place(self.labels, self.labels == 10, 0) + self.data = np.transpose(self.data, (3, 2, 0, 1)) +
    [docs] def __getitem__(self, index): + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is index of the target class. + """ + img, target = self.data[index], int(self.labels[index]) + + # doing this so that it is consistent with all other datasets + # to return a PIL Image + img = Image.fromarray(np.transpose(img, (1, 2, 0))) + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target
    + + def __len__(self): + return len(self.data) + + def _check_integrity(self): + root = self.root + md5 = self.split_list[self.split][2] + fpath = os.path.join(root, self.filename) + return check_integrity(fpath, md5) + + def download(self): + md5 = self.split_list[self.split][2] + download_url(self.url, self.root, self.filename, md5) + + def extra_repr(self): + return "Split: {split}".format(**self.__dict__)
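A short usage sketch (hypothetical root; scipy must be installed to parse the .mat files). Note the label remapping described above: the digit 0 comes back as class 0, not 10:

    from torchvision import datasets

    svhn = datasets.SVHN('data/svhn', split='train', download=True)
    img, target = svhn[0]   # 32x32 PIL RGB image, target in [0, 9]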
diff --git a/docs/stable/_modules/torchvision/datasets/ucf101.html b/docs/stable/_modules/torchvision/datasets/ucf101.html
new file mode 100644
index 000000000000..af15fa6df370
--- /dev/null
+++ b/docs/stable/_modules/torchvision/datasets/ucf101.html
@@ -0,0 +1,603 @@

    Source code for torchvision.datasets.ucf101

    +import glob
    +import os
    +
    +from .video_utils import VideoClips
    +from .utils import list_dir
    +from .folder import make_dataset
    +from .vision import VisionDataset
    +
    +
    +
[docs]class UCF101(VisionDataset): + """`UCF101 <https://www.crcv.ucf.edu/data/UCF101.php>`_ dataset. + + UCF101 is an action recognition video dataset. + This dataset considers every video as a collection of video clips of fixed size, specified + by ``frames_per_clip``, where the step in frames between each clip is given by + ``step_between_clips``. + + To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5`` + and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two + elements will come from video 1, and the next three elements from video 2. + Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all + frames in a video might be present. + + Internally, it uses a VideoClips object to handle clip creation. + + Args: + root (string): Root directory of the UCF101 Dataset. + annotation_path (str): Path to the folder containing the split files. + frames_per_clip (int): Number of frames in a clip. + step_between_clips (int, optional): Number of frames between each clip. + fold (int, optional): Which fold to use. Should be between 1 and 3. + train (bool, optional): If ``True``, creates a dataset from the train split, + otherwise from the test split. + transform (callable, optional): A function/transform that takes in a TxHxWxC video + and returns a transformed version. + + Returns: + video (Tensor[T, H, W, C]): the `T` video frames + audio (Tensor[K, L]): the audio frames, where `K` is the number of channels + and `L` is the number of points + label (int): class of the video clip + """ + + def __init__(self, root, annotation_path, frames_per_clip, step_between_clips=1, + fold=1, train=True, transform=None): + super(UCF101, self).__init__(root) + if not 1 <= fold <= 3: + raise ValueError("fold should be between 1 and 3, got {}".format(fold)) + + extensions = ('avi',) + self.fold = fold + self.train = train + + classes = list(sorted(list_dir(root))) + class_to_idx = {classes[i]: i for i in range(len(classes))} + self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None) + self.classes = classes + video_list = [x[0] for x in self.samples] + video_clips = VideoClips(video_list, frames_per_clip, step_between_clips) + indices = self._select_fold(video_list, annotation_path, fold, train) + self.video_clips = video_clips.subset(indices) + self.transform = transform + + def _select_fold(self, video_list, annotation_path, fold, train): + name = "train" if train else "test" + name = "{}list{:02d}.txt".format(name, fold) + f = os.path.join(annotation_path, name) + selected_files = [] + with open(f, "r") as fid: + data = fid.readlines() + data = [x.strip().split(" ") for x in data] + data = [x[0] for x in data] + selected_files.extend(data) + selected_files = set(selected_files) + indices = [i for i in range(len(video_list)) if video_list[i][len(self.root) + 1:] in selected_files] + return indices + + def __len__(self): + return self.video_clips.num_clips() + + def __getitem__(self, idx): + video, audio, info, video_idx = self.video_clips.get_clip(idx) + label = self.samples[video_idx][1] + + if self.transform is not None: + video = self.transform(video) + + return video, audio, label
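A minimal sketch, assuming the UCF101 videos and the official train/test split files have already been extracted to the hypothetical paths below (this class does not download anything itself):

    from torchvision import datasets

    ucf = datasets.UCF101('data/UCF-101', annotation_path='data/ucfTrainTestlist',
                          frames_per_clip=16, step_between_clips=8,
                          fold=1, train=True)
    video, audio, label = ucf[0]   # video: Tensor[T, H, W, C], label: int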
diff --git a/docs/stable/_modules/torchvision/datasets/usps.html b/docs/stable/_modules/torchvision/datasets/usps.html
new file mode 100644
index 000000000000..88da37f218c2
--- /dev/null
+++ b/docs/stable/_modules/torchvision/datasets/usps.html
@@ -0,0 +1,599 @@

    Source code for torchvision.datasets.usps

    +from __future__ import print_function
    +from PIL import Image
    +import os
    +import numpy as np
    +
    +from .utils import download_url
    +from .vision import VisionDataset
    +
    +
    +
[docs]class USPS(VisionDataset): + """`USPS <https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html#usps>`_ Dataset. + The data format is ``[label [index:value ]*256 \\n] * num_lines``, where ``label`` lies in ``[1, 10]``. + The value for each pixel lies in ``[-1, 1]``. Here we transform the ``label`` into ``[0, 9]`` + and make pixel values lie in ``[0, 255]``. + + Args: + root (string): Root directory of dataset to store ``USPS`` data files. + train (bool, optional): If True, creates dataset from ``usps.bz2``, + otherwise from ``usps.t.bz2``. + transform (callable, optional): A function/transform that takes in a PIL image + and returns a transformed version. E.g., ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + + """ + split_list = { + 'train': [ + "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.bz2", + "usps.bz2", 'ec16c51db3855ca6c91edd34d0e9b197' + ], + 'test': [ + "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.t.bz2", + "usps.t.bz2", '8ea070ee2aca1ac39742fdd1ef5ed118' + ], + } + + def __init__(self, root, train=True, transform=None, target_transform=None, + download=False): + super(USPS, self).__init__(root, transform=transform, + target_transform=target_transform) + split = 'train' if train else 'test' + url, filename, checksum = self.split_list[split] + full_path = os.path.join(self.root, filename) + + if download and not os.path.exists(full_path): + download_url(url, self.root, filename, md5=checksum) + + import bz2 + with bz2.open(full_path) as fp: + raw_data = [l.decode().split() for l in fp.readlines()] + imgs = [[x.split(':')[-1] for x in data[1:]] for data in raw_data] + imgs = np.asarray(imgs, dtype=np.float32).reshape((-1, 16, 16)) + # rescale pixel values from [-1, 1] to [0, 255] + imgs = ((imgs + 1) / 2 * 255).astype(dtype=np.uint8) + targets = [int(d[0]) - 1 for d in raw_data] + + self.data = imgs + self.targets = targets + +
    [docs] def __getitem__(self, index): + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is index of the target class. + """ + img, target = self.data[index], int(self.targets[index]) + + # doing this so that it is consistent with all other datasets + # to return a PIL Image + img = Image.fromarray(img, mode='L') + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target
    + + def __len__(self): + return len(self.data)
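A short usage sketch (hypothetical root):

    from torchvision import datasets

    usps = datasets.USPS('data/usps', train=True, download=True)
    img, target = usps[0]   # 16x16 grayscale PIL image, target in [0, 9]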
diff --git a/docs/stable/_modules/torchvision/datasets/voc.html b/docs/stable/_modules/torchvision/datasets/voc.html
new file mode 100644
index 000000000000..c674676376e8
--- /dev/null
+++ b/docs/stable/_modules/torchvision/datasets/voc.html
@@ -0,0 +1,746 @@

    Source code for torchvision.datasets.voc

    +import os
    +import sys
    +import tarfile
    +import collections
    +from .vision import VisionDataset
    +
    +if sys.version_info[0] == 2:
    +    import xml.etree.cElementTree as ET
    +else:
    +    import xml.etree.ElementTree as ET
    +
    +from PIL import Image
    +from .utils import download_url, check_integrity, verify_str_arg
    +
    +DATASET_YEAR_DICT = {
    +    '2012': {
    +        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',
    +        'filename': 'VOCtrainval_11-May-2012.tar',
    +        'md5': '6cd6e144f989b92b3379bac3b3de84fd',
    +        'base_dir': 'VOCdevkit/VOC2012'
    +    },
    +    '2011': {
    +        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2011/VOCtrainval_25-May-2011.tar',
    +        'filename': 'VOCtrainval_25-May-2011.tar',
    +        'md5': '6c3384ef61512963050cb5d687e5bf1e',
    +        'base_dir': 'TrainVal/VOCdevkit/VOC2011'
    +    },
    +    '2010': {
    +        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar',
    +        'filename': 'VOCtrainval_03-May-2010.tar',
    +        'md5': 'da459979d0c395079b5c75ee67908abb',
    +        'base_dir': 'VOCdevkit/VOC2010'
    +    },
    +    '2009': {
    +        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2009/VOCtrainval_11-May-2009.tar',
    +        'filename': 'VOCtrainval_11-May-2009.tar',
    +        'md5': '59065e4b188729180974ef6572f6a212',
    +        'base_dir': 'VOCdevkit/VOC2009'
    +    },
    +    '2008': {
    +        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2008/VOCtrainval_14-Jul-2008.tar',
+        'filename': 'VOCtrainval_14-Jul-2008.tar',
    +        'md5': '2629fa636546599198acfcfbfcf1904a',
    +        'base_dir': 'VOCdevkit/VOC2008'
    +    },
    +    '2007': {
    +        'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar',
    +        'filename': 'VOCtrainval_06-Nov-2007.tar',
    +        'md5': 'c52e279531787c972589f7e41ab4ae64',
    +        'base_dir': 'VOCdevkit/VOC2007'
    +    }
    +}
    +
    +
    +
[docs]class VOCSegmentation(VisionDataset): + """`Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ Segmentation Dataset. + + Args: + root (string): Root directory of the VOC Dataset. + year (string, optional): The dataset year, supports years 2007 to 2012. + image_set (string, optional): Select the image_set to use, ``train``, ``trainval`` or ``val``. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + transform (callable, optional): A function/transform that takes in a PIL image + and returns a transformed version. E.g., ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + transforms (callable, optional): A function/transform that takes input sample and its target as entry + and returns a transformed version. + """ + + def __init__(self, + root, + year='2012', + image_set='train', + download=False, + transform=None, + target_transform=None, + transforms=None): + super(VOCSegmentation, self).__init__(root, transforms, transform, target_transform) + self.year = year + self.url = DATASET_YEAR_DICT[year]['url'] + self.filename = DATASET_YEAR_DICT[year]['filename'] + self.md5 = DATASET_YEAR_DICT[year]['md5'] + self.image_set = verify_str_arg(image_set, "image_set", + ("train", "trainval", "val")) + base_dir = DATASET_YEAR_DICT[year]['base_dir'] + voc_root = os.path.join(self.root, base_dir) + image_dir = os.path.join(voc_root, 'JPEGImages') + mask_dir = os.path.join(voc_root, 'SegmentationClass') + + if download: + download_extract(self.url, self.root, self.filename, self.md5) + + if not os.path.isdir(voc_root): + raise RuntimeError('Dataset not found or corrupted.' + + ' You can use download=True to download it') + + splits_dir = os.path.join(voc_root, 'ImageSets/Segmentation') + + split_f = os.path.join(splits_dir, image_set.rstrip('\n') + '.txt') + + with open(os.path.join(split_f), "r") as f: + file_names = [x.strip() for x in f.readlines()] + + self.images = [os.path.join(image_dir, x + ".jpg") for x in file_names] + self.masks = [os.path.join(mask_dir, x + ".png") for x in file_names] + assert (len(self.images) == len(self.masks)) + +
    [docs] def __getitem__(self, index): + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is the image segmentation. + """ + img = Image.open(self.images[index]).convert('RGB') + target = Image.open(self.masks[index]) + + if self.transforms is not None: + img, target = self.transforms(img, target) + + return img, target
    + + def __len__(self): + return len(self.images)
    + + +
[docs]class VOCDetection(VisionDataset): + """`Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ Detection Dataset. + + Args: + root (string): Root directory of the VOC Dataset. + year (string, optional): The dataset year, supports years 2007 to 2012. + image_set (string, optional): Select the image_set to use, ``train``, ``trainval`` or ``val``. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + transform (callable, optional): A function/transform that takes in a PIL image + and returns a transformed version. E.g., ``transforms.RandomCrop`` + target_transform (callable, required): A function/transform that takes in the + target and transforms it. + transforms (callable, optional): A function/transform that takes input sample and its target as entry + and returns a transformed version. + """ + + def __init__(self, + root, + year='2012', + image_set='train', + download=False, + transform=None, + target_transform=None, + transforms=None): + super(VOCDetection, self).__init__(root, transforms, transform, target_transform) + self.year = year + self.url = DATASET_YEAR_DICT[year]['url'] + self.filename = DATASET_YEAR_DICT[year]['filename'] + self.md5 = DATASET_YEAR_DICT[year]['md5'] + self.image_set = verify_str_arg(image_set, "image_set", + ("train", "trainval", "val")) + + base_dir = DATASET_YEAR_DICT[year]['base_dir'] + voc_root = os.path.join(self.root, base_dir) + image_dir = os.path.join(voc_root, 'JPEGImages') + annotation_dir = os.path.join(voc_root, 'Annotations') + + if download: + download_extract(self.url, self.root, self.filename, self.md5) + + if not os.path.isdir(voc_root): + raise RuntimeError('Dataset not found or corrupted.' + + ' You can use download=True to download it') + + splits_dir = os.path.join(voc_root, 'ImageSets/Main') + + split_f = os.path.join(splits_dir, image_set.rstrip('\n') + '.txt') + + with open(os.path.join(split_f), "r") as f: + file_names = [x.strip() for x in f.readlines()] + + self.images = [os.path.join(image_dir, x + ".jpg") for x in file_names] + self.annotations = [os.path.join(annotation_dir, x + ".xml") for x in file_names] + assert (len(self.images) == len(self.annotations)) + +
    [docs] def __getitem__(self, index): + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is a dictionary of the XML tree. + """ + img = Image.open(self.images[index]).convert('RGB') + target = self.parse_voc_xml( + ET.parse(self.annotations[index]).getroot()) + + if self.transforms is not None: + img, target = self.transforms(img, target) + + return img, target
    + + def __len__(self): + return len(self.images) + + def parse_voc_xml(self, node): + voc_dict = {} + children = list(node) + if children: + def_dic = collections.defaultdict(list) + for dc in map(self.parse_voc_xml, children): + for ind, v in dc.items(): + def_dic[ind].append(v) + voc_dict = { + node.tag: + {ind: v[0] if len(v) == 1 else v + for ind, v in def_dic.items()} + } + if node.text: + text = node.text.strip() + if not children: + voc_dict[node.tag] = text + return voc_dict
    + + +def download_extract(url, root, filename, md5): + download_url(url, root, filename, md5) + with tarfile.open(os.path.join(root, filename), "r") as tar: + tar.extractall(path=root) +
diff --git a/docs/stable/_modules/torchvision/models/alexnet.html b/docs/stable/_modules/torchvision/models/alexnet.html
new file mode 100644
index 000000000000..9c387967374b
--- /dev/null
+++ b/docs/stable/_modules/torchvision/models/alexnet.html
@@ -0,0 +1,579 @@

    Source code for torchvision.models.alexnet

    +import torch
    +import torch.nn as nn
    +from .utils import load_state_dict_from_url
    +
    +
    +__all__ = ['AlexNet', 'alexnet']
    +
    +
    +model_urls = {
    +    'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
    +}
    +
    +
    +class AlexNet(nn.Module):
    +
    +    def __init__(self, num_classes=1000):
    +        super(AlexNet, self).__init__()
    +        self.features = nn.Sequential(
    +            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
    +            nn.ReLU(inplace=True),
    +            nn.MaxPool2d(kernel_size=3, stride=2),
    +            nn.Conv2d(64, 192, kernel_size=5, padding=2),
    +            nn.ReLU(inplace=True),
    +            nn.MaxPool2d(kernel_size=3, stride=2),
    +            nn.Conv2d(192, 384, kernel_size=3, padding=1),
    +            nn.ReLU(inplace=True),
    +            nn.Conv2d(384, 256, kernel_size=3, padding=1),
    +            nn.ReLU(inplace=True),
    +            nn.Conv2d(256, 256, kernel_size=3, padding=1),
    +            nn.ReLU(inplace=True),
    +            nn.MaxPool2d(kernel_size=3, stride=2),
    +        )
    +        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
    +        self.classifier = nn.Sequential(
    +            nn.Dropout(),
    +            nn.Linear(256 * 6 * 6, 4096),
    +            nn.ReLU(inplace=True),
    +            nn.Dropout(),
    +            nn.Linear(4096, 4096),
    +            nn.ReLU(inplace=True),
    +            nn.Linear(4096, num_classes),
    +        )
    +
    +    def forward(self, x):
    +        x = self.features(x)
    +        x = self.avgpool(x)
    +        x = torch.flatten(x, 1)
    +        x = self.classifier(x)
    +        return x
    +
    +
    +
    [docs]def alexnet(pretrained=False, progress=True, **kwargs): + r"""AlexNet model architecture from the + `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + model = AlexNet(**kwargs) + if pretrained: + state_dict = load_state_dict_from_url(model_urls['alexnet'], + progress=progress) + model.load_state_dict(state_dict) + return model
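A quick sketch of loading the pretrained model for inference (the input tensor here is a dummy batch; real images should be preprocessed to 3x224x224 with the usual ImageNet normalization the weights expect):

    import torch
    from torchvision import models

    model = models.alexnet(pretrained=True)
    model.eval()
    with torch.no_grad():
        x = torch.randn(1, 3, 224, 224)   # stand-in for a normalized image batch
        logits = model(x)                 # shape [1, 1000]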
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/models/densenet.html b/docs/stable/_modules/torchvision/models/densenet.html
new file mode 100644
index 000000000000..cef1c9bf9573
--- /dev/null
+++ b/docs/stable/_modules/torchvision/models/densenet.html
@@ -0,0 +1,756 @@
+ torchvision.models.densenet — PyTorch master documentation

    Source code for torchvision.models.densenet

    +import re
    +import torch
    +import torch.nn as nn
    +import torch.nn.functional as F
    +import torch.utils.checkpoint as cp
    +from collections import OrderedDict
    +from .utils import load_state_dict_from_url
    +
    +
    +__all__ = ['DenseNet', 'densenet121', 'densenet169', 'densenet201', 'densenet161']
    +
    +model_urls = {
    +    'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth',
    +    'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth',
    +    'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth',
    +    'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth',
    +}
    +
    +
    +def _bn_function_factory(norm, relu, conv):
    +    def bn_function(*inputs):
    +        concated_features = torch.cat(inputs, 1)
    +        bottleneck_output = conv(relu(norm(concated_features)))
    +        return bottleneck_output
    +
    +    return bn_function
    +
    +
    +class _DenseLayer(nn.Sequential):
    +    def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, memory_efficient=False):
    +        super(_DenseLayer, self).__init__()
+        self.add_module('norm1', nn.BatchNorm2d(num_input_features))
+        self.add_module('relu1', nn.ReLU(inplace=True))
+        self.add_module('conv1', nn.Conv2d(num_input_features, bn_size *
+                                           growth_rate, kernel_size=1, stride=1,
+                                           bias=False))
+        self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate))
+        self.add_module('relu2', nn.ReLU(inplace=True))
+        self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,
+                                           kernel_size=3, stride=1, padding=1,
+                                           bias=False))
    +        self.drop_rate = drop_rate
    +        self.memory_efficient = memory_efficient
    +
    +    def forward(self, *prev_features):
    +        bn_function = _bn_function_factory(self.norm1, self.relu1, self.conv1)
    +        if self.memory_efficient and any(prev_feature.requires_grad for prev_feature in prev_features):
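+            # torch.utils.checkpoint does not store intermediate activations;
+            # bn_function is recomputed during the backward pass to save memory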
    +            bottleneck_output = cp.checkpoint(bn_function, *prev_features)
    +        else:
    +            bottleneck_output = bn_function(*prev_features)
    +        new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
    +        if self.drop_rate > 0:
    +            new_features = F.dropout(new_features, p=self.drop_rate,
    +                                     training=self.training)
    +        return new_features
    +
    +
    +class _DenseBlock(nn.Module):
    +    def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, memory_efficient=False):
    +        super(_DenseBlock, self).__init__()
    +        for i in range(num_layers):
    +            layer = _DenseLayer(
    +                num_input_features + i * growth_rate,
    +                growth_rate=growth_rate,
    +                bn_size=bn_size,
    +                drop_rate=drop_rate,
    +                memory_efficient=memory_efficient,
    +            )
    +            self.add_module('denselayer%d' % (i + 1), layer)
    +
    +    def forward(self, init_features):
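+        # dense connectivity: every layer receives the feature maps of all
+        # preceding layers and contributes growth_rate new channels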
    +        features = [init_features]
    +        for name, layer in self.named_children():
    +            new_features = layer(*features)
    +            features.append(new_features)
    +        return torch.cat(features, 1)
    +
    +
    +class _Transition(nn.Sequential):
    +    def __init__(self, num_input_features, num_output_features):
    +        super(_Transition, self).__init__()
    +        self.add_module('norm', nn.BatchNorm2d(num_input_features))
    +        self.add_module('relu', nn.ReLU(inplace=True))
    +        self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,
    +                                          kernel_size=1, stride=1, bias=False))
    +        self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
    +
    +
    +class DenseNet(nn.Module):
    +    r"""Densenet-BC model class, based on
    +    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
    +
    +    Args:
+        growth_rate (int) - how many filters to add at each layer (`k` in paper)
+        block_config (list of 4 ints) - how many layers in each pooling block
+        num_init_features (int) - the number of filters to learn in the first convolution layer
+        bn_size (int) - multiplicative factor for number of bottleneck layers
    +          (i.e. bn_size * k features in the bottleneck layer)
    +        drop_rate (float) - dropout rate after each dense layer
    +        num_classes (int) - number of classification classes
    +        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
    +          but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
    +    """
    +
    +    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
    +                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000, memory_efficient=False):
    +
    +        super(DenseNet, self).__init__()
    +
    +        # First convolution
    +        self.features = nn.Sequential(OrderedDict([
    +            ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2,
    +                                padding=3, bias=False)),
    +            ('norm0', nn.BatchNorm2d(num_init_features)),
    +            ('relu0', nn.ReLU(inplace=True)),
    +            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
    +        ]))
    +
    +        # Each denseblock
    +        num_features = num_init_features
    +        for i, num_layers in enumerate(block_config):
    +            block = _DenseBlock(
    +                num_layers=num_layers,
    +                num_input_features=num_features,
    +                bn_size=bn_size,
    +                growth_rate=growth_rate,
    +                drop_rate=drop_rate,
    +                memory_efficient=memory_efficient
    +            )
    +            self.features.add_module('denseblock%d' % (i + 1), block)
    +            num_features = num_features + num_layers * growth_rate
    +            if i != len(block_config) - 1:
    +                trans = _Transition(num_input_features=num_features,
    +                                    num_output_features=num_features // 2)
    +                self.features.add_module('transition%d' % (i + 1), trans)
    +                num_features = num_features // 2
    +
    +        # Final batch norm
    +        self.features.add_module('norm5', nn.BatchNorm2d(num_features))
    +
    +        # Linear layer
    +        self.classifier = nn.Linear(num_features, num_classes)
    +
    +        # Official init from torch repo.
    +        for m in self.modules():
    +            if isinstance(m, nn.Conv2d):
    +                nn.init.kaiming_normal_(m.weight)
    +            elif isinstance(m, nn.BatchNorm2d):
    +                nn.init.constant_(m.weight, 1)
    +                nn.init.constant_(m.bias, 0)
    +            elif isinstance(m, nn.Linear):
    +                nn.init.constant_(m.bias, 0)
    +
    +    def forward(self, x):
    +        features = self.features(x)
    +        out = F.relu(features, inplace=True)
    +        out = F.adaptive_avg_pool2d(out, (1, 1))
    +        out = torch.flatten(out, 1)
    +        out = self.classifier(out)
    +        return out
    +
    +
    +def _load_state_dict(model, model_url, progress):
    +    # '.'s are no longer allowed in module names, but previous _DenseLayer
    +    # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
    +    # They are also in the checkpoints in model_urls. This pattern is used
    +    # to find such keys.
    +    pattern = re.compile(
    +        r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
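+    # e.g. 'features.denseblock1.denselayer1.norm.1.weight'
+    #      -> 'features.denseblock1.denselayer1.norm1.weight'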
    +
    +    state_dict = load_state_dict_from_url(model_url, progress=progress)
    +    for key in list(state_dict.keys()):
    +        res = pattern.match(key)
    +        if res:
    +            new_key = res.group(1) + res.group(2)
    +            state_dict[new_key] = state_dict[key]
    +            del state_dict[key]
    +    model.load_state_dict(state_dict)
    +
    +
    +def _densenet(arch, growth_rate, block_config, num_init_features, pretrained, progress,
    +              **kwargs):
    +    model = DenseNet(growth_rate, block_config, num_init_features, **kwargs)
    +    if pretrained:
    +        _load_state_dict(model, model_urls[arch], progress)
    +    return model
    +
    +
    +
+[docs]def densenet121(pretrained=False, progress=True, **kwargs):
+    r"""Densenet-121 model from
+    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
+          but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
+    """
+    return _densenet('densenet121', 32, (6, 12, 24, 16), 64, pretrained, progress,
+                     **kwargs)
+
+
+[docs]def densenet161(pretrained=False, progress=True, **kwargs):
+    r"""Densenet-161 model from
+    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
+          but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
+    """
+    return _densenet('densenet161', 48, (6, 12, 36, 24), 96, pretrained, progress,
+                     **kwargs)
+
+
+[docs]def densenet169(pretrained=False, progress=True, **kwargs):
+    r"""Densenet-169 model from
+    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
+          but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
+    """
+    return _densenet('densenet169', 32, (6, 12, 32, 32), 64, pretrained, progress,
+                     **kwargs)
+
+
+[docs]def densenet201(pretrained=False, progress=True, **kwargs):
+    r"""Densenet-201 model from
+    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
+          but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
+    """
+    return _densenet('densenet201', 32, (6, 12, 48, 32), 64, pretrained, progress,
+                     **kwargs)
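
A minimal sketch of the constructors above, assuming torchvision is importable; ``memory_efficient=True`` is forwarded through ``**kwargs`` to ``DenseNet`` and enables the checkpointing path in ``_DenseLayer.forward``:

    >>> import torch
    >>> from torchvision.models import densenet121
    >>> model = densenet121(pretrained=False, memory_efficient=True)
    >>> model.eval()
    >>> with torch.no_grad():
    ...     out = model(torch.rand(1, 3, 224, 224))
    >>> out.shape
    torch.Size([1, 1000])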
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/models/detection/faster_rcnn.html b/docs/stable/_modules/torchvision/models/detection/faster_rcnn.html
new file mode 100644
index 000000000000..3a29158c381c
--- /dev/null
+++ b/docs/stable/_modules/torchvision/models/detection/faster_rcnn.html
@@ -0,0 +1,852 @@
+ torchvision.models.detection.faster_rcnn — PyTorch master documentation

    Source code for torchvision.models.detection.faster_rcnn

    +from collections import OrderedDict
    +
    +import torch
    +from torch import nn
    +import torch.nn.functional as F
    +
    +from torchvision.ops import misc as misc_nn_ops
    +from torchvision.ops import MultiScaleRoIAlign
    +
    +from ..utils import load_state_dict_from_url
    +
    +from .generalized_rcnn import GeneralizedRCNN
    +from .rpn import AnchorGenerator, RPNHead, RegionProposalNetwork
    +from .roi_heads import RoIHeads
    +from .transform import GeneralizedRCNNTransform
    +from .backbone_utils import resnet_fpn_backbone
    +
    +
    +__all__ = [
    +    "FasterRCNN", "fasterrcnn_resnet50_fpn",
    +]
    +
    +
    +class FasterRCNN(GeneralizedRCNN):
    +    """
    +    Implements Faster R-CNN.
    +
    +    The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each
    +    image, and should be in 0-1 range. Different images can have different sizes.
    +
+    The behavior of the model changes depending on whether it is in training or evaluation mode.
+
+    During training, the model expects both the input tensors and targets (a list of dictionaries),
+    containing:
    +        - boxes (FloatTensor[N, 4]): the ground-truth boxes in [x1, y1, x2, y2] format, with values
    +          between 0 and H and 0 and W
    +        - labels (Int64Tensor[N]): the class label for each ground-truth box
    +
    +    The model returns a Dict[Tensor] during training, containing the classification and regression
    +    losses for both the RPN and the R-CNN.
    +
    +    During inference, the model requires only the input tensors, and returns the post-processed
    +    predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as
    +    follows:
    +        - boxes (FloatTensor[N, 4]): the predicted boxes in [x1, y1, x2, y2] format, with values between
    +          0 and H and 0 and W
    +        - labels (Int64Tensor[N]): the predicted labels for each image
+        - scores (Tensor[N]): the scores of each prediction
    +
    +    Arguments:
    +        backbone (nn.Module): the network used to compute the features for the model.
+            It should contain an out_channels attribute, which indicates the number of output
+            channels that each feature map has (and it should be the same for all feature maps).
+            The backbone should return a single Tensor or an OrderedDict[Tensor].
    +        num_classes (int): number of output classes of the model (including the background).
    +            If box_predictor is specified, num_classes should be None.
    +        min_size (int): minimum size of the image to be rescaled before feeding it to the backbone
    +        max_size (int): maximum size of the image to be rescaled before feeding it to the backbone
    +        image_mean (Tuple[float, float, float]): mean values used for input normalization.
+            They are generally the mean values of the dataset on which the backbone has been
+            trained
+        image_std (Tuple[float, float, float]): std values used for input normalization.
+            They are generally the std values of the dataset on which the backbone has been trained
    +        rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
    +            maps.
    +        rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN
    +        rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training
    +        rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing
    +        rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training
    +        rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing
    +        rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals
    +        rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be
    +            considered as positive during training of the RPN.
    +        rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be
    +            considered as negative during training of the RPN.
    +        rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN
    +            for computing the loss
    +        rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training
    +            of the RPN
    +        box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
    +            the locations indicated by the bounding boxes
    +        box_head (nn.Module): module that takes the cropped feature maps as input
    +        box_predictor (nn.Module): module that takes the output of box_head and returns the
    +            classification logits and box regression deltas.
    +        box_score_thresh (float): during inference, only return proposals with a classification score
    +            greater than box_score_thresh
    +        box_nms_thresh (float): NMS threshold for the prediction head. Used during inference
    +        box_detections_per_img (int): maximum number of detections per image, for all classes.
    +        box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be
    +            considered as positive during training of the classification head
    +        box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be
    +            considered as negative during training of the classification head
    +        box_batch_size_per_image (int): number of proposals that are sampled during training of the
    +            classification head
    +        box_positive_fraction (float): proportion of positive proposals in a mini-batch during training
    +            of the classification head
    +        bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the
    +            bounding boxes
    +
    +    Example::
    +
    +        >>> import torch
    +        >>> import torchvision
    +        >>> from torchvision.models.detection import FasterRCNN
    +        >>> from torchvision.models.detection.rpn import AnchorGenerator
    +        >>> # load a pre-trained model for classification and return
    +        >>> # only the features
    +        >>> backbone = torchvision.models.mobilenet_v2(pretrained=True).features
    +        >>> # FasterRCNN needs to know the number of
    +        >>> # output channels in a backbone. For mobilenet_v2, it's 1280
    +        >>> # so we need to add it here
    +        >>> backbone.out_channels = 1280
    +        >>>
    +        >>> # let's make the RPN generate 5 x 3 anchors per spatial
    +        >>> # location, with 5 different sizes and 3 different aspect
    +        >>> # ratios. We have a Tuple[Tuple[int]] because each feature
    +        >>> # map could potentially have different sizes and
    +        >>> # aspect ratios
    +        >>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
    +        >>>                                    aspect_ratios=((0.5, 1.0, 2.0),))
    +        >>>
+        >>> # let's define which feature maps we will use to perform
+        >>> # the region of interest cropping, as well as the size of
+        >>> # the crop after rescaling.
    +        >>> # if your backbone returns a Tensor, featmap_names is expected to
    +        >>> # be [0]. More generally, the backbone should return an
    +        >>> # OrderedDict[Tensor], and in featmap_names you can choose which
    +        >>> # feature maps to use.
    +        >>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],
    +        >>>                                                 output_size=7,
    +        >>>                                                 sampling_ratio=2)
    +        >>>
    +        >>> # put the pieces together inside a FasterRCNN model
    +        >>> model = FasterRCNN(backbone,
    +        >>>                    num_classes=2,
    +        >>>                    rpn_anchor_generator=anchor_generator,
    +        >>>                    box_roi_pool=roi_pooler)
    +        >>> model.eval()
    +        >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
    +        >>> predictions = model(x)
    +    """
    +
    +    def __init__(self, backbone, num_classes=None,
    +                 # transform parameters
    +                 min_size=800, max_size=1333,
    +                 image_mean=None, image_std=None,
    +                 # RPN parameters
    +                 rpn_anchor_generator=None, rpn_head=None,
    +                 rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000,
    +                 rpn_post_nms_top_n_train=2000, rpn_post_nms_top_n_test=1000,
    +                 rpn_nms_thresh=0.7,
    +                 rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3,
    +                 rpn_batch_size_per_image=256, rpn_positive_fraction=0.5,
    +                 # Box parameters
    +                 box_roi_pool=None, box_head=None, box_predictor=None,
    +                 box_score_thresh=0.05, box_nms_thresh=0.5, box_detections_per_img=100,
    +                 box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5,
    +                 box_batch_size_per_image=512, box_positive_fraction=0.25,
    +                 bbox_reg_weights=None):
    +
    +        if not hasattr(backbone, "out_channels"):
    +            raise ValueError(
    +                "backbone should contain an attribute out_channels "
    +                "specifying the number of output channels (assumed to be the "
    +                "same for all the levels)")
    +
    +        assert isinstance(rpn_anchor_generator, (AnchorGenerator, type(None)))
    +        assert isinstance(box_roi_pool, (MultiScaleRoIAlign, type(None)))
    +
    +        if num_classes is not None:
    +            if box_predictor is not None:
    +                raise ValueError("num_classes should be None when box_predictor is specified")
    +        else:
    +            if box_predictor is None:
    +                raise ValueError("num_classes should not be None when box_predictor "
    +                                 "is not specified")
    +
    +        out_channels = backbone.out_channels
    +
    +        if rpn_anchor_generator is None:
    +            anchor_sizes = ((32,), (64,), (128,), (256,), (512,))
    +            aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
    +            rpn_anchor_generator = AnchorGenerator(
    +                anchor_sizes, aspect_ratios
    +            )
    +        if rpn_head is None:
    +            rpn_head = RPNHead(
    +                out_channels, rpn_anchor_generator.num_anchors_per_location()[0]
    +            )
    +
    +        rpn_pre_nms_top_n = dict(training=rpn_pre_nms_top_n_train, testing=rpn_pre_nms_top_n_test)
    +        rpn_post_nms_top_n = dict(training=rpn_post_nms_top_n_train, testing=rpn_post_nms_top_n_test)
    +
    +        rpn = RegionProposalNetwork(
    +            rpn_anchor_generator, rpn_head,
    +            rpn_fg_iou_thresh, rpn_bg_iou_thresh,
    +            rpn_batch_size_per_image, rpn_positive_fraction,
    +            rpn_pre_nms_top_n, rpn_post_nms_top_n, rpn_nms_thresh)
    +
    +        if box_roi_pool is None:
    +            box_roi_pool = MultiScaleRoIAlign(
    +                featmap_names=[0, 1, 2, 3],
    +                output_size=7,
    +                sampling_ratio=2)
    +
    +        if box_head is None:
    +            resolution = box_roi_pool.output_size[0]
    +            representation_size = 1024
    +            box_head = TwoMLPHead(
    +                out_channels * resolution ** 2,
    +                representation_size)
    +
    +        if box_predictor is None:
    +            representation_size = 1024
    +            box_predictor = FastRCNNPredictor(
    +                representation_size,
    +                num_classes)
    +
    +        roi_heads = RoIHeads(
    +            # Box
    +            box_roi_pool, box_head, box_predictor,
    +            box_fg_iou_thresh, box_bg_iou_thresh,
    +            box_batch_size_per_image, box_positive_fraction,
    +            bbox_reg_weights,
    +            box_score_thresh, box_nms_thresh, box_detections_per_img)
    +
    +        if image_mean is None:
    +            image_mean = [0.485, 0.456, 0.406]
    +        if image_std is None:
    +            image_std = [0.229, 0.224, 0.225]
    +        transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std)
    +
    +        super(FasterRCNN, self).__init__(backbone, rpn, roi_heads, transform)
    +
    +
    +class TwoMLPHead(nn.Module):
    +    """
    +    Standard heads for FPN-based models
    +
    +    Arguments:
    +        in_channels (int): number of input channels
    +        representation_size (int): size of the intermediate representation
    +    """
    +
    +    def __init__(self, in_channels, representation_size):
    +        super(TwoMLPHead, self).__init__()
    +
    +        self.fc6 = nn.Linear(in_channels, representation_size)
    +        self.fc7 = nn.Linear(representation_size, representation_size)
    +
    +    def forward(self, x):
    +        x = x.flatten(start_dim=1)
    +
    +        x = F.relu(self.fc6(x))
    +        x = F.relu(self.fc7(x))
    +
    +        return x
    +
    +
    +class FastRCNNPredictor(nn.Module):
    +    """
    +    Standard classification + bounding box regression layers
    +    for Fast R-CNN.
    +
    +    Arguments:
    +        in_channels (int): number of input channels
    +        num_classes (int): number of output classes (including background)
    +    """
    +
    +    def __init__(self, in_channels, num_classes):
    +        super(FastRCNNPredictor, self).__init__()
    +        self.cls_score = nn.Linear(in_channels, num_classes)
    +        self.bbox_pred = nn.Linear(in_channels, num_classes * 4)
    +
    +    def forward(self, x):
    +        if x.ndimension() == 4:
    +            assert list(x.shape[2:]) == [1, 1]
    +        x = x.flatten(start_dim=1)
    +        scores = self.cls_score(x)
    +        bbox_deltas = self.bbox_pred(x)
    +
    +        return scores, bbox_deltas
    +
    +
    +model_urls = {
    +    'fasterrcnn_resnet50_fpn_coco':
    +        'https://download.pytorch.org/models/fasterrcnn_resnet50_fpn_coco-258fb6c6.pth',
    +}
    +
    +
    +
+[docs]def fasterrcnn_resnet50_fpn(pretrained=False, progress=True,
+                                  num_classes=91, pretrained_backbone=True, **kwargs):
+    """
+    Constructs a Faster R-CNN model with a ResNet-50-FPN backbone.
+
+    The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each
+    image, and should be in ``0-1`` range. Different images can have different sizes.
+
+    The behavior of the model changes depending on whether it is in training or evaluation mode.
+
+    During training, the model expects both the input tensors and targets (a list of dictionaries),
+    containing:
+        - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with values
+          between ``0`` and ``H`` and ``0`` and ``W``
+        - labels (``Int64Tensor[N]``): the class label for each ground-truth box
+
+    The model returns a ``Dict[Tensor]`` during training, containing the classification and regression
+    losses for both the RPN and the R-CNN.
+
+    During inference, the model requires only the input tensors, and returns the post-processed
+    predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as
+    follows:
+        - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with values between
+          ``0`` and ``H`` and ``0`` and ``W``
+        - labels (``Int64Tensor[N]``): the predicted labels for each image
+        - scores (``Tensor[N]``): the scores of each prediction
+
+    Example::
+
+        >>> model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
+        >>> model.eval()
+        >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
+        >>> predictions = model(x)
+
+    Arguments:
+        pretrained (bool): If True, returns a model pre-trained on COCO train2017
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    if pretrained:
+        # no need to download the backbone if pretrained is set
+        pretrained_backbone = False
+    backbone = resnet_fpn_backbone('resnet50', pretrained_backbone)
+    model = FasterRCNN(backbone, num_classes, **kwargs)
+    if pretrained:
+        state_dict = load_state_dict_from_url(model_urls['fasterrcnn_resnet50_fpn_coco'],
+                                              progress=progress)
+        model.load_state_dict(state_dict)
+    return model
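
The docstring example above covers inference only; the following sketch illustrates the training-mode inputs it describes. The boxes and labels are made-up values, and ``pretrained=False, pretrained_backbone=False`` avoids any weight download:

    >>> import torch
    >>> import torchvision
    >>> model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
    ...     pretrained=False, pretrained_backbone=False)
    >>> model.train()
    >>> images = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
    >>> targets = [{'boxes': torch.tensor([[10., 20., 100., 200.]]),
    ...             'labels': torch.tensor([1])},
    ...            {'boxes': torch.tensor([[30., 40., 150., 250.]]),
    ...             'labels': torch.tensor([5])}]
    >>> loss_dict = model(images, targets)  # RPN and R-CNN losses
    >>> total_loss = sum(loss_dict.values())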
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/models/detection/keypoint_rcnn.html b/docs/stable/_modules/torchvision/models/detection/keypoint_rcnn.html
new file mode 100644
index 000000000000..3d73eecb9aa4
--- /dev/null
+++ b/docs/stable/_modules/torchvision/models/detection/keypoint_rcnn.html
@@ -0,0 +1,831 @@
+ torchvision.models.detection.keypoint_rcnn — PyTorch master documentation

    Source code for torchvision.models.detection.keypoint_rcnn

    +import torch
    +from torch import nn
    +
    +from torchvision.ops import misc as misc_nn_ops
    +from torchvision.ops import MultiScaleRoIAlign
    +
    +from ..utils import load_state_dict_from_url
    +
    +from .faster_rcnn import FasterRCNN
    +from .backbone_utils import resnet_fpn_backbone
    +
    +
    +__all__ = [
    +    "KeypointRCNN", "keypointrcnn_resnet50_fpn"
    +]
    +
    +
    +class KeypointRCNN(FasterRCNN):
    +    """
    +    Implements Keypoint R-CNN.
    +
    +    The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each
    +    image, and should be in 0-1 range. Different images can have different sizes.
    +
+    The behavior of the model changes depending on whether it is in training or evaluation mode.
+
+    During training, the model expects both the input tensors and targets (a list of dictionaries),
+    containing:
    +        - boxes (FloatTensor[N, 4]): the ground-truth boxes in [x1, y1, x2, y2] format, with values
    +          between 0 and H and 0 and W
    +        - labels (Int64Tensor[N]): the class label for each ground-truth box
+        - keypoints (FloatTensor[N, K, 3]): the K keypoint locations for each of the N instances, in the
+          format [x, y, visibility], where visibility=0 means that the keypoint is not visible.
    +
    +    The model returns a Dict[Tensor] during training, containing the classification and regression
    +    losses for both the RPN and the R-CNN, and the keypoint loss.
    +
    +    During inference, the model requires only the input tensors, and returns the post-processed
    +    predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as
    +    follows:
    +        - boxes (FloatTensor[N, 4]): the predicted boxes in [x1, y1, x2, y2] format, with values between
    +          0 and H and 0 and W
    +        - labels (Int64Tensor[N]): the predicted labels for each image
+        - scores (Tensor[N]): the scores of each prediction
    +        - keypoints (FloatTensor[N, K, 3]): the locations of the predicted keypoints, in [x, y, v] format.
    +
    +    Arguments:
    +        backbone (nn.Module): the network used to compute the features for the model.
+            It should contain an out_channels attribute, which indicates the number of output
+            channels that each feature map has (and it should be the same for all feature maps).
+            The backbone should return a single Tensor or an OrderedDict[Tensor].
    +        num_classes (int): number of output classes of the model (including the background).
    +            If box_predictor is specified, num_classes should be None.
    +        min_size (int): minimum size of the image to be rescaled before feeding it to the backbone
    +        max_size (int): maximum size of the image to be rescaled before feeding it to the backbone
    +        image_mean (Tuple[float, float, float]): mean values used for input normalization.
+            They are generally the mean values of the dataset on which the backbone has been
+            trained
+        image_std (Tuple[float, float, float]): std values used for input normalization.
+            They are generally the std values of the dataset on which the backbone has been trained
    +        rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
    +            maps.
    +        rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN
    +        rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training
    +        rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing
    +        rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training
    +        rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing
    +        rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals
    +        rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be
    +            considered as positive during training of the RPN.
    +        rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be
    +            considered as negative during training of the RPN.
    +        rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN
    +            for computing the loss
    +        rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training
    +            of the RPN
    +        box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
    +            the locations indicated by the bounding boxes
    +        box_head (nn.Module): module that takes the cropped feature maps as input
    +        box_predictor (nn.Module): module that takes the output of box_head and returns the
    +            classification logits and box regression deltas.
    +        box_score_thresh (float): during inference, only return proposals with a classification score
    +            greater than box_score_thresh
    +        box_nms_thresh (float): NMS threshold for the prediction head. Used during inference
    +        box_detections_per_img (int): maximum number of detections per image, for all classes.
    +        box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be
    +            considered as positive during training of the classification head
    +        box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be
    +            considered as negative during training of the classification head
    +        box_batch_size_per_image (int): number of proposals that are sampled during training of the
    +            classification head
    +        box_positive_fraction (float): proportion of positive proposals in a mini-batch during training
    +            of the classification head
    +        bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the
    +            bounding boxes
    +        keypoint_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
    +             the locations indicated by the bounding boxes, which will be used for the keypoint head.
    +        keypoint_head (nn.Module): module that takes the cropped feature maps as input
    +        keypoint_predictor (nn.Module): module that takes the output of the keypoint_head and returns the
    +            heatmap logits
    +
    +    Example::
    +
    +        >>> import torchvision
    +        >>> from torchvision.models.detection import KeypointRCNN
    +        >>> from torchvision.models.detection.rpn import AnchorGenerator
    +        >>>
    +        >>> # load a pre-trained model for classification and return
    +        >>> # only the features
    +        >>> backbone = torchvision.models.mobilenet_v2(pretrained=True).features
    +        >>> # KeypointRCNN needs to know the number of
    +        >>> # output channels in a backbone. For mobilenet_v2, it's 1280
    +        >>> # so we need to add it here
    +        >>> backbone.out_channels = 1280
    +        >>>
    +        >>> # let's make the RPN generate 5 x 3 anchors per spatial
    +        >>> # location, with 5 different sizes and 3 different aspect
    +        >>> # ratios. We have a Tuple[Tuple[int]] because each feature
    +        >>> # map could potentially have different sizes and
    +        >>> # aspect ratios
    +        >>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
    +        >>>                                    aspect_ratios=((0.5, 1.0, 2.0),))
    +        >>>
+        >>> # let's define which feature maps we will use to perform
+        >>> # the region of interest cropping, as well as the size of
+        >>> # the crop after rescaling.
    +        >>> # if your backbone returns a Tensor, featmap_names is expected to
    +        >>> # be [0]. More generally, the backbone should return an
    +        >>> # OrderedDict[Tensor], and in featmap_names you can choose which
    +        >>> # feature maps to use.
    +        >>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],
    +        >>>                                                 output_size=7,
    +        >>>                                                 sampling_ratio=2)
    +        >>>
    +        >>> keypoint_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],
    +        >>>                                                          output_size=14,
    +        >>>                                                          sampling_ratio=2)
+        >>> # put the pieces together inside a KeypointRCNN model
    +        >>> model = KeypointRCNN(backbone,
    +        >>>                      num_classes=2,
    +        >>>                      rpn_anchor_generator=anchor_generator,
    +        >>>                      box_roi_pool=roi_pooler,
    +        >>>                      keypoint_roi_pool=keypoint_roi_pooler)
+        >>> model.eval()
    +        >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
    +        >>> predictions = model(x)
    +    """
    +    def __init__(self, backbone, num_classes=None,
    +                 # transform parameters
    +                 min_size=None, max_size=1333,
    +                 image_mean=None, image_std=None,
    +                 # RPN parameters
    +                 rpn_anchor_generator=None, rpn_head=None,
    +                 rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000,
    +                 rpn_post_nms_top_n_train=2000, rpn_post_nms_top_n_test=1000,
    +                 rpn_nms_thresh=0.7,
    +                 rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3,
    +                 rpn_batch_size_per_image=256, rpn_positive_fraction=0.5,
    +                 # Box parameters
    +                 box_roi_pool=None, box_head=None, box_predictor=None,
    +                 box_score_thresh=0.05, box_nms_thresh=0.5, box_detections_per_img=100,
    +                 box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5,
    +                 box_batch_size_per_image=512, box_positive_fraction=0.25,
    +                 bbox_reg_weights=None,
    +                 # keypoint parameters
    +                 keypoint_roi_pool=None, keypoint_head=None, keypoint_predictor=None,
    +                 num_keypoints=17):
    +
    +        assert isinstance(keypoint_roi_pool, (MultiScaleRoIAlign, type(None)))
    +        if min_size is None:
    +            min_size = (640, 672, 704, 736, 768, 800)
    +
    +        if num_classes is not None:
    +            if keypoint_predictor is not None:
    +                raise ValueError("num_classes should be None when keypoint_predictor is specified")
    +
    +        out_channels = backbone.out_channels
    +
    +        if keypoint_roi_pool is None:
    +            keypoint_roi_pool = MultiScaleRoIAlign(
    +                featmap_names=[0, 1, 2, 3],
    +                output_size=14,
    +                sampling_ratio=2)
    +
    +        if keypoint_head is None:
    +            keypoint_layers = tuple(512 for _ in range(8))
    +            keypoint_head = KeypointRCNNHeads(out_channels, keypoint_layers)
    +
    +        if keypoint_predictor is None:
    +            keypoint_dim_reduced = 512  # == keypoint_layers[-1]
    +            keypoint_predictor = KeypointRCNNPredictor(keypoint_dim_reduced, num_keypoints)
    +
    +        super(KeypointRCNN, self).__init__(
    +            backbone, num_classes,
    +            # transform parameters
    +            min_size, max_size,
    +            image_mean, image_std,
    +            # RPN-specific parameters
    +            rpn_anchor_generator, rpn_head,
    +            rpn_pre_nms_top_n_train, rpn_pre_nms_top_n_test,
    +            rpn_post_nms_top_n_train, rpn_post_nms_top_n_test,
    +            rpn_nms_thresh,
    +            rpn_fg_iou_thresh, rpn_bg_iou_thresh,
    +            rpn_batch_size_per_image, rpn_positive_fraction,
    +            # Box parameters
    +            box_roi_pool, box_head, box_predictor,
    +            box_score_thresh, box_nms_thresh, box_detections_per_img,
    +            box_fg_iou_thresh, box_bg_iou_thresh,
    +            box_batch_size_per_image, box_positive_fraction,
    +            bbox_reg_weights)
    +
    +        self.roi_heads.keypoint_roi_pool = keypoint_roi_pool
    +        self.roi_heads.keypoint_head = keypoint_head
    +        self.roi_heads.keypoint_predictor = keypoint_predictor
    +
    +
    +class KeypointRCNNHeads(nn.Sequential):
    +    def __init__(self, in_channels, layers):
    +        d = []
    +        next_feature = in_channels
    +        for l in layers:
    +            d.append(misc_nn_ops.Conv2d(next_feature, l, 3, stride=1, padding=1))
    +            d.append(nn.ReLU(inplace=True))
    +            next_feature = l
    +        super(KeypointRCNNHeads, self).__init__(*d)
    +        for m in self.children():
    +            if isinstance(m, misc_nn_ops.Conv2d):
    +                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
    +                nn.init.constant_(m.bias, 0)
    +
    +
    +class KeypointRCNNPredictor(nn.Module):
    +    def __init__(self, in_channels, num_keypoints):
    +        super(KeypointRCNNPredictor, self).__init__()
    +        input_features = in_channels
    +        deconv_kernel = 4
    +        self.kps_score_lowres = misc_nn_ops.ConvTranspose2d(
    +            input_features,
    +            num_keypoints,
    +            deconv_kernel,
    +            stride=2,
    +            padding=deconv_kernel // 2 - 1,
    +        )
    +        nn.init.kaiming_normal_(
    +            self.kps_score_lowres.weight, mode="fan_out", nonlinearity="relu"
    +        )
    +        nn.init.constant_(self.kps_score_lowres.bias, 0)
    +        self.up_scale = 2
    +        self.out_channels = num_keypoints
    +
    +    def forward(self, x):
    +        x = self.kps_score_lowres(x)
    +        x = misc_nn_ops.interpolate(
    +            x, scale_factor=self.up_scale, mode="bilinear", align_corners=False
    +        )
    +        return x
    +
    +
    +model_urls = {
    +    'keypointrcnn_resnet50_fpn_coco':
    +        'https://download.pytorch.org/models/keypointrcnn_resnet50_fpn_coco-9f466800.pth',
    +}
    +
    +
    +
+[docs]def keypointrcnn_resnet50_fpn(pretrained=False, progress=True,
+                                    num_classes=2, num_keypoints=17,
+                                    pretrained_backbone=True, **kwargs):
+    """
+    Constructs a Keypoint R-CNN model with a ResNet-50-FPN backbone.
+
+    The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each
+    image, and should be in ``0-1`` range. Different images can have different sizes.
+
+    The behavior of the model changes depending on whether it is in training or evaluation mode.
+
+    During training, the model expects both the input tensors and targets (a list of dictionaries),
+    containing:
+        - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with values
+          between ``0`` and ``H`` and ``0`` and ``W``
+        - labels (``Int64Tensor[N]``): the class label for each ground-truth box
+        - keypoints (``FloatTensor[N, K, 3]``): the ``K`` keypoint locations for each of the ``N`` instances, in the
+          format ``[x, y, visibility]``, where ``visibility=0`` means that the keypoint is not visible.
+
+    The model returns a ``Dict[Tensor]`` during training, containing the classification and regression
+    losses for both the RPN and the R-CNN, and the keypoint loss.
+
+    During inference, the model requires only the input tensors, and returns the post-processed
+    predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as
+    follows:
+        - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with values between
+          ``0`` and ``H`` and ``0`` and ``W``
+        - labels (``Int64Tensor[N]``): the predicted labels for each image
+        - scores (``Tensor[N]``): the scores of each prediction
+        - keypoints (``FloatTensor[N, K, 3]``): the locations of the predicted keypoints, in ``[x, y, v]`` format.
+
+    Example::
+
+        >>> model = torchvision.models.detection.keypointrcnn_resnet50_fpn(pretrained=True)
+        >>> model.eval()
+        >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
+        >>> predictions = model(x)
+
+    Arguments:
+        pretrained (bool): If True, returns a model pre-trained on COCO train2017
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    if pretrained:
+        # no need to download the backbone if pretrained is set
+        pretrained_backbone = False
+    backbone = resnet_fpn_backbone('resnet50', pretrained_backbone)
+    model = KeypointRCNN(backbone, num_classes, num_keypoints=num_keypoints, **kwargs)
+    if pretrained:
+        state_dict = load_state_dict_from_url(model_urls['keypointrcnn_resnet50_fpn_coco'],
+                                              progress=progress)
+        model.load_state_dict(state_dict)
+    return model
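
A brief inference sketch for the constructor above, with random weights and a random image; per the docstring, each prediction dictionary carries a ``keypoints`` tensor with one ``[x, y, v]`` triplet per keypoint:

    >>> import torch
    >>> import torchvision
    >>> model = torchvision.models.detection.keypointrcnn_resnet50_fpn(
    ...     pretrained=False, pretrained_backbone=False)
    >>> model.eval()
    >>> predictions = model([torch.rand(3, 300, 400)])
    >>> keypoints = predictions[0]['keypoints']  # shape [N, num_keypoints, 3]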
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/models/detection/mask_rcnn.html b/docs/stable/_modules/torchvision/models/detection/mask_rcnn.html
new file mode 100644
index 000000000000..8fd31fa535db
--- /dev/null
+++ b/docs/stable/_modules/torchvision/models/detection/mask_rcnn.html
@@ -0,0 +1,831 @@
+ torchvision.models.detection.mask_rcnn — PyTorch master documentation

    Source code for torchvision.models.detection.mask_rcnn

    +from collections import OrderedDict
    +
    +import torch
    +from torch import nn
    +import torch.nn.functional as F
    +
    +from torchvision.ops import misc as misc_nn_ops
    +from torchvision.ops import MultiScaleRoIAlign
    +
    +from ..utils import load_state_dict_from_url
    +
    +from .faster_rcnn import FasterRCNN
    +from .backbone_utils import resnet_fpn_backbone
    +
    +__all__ = [
    +    "MaskRCNN", "maskrcnn_resnet50_fpn",
    +]
    +
    +
    +class MaskRCNN(FasterRCNN):
    +    """
    +    Implements Mask R-CNN.
    +
    +    The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each
    +    image, and should be in 0-1 range. Different images can have different sizes.
    +
+    The behavior of the model changes depending on whether it is in training or evaluation mode.
+
+    During training, the model expects both the input tensors and targets (a list of dictionaries),
+    containing:
    +        - boxes (FloatTensor[N, 4]): the ground-truth boxes in [x1, y1, x2, y2] format, with values
    +          between 0 and H and 0 and W
    +        - labels (Int64Tensor[N]): the class label for each ground-truth box
    +        - masks (UInt8Tensor[N, 1, H, W]): the segmentation binary masks for each instance
    +
    +    The model returns a Dict[Tensor] during training, containing the classification and regression
    +    losses for both the RPN and the R-CNN, and the mask loss.
    +
    +    During inference, the model requires only the input tensors, and returns the post-processed
    +    predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as
    +    follows:
    +        - boxes (FloatTensor[N, 4]): the predicted boxes in [x1, y1, x2, y2] format, with values between
    +          0 and H and 0 and W
    +        - labels (Int64Tensor[N]): the predicted labels for each image
+        - scores (Tensor[N]): the scores of each prediction
    +        - masks (UInt8Tensor[N, 1, H, W]): the predicted masks for each instance, in 0-1 range. In order to
    +          obtain the final segmentation masks, the soft masks can be thresholded, generally
    +          with a value of 0.5 (mask >= 0.5)
    +
    +    Arguments:
    +        backbone (nn.Module): the network used to compute the features for the model.
+            It should contain an out_channels attribute, which indicates the number of output
+            channels that each feature map has (and it should be the same for all feature maps).
+            The backbone should return a single Tensor or an OrderedDict[Tensor].
    +        num_classes (int): number of output classes of the model (including the background).
    +            If box_predictor is specified, num_classes should be None.
    +        min_size (int): minimum size of the image to be rescaled before feeding it to the backbone
    +        max_size (int): maximum size of the image to be rescaled before feeding it to the backbone
    +        image_mean (Tuple[float, float, float]): mean values used for input normalization.
+            They are generally the mean values of the dataset on which the backbone has been
+            trained
+        image_std (Tuple[float, float, float]): std values used for input normalization.
+            They are generally the std values of the dataset on which the backbone has been trained
    +        rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
    +            maps.
    +        rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN
    +        rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training
    +        rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing
    +        rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training
    +        rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing
    +        rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals
    +        rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be
    +            considered as positive during training of the RPN.
    +        rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be
    +            considered as negative during training of the RPN.
    +        rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN
    +            for computing the loss
    +        rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training
    +            of the RPN
    +        box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
    +            the locations indicated by the bounding boxes
    +        box_head (nn.Module): module that takes the cropped feature maps as input
    +        box_predictor (nn.Module): module that takes the output of box_head and returns the
    +            classification logits and box regression deltas.
    +        box_score_thresh (float): during inference, only return proposals with a classification score
    +            greater than box_score_thresh
    +        box_nms_thresh (float): NMS threshold for the prediction head. Used during inference
    +        box_detections_per_img (int): maximum number of detections per image, for all classes.
    +        box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be
    +            considered as positive during training of the classification head
    +        box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be
    +            considered as negative during training of the classification head
    +        box_batch_size_per_image (int): number of proposals that are sampled during training of the
    +            classification head
    +        box_positive_fraction (float): proportion of positive proposals in a mini-batch during training
    +            of the classification head
    +        bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the
    +            bounding boxes
    +        mask_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
    +             the locations indicated by the bounding boxes, which will be used for the mask head.
    +        mask_head (nn.Module): module that takes the cropped feature maps as input
    +        mask_predictor (nn.Module): module that takes the output of the mask_head and returns the
    +            segmentation mask logits
    +
    +    Example::
    +
    +        >>> import torchvision
    +        >>> from torchvision.models.detection import MaskRCNN
    +        >>> from torchvision.models.detection.rpn import AnchorGenerator
    +        >>>
    +        >>> # load a pre-trained model for classification and return
    +        >>> # only the features
    +        >>> backbone = torchvision.models.mobilenet_v2(pretrained=True).features
    +        >>> # MaskRCNN needs to know the number of
    +        >>> # output channels in a backbone. For mobilenet_v2, it's 1280
    +        >>> # so we need to add it here
    +        >>> backbone.out_channels = 1280
    +        >>>
    +        >>> # let's make the RPN generate 5 x 3 anchors per spatial
    +        >>> # location, with 5 different sizes and 3 different aspect
    +        >>> # ratios. We have a Tuple[Tuple[int]] because each feature
    +        >>> # map could potentially have different sizes and
    +        >>> # aspect ratios
    +        >>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
    +        >>>                                    aspect_ratios=((0.5, 1.0, 2.0),))
    +        >>>
    +        >>> # let's define what are the feature maps that we will
    +        >>> # use to perform the region of interest cropping, as well as
    +        >>> # the size of the crop after rescaling.
    +        >>> # if your backbone returns a Tensor, featmap_names is expected to
    +        >>> # be [0]. More generally, the backbone should return an
    +        >>> # OrderedDict[Tensor], and in featmap_names you can choose which
    +        >>> # feature maps to use.
    +        >>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],
    +        >>>                                                 output_size=7,
    +        >>>                                                 sampling_ratio=2)
    +        >>>
    +        >>> mask_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],
    +        >>>                                                      output_size=14,
    +        >>>                                                      sampling_ratio=2)
+        >>> # put the pieces together inside a MaskRCNN model
    +        >>> model = MaskRCNN(backbone,
    +        >>>                  num_classes=2,
    +        >>>                  rpn_anchor_generator=anchor_generator,
    +        >>>                  box_roi_pool=roi_pooler,
    +        >>>                  mask_roi_pool=mask_roi_pooler)
    +        >>> model.eval()
    +        >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
    +        >>> predictions = model(x)
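+        >>>
+        >>> # the soft masks can then be binarized as described above
+        >>> binary_masks = predictions[0]['masks'] >= 0.5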
    +    """
    +    def __init__(self, backbone, num_classes=None,
    +                 # transform parameters
    +                 min_size=800, max_size=1333,
    +                 image_mean=None, image_std=None,
    +                 # RPN parameters
    +                 rpn_anchor_generator=None, rpn_head=None,
    +                 rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000,
    +                 rpn_post_nms_top_n_train=2000, rpn_post_nms_top_n_test=1000,
    +                 rpn_nms_thresh=0.7,
    +                 rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3,
    +                 rpn_batch_size_per_image=256, rpn_positive_fraction=0.5,
    +                 # Box parameters
    +                 box_roi_pool=None, box_head=None, box_predictor=None,
    +                 box_score_thresh=0.05, box_nms_thresh=0.5, box_detections_per_img=100,
    +                 box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5,
    +                 box_batch_size_per_image=512, box_positive_fraction=0.25,
    +                 bbox_reg_weights=None,
    +                 # Mask parameters
    +                 mask_roi_pool=None, mask_head=None, mask_predictor=None):
    +
    +        assert isinstance(mask_roi_pool, (MultiScaleRoIAlign, type(None)))
    +
    +        if num_classes is not None:
    +            if mask_predictor is not None:
    +                raise ValueError("num_classes should be None when mask_predictor is specified")
    +
    +        out_channels = backbone.out_channels
    +
    +        if mask_roi_pool is None:
    +            mask_roi_pool = MultiScaleRoIAlign(
    +                featmap_names=[0, 1, 2, 3],
    +                output_size=14,
    +                sampling_ratio=2)
    +
    +        if mask_head is None:
    +            mask_layers = (256, 256, 256, 256)
    +            mask_dilation = 1
    +            mask_head = MaskRCNNHeads(out_channels, mask_layers, mask_dilation)
    +
    +        if mask_predictor is None:
    +            mask_predictor_in_channels = 256  # == mask_layers[-1]
    +            mask_dim_reduced = 256
    +            mask_predictor = MaskRCNNPredictor(mask_predictor_in_channels,
    +                                               mask_dim_reduced, num_classes)
    +
    +        super(MaskRCNN, self).__init__(
    +            backbone, num_classes,
    +            # transform parameters
    +            min_size, max_size,
    +            image_mean, image_std,
    +            # RPN-specific parameters
    +            rpn_anchor_generator, rpn_head,
    +            rpn_pre_nms_top_n_train, rpn_pre_nms_top_n_test,
    +            rpn_post_nms_top_n_train, rpn_post_nms_top_n_test,
    +            rpn_nms_thresh,
    +            rpn_fg_iou_thresh, rpn_bg_iou_thresh,
    +            rpn_batch_size_per_image, rpn_positive_fraction,
    +            # Box parameters
    +            box_roi_pool, box_head, box_predictor,
    +            box_score_thresh, box_nms_thresh, box_detections_per_img,
    +            box_fg_iou_thresh, box_bg_iou_thresh,
    +            box_batch_size_per_image, box_positive_fraction,
    +            bbox_reg_weights)
    +
    +        self.roi_heads.mask_roi_pool = mask_roi_pool
    +        self.roi_heads.mask_head = mask_head
    +        self.roi_heads.mask_predictor = mask_predictor
    +
    +
    +class MaskRCNNHeads(nn.Sequential):
    +    def __init__(self, in_channels, layers, dilation):
    +        """
    +        Arguments:
+            in_channels (int): number of input channels
+            layers (tuple): feature dimensions of each FCN layer
+            dilation (int): dilation rate of the convolutional kernels
    +        """
    +        d = OrderedDict()
    +        next_feature = in_channels
    +        for layer_idx, layer_features in enumerate(layers, 1):
    +            d["mask_fcn{}".format(layer_idx)] = misc_nn_ops.Conv2d(
    +                next_feature, layer_features, kernel_size=3,
    +                stride=1, padding=dilation, dilation=dilation)
    +            d["relu{}".format(layer_idx)] = nn.ReLU(inplace=True)
    +            next_feature = layer_features
    +
    +        super(MaskRCNNHeads, self).__init__(d)
    +        for name, param in self.named_parameters():
    +            if "weight" in name:
    +                nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
    +            # elif "bias" in name:
    +            #     nn.init.constant_(param, 0)
    +
    +
    +class MaskRCNNPredictor(nn.Sequential):
    +    def __init__(self, in_channels, dim_reduced, num_classes):
    +        super(MaskRCNNPredictor, self).__init__(OrderedDict([
    +            ("conv5_mask", misc_nn_ops.ConvTranspose2d(in_channels, dim_reduced, 2, 2, 0)),
    +            ("relu", nn.ReLU(inplace=True)),
    +            ("mask_fcn_logits", misc_nn_ops.Conv2d(dim_reduced, num_classes, 1, 1, 0)),
    +        ]))
    +
    +        for name, param in self.named_parameters():
    +            if "weight" in name:
    +                nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
    +            # elif "bias" in name:
    +            #     nn.init.constant_(param, 0)
    +
    +
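+# A hedged fine-tuning sketch (hypothetical helper, not part of the original
+# source): swaps the mask predictor defined above for one with a different
+# number of classes; 256 mirrors the default mask_layers[-1] used earlier.
+def _replace_mask_predictor(model, num_classes):
+    in_channels = model.roi_heads.mask_predictor.conv5_mask.in_channels
+    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_channels, 256, num_classes)
+    return model
+
+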
    +model_urls = {
    +    'maskrcnn_resnet50_fpn_coco':
    +        'https://download.pytorch.org/models/maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth',
    +}
    +
    +
    +
+def maskrcnn_resnet50_fpn(pretrained=False, progress=True,
+                          num_classes=91, pretrained_backbone=True, **kwargs):
+    """
+    Constructs a Mask R-CNN model with a ResNet-50-FPN backbone.
+
+    The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each
+    image, and should be in ``0-1`` range. Different images can have different sizes.
+
+    The behavior of the model changes depending on whether it is in training or evaluation mode.
+
+    During training, the model expects both the input tensors and the targets (a list of dictionaries),
+    containing:
+        - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with values
+          between ``0`` and ``H`` and ``0`` and ``W``
+        - labels (``Int64Tensor[N]``): the class label for each ground-truth box
+        - masks (``UInt8Tensor[N, 1, H, W]``): the segmentation binary masks for each instance
+
+    The model returns a ``Dict[Tensor]`` during training, containing the classification and regression
+    losses for both the RPN and the R-CNN, and the mask loss.
+
+    During inference, the model requires only the input tensors, and returns the post-processed
+    predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as
+    follows:
+        - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with values between
+          ``0`` and ``H`` and ``0`` and ``W``
+        - labels (``Int64Tensor[N]``): the predicted labels for each image
+        - scores (``Tensor[N]``): the scores of each prediction
+        - masks (``UInt8Tensor[N, 1, H, W]``): the predicted masks for each instance, in ``0-1`` range. In order to
+          obtain the final segmentation masks, the soft masks can be thresholded, generally
+          with a value of 0.5 (``mask >= 0.5``)
+
+    Example::
+
+        >>> model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
+        >>> model.eval()
+        >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
+        >>> predictions = model(x)
+
+    Arguments:
+        pretrained (bool): If True, returns a model pre-trained on COCO train2017
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    if pretrained:
+        # no need to download the backbone if pretrained is set
+        pretrained_backbone = False
+    backbone = resnet_fpn_backbone('resnet50', pretrained_backbone)
+    model = MaskRCNN(backbone, num_classes, **kwargs)
+    if pretrained:
+        state_dict = load_state_dict_from_url(model_urls['maskrcnn_resnet50_fpn_coco'],
+                                              progress=progress)
+        model.load_state_dict(state_dict)
+    return model
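+
+
+# A minimal training-mode sketch (hypothetical helper added for illustration;
+# it follows the target fields documented above, and all shapes and values
+# below are dummies):
+def _example_training_step():
+    import torch
+    model = maskrcnn_resnet50_fpn(num_classes=2, pretrained_backbone=False)
+    model.train()
+    images = [torch.rand(3, 300, 400)]
+    targets = [{'boxes': torch.tensor([[10., 20., 100., 200.]]),
+                'labels': torch.tensor([1], dtype=torch.int64),
+                'masks': torch.zeros((1, 1, 300, 400), dtype=torch.uint8)}]
+    loss_dict = model(images, targets)  # dict of RPN, box and mask losses
+    return sum(loss_dict.values())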
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/models/googlenet.html b/docs/stable/_modules/torchvision/models/googlenet.html
new file mode 100644
index 000000000000..938d3694852c
--- /dev/null
+++ b/docs/stable/_modules/torchvision/models/googlenet.html
@@ -0,0 +1,747 @@
+torchvision.models.googlenet — PyTorch master documentation

    Source code for torchvision.models.googlenet

    +import warnings
    +from collections import namedtuple
    +import torch
    +import torch.nn as nn
    +import torch.nn.functional as F
    +from .utils import load_state_dict_from_url
    +
    +__all__ = ['GoogLeNet', 'googlenet']
    +
    +model_urls = {
    +    # GoogLeNet ported from TensorFlow
    +    'googlenet': 'https://download.pytorch.org/models/googlenet-1378be20.pth',
    +}
    +
    +_GoogLeNetOutputs = namedtuple('GoogLeNetOutputs', ['logits', 'aux_logits2', 'aux_logits1'])
    +
    +
    +
+def googlenet(pretrained=False, progress=True, **kwargs):
+    r"""GoogLeNet (Inception v1) model architecture from
+    `"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+        aux_logits (bool): If True, adds two auxiliary branches that can improve training.
+            Default: *False* when pretrained is True, otherwise *True*
+        transform_input (bool): If True, preprocesses the input according to the method with which it
+            was trained on ImageNet. Default: *False*
+    """
+    if pretrained:
+        if 'transform_input' not in kwargs:
+            kwargs['transform_input'] = True
+        if 'aux_logits' not in kwargs:
+            kwargs['aux_logits'] = False
+        if kwargs['aux_logits']:
+            warnings.warn('auxiliary heads in the pretrained googlenet model are NOT pretrained, '
+                          'so make sure to train them')
+        original_aux_logits = kwargs['aux_logits']
+        kwargs['aux_logits'] = True
+        kwargs['init_weights'] = False
+        model = GoogLeNet(**kwargs)
+        state_dict = load_state_dict_from_url(model_urls['googlenet'],
+                                              progress=progress)
+        model.load_state_dict(state_dict)
+        if not original_aux_logits:
+            model.aux_logits = False
+            del model.aux1, model.aux2
+        return model
+
+    return GoogLeNet(**kwargs)
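+
+
+# Hedged sketch (hypothetical helper added for illustration): in training mode
+# with aux_logits=True, forward() returns the _GoogLeNetOutputs namedtuple
+# defined above instead of a plain tensor.
+def _example_aux_outputs():
+    model = GoogLeNet(aux_logits=True)
+    model.train()
+    out = model(torch.rand(2, 3, 224, 224))
+    # out.logits, out.aux_logits2 and out.aux_logits1 each have shape [2, 1000]
+    return out.logits.shape, out.aux_logits2.shape, out.aux_logits1.shape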
+
+
+class GoogLeNet(nn.Module):
+
+    def __init__(self, num_classes=1000, aux_logits=True, transform_input=False, init_weights=True):
+        super(GoogLeNet, self).__init__()
+        self.aux_logits = aux_logits
+        self.transform_input = transform_input
+
+        self.conv1 = BasicConv2d(3, 64, kernel_size=7, stride=2, padding=3)
+        self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
+        self.conv2 = BasicConv2d(64, 64, kernel_size=1)
+        self.conv3 = BasicConv2d(64, 192, kernel_size=3, padding=1)
+        self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
+
+        self.inception3a = Inception(192, 64, 96, 128, 16, 32, 32)
+        self.inception3b = Inception(256, 128, 128, 192, 32, 96, 64)
+        self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
+
+        self.inception4a = Inception(480, 192, 96, 208, 16, 48, 64)
+        self.inception4b = Inception(512, 160, 112, 224, 24, 64, 64)
+        self.inception4c = Inception(512, 128, 128, 256, 24, 64, 64)
+        self.inception4d = Inception(512, 112, 144, 288, 32, 64, 64)
+        self.inception4e = Inception(528, 256, 160, 320, 32, 128, 128)
+        self.maxpool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
+
+        self.inception5a = Inception(832, 256, 160, 320, 32, 128, 128)
+        self.inception5b = Inception(832, 384, 192, 384, 48, 128, 128)
+
+        if aux_logits:
+            self.aux1 = InceptionAux(512, num_classes)
+            self.aux2 = InceptionAux(528, num_classes)
+
+        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+        self.dropout = nn.Dropout(0.2)
+        self.fc = nn.Linear(1024, num_classes)
+
+        if init_weights:
+            self._initialize_weights()
+
+    def _initialize_weights(self):
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
+                import scipy.stats as stats
+                X = stats.truncnorm(-2, 2, scale=0.01)
+                values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)
+                values = values.view(m.weight.size())
+                with torch.no_grad():
+                    m.weight.copy_(values)
+            elif isinstance(m, nn.BatchNorm2d):
+                nn.init.constant_(m.weight, 1)
+                nn.init.constant_(m.bias, 0)
+
+    def forward(self, x):
+        if self.transform_input:
+            x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
+            x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
+            x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
+            x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
+
+        # N x 3 x 224 x 224
+        x = self.conv1(x)
+        # N x 64 x 112 x 112
+        x = self.maxpool1(x)
+        # N x 64 x 56 x 56
+        x = self.conv2(x)
+        # N x 64 x 56 x 56
+        x = self.conv3(x)
+        # N x 192 x 56 x 56
+        x = self.maxpool2(x)
+
+        # N x 192 x 28 x 28
+        x = self.inception3a(x)
+        # N x 256 x 28 x 28
+        x = self.inception3b(x)
+        # N x 480 x 28 x 28
+        x = self.maxpool3(x)
+        # N x 480 x 14 x 14
+        x = self.inception4a(x)
+        # N x 512 x 14 x 14
+        if self.training and self.aux_logits:
+            aux1 = self.aux1(x)
+
+        x = self.inception4b(x)
+        # N x 512 x 14 x 14
+        x = self.inception4c(x)
+        # N x 512 x 14 x 14
+        x = self.inception4d(x)
+        # N x 528 x 14 x 14
+        if self.training and self.aux_logits:
+            aux2 = self.aux2(x)
+
+        x = self.inception4e(x)
+        # N x 832 x 14 x 14
+        x = self.maxpool4(x)
+        # N x 832 x 7 x 7
+        x = self.inception5a(x)
+        # N x 832 x 7 x 7
+        x = self.inception5b(x)
+        # N x 1024 x 7 x 7
+
+        x = self.avgpool(x)
+        # N x 1024 x 1 x 1
+        x = torch.flatten(x, 1)
+        # N x 1024
+        x = self.dropout(x)
+        x = self.fc(x)
+        # N x 1000 (num_classes)
+        if self.training and self.aux_logits:
+            return _GoogLeNetOutputs(x, aux2, aux1)
+        return x
+
+
+class Inception(nn.Module):
+
+    def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5,
+                 pool_proj):
+        super(Inception, self).__init__()
+
+        self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1)
+
+        self.branch2 = nn.Sequential(
+            BasicConv2d(in_channels, ch3x3red, kernel_size=1),
+            BasicConv2d(ch3x3red, ch3x3, kernel_size=3, padding=1)
+        )
+
+        self.branch3 = nn.Sequential(
+            BasicConv2d(in_channels, ch5x5red, kernel_size=1),
+            BasicConv2d(ch5x5red, ch5x5, kernel_size=3, padding=1)
+        )
+
+        self.branch4 = nn.Sequential(
+            nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=True),
+            BasicConv2d(in_channels, pool_proj, kernel_size=1)
+        )
+
+    def forward(self, x):
+        branch1 = self.branch1(x)
+        branch2 = self.branch2(x)
+        branch3 = self.branch3(x)
+        branch4 = self.branch4(x)
+
+        outputs = [branch1, branch2, branch3, branch4]
+        return torch.cat(outputs, 1)
+
+
+class InceptionAux(nn.Module):
+
+    def __init__(self, in_channels, num_classes):
+        super(InceptionAux, self).__init__()
+        self.conv = BasicConv2d(in_channels, 128, kernel_size=1)
+
+        self.fc1 = nn.Linear(2048, 1024)
+        self.fc2 = nn.Linear(1024, num_classes)
+
+    def forward(self, x):
+        # aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14
+        x = F.adaptive_avg_pool2d(x, (4, 4))
+        # aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4
+        x = self.conv(x)
+        # N x 128 x 4 x 4
+        x = torch.flatten(x, 1)
+        # N x 2048
+        x = F.relu(self.fc1(x), inplace=True)
+        # N x 1024
+        x = F.dropout(x, 0.7, training=self.training)
+        # N x 1024
+        x = self.fc2(x)
+        # N x num_classes
+
+        return x
+
+
+class BasicConv2d(nn.Module):
+
+    def __init__(self, in_channels, out_channels, **kwargs):
+        super(BasicConv2d, self).__init__()
+        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
+        self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
+
+    def forward(self, x):
+        x = self.conv(x)
+        x = self.bn(x)
+        return F.relu(x, inplace=True)
+
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/models/inception.html b/docs/stable/_modules/torchvision/models/inception.html
new file mode 100644
index 000000000000..7c994bba4891
--- /dev/null
+++ b/docs/stable/_modules/torchvision/models/inception.html
@@ -0,0 +1,868 @@
+torchvision.models.inception — PyTorch master documentation

    Source code for torchvision.models.inception

    +from collections import namedtuple
    +import torch
    +import torch.nn as nn
    +import torch.nn.functional as F
    +from .utils import load_state_dict_from_url
    +
    +
    +__all__ = ['Inception3', 'inception_v3']
    +
    +
    +model_urls = {
    +    # Inception v3 ported from TensorFlow
    +    'inception_v3_google': 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth',
    +}
    +
    +_InceptionOutputs = namedtuple('InceptionOutputs', ['logits', 'aux_logits'])
    +
    +
    +
+def inception_v3(pretrained=False, progress=True, **kwargs):
+    r"""Inception v3 model architecture from
+    `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.
+
+    .. note::
+        **Important**: In contrast to the other models, inception_v3 expects tensors with a size of
+        N x 3 x 299 x 299, so ensure your images are sized accordingly.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+        aux_logits (bool): If True, add an auxiliary branch that can improve training.
+            Default: *True*
+        transform_input (bool): If True, preprocesses the input according to the method with which it
+            was trained on ImageNet. Default: *False*
+    """
+    if pretrained:
+        if 'transform_input' not in kwargs:
+            kwargs['transform_input'] = True
+        if 'aux_logits' in kwargs:
+            original_aux_logits = kwargs['aux_logits']
+            kwargs['aux_logits'] = True
+        else:
+            original_aux_logits = True
+        model = Inception3(**kwargs)
+        state_dict = load_state_dict_from_url(model_urls['inception_v3_google'],
+                                              progress=progress)
+        model.load_state_dict(state_dict)
+        if not original_aux_logits:
+            model.aux_logits = False
+            del model.AuxLogits
+        return model
+
+    return Inception3(**kwargs)
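+
+
+# Hedged sketch (hypothetical helper added for illustration): inputs must be
+# N x 3 x 299 x 299 as noted above, so resize beforehand, e.g. with
+# F.interpolate (torch and F are imported at the top of this module).
+def _example_inference():
+    model = inception_v3(aux_logits=False)
+    model.eval()
+    x = F.interpolate(torch.rand(1, 3, 512, 512), size=(299, 299),
+                      mode='bilinear', align_corners=False)
+    with torch.no_grad():
+        return model(x).shape  # torch.Size([1, 1000])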
+
+
+class Inception3(nn.Module):
+
+    def __init__(self, num_classes=1000, aux_logits=True, transform_input=False):
+        super(Inception3, self).__init__()
+        self.aux_logits = aux_logits
+        self.transform_input = transform_input
+        self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
+        self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
+        self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
+        self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
+        self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
+        self.Mixed_5b = InceptionA(192, pool_features=32)
+        self.Mixed_5c = InceptionA(256, pool_features=64)
+        self.Mixed_5d = InceptionA(288, pool_features=64)
+        self.Mixed_6a = InceptionB(288)
+        self.Mixed_6b = InceptionC(768, channels_7x7=128)
+        self.Mixed_6c = InceptionC(768, channels_7x7=160)
+        self.Mixed_6d = InceptionC(768, channels_7x7=160)
+        self.Mixed_6e = InceptionC(768, channels_7x7=192)
+        if aux_logits:
+            self.AuxLogits = InceptionAux(768, num_classes)
+        self.Mixed_7a = InceptionD(768)
+        self.Mixed_7b = InceptionE(1280)
+        self.Mixed_7c = InceptionE(2048)
+        self.fc = nn.Linear(2048, num_classes)
+
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
+                import scipy.stats as stats
+                stddev = m.stddev if hasattr(m, 'stddev') else 0.1
+                X = stats.truncnorm(-2, 2, scale=stddev)
+                values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)
+                values = values.view(m.weight.size())
+                with torch.no_grad():
+                    m.weight.copy_(values)
+            elif isinstance(m, nn.BatchNorm2d):
+                nn.init.constant_(m.weight, 1)
+                nn.init.constant_(m.bias, 0)
+
+    def forward(self, x):
+        if self.transform_input:
+            x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
+            x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
+            x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
+            x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
+        # N x 3 x 299 x 299
+        x = self.Conv2d_1a_3x3(x)
+        # N x 32 x 149 x 149
+        x = self.Conv2d_2a_3x3(x)
+        # N x 32 x 147 x 147
+        x = self.Conv2d_2b_3x3(x)
+        # N x 64 x 147 x 147
+        x = F.max_pool2d(x, kernel_size=3, stride=2)
+        # N x 64 x 73 x 73
+        x = self.Conv2d_3b_1x1(x)
+        # N x 80 x 73 x 73
+        x = self.Conv2d_4a_3x3(x)
+        # N x 192 x 71 x 71
+        x = F.max_pool2d(x, kernel_size=3, stride=2)
+        # N x 192 x 35 x 35
+        x = self.Mixed_5b(x)
+        # N x 256 x 35 x 35
+        x = self.Mixed_5c(x)
+        # N x 288 x 35 x 35
+        x = self.Mixed_5d(x)
+        # N x 288 x 35 x 35
+        x = self.Mixed_6a(x)
+        # N x 768 x 17 x 17
+        x = self.Mixed_6b(x)
+        # N x 768 x 17 x 17
+        x = self.Mixed_6c(x)
+        # N x 768 x 17 x 17
+        x = self.Mixed_6d(x)
+        # N x 768 x 17 x 17
+        x = self.Mixed_6e(x)
+        # N x 768 x 17 x 17
+        if self.training and self.aux_logits:
+            aux = self.AuxLogits(x)
+        # N x 768 x 17 x 17
+        x = self.Mixed_7a(x)
+        # N x 1280 x 8 x 8
+        x = self.Mixed_7b(x)
+        # N x 2048 x 8 x 8
+        x = self.Mixed_7c(x)
+        # N x 2048 x 8 x 8
+        # Adaptive average pooling
+        x = F.adaptive_avg_pool2d(x, (1, 1))
+        # N x 2048 x 1 x 1
+        x = F.dropout(x, training=self.training)
+        # N x 2048 x 1 x 1
+        x = torch.flatten(x, 1)
+        # N x 2048
+        x = self.fc(x)
+        # N x 1000 (num_classes)
+        if self.training and self.aux_logits:
+            return _InceptionOutputs(x, aux)
+        return x
+
+
+class InceptionA(nn.Module):
+
+    def __init__(self, in_channels, pool_features):
+        super(InceptionA, self).__init__()
+        self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1)
+
+        self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1)
+        self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)
+
+        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
+        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
+        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)
+
+        self.branch_pool = BasicConv2d(in_channels, pool_features, kernel_size=1)
+
+    def forward(self, x):
+        branch1x1 = self.branch1x1(x)
+
+        branch5x5 = self.branch5x5_1(x)
+        branch5x5 = self.branch5x5_2(branch5x5)
+
+        branch3x3dbl = self.branch3x3dbl_1(x)
+        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
+        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
+
+        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
+        branch_pool = self.branch_pool(branch_pool)
+
+        outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
+        return torch.cat(outputs, 1)
+
+
+class InceptionB(nn.Module):
+
+    def __init__(self, in_channels):
+        super(InceptionB, self).__init__()
+        self.branch3x3 = BasicConv2d(in_channels, 384, kernel_size=3, stride=2)
+
+        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
+        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
+        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=2)
+
+    def forward(self, x):
+        branch3x3 = self.branch3x3(x)
+
+        branch3x3dbl = self.branch3x3dbl_1(x)
+        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
+        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
+
+        branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
+
+        outputs = [branch3x3, branch3x3dbl, branch_pool]
+        return torch.cat(outputs, 1)
+
+
+class InceptionC(nn.Module):
+
+    def __init__(self, in_channels, channels_7x7):
+        super(InceptionC, self).__init__()
+        self.branch1x1 = BasicConv2d(in_channels, 192, kernel_size=1)
+
+        c7 = channels_7x7
+        self.branch7x7_1 = BasicConv2d(in_channels, c7, kernel_size=1)
+        self.branch7x7_2 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
+        self.branch7x7_3 = BasicConv2d(c7, 192, kernel_size=(7, 1), padding=(3, 0))
+
+        self.branch7x7dbl_1 = BasicConv2d(in_channels, c7, kernel_size=1)
+        self.branch7x7dbl_2 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
+        self.branch7x7dbl_3 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
+        self.branch7x7dbl_4 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
+        self.branch7x7dbl_5 = BasicConv2d(c7, 192, kernel_size=(1, 7), padding=(0, 3))
+
+        self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)
+
+    def forward(self, x):
+        branch1x1 = self.branch1x1(x)
+
+        branch7x7 = self.branch7x7_1(x)
+        branch7x7 = self.branch7x7_2(branch7x7)
+        branch7x7 = self.branch7x7_3(branch7x7)
+
+        branch7x7dbl = self.branch7x7dbl_1(x)
+        branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
+        branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
+        branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
+        branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
+
+        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
+        branch_pool = self.branch_pool(branch_pool)
+
+        outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
+        return torch.cat(outputs, 1)
+
+
+class InceptionD(nn.Module):
+
+    def __init__(self, in_channels):
+        super(InceptionD, self).__init__()
+        self.branch3x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
+        self.branch3x3_2 = BasicConv2d(192, 320, kernel_size=3, stride=2)
+
+        self.branch7x7x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
+        self.branch7x7x3_2 = BasicConv2d(192, 192, kernel_size=(1, 7), padding=(0, 3))
+        self.branch7x7x3_3 = BasicConv2d(192, 192, kernel_size=(7, 1), padding=(3, 0))
+        self.branch7x7x3_4 = BasicConv2d(192, 192, kernel_size=3, stride=2)
+
+    def forward(self, x):
+        branch3x3 = self.branch3x3_1(x)
+        branch3x3 = self.branch3x3_2(branch3x3)
+
+        branch7x7x3 = self.branch7x7x3_1(x)
+        branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
+        branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
+        branch7x7x3 = self.branch7x7x3_4(branch7x7x3)
+
+        branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
+        outputs = [branch3x3, branch7x7x3, branch_pool]
+        return torch.cat(outputs, 1)
+
+
+class InceptionE(nn.Module):
+
+    def __init__(self, in_channels):
+        super(InceptionE, self).__init__()
+        self.branch1x1 = BasicConv2d(in_channels, 320, kernel_size=1)
+
+        self.branch3x3_1 = BasicConv2d(in_channels, 384, kernel_size=1)
+        self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
+        self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))
+
+        self.branch3x3dbl_1 = BasicConv2d(in_channels, 448, kernel_size=1)
+        self.branch3x3dbl_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)
+        self.branch3x3dbl_3a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
+        self.branch3x3dbl_3b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))
+
+        self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)
+
+    def forward(self, x):
+        branch1x1 = self.branch1x1(x)
+
+        branch3x3 = self.branch3x3_1(x)
+        branch3x3 = [
+            self.branch3x3_2a(branch3x3),
+            self.branch3x3_2b(branch3x3),
+        ]
+        branch3x3 = torch.cat(branch3x3, 1)
+
+        branch3x3dbl = self.branch3x3dbl_1(x)
+        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
+        branch3x3dbl = [
+            self.branch3x3dbl_3a(branch3x3dbl),
+            self.branch3x3dbl_3b(branch3x3dbl),
+        ]
+        branch3x3dbl = torch.cat(branch3x3dbl, 1)
+
+        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
+        branch_pool = self.branch_pool(branch_pool)
+
+        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
+        return torch.cat(outputs, 1)
+
+
+class InceptionAux(nn.Module):
+
+    def __init__(self, in_channels, num_classes):
+        super(InceptionAux, self).__init__()
+        self.conv0 = BasicConv2d(in_channels, 128, kernel_size=1)
+        self.conv1 = BasicConv2d(128, 768, kernel_size=5)
+        self.conv1.stddev = 0.01
+        self.fc = nn.Linear(768, num_classes)
+        self.fc.stddev = 0.001
+
+    def forward(self, x):
+        # N x 768 x 17 x 17
+        x = F.avg_pool2d(x, kernel_size=5, stride=3)
+        # N x 768 x 5 x 5
+        x = self.conv0(x)
+        # N x 128 x 5 x 5
+        x = self.conv1(x)
+        # N x 768 x 1 x 1
+        # Adaptive average pooling
+        x = F.adaptive_avg_pool2d(x, (1, 1))
+        # N x 768 x 1 x 1
+        x = torch.flatten(x, 1)
+        # N x 768
+        x = self.fc(x)
+        # N x num_classes
+        return x
+
+
+class BasicConv2d(nn.Module):
+
+    def __init__(self, in_channels, out_channels, **kwargs):
+        super(BasicConv2d, self).__init__()
+        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
+        self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
+
+    def forward(self, x):
+        x = self.conv(x)
+        x = self.bn(x)
+        return F.relu(x, inplace=True)
+
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/models/mnasnet.html b/docs/stable/_modules/torchvision/models/mnasnet.html
new file mode 100644
index 000000000000..18798ecd0316
--- /dev/null
+++ b/docs/stable/_modules/torchvision/models/mnasnet.html
@@ -0,0 +1,721 @@
+torchvision.models.mnasnet — PyTorch master documentation

    Source code for torchvision.models.mnasnet

    +import math
    +
    +import torch
    +import torch.nn as nn
    +from .utils import load_state_dict_from_url
    +
    +__all__ = ['MNASNet', 'mnasnet0_5', 'mnasnet0_75', 'mnasnet1_0', 'mnasnet1_3']
    +
    +_MODEL_URLS = {
    +    "mnasnet0_5":
    +    "https://download.pytorch.org/models/mnasnet0.5_top1_67.592-7c6cb539b9.pth",
    +    "mnasnet0_75": None,
    +    "mnasnet1_0":
    +    "https://download.pytorch.org/models/mnasnet1.0_top1_73.512-f206786ef8.pth",
    +    "mnasnet1_3": None
    +}
    +
+# The paper suggests a BatchNorm momentum of 0.9997 for TensorFlow. PyTorch
+# defines momentum the opposite way, so the equivalent value is 1 - 0.9997.
    +_BN_MOMENTUM = 1 - 0.9997
    +
    +
    +class _InvertedResidual(nn.Module):
    +
    +    def __init__(self, in_ch, out_ch, kernel_size, stride, expansion_factor,
    +                 bn_momentum=0.1):
    +        super(_InvertedResidual, self).__init__()
    +        assert stride in [1, 2]
    +        assert kernel_size in [3, 5]
    +        mid_ch = in_ch * expansion_factor
    +        self.apply_residual = (in_ch == out_ch and stride == 1)
    +        self.layers = nn.Sequential(
    +            # Pointwise
    +            nn.Conv2d(in_ch, mid_ch, 1, bias=False),
    +            nn.BatchNorm2d(mid_ch, momentum=bn_momentum),
    +            nn.ReLU(inplace=True),
    +            # Depthwise
    +            nn.Conv2d(mid_ch, mid_ch, kernel_size, padding=kernel_size // 2,
    +                      stride=stride, groups=mid_ch, bias=False),
    +            nn.BatchNorm2d(mid_ch, momentum=bn_momentum),
    +            nn.ReLU(inplace=True),
    +            # Linear pointwise. Note that there's no activation.
    +            nn.Conv2d(mid_ch, out_ch, 1, bias=False),
    +            nn.BatchNorm2d(out_ch, momentum=bn_momentum))
    +
    +    def forward(self, input):
    +        if self.apply_residual:
    +            return self.layers(input) + input
    +        else:
    +            return self.layers(input)
    +
    +
    +def _stack(in_ch, out_ch, kernel_size, stride, exp_factor, repeats,
    +           bn_momentum):
    +    """ Creates a stack of inverted residuals. """
    +    assert repeats >= 1
    +    # First one has no skip, because feature map size changes.
    +    first = _InvertedResidual(in_ch, out_ch, kernel_size, stride, exp_factor,
    +                              bn_momentum=bn_momentum)
    +    remaining = []
    +    for _ in range(1, repeats):
    +        remaining.append(
    +            _InvertedResidual(out_ch, out_ch, kernel_size, 1, exp_factor,
    +                              bn_momentum=bn_momentum))
    +    return nn.Sequential(first, *remaining)
    +
    +
    +def _round_to_multiple_of(val, divisor, round_up_bias=0.9):
    +    """ Asymmetric rounding to make `val` divisible by `divisor`. With default
    +    bias, will round up, unless the number is no more than 10% greater than the
    +    smaller divisible value, i.e. (83, 8) -> 80, but (84, 8) -> 88. """
    +    assert 0.0 < round_up_bias < 1.0
    +    new_val = max(divisor, int(val + divisor / 2) // divisor * divisor)
    +    return new_val if new_val >= round_up_bias * val else new_val + divisor
    +
    +
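+# Illustrative checks of the rounding rule above (hypothetical additions; the
+# values are taken from the docstring example):
+assert _round_to_multiple_of(83, 8) == 80
+assert _round_to_multiple_of(84, 8) == 88
+
+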
    +def _scale_depths(depths, alpha):
    +    """ Scales tensor depths as in reference MobileNet code, prefers rouding up
    +    rather than down. """
    +    return [_round_to_multiple_of(depth * alpha, 8) for depth in depths]
    +
    +
    +class MNASNet(torch.nn.Module):
    +    """ MNASNet, as described in https://arxiv.org/pdf/1807.11626.pdf.
+    >>> model = MNASNet(1.0, num_classes=1000)
+    >>> x = torch.rand(1, 3, 224, 224)
+    >>> y = model(x)
+    >>> y.dim()
+    2
    +    >>> y.nelement()
    +    1000
    +    """
    +
    +    def __init__(self, alpha, num_classes=1000, dropout=0.2):
    +        super(MNASNet, self).__init__()
    +        depths = _scale_depths([24, 40, 80, 96, 192, 320], alpha)
    +        layers = [
    +            # First layer: regular conv.
    +            nn.Conv2d(3, 32, 3, padding=1, stride=2, bias=False),
    +            nn.BatchNorm2d(32, momentum=_BN_MOMENTUM),
    +            nn.ReLU(inplace=True),
    +            # Depthwise separable, no skip.
    +            nn.Conv2d(32, 32, 3, padding=1, stride=1, groups=32, bias=False),
    +            nn.BatchNorm2d(32, momentum=_BN_MOMENTUM),
    +            nn.ReLU(inplace=True),
    +            nn.Conv2d(32, 16, 1, padding=0, stride=1, bias=False),
    +            nn.BatchNorm2d(16, momentum=_BN_MOMENTUM),
    +            # MNASNet blocks: stacks of inverted residuals.
    +            _stack(16, depths[0], 3, 2, 3, 3, _BN_MOMENTUM),
    +            _stack(depths[0], depths[1], 5, 2, 3, 3, _BN_MOMENTUM),
    +            _stack(depths[1], depths[2], 5, 2, 6, 3, _BN_MOMENTUM),
    +            _stack(depths[2], depths[3], 3, 1, 6, 2, _BN_MOMENTUM),
    +            _stack(depths[3], depths[4], 5, 2, 6, 4, _BN_MOMENTUM),
    +            _stack(depths[4], depths[5], 3, 1, 6, 1, _BN_MOMENTUM),
    +            # Final mapping to classifier input.
    +            nn.Conv2d(depths[5], 1280, 1, padding=0, stride=1, bias=False),
    +            nn.BatchNorm2d(1280, momentum=_BN_MOMENTUM),
    +            nn.ReLU(inplace=True),
    +        ]
    +        self.layers = nn.Sequential(*layers)
    +        self.classifier = nn.Sequential(nn.Dropout(p=dropout, inplace=True),
    +                                        nn.Linear(1280, num_classes))
    +        self._initialize_weights()
    +
    +    def forward(self, x):
    +        x = self.layers(x)
    +        # Equivalent to global avgpool and removing H and W dimensions.
    +        x = x.mean([2, 3])
    +        return self.classifier(x)
    +
    +    def _initialize_weights(self):
    +        for m in self.modules():
    +            if isinstance(m, nn.Conv2d):
    +                nn.init.kaiming_normal_(m.weight, mode="fan_out",
    +                                        nonlinearity="relu")
    +                if m.bias is not None:
    +                    nn.init.zeros_(m.bias)
    +            elif isinstance(m, nn.BatchNorm2d):
    +                nn.init.ones_(m.weight)
    +                nn.init.zeros_(m.bias)
    +            elif isinstance(m, nn.Linear):
+                nn.init.normal_(m.weight, 0.0, 0.01)  # mean 0, std 0.01
    +                nn.init.zeros_(m.bias)
    +
    +
    +def _load_pretrained(model_name, model, progress):
    +    if model_name not in _MODEL_URLS or _MODEL_URLS[model_name] is None:
    +        raise ValueError(
    +            "No checkpoint is available for model type {}".format(model_name))
    +    checkpoint_url = _MODEL_URLS[model_name]
    +    model.load_state_dict(load_state_dict_from_url(checkpoint_url, progress=progress))
    +
    +
    +
+def mnasnet0_5(pretrained=False, progress=True, **kwargs):
+    """MNASNet with depth multiplier of 0.5 from
+    `"MnasNet: Platform-Aware Neural Architecture Search for Mobile"
+    <https://arxiv.org/pdf/1807.11626.pdf>`_.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    model = MNASNet(0.5, **kwargs)
+    if pretrained:
+        _load_pretrained("mnasnet0_5", model, progress)
+    return model
+
+
+def mnasnet0_75(pretrained=False, progress=True, **kwargs):
+    """MNASNet with depth multiplier of 0.75 from
+    `"MnasNet: Platform-Aware Neural Architecture Search for Mobile"
+    <https://arxiv.org/pdf/1807.11626.pdf>`_.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    model = MNASNet(0.75, **kwargs)
+    if pretrained:
+        _load_pretrained("mnasnet0_75", model, progress)
+    return model
+
+
+def mnasnet1_0(pretrained=False, progress=True, **kwargs):
+    """MNASNet with depth multiplier of 1.0 from
+    `"MnasNet: Platform-Aware Neural Architecture Search for Mobile"
+    <https://arxiv.org/pdf/1807.11626.pdf>`_.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    model = MNASNet(1.0, **kwargs)
+    if pretrained:
+        _load_pretrained("mnasnet1_0", model, progress)
+    return model
+
+
+def mnasnet1_3(pretrained=False, progress=True, **kwargs):
+    """MNASNet with depth multiplier of 1.3 from
+    `"MnasNet: Platform-Aware Neural Architecture Search for Mobile"
+    <https://arxiv.org/pdf/1807.11626.pdf>`_.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    model = MNASNet(1.3, **kwargs)
+    if pretrained:
+        _load_pretrained("mnasnet1_3", model, progress)
+    return model
    +
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/models/mobilenet.html b/docs/stable/_modules/torchvision/models/mobilenet.html
new file mode 100644
index 000000000000..99496a04a932
--- /dev/null
+++ b/docs/stable/_modules/torchvision/models/mobilenet.html
@@ -0,0 +1,676 @@
+torchvision.models.mobilenet — PyTorch master documentation

    Source code for torchvision.models.mobilenet

    +from torch import nn
    +from .utils import load_state_dict_from_url
    +
    +
    +__all__ = ['MobileNetV2', 'mobilenet_v2']
    +
    +
    +model_urls = {
    +    'mobilenet_v2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth',
    +}
    +
    +
    +def _make_divisible(v, divisor, min_value=None):
    +    """
    +    This function is taken from the original tf repo.
+    It ensures that all layers have a channel number that is divisible by 8.
+    It can be seen here:
+    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
+    :param v: original channel count
+    :param divisor: the divisor to round to
+    :param min_value: lower bound on the result; defaults to `divisor`
+    :return: the rounded channel count
    +    """
    +    if min_value is None:
    +        min_value = divisor
    +    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    +    # Make sure that round down does not go down by more than 10%.
    +    if new_v < 0.9 * v:
    +        new_v += divisor
    +    return new_v
    +
    +
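+# Illustrative checks (hypothetical additions; the values follow the rule
+# above): 91 rounds down to 88 since 88 is within 10% of 91, while 86 rounds
+# up to the nearest multiple, 88.
+assert _make_divisible(91, 8) == 88
+assert _make_divisible(86, 8) == 88
+
+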
    +class ConvBNReLU(nn.Sequential):
    +    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
    +        padding = (kernel_size - 1) // 2
    +        super(ConvBNReLU, self).__init__(
    +            nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
    +            nn.BatchNorm2d(out_planes),
    +            nn.ReLU6(inplace=True)
    +        )
    +
    +
    +class InvertedResidual(nn.Module):
    +    def __init__(self, inp, oup, stride, expand_ratio):
    +        super(InvertedResidual, self).__init__()
    +        self.stride = stride
    +        assert stride in [1, 2]
    +
    +        hidden_dim = int(round(inp * expand_ratio))
    +        self.use_res_connect = self.stride == 1 and inp == oup
    +
    +        layers = []
    +        if expand_ratio != 1:
    +            # pw
    +            layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
    +        layers.extend([
    +            # dw
    +            ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
    +            # pw-linear
    +            nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
    +            nn.BatchNorm2d(oup),
    +        ])
    +        self.conv = nn.Sequential(*layers)
    +
    +    def forward(self, x):
    +        if self.use_res_connect:
    +            return x + self.conv(x)
    +        else:
    +            return self.conv(x)
    +
    +
    +class MobileNetV2(nn.Module):
    +    def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
    +        """
    +        MobileNet V2 main class
    +
    +        Args:
    +            num_classes (int): Number of classes
    +            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
    +            inverted_residual_setting: Network structure
+            round_nearest (int): Round the number of channels in each layer to be a multiple of this
+                number. Set to 1 to turn off rounding
    +        """
    +        super(MobileNetV2, self).__init__()
    +        block = InvertedResidual
    +        input_channel = 32
    +        last_channel = 1280
    +
    +        if inverted_residual_setting is None:
    +            inverted_residual_setting = [
+                # t: expansion factor, c: output channels, n: number of repeats, s: stride of first block
    +                [1, 16, 1, 1],
    +                [6, 24, 2, 2],
    +                [6, 32, 3, 2],
    +                [6, 64, 4, 2],
    +                [6, 96, 3, 1],
    +                [6, 160, 3, 2],
    +                [6, 320, 1, 1],
    +            ]
    +
+        # only check the first element, assuming the user knows t, c, n, s are required
+        if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
+            raise ValueError("inverted_residual_setting should be non-empty "
+                             "and each element should be a 4-element list, got {}".format(inverted_residual_setting))
    +
    +        # building first layer
    +        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
    +        self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
    +        features = [ConvBNReLU(3, input_channel, stride=2)]
    +        # building inverted residual blocks
    +        for t, c, n, s in inverted_residual_setting:
    +            output_channel = _make_divisible(c * width_mult, round_nearest)
    +            for i in range(n):
    +                stride = s if i == 0 else 1
    +                features.append(block(input_channel, output_channel, stride, expand_ratio=t))
    +                input_channel = output_channel
    +        # building last several layers
    +        features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1))
    +        # make it nn.Sequential
    +        self.features = nn.Sequential(*features)
    +
    +        # building classifier
    +        self.classifier = nn.Sequential(
    +            nn.Dropout(0.2),
    +            nn.Linear(self.last_channel, num_classes),
    +        )
    +
    +        # weight initialization
    +        for m in self.modules():
    +            if isinstance(m, nn.Conv2d):
    +                nn.init.kaiming_normal_(m.weight, mode='fan_out')
    +                if m.bias is not None:
    +                    nn.init.zeros_(m.bias)
    +            elif isinstance(m, nn.BatchNorm2d):
    +                nn.init.ones_(m.weight)
    +                nn.init.zeros_(m.bias)
    +            elif isinstance(m, nn.Linear):
    +                nn.init.normal_(m.weight, 0, 0.01)
    +                nn.init.zeros_(m.bias)
    +
    +    def forward(self, x):
    +        x = self.features(x)
    +        x = x.mean([2, 3])
    +        x = self.classifier(x)
    +        return x
    +
    +
    +
+def mobilenet_v2(pretrained=False, progress=True, **kwargs):
+    """
+    Constructs a MobileNetV2 architecture from
+    `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    model = MobileNetV2(**kwargs)
+    if pretrained:
+        state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'],
+                                              progress=progress)
+        model.load_state_dict(state_dict)
+    return model
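+
+
+# Hedged usage sketch (hypothetical helper added for illustration; `torch` is
+# imported locally since this module only imports `nn` at the top):
+def _example_features():
+    import torch
+    model = mobilenet_v2()
+    model.eval()
+    with torch.no_grad():
+        logits = model(torch.rand(1, 3, 224, 224))           # [1, 1000]
+        feats = model.features(torch.rand(1, 3, 224, 224))   # [1, 1280, 7, 7]
+    return logits.shape, feats.shape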
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/models/resnet.html b/docs/stable/_modules/torchvision/models/resnet.html
new file mode 100644
index 000000000000..1178421bab5c
--- /dev/null
+++ b/docs/stable/_modules/torchvision/models/resnet.html
@@ -0,0 +1,857 @@
+torchvision.models.resnet — PyTorch master documentation

    Source code for torchvision.models.resnet

    +import torch
    +import torch.nn as nn
    +from .utils import load_state_dict_from_url
    +
    +
    +__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
    +           'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
    +           'wide_resnet50_2', 'wide_resnet101_2']
    +
    +
    +model_urls = {
    +    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    +    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    +    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    +    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    +    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    +    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    +    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
    +    'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
    +    'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
    +}
    +
    +
    +def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    +    """3x3 convolution with padding"""
    +    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
    +                     padding=dilation, groups=groups, bias=False, dilation=dilation)
    +
    +
    +def conv1x1(in_planes, out_planes, stride=1):
    +    """1x1 convolution"""
    +    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
    +
    +
    +class BasicBlock(nn.Module):
    +    expansion = 1
    +
    +    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
    +                 base_width=64, dilation=1, norm_layer=None):
    +        super(BasicBlock, self).__init__()
    +        if norm_layer is None:
    +            norm_layer = nn.BatchNorm2d
    +        if groups != 1 or base_width != 64:
    +            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
    +        if dilation > 1:
    +            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
    +        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
    +        self.conv1 = conv3x3(inplanes, planes, stride)
    +        self.bn1 = norm_layer(planes)
    +        self.relu = nn.ReLU(inplace=True)
    +        self.conv2 = conv3x3(planes, planes)
    +        self.bn2 = norm_layer(planes)
    +        self.downsample = downsample
    +        self.stride = stride
    +
    +    def forward(self, x):
    +        identity = x
    +
    +        out = self.conv1(x)
    +        out = self.bn1(out)
    +        out = self.relu(out)
    +
    +        out = self.conv2(out)
    +        out = self.bn2(out)
    +
    +        if self.downsample is not None:
    +            identity = self.downsample(x)
    +
    +        out += identity
    +        out = self.relu(out)
    +
    +        return out
    +
    +
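As the comment in BasicBlock notes, both conv1 and the optional downsample shrink the input when stride != 1. A standalone sketch (plain nn layers rather than the helpers from this file) of why the identity branch needs a 1x1 projection:

import torch
import torch.nn as nn

x = torch.randn(1, 64, 56, 56)
# main branch: a strided 3x3 halves the spatial size and changes the channels
out = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1, bias=False)(x)
# identity branch: a 1x1/stride-2 projection so shapes match for `out += identity`
proj = nn.Sequential(nn.Conv2d(64, 128, kernel_size=1, stride=2, bias=False),
                     nn.BatchNorm2d(128))
identity = proj(x)
print(out.shape, identity.shape)  # both torch.Size([1, 128, 28, 28])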
    +class Bottleneck(nn.Module):
    +    expansion = 4
    +
    +    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
    +                 base_width=64, dilation=1, norm_layer=None):
    +        super(Bottleneck, self).__init__()
    +        if norm_layer is None:
    +            norm_layer = nn.BatchNorm2d
    +        width = int(planes * (base_width / 64.)) * groups
    +        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
    +        self.conv1 = conv1x1(inplanes, width)
    +        self.bn1 = norm_layer(width)
    +        self.conv2 = conv3x3(width, width, stride, groups, dilation)
    +        self.bn2 = norm_layer(width)
    +        self.conv3 = conv1x1(width, planes * self.expansion)
    +        self.bn3 = norm_layer(planes * self.expansion)
    +        self.relu = nn.ReLU(inplace=True)
    +        self.downsample = downsample
    +        self.stride = stride
    +
    +    def forward(self, x):
    +        identity = x
    +
    +        out = self.conv1(x)
    +        out = self.bn1(out)
    +        out = self.relu(out)
    +
    +        out = self.conv2(out)
    +        out = self.bn2(out)
    +        out = self.relu(out)
    +
    +        out = self.conv3(out)
    +        out = self.bn3(out)
    +
    +        if self.downsample is not None:
    +            identity = self.downsample(x)
    +
    +        out += identity
    +        out = self.relu(out)
    +
    +        return out
    +
    +
    +class ResNet(nn.Module):
    +
    +    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
    +                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
    +                 norm_layer=None):
    +        super(ResNet, self).__init__()
    +        if norm_layer is None:
    +            norm_layer = nn.BatchNorm2d
    +        self._norm_layer = norm_layer
    +
    +        self.inplanes = 64
    +        self.dilation = 1
    +        if replace_stride_with_dilation is None:
+            # each element in the tuple indicates whether we should replace
+            # the stride-2 downsampling with a dilated convolution instead
    +            replace_stride_with_dilation = [False, False, False]
    +        if len(replace_stride_with_dilation) != 3:
    +            raise ValueError("replace_stride_with_dilation should be None "
    +                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
    +        self.groups = groups
    +        self.base_width = width_per_group
    +        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
    +                               bias=False)
    +        self.bn1 = norm_layer(self.inplanes)
    +        self.relu = nn.ReLU(inplace=True)
    +        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    +        self.layer1 = self._make_layer(block, 64, layers[0])
    +        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
    +                                       dilate=replace_stride_with_dilation[0])
    +        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
    +                                       dilate=replace_stride_with_dilation[1])
    +        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
    +                                       dilate=replace_stride_with_dilation[2])
    +        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
    +        self.fc = nn.Linear(512 * block.expansion, num_classes)
    +
    +        for m in self.modules():
    +            if isinstance(m, nn.Conv2d):
    +                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
    +            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
    +                nn.init.constant_(m.weight, 1)
    +                nn.init.constant_(m.bias, 0)
    +
    +        # Zero-initialize the last BN in each residual branch,
    +        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
    +        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
    +        if zero_init_residual:
    +            for m in self.modules():
    +                if isinstance(m, Bottleneck):
    +                    nn.init.constant_(m.bn3.weight, 0)
    +                elif isinstance(m, BasicBlock):
    +                    nn.init.constant_(m.bn2.weight, 0)
    +
    +    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
    +        norm_layer = self._norm_layer
    +        downsample = None
    +        previous_dilation = self.dilation
    +        if dilate:
    +            self.dilation *= stride
    +            stride = 1
    +        if stride != 1 or self.inplanes != planes * block.expansion:
    +            downsample = nn.Sequential(
    +                conv1x1(self.inplanes, planes * block.expansion, stride),
    +                norm_layer(planes * block.expansion),
    +            )
    +
    +        layers = []
    +        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
    +                            self.base_width, previous_dilation, norm_layer))
    +        self.inplanes = planes * block.expansion
    +        for _ in range(1, blocks):
    +            layers.append(block(self.inplanes, planes, groups=self.groups,
    +                                base_width=self.base_width, dilation=self.dilation,
    +                                norm_layer=norm_layer))
    +
    +        return nn.Sequential(*layers)
    +
    +    def forward(self, x):
    +        x = self.conv1(x)
    +        x = self.bn1(x)
    +        x = self.relu(x)
    +        x = self.maxpool(x)
    +
    +        x = self.layer1(x)
    +        x = self.layer2(x)
    +        x = self.layer3(x)
    +        x = self.layer4(x)
    +
    +        x = self.avgpool(x)
    +        x = torch.flatten(x, 1)
    +        x = self.fc(x)
    +
    +        return x
    +
    +
    +def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    +    model = ResNet(block, layers, **kwargs)
    +    if pretrained:
    +        state_dict = load_state_dict_from_url(model_urls[arch],
    +                                              progress=progress)
    +        model.load_state_dict(state_dict)
    +    return model
    +
    +
    +
+def resnet18(pretrained=False, progress=True, **kwargs):
+    r"""ResNet-18 model from
+    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
+                   **kwargs)
+
+
+def resnet34(pretrained=False, progress=True, **kwargs):
+    r"""ResNet-34 model from
+    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
+                   **kwargs)
+
+
+def resnet50(pretrained=False, progress=True, **kwargs):
+    r"""ResNet-50 model from
+    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
+                   **kwargs)
+
+
+def resnet101(pretrained=False, progress=True, **kwargs):
+    r"""ResNet-101 model from
+    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
+                   **kwargs)
+
+
+def resnet152(pretrained=False, progress=True, **kwargs):
+    r"""ResNet-152 model from
+    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
+                   **kwargs)
+
+
+def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
+    r"""ResNeXt-50 32x4d model from
+    `"Aggregated Residual Transformations for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    kwargs['groups'] = 32
+    kwargs['width_per_group'] = 4
+    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
+                   pretrained, progress, **kwargs)
+
+
+def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
+    r"""ResNeXt-101 32x8d model from
+    `"Aggregated Residual Transformations for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    kwargs['groups'] = 32
+    kwargs['width_per_group'] = 8
+    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
+                   pretrained, progress, **kwargs)
+
+
+def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
+    r"""Wide ResNet-50-2 model from
+    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
+
+    The model is the same as ResNet except that the number of channels in the
+    bottleneck is twice as large in every block. The number of channels in the
+    outer 1x1 convolutions is the same, e.g. the last block in ResNet-50 has
+    2048-512-2048 channels, while in Wide ResNet-50-2 it has 2048-1024-2048.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    kwargs['width_per_group'] = 64 * 2
+    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
+                   pretrained, progress, **kwargs)
+
+
+def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
+    r"""Wide ResNet-101-2 model from
+    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
+
+    The model is the same as ResNet except that the number of channels in the
+    bottleneck is twice as large in every block. The number of channels in the
+    outer 1x1 convolutions is the same, e.g. the last block in ResNet-50 has
+    2048-512-2048 channels, while in Wide ResNet-50-2 it has 2048-1024-2048.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    kwargs['width_per_group'] = 64 * 2
+    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
+                   pretrained, progress, **kwargs)
    +
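A hedged sketch of how the ResNeXt/Wide-ResNet keyword arguments above change the Bottleneck width (assumes torchvision is installed; the printed shapes follow the width formula in Bottleneck.__init__):

import torchvision.models as models

# groups=32, width_per_group=4: the layer1 blocks use
#   width = int(64 * (4 / 64.)) * 32 = 128 grouped channels
resnext = models.resnext50_32x4d(pretrained=False)
print(resnext.layer1[0].conv2)  # Conv2d(128, 128, ..., groups=32)

# width_per_group=128 doubles the bottleneck width relative to resnet50
wide = models.wide_resnet50_2(pretrained=False)
print(wide.layer1[0].conv2)     # Conv2d(128, 128, ...) vs 64 in plain ResNet-50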
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/models/segmentation/segmentation.html b/docs/stable/_modules/torchvision/models/segmentation/segmentation.html
new file mode 100644
index 000000000000..8dc77158554d
--- /dev/null
+++ b/docs/stable/_modules/torchvision/models/segmentation/segmentation.html
@@ -0,0 +1,620 @@

    Source code for torchvision.models.segmentation.segmentation

    +from .._utils import IntermediateLayerGetter
    +from ..utils import load_state_dict_from_url
    +from .. import resnet
    +from .deeplabv3 import DeepLabHead, DeepLabV3
    +from .fcn import FCN, FCNHead
    +
    +
    +__all__ = ['fcn_resnet50', 'fcn_resnet101', 'deeplabv3_resnet50', 'deeplabv3_resnet101']
    +
    +
    +model_urls = {
    +    'fcn_resnet50_coco': None,
    +    'fcn_resnet101_coco': 'https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth',
    +    'deeplabv3_resnet50_coco': None,
    +    'deeplabv3_resnet101_coco': 'https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth',
    +}
    +
    +
    +def _segm_resnet(name, backbone_name, num_classes, aux, pretrained_backbone=True):
    +    backbone = resnet.__dict__[backbone_name](
    +        pretrained=pretrained_backbone,
    +        replace_stride_with_dilation=[False, True, True])
    +
    +    return_layers = {'layer4': 'out'}
    +    if aux:
    +        return_layers['layer3'] = 'aux'
    +    backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
    +
    +    aux_classifier = None
    +    if aux:
    +        inplanes = 1024
    +        aux_classifier = FCNHead(inplanes, num_classes)
    +
    +    model_map = {
    +        'deeplabv3': (DeepLabHead, DeepLabV3),
    +        'fcn': (FCNHead, FCN),
    +    }
    +    inplanes = 2048
    +    classifier = model_map[name][0](inplanes, num_classes)
    +    base_model = model_map[name][1]
    +
    +    model = base_model(backbone, classifier, aux_classifier)
    +    return model
    +
    +
    +def _load_model(arch_type, backbone, pretrained, progress, num_classes, aux_loss, **kwargs):
    +    if pretrained:
    +        aux_loss = True
    +    model = _segm_resnet(arch_type, backbone, num_classes, aux_loss, **kwargs)
    +    if pretrained:
    +        arch = arch_type + '_' + backbone + '_coco'
    +        model_url = model_urls[arch]
    +        if model_url is None:
    +            raise NotImplementedError('pretrained {} is not supported as of now'.format(arch))
    +        else:
    +            state_dict = load_state_dict_from_url(model_url, progress=progress)
    +            model.load_state_dict(state_dict)
    +    return model
    +
    +
    +
+def fcn_resnet50(pretrained=False, progress=True,
+                 num_classes=21, aux_loss=None, **kwargs):
+    """Constructs a Fully-Convolutional Network model with a ResNet-50 backbone.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
+            contains the same classes as Pascal VOC
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _load_model('fcn', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs)
+
+
+def fcn_resnet101(pretrained=False, progress=True,
+                  num_classes=21, aux_loss=None, **kwargs):
+    """Constructs a Fully-Convolutional Network model with a ResNet-101 backbone.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
+            contains the same classes as Pascal VOC
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _load_model('fcn', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs)
+
+
+def deeplabv3_resnet50(pretrained=False, progress=True,
+                       num_classes=21, aux_loss=None, **kwargs):
+    """Constructs a DeepLabV3 model with a ResNet-50 backbone.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
+            contains the same classes as Pascal VOC
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _load_model('deeplabv3', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs)
+
+
+def deeplabv3_resnet101(pretrained=False, progress=True,
+                        num_classes=21, aux_loss=None, **kwargs):
+    """Constructs a DeepLabV3 model with a ResNet-101 backbone.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
+            contains the same classes as Pascal VOC
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _load_model('deeplabv3', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs)
    +
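A usage sketch for the constructors above, assuming torchvision is installed. In this release the segmentation models return a dict keyed by the return_layers mapping ('out', plus 'aux' when the auxiliary head is enabled), with logits upsampled to the input resolution:

import torch
import torchvision.models.segmentation as segmentation

# pretrained_backbone=False (forwarded through **kwargs) skips the backbone download
model = segmentation.fcn_resnet101(pretrained=False, pretrained_backbone=False)
model.eval()
with torch.no_grad():
    result = model(torch.randn(1, 3, 480, 480))
print(result['out'].shape)  # torch.Size([1, 21, 480, 480])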
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/models/shufflenetv2.html b/docs/stable/_modules/torchvision/models/shufflenetv2.html
new file mode 100644
index 000000000000..1f78d6c76142
--- /dev/null
+++ b/docs/stable/_modules/torchvision/models/shufflenetv2.html
@@ -0,0 +1,715 @@

    Source code for torchvision.models.shufflenetv2

    +import torch
    +import torch.nn as nn
    +from .utils import load_state_dict_from_url
    +
    +
    +__all__ = [
    +    'ShuffleNetV2', 'shufflenet_v2_x0_5', 'shufflenet_v2_x1_0',
    +    'shufflenet_v2_x1_5', 'shufflenet_v2_x2_0'
    +]
    +
    +model_urls = {
    +    'shufflenetv2_x0.5': 'https://download.pytorch.org/models/shufflenetv2_x0.5-f707e7126e.pth',
    +    'shufflenetv2_x1.0': 'https://download.pytorch.org/models/shufflenetv2_x1-5666bf0f80.pth',
    +    'shufflenetv2_x1.5': None,
    +    'shufflenetv2_x2.0': None,
    +}
    +
    +
    +def channel_shuffle(x, groups):
    +    batchsize, num_channels, height, width = x.data.size()
    +    channels_per_group = num_channels // groups
    +
    +    # reshape
    +    x = x.view(batchsize, groups,
    +               channels_per_group, height, width)
    +
    +    x = torch.transpose(x, 1, 2).contiguous()
    +
    +    # flatten
    +    x = x.view(batchsize, -1, height, width)
    +
    +    return x
    +
    +
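A tiny standalone sketch of what channel_shuffle does, tracing four labelled channels through the reshape/transpose above:

import torch

x = torch.arange(4.).view(1, 4, 1, 1)  # channels labelled 0, 1, 2, 3
# groups=2: view as (N, groups, channels_per_group, H, W), swap the two
# channel axes, then flatten back -- interleaving the groups
y = x.view(1, 2, 2, 1, 1).transpose(1, 2).contiguous().view(1, 4, 1, 1)
print(y.flatten().tolist())  # [0.0, 2.0, 1.0, 3.0]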
    +class InvertedResidual(nn.Module):
    +    def __init__(self, inp, oup, stride):
    +        super(InvertedResidual, self).__init__()
    +
    +        if not (1 <= stride <= 3):
    +            raise ValueError('illegal stride value')
    +        self.stride = stride
    +
    +        branch_features = oup // 2
    +        assert (self.stride != 1) or (inp == branch_features << 1)
    +
    +        if self.stride > 1:
    +            self.branch1 = nn.Sequential(
    +                self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
    +                nn.BatchNorm2d(inp),
    +                nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
    +                nn.BatchNorm2d(branch_features),
    +                nn.ReLU(inplace=True),
    +            )
    +
    +        self.branch2 = nn.Sequential(
    +            nn.Conv2d(inp if (self.stride > 1) else branch_features,
    +                      branch_features, kernel_size=1, stride=1, padding=0, bias=False),
    +            nn.BatchNorm2d(branch_features),
    +            nn.ReLU(inplace=True),
    +            self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1),
    +            nn.BatchNorm2d(branch_features),
    +            nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
    +            nn.BatchNorm2d(branch_features),
    +            nn.ReLU(inplace=True),
    +        )
    +
    +    @staticmethod
    +    def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False):
    +        return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)
    +
    +    def forward(self, x):
    +        if self.stride == 1:
    +            x1, x2 = x.chunk(2, dim=1)
    +            out = torch.cat((x1, self.branch2(x2)), dim=1)
    +        else:
    +            out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
    +
    +        out = channel_shuffle(out, 2)
    +
    +        return out
    +
    +
    +class ShuffleNetV2(nn.Module):
    +    def __init__(self, stages_repeats, stages_out_channels, num_classes=1000):
    +        super(ShuffleNetV2, self).__init__()
    +
    +        if len(stages_repeats) != 3:
    +            raise ValueError('expected stages_repeats as list of 3 positive ints')
    +        if len(stages_out_channels) != 5:
    +            raise ValueError('expected stages_out_channels as list of 5 positive ints')
    +        self._stage_out_channels = stages_out_channels
    +
    +        input_channels = 3
    +        output_channels = self._stage_out_channels[0]
    +        self.conv1 = nn.Sequential(
    +            nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False),
    +            nn.BatchNorm2d(output_channels),
    +            nn.ReLU(inplace=True),
    +        )
    +        input_channels = output_channels
    +
    +        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    +
    +        stage_names = ['stage{}'.format(i) for i in [2, 3, 4]]
    +        for name, repeats, output_channels in zip(
    +                stage_names, stages_repeats, self._stage_out_channels[1:]):
    +            seq = [InvertedResidual(input_channels, output_channels, 2)]
    +            for i in range(repeats - 1):
    +                seq.append(InvertedResidual(output_channels, output_channels, 1))
    +            setattr(self, name, nn.Sequential(*seq))
    +            input_channels = output_channels
    +
    +        output_channels = self._stage_out_channels[-1]
    +        self.conv5 = nn.Sequential(
    +            nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False),
    +            nn.BatchNorm2d(output_channels),
    +            nn.ReLU(inplace=True),
    +        )
    +
    +        self.fc = nn.Linear(output_channels, num_classes)
    +
    +    def forward(self, x):
    +        x = self.conv1(x)
    +        x = self.maxpool(x)
    +        x = self.stage2(x)
    +        x = self.stage3(x)
    +        x = self.stage4(x)
    +        x = self.conv5(x)
    +        x = x.mean([2, 3])  # globalpool
    +        x = self.fc(x)
    +        return x
    +
    +
    +def _shufflenetv2(arch, pretrained, progress, *args, **kwargs):
    +    model = ShuffleNetV2(*args, **kwargs)
    +
    +    if pretrained:
    +        model_url = model_urls[arch]
    +        if model_url is None:
    +            raise NotImplementedError('pretrained {} is not supported as of now'.format(arch))
    +        else:
    +            state_dict = load_state_dict_from_url(model_url, progress=progress)
    +            model.load_state_dict(state_dict)
    +
    +    return model
    +
    +
    +
+def shufflenet_v2_x0_5(pretrained=False, progress=True, **kwargs):
+    """
+    Constructs a ShuffleNetV2 with 0.5x output channels, as described in
+    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
+    <https://arxiv.org/abs/1807.11164>`_.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _shufflenetv2('shufflenetv2_x0.5', pretrained, progress,
+                         [4, 8, 4], [24, 48, 96, 192, 1024], **kwargs)
+
+
+def shufflenet_v2_x1_0(pretrained=False, progress=True, **kwargs):
+    """
+    Constructs a ShuffleNetV2 with 1.0x output channels, as described in
+    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
+    <https://arxiv.org/abs/1807.11164>`_.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _shufflenetv2('shufflenetv2_x1.0', pretrained, progress,
+                         [4, 8, 4], [24, 116, 232, 464, 1024], **kwargs)
+
+
+def shufflenet_v2_x1_5(pretrained=False, progress=True, **kwargs):
+    """
+    Constructs a ShuffleNetV2 with 1.5x output channels, as described in
+    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
+    <https://arxiv.org/abs/1807.11164>`_.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _shufflenetv2('shufflenetv2_x1.5', pretrained, progress,
+                         [4, 8, 4], [24, 176, 352, 704, 1024], **kwargs)
+
+
+def shufflenet_v2_x2_0(pretrained=False, progress=True, **kwargs):
+    """
+    Constructs a ShuffleNetV2 with 2.0x output channels, as described in
+    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
+    <https://arxiv.org/abs/1807.11164>`_.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _shufflenetv2('shufflenetv2_x2.0', pretrained, progress,
+                         [4, 8, 4], [24, 244, 488, 976, 2048], **kwargs)
    +
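A quick sketch of what the two list arguments above mean (assumes torchvision is installed): the first list is stages_repeats, the second stages_out_channels, and the width-multiplier variants only change the latter.

import torchvision.models as models

model = models.shufflenet_v2_x1_0(pretrained=False)  # [24, 116, 232, 464, 1024]
print(model.conv5)  # ends in a 1x1 conv producing the final 1024 channels
print(len(model.stage2), len(model.stage3), len(model.stage4))  # 4 8 4 blocks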
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/models/squeezenet.html b/docs/stable/_modules/torchvision/models/squeezenet.html
new file mode 100644
index 000000000000..675c12d1923f
--- /dev/null
+++ b/docs/stable/_modules/torchvision/models/squeezenet.html
@@ -0,0 +1,651 @@

    Source code for torchvision.models.squeezenet

    +import torch
    +import torch.nn as nn
    +import torch.nn.init as init
    +from .utils import load_state_dict_from_url
    +
    +__all__ = ['SqueezeNet', 'squeezenet1_0', 'squeezenet1_1']
    +
    +model_urls = {
    +    'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
    +    'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',
    +}
    +
    +
    +class Fire(nn.Module):
    +
    +    def __init__(self, inplanes, squeeze_planes,
    +                 expand1x1_planes, expand3x3_planes):
    +        super(Fire, self).__init__()
    +        self.inplanes = inplanes
    +        self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
    +        self.squeeze_activation = nn.ReLU(inplace=True)
    +        self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes,
    +                                   kernel_size=1)
    +        self.expand1x1_activation = nn.ReLU(inplace=True)
    +        self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes,
    +                                   kernel_size=3, padding=1)
    +        self.expand3x3_activation = nn.ReLU(inplace=True)
    +
    +    def forward(self, x):
    +        x = self.squeeze_activation(self.squeeze(x))
    +        return torch.cat([
    +            self.expand1x1_activation(self.expand1x1(x)),
    +            self.expand3x3_activation(self.expand3x3(x))
    +        ], 1)
    +
    +
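A standalone sketch of the Fire module's squeeze/expand shape arithmetic, using plain nn layers that mirror the fields above:

import torch
import torch.nn as nn

squeeze = nn.Conv2d(96, 16, kernel_size=1)               # squeeze_planes=16
expand1x1 = nn.Conv2d(16, 64, kernel_size=1)             # expand1x1_planes=64
expand3x3 = nn.Conv2d(16, 64, kernel_size=3, padding=1)  # expand3x3_planes=64

x = torch.randn(1, 96, 54, 54)
s = torch.relu(squeeze(x))
out = torch.cat([torch.relu(expand1x1(s)), torch.relu(expand3x3(s))], 1)
print(out.shape)  # torch.Size([1, 128, 54, 54]): 64 + 64 concatenated channels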
    +class SqueezeNet(nn.Module):
    +
    +    def __init__(self, version='1_0', num_classes=1000):
    +        super(SqueezeNet, self).__init__()
    +        self.num_classes = num_classes
    +        if version == '1_0':
    +            self.features = nn.Sequential(
    +                nn.Conv2d(3, 96, kernel_size=7, stride=2),
    +                nn.ReLU(inplace=True),
    +                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
    +                Fire(96, 16, 64, 64),
    +                Fire(128, 16, 64, 64),
    +                Fire(128, 32, 128, 128),
    +                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
    +                Fire(256, 32, 128, 128),
    +                Fire(256, 48, 192, 192),
    +                Fire(384, 48, 192, 192),
    +                Fire(384, 64, 256, 256),
    +                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
    +                Fire(512, 64, 256, 256),
    +            )
    +        elif version == '1_1':
    +            self.features = nn.Sequential(
    +                nn.Conv2d(3, 64, kernel_size=3, stride=2),
    +                nn.ReLU(inplace=True),
    +                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
    +                Fire(64, 16, 64, 64),
    +                Fire(128, 16, 64, 64),
    +                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
    +                Fire(128, 32, 128, 128),
    +                Fire(256, 32, 128, 128),
    +                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
    +                Fire(256, 48, 192, 192),
    +                Fire(384, 48, 192, 192),
    +                Fire(384, 64, 256, 256),
    +                Fire(512, 64, 256, 256),
    +            )
    +        else:
    +            # FIXME: Is this needed? SqueezeNet should only be called from the
    +            # FIXME: squeezenet1_x() functions
    +            # FIXME: This checking is not done for the other models
+            raise ValueError("Unsupported SqueezeNet version {version}: "
+                             "1_0 or 1_1 expected".format(version=version))
    +
    +        # Final convolution is initialized differently from the rest
    +        final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1)
    +        self.classifier = nn.Sequential(
    +            nn.Dropout(p=0.5),
    +            final_conv,
    +            nn.ReLU(inplace=True),
    +            nn.AdaptiveAvgPool2d((1, 1))
    +        )
    +
    +        for m in self.modules():
    +            if isinstance(m, nn.Conv2d):
    +                if m is final_conv:
    +                    init.normal_(m.weight, mean=0.0, std=0.01)
    +                else:
    +                    init.kaiming_uniform_(m.weight)
    +                if m.bias is not None:
    +                    init.constant_(m.bias, 0)
    +
    +    def forward(self, x):
    +        x = self.features(x)
    +        x = self.classifier(x)
    +        return torch.flatten(x, 1)
    +
    +
    +def _squeezenet(version, pretrained, progress, **kwargs):
    +    model = SqueezeNet(version, **kwargs)
    +    if pretrained:
    +        arch = 'squeezenet' + version
    +        state_dict = load_state_dict_from_url(model_urls[arch],
    +                                              progress=progress)
    +        model.load_state_dict(state_dict)
    +    return model
    +
    +
    +
+def squeezenet1_0(pretrained=False, progress=True, **kwargs):
+    r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level
+    accuracy with 50x fewer parameters and <0.5MB model size"
+    <https://arxiv.org/abs/1602.07360>`_ paper.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _squeezenet('1_0', pretrained, progress, **kwargs)
+
+
+def squeezenet1_1(pretrained=False, progress=True, **kwargs):
+    r"""SqueezeNet 1.1 model from the `official SqueezeNet repo
+    <https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
+    SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
+    than SqueezeNet 1.0, without sacrificing accuracy.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _squeezenet('1_1', pretrained, progress, **kwargs)
    +
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/models/vgg.html b/docs/stable/_modules/torchvision/models/vgg.html
new file mode 100644
index 000000000000..b8b9a1f9aba0
--- /dev/null
+++ b/docs/stable/_modules/torchvision/models/vgg.html
@@ -0,0 +1,697 @@

    Source code for torchvision.models.vgg

    +import torch
    +import torch.nn as nn
    +from .utils import load_state_dict_from_url
    +
    +
    +__all__ = [
    +    'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
    +    'vgg19_bn', 'vgg19',
    +]
    +
    +
    +model_urls = {
    +    'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
    +    'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
    +    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    +    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
    +    'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
    +    'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
    +    'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
    +    'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
    +}
    +
    +
    +class VGG(nn.Module):
    +
    +    def __init__(self, features, num_classes=1000, init_weights=True):
    +        super(VGG, self).__init__()
    +        self.features = features
    +        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
    +        self.classifier = nn.Sequential(
    +            nn.Linear(512 * 7 * 7, 4096),
    +            nn.ReLU(True),
    +            nn.Dropout(),
    +            nn.Linear(4096, 4096),
    +            nn.ReLU(True),
    +            nn.Dropout(),
    +            nn.Linear(4096, num_classes),
    +        )
    +        if init_weights:
    +            self._initialize_weights()
    +
    +    def forward(self, x):
    +        x = self.features(x)
    +        x = self.avgpool(x)
    +        x = torch.flatten(x, 1)
    +        x = self.classifier(x)
    +        return x
    +
    +    def _initialize_weights(self):
    +        for m in self.modules():
    +            if isinstance(m, nn.Conv2d):
    +                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
    +                if m.bias is not None:
    +                    nn.init.constant_(m.bias, 0)
    +            elif isinstance(m, nn.BatchNorm2d):
    +                nn.init.constant_(m.weight, 1)
    +                nn.init.constant_(m.bias, 0)
    +            elif isinstance(m, nn.Linear):
    +                nn.init.normal_(m.weight, 0, 0.01)
    +                nn.init.constant_(m.bias, 0)
    +
    +
    +def make_layers(cfg, batch_norm=False):
    +    layers = []
    +    in_channels = 3
    +    for v in cfg:
    +        if v == 'M':
    +            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
    +        else:
    +            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
    +            if batch_norm:
    +                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
    +            else:
    +                layers += [conv2d, nn.ReLU(inplace=True)]
    +            in_channels = v
    +    return nn.Sequential(*layers)
    +
    +
    +cfgs = {
    +    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    +    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    +    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    +    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
    +}
    +
    +
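A standalone sketch of how a cfg list unrolls into conv/pool layers; this re-implements the make_layers loop above on configuration 'A' (VGG-11: 8 convs plus the 3 classifier linears):

import torch.nn as nn

cfg_a = [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']
layers, in_channels = [], 3
for v in cfg_a:
    if v == 'M':
        layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
    else:
        layers += [nn.Conv2d(in_channels, v, kernel_size=3, padding=1),
                   nn.ReLU(inplace=True)]
        in_channels = v
features = nn.Sequential(*layers)
print(sum(isinstance(m, nn.Conv2d) for m in features))  # 8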
    +def _vgg(arch, cfg, batch_norm, pretrained, progress, **kwargs):
    +    if pretrained:
    +        kwargs['init_weights'] = False
    +    model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
    +    if pretrained:
    +        state_dict = load_state_dict_from_url(model_urls[arch],
    +                                              progress=progress)
    +        model.load_state_dict(state_dict)
    +    return model
    +
    +
    +
+def vgg11(pretrained=False, progress=True, **kwargs):
+    r"""VGG 11-layer model (configuration "A") from
+    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)
+
+
+def vgg11_bn(pretrained=False, progress=True, **kwargs):
+    r"""VGG 11-layer model (configuration "A") with batch normalization, from
+    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _vgg('vgg11_bn', 'A', True, pretrained, progress, **kwargs)
+
+
+def vgg13(pretrained=False, progress=True, **kwargs):
+    r"""VGG 13-layer model (configuration "B") from
+    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs)
+
+
+def vgg13_bn(pretrained=False, progress=True, **kwargs):
+    r"""VGG 13-layer model (configuration "B") with batch normalization, from
+    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _vgg('vgg13_bn', 'B', True, pretrained, progress, **kwargs)
+
+
+def vgg16(pretrained=False, progress=True, **kwargs):
+    r"""VGG 16-layer model (configuration "D") from
+    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)
+
+
+def vgg16_bn(pretrained=False, progress=True, **kwargs):
+    r"""VGG 16-layer model (configuration "D") with batch normalization, from
+    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _vgg('vgg16_bn', 'D', True, pretrained, progress, **kwargs)
+
+
+def vgg19(pretrained=False, progress=True, **kwargs):
+    r"""VGG 19-layer model (configuration "E") from
+    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs)
+
+
+def vgg19_bn(pretrained=False, progress=True, **kwargs):
+    r"""VGG 19-layer model (configuration "E") with batch normalization, from
+    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs)
    +
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/transforms/functional.html b/docs/stable/_modules/torchvision/transforms/functional.html
new file mode 100644
index 000000000000..14e35a37f2ca
--- /dev/null
+++ b/docs/stable/_modules/torchvision/transforms/functional.html
@@ -0,0 +1,1357 @@

    Source code for torchvision.transforms.functional

    +from __future__ import division
    +import torch
    +import sys
    +import math
    +from PIL import Image, ImageOps, ImageEnhance, PILLOW_VERSION
    +try:
    +    import accimage
    +except ImportError:
    +    accimage = None
    +import numpy as np
    +import numbers
    +import collections
    +import warnings
    +
    +if sys.version_info < (3, 3):
    +    Sequence = collections.Sequence
    +    Iterable = collections.Iterable
    +else:
    +    Sequence = collections.abc.Sequence
    +    Iterable = collections.abc.Iterable
    +
    +
    +def _is_pil_image(img):
    +    if accimage is not None:
    +        return isinstance(img, (Image.Image, accimage.Image))
    +    else:
    +        return isinstance(img, Image.Image)
    +
    +
    +def _is_tensor_image(img):
    +    return torch.is_tensor(img) and img.ndimension() == 3
    +
    +
    +def _is_numpy(img):
    +    return isinstance(img, np.ndarray)
    +
    +
    +def _is_numpy_image(img):
    +    return img.ndim in {2, 3}
    +
    +
    +
+def to_tensor(pic):
+    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
+
+    See ``ToTensor`` for more details.
+
+    Args:
+        pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
+
+    Returns:
+        Tensor: Converted image.
+    """
+    if not(_is_pil_image(pic) or _is_numpy(pic)):
+        raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))
+
+    if _is_numpy(pic) and not _is_numpy_image(pic):
+        raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))
+
+    if isinstance(pic, np.ndarray):
+        # handle numpy array
+        if pic.ndim == 2:
+            pic = pic[:, :, None]
+
+        img = torch.from_numpy(pic.transpose((2, 0, 1)))
+        # backward compatibility
+        if isinstance(img, torch.ByteTensor):
+            return img.float().div(255)
+        else:
+            return img
+
+    if accimage is not None and isinstance(pic, accimage.Image):
+        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
+        pic.copyto(nppic)
+        return torch.from_numpy(nppic)
+
+    # handle PIL Image
+    if pic.mode == 'I':
+        img = torch.from_numpy(np.array(pic, np.int32, copy=False))
+    elif pic.mode == 'I;16':
+        img = torch.from_numpy(np.array(pic, np.int16, copy=False))
+    elif pic.mode == 'F':
+        img = torch.from_numpy(np.array(pic, np.float32, copy=False))
+    elif pic.mode == '1':
+        img = 255 * torch.from_numpy(np.array(pic, np.uint8, copy=False))
+    else:
+        img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
+    # PIL image mode: L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK
+    if pic.mode == 'YCbCr':
+        nchannel = 3
+    elif pic.mode == 'I;16':
+        nchannel = 1
+    else:
+        nchannel = len(pic.mode)
+    img = img.view(pic.size[1], pic.size[0], nchannel)
+    # put it from HWC to CHW format
+    # yikes, this transpose takes 80% of the loading time/CPU
+    img = img.transpose(0, 1).transpose(0, 2).contiguous()
+    if isinstance(img, torch.ByteTensor):
+        return img.float().div(255)
+    else:
+        return img
+
+
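A minimal sketch of the uint8-to-float path above, assuming NumPy and torchvision are installed:

import numpy as np
from torchvision.transforms import functional as F

img = np.zeros((4, 4, 3), dtype=np.uint8)  # HWC, as PIL/NumPy images are laid out
img[0, 0] = 255
t = F.to_tensor(img)            # -> CHW float tensor scaled into [0, 1]
print(t.shape, float(t.max()))  # torch.Size([3, 4, 4]) 1.0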
+def to_pil_image(pic, mode=None):
+    """Convert a tensor or an ndarray to PIL Image.
+
+    See :class:`~torchvision.transforms.ToPILImage` for more details.
+
+    Args:
+        pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
+        mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
+
+    .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
+
+    Returns:
+        PIL Image: Image converted to PIL Image.
+    """
+    if not(isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):
+        raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))
+
+    elif isinstance(pic, torch.Tensor):
+        if pic.ndimension() not in {2, 3}:
+            raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndimension()))
+
+        elif pic.ndimension() == 2:
+            # if 2D image, add channel dimension (CHW)
+            pic = pic.unsqueeze(0)
+
+    elif isinstance(pic, np.ndarray):
+        if pic.ndim not in {2, 3}:
+            raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))
+
+        elif pic.ndim == 2:
+            # if 2D image, add channel dimension (HWC)
+            pic = np.expand_dims(pic, 2)
+
+    npimg = pic
+    if isinstance(pic, torch.FloatTensor) and mode != 'F':
+        pic = pic.mul(255).byte()
+    if isinstance(pic, torch.Tensor):
+        npimg = np.transpose(pic.numpy(), (1, 2, 0))
+
+    if not isinstance(npimg, np.ndarray):
+        raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' +
+                        'not {}'.format(type(npimg)))
+
+    if npimg.shape[2] == 1:
+        expected_mode = None
+        npimg = npimg[:, :, 0]
+        if npimg.dtype == np.uint8:
+            expected_mode = 'L'
+        elif npimg.dtype == np.int16:
+            expected_mode = 'I;16'
+        elif npimg.dtype == np.int32:
+            expected_mode = 'I'
+        elif npimg.dtype == np.float32:
+            expected_mode = 'F'
+        if mode is not None and mode != expected_mode:
+            raise ValueError("Incorrect mode ({}) supplied for input type {}. Should be {}"
+                             .format(mode, npimg.dtype, expected_mode))
+        mode = expected_mode
+
+    elif npimg.shape[2] == 2:
+        permitted_2_channel_modes = ['LA']
+        if mode is not None and mode not in permitted_2_channel_modes:
+            raise ValueError("Only modes {} are supported for 2D inputs".format(permitted_2_channel_modes))
+
+        if mode is None and npimg.dtype == np.uint8:
+            mode = 'LA'
+
+    elif npimg.shape[2] == 4:
+        permitted_4_channel_modes = ['RGBA', 'CMYK', 'RGBX']
+        if mode is not None and mode not in permitted_4_channel_modes:
+            raise ValueError("Only modes {} are supported for 4D inputs".format(permitted_4_channel_modes))
+
+        if mode is None and npimg.dtype == np.uint8:
+            mode = 'RGBA'
+    else:
+        permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV']
+        if mode is not None and mode not in permitted_3_channel_modes:
+            raise ValueError("Only modes {} are supported for 3D inputs".format(permitted_3_channel_modes))
+        if mode is None and npimg.dtype == np.uint8:
+            mode = 'RGB'
+
+    if mode is None:
+        raise TypeError('Input type {} is not supported'.format(npimg.dtype))
+
+    return Image.fromarray(npimg, mode=mode)
+
+
+def normalize(tensor, mean, std, inplace=False):
+    """Normalize a tensor image with mean and standard deviation.
+
+    .. note::
+        This transform acts out of place by default, i.e., it does not mutate the input tensor.
+
+    See :class:`~torchvision.transforms.Normalize` for more details.
+
+    Args:
+        tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
+        mean (sequence): Sequence of means for each channel.
+        std (sequence): Sequence of standard deviations for each channel.
+        inplace (bool, optional): Bool to make this operation inplace.
+
+    Returns:
+        Tensor: Normalized Tensor image.
+    """
+    if not _is_tensor_image(tensor):
+        raise TypeError('tensor is not a torch image.')
+
+    if not inplace:
+        tensor = tensor.clone()
+
+    dtype = tensor.dtype
+    mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)
+    std = torch.as_tensor(std, dtype=dtype, device=tensor.device)
+    tensor.sub_(mean[:, None, None]).div_(std[:, None, None])
+    return tensor
+
+
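A usage sketch with the commonly used ImageNet statistics (illustrative values, not mandated by this function):

import torch
from torchvision.transforms import functional as F

t = torch.rand(3, 8, 8)
out = F.normalize(t, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
print(out.shape)          # unchanged (C, H, W); each channel is (t - mean) / std
print(bool(t.max() <= 1))  # True: the input is untouched since inplace=False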
+def resize(img, size, interpolation=Image.BILINEAR):
+    r"""Resize the input PIL Image to the given size.
+
+    Args:
+        img (PIL Image): Image to be resized.
+        size (sequence or int): Desired output size. If size is a sequence like
+            (h, w), the output size will be matched to this. If size is an int,
+            the smaller edge of the image will be matched to this number, maintaining
+            the aspect ratio. I.e., if height > width, then the image will be rescaled to
+            :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`
+        interpolation (int, optional): Desired interpolation. Default is
+            ``PIL.Image.BILINEAR``
+
+    Returns:
+        PIL Image: Resized image.
+    """
+    if not _is_pil_image(img):
+        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
+    if not (isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)):
+        raise TypeError('Got inappropriate size arg: {}'.format(size))
+
+    if isinstance(size, int):
+        w, h = img.size
+        if (w <= h and w == size) or (h <= w and h == size):
+            return img
+        if w < h:
+            ow = size
+            oh = int(size * h / w)
+            return img.resize((ow, oh), interpolation)
+        else:
+            oh = size
+            ow = int(size * w / h)
+            return img.resize((ow, oh), interpolation)
+    else:
+        return img.resize(size[::-1], interpolation)
+
+
+def scale(*args, **kwargs):
+    warnings.warn("The use of the transforms.Scale transform is deprecated, " +
+                  "please use transforms.Resize instead.")
+    return resize(*args, **kwargs)
+
+
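A sketch of the smaller-edge semantics when size is an int, assuming Pillow and torchvision are installed:

from PIL import Image
from torchvision.transforms import functional as F

img = Image.new('RGB', (400, 200))  # PIL sizes are (width, height)
out = F.resize(img, 100)            # the smaller edge (the height) becomes 100
print(out.size)                     # (200, 100): aspect ratio preserved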
+def pad(img, padding, fill=0, padding_mode='constant'):
+    r"""Pad the given PIL Image on all sides with the specified padding mode and fill value.
+
+    Args:
+        img (PIL Image): Image to be padded.
+        padding (int or tuple): Padding on each border. If a single int is provided this
+            is used to pad all borders. If a tuple of length 2 is provided this is the padding
+            on left/right and top/bottom respectively. If a tuple of length 4 is provided
+            this is the padding for the left, top, right and bottom borders
+            respectively.
+        fill: Pixel fill value for constant fill. Default is 0. If a tuple of
+            length 3, it is used to fill R, G, B channels respectively.
+            This value is only used when the padding_mode is constant.
+        padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
+
+            - constant: pads with a constant value, this value is specified with fill
+
+            - edge: pads with the last value on the edge of the image
+
+            - reflect: pads with reflection of image (without repeating the last value on the edge)
+
+              padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
+              will result in [3, 2, 1, 2, 3, 4, 3, 2]
+
+            - symmetric: pads with reflection of image (repeating the last value on the edge)
+
+              padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
+              will result in [2, 1, 1, 2, 3, 4, 4, 3]
+
+    Returns:
+        PIL Image: Padded image.
+    """
+    if not _is_pil_image(img):
+        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
+
+    if not isinstance(padding, (numbers.Number, tuple)):
+        raise TypeError('Got inappropriate padding arg')
+    if not isinstance(fill, (numbers.Number, str, tuple)):
+        raise TypeError('Got inappropriate fill arg')
+    if not isinstance(padding_mode, str):
+        raise TypeError('Got inappropriate padding_mode arg')
+
+    if isinstance(padding, Sequence) and len(padding) not in [2, 4]:
+        raise ValueError("Padding must be an int or a 2, or 4 element tuple, not a " +
+                         "{} element tuple".format(len(padding)))
+
+    assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \
+        'Padding mode should be either constant, edge, reflect or symmetric'
+
+    if padding_mode == 'constant':
+        if img.mode == 'P':
+            palette = img.getpalette()
+            image = ImageOps.expand(img, border=padding, fill=fill)
+            image.putpalette(palette)
+            return image
+
+        return ImageOps.expand(img, border=padding, fill=fill)
+    else:
+        if isinstance(padding, int):
+            pad_left = pad_right = pad_top = pad_bottom = padding
+        if isinstance(padding, Sequence) and len(padding) == 2:
+            pad_left = pad_right = padding[0]
+            pad_top = pad_bottom = padding[1]
+        if isinstance(padding, Sequence) and len(padding) == 4:
+            pad_left = padding[0]
+            pad_top = padding[1]
+            pad_right = padding[2]
+            pad_bottom = padding[3]
+
+        if img.mode == 'P':
+            palette = img.getpalette()
+            img = np.asarray(img)
+            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)
+            img = Image.fromarray(img)
+            img.putpalette(palette)
+            return img
+
+        img = np.asarray(img)
+        # RGB image
+        if len(img.shape) == 3:
+            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), padding_mode)
+        # Grayscale image
+        if len(img.shape) == 2:
+            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)
+
+        return Image.fromarray(img)
+
+
[docs]def crop(img, i, j, h, w): + """Crop the given PIL Image. + + Args: + img (PIL Image): Image to be cropped. + i (int): Vertical coordinate of the upper left corner of the crop box. + j (int): Horizontal coordinate of the upper left corner of the crop box. + h (int): Height of the cropped image. + w (int): Width of the cropped image. + + Returns: + PIL Image: Cropped image. + """ + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + + return img.crop((j, i, j + w, i + h))
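Note the coordinate convention: ``(i, j)`` is (row, column), i.e. (top, left), while PIL's crop box is (left, upper, right, lower). A minimal sketch:

>>> from PIL import Image
>>> img = Image.new('RGB', (100, 50))
>>> crop(img, 10, 20, 30, 40).size   # i=10 (top), j=20 (left), h=30, w=40
(40, 30)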
    + + +def center_crop(img, output_size): + if isinstance(output_size, numbers.Number): + output_size = (int(output_size), int(output_size)) + w, h = img.size + th, tw = output_size + i = int(round((h - th) / 2.)) + j = int(round((w - tw) / 2.)) + return crop(img, i, j, th, tw) + + +
    [docs]def resized_crop(img, i, j, h, w, size, interpolation=Image.BILINEAR): + """Crop the given PIL Image and resize it to desired size. + + Notably used in :class:`~torchvision.transforms.RandomResizedCrop`. + + Args: + img (PIL Image): Image to be cropped. + i (int): i in (i,j) i.e coordinates of the upper left corner + j (int): j in (i,j) i.e coordinates of the upper left corner + h (int): Height of the cropped image. + w (int): Width of the cropped image. + size (sequence or int): Desired output size. Same semantics as ``resize``. + interpolation (int, optional): Desired interpolation. Default is + ``PIL.Image.BILINEAR``. + Returns: + PIL Image: Cropped image. + """ + assert _is_pil_image(img), 'img should be PIL Image' + img = crop(img, i, j, h, w) + img = resize(img, size, interpolation) + return img
    + + +
[docs]def hflip(img): + """Horizontally flip the given PIL Image. + + Args: + img (PIL Image): Image to be flipped. + + Returns: + PIL Image: Horizontally flipped image. + """ + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + + return img.transpose(Image.FLIP_LEFT_RIGHT)
+ + +def _get_perspective_coeffs(startpoints, endpoints): + """Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms. + + In a perspective transform each pixel (x, y) in the original image gets transformed as, + (x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) ) + + Args: + startpoints: List containing [top-left, top-right, bottom-right, bottom-left] of the original image, + endpoints: List containing [top-left, top-right, bottom-right, bottom-left] of the transformed + image + Returns: + octuple (a, b, c, d, e, f, g, h) for transforming each pixel. + """ + matrix = [] + + for p1, p2 in zip(endpoints, startpoints): + matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]]) + matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]]) + + A = torch.tensor(matrix, dtype=torch.float) + B = torch.tensor(startpoints, dtype=torch.float).view(8) + res = torch.gels(B, A)[0] + return res.squeeze_(1).tolist() + + +
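As a sanity check, mapping the corners onto themselves should recover the identity coefficients (a sketch; the least-squares solve may leave floating-point noise in the result):

>>> pts = [(0, 0), (99, 0), (99, 99), (0, 99)]
>>> coeffs = _get_perspective_coeffs(pts, pts)
>>> # expect approximately [1, 0, 0, 0, 1, 0, 0, 0]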
[docs]def perspective(img, startpoints, endpoints, interpolation=Image.BICUBIC): + """Perform perspective transform of the given PIL Image. + + Args: + img (PIL Image): Image to be transformed. + startpoints: List containing [top-left, top-right, bottom-right, bottom-left] of the original image + endpoints: List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image + interpolation: Desired interpolation. Default is ``Image.BICUBIC``. + Returns: + PIL Image: Perspectively transformed Image. + """ + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + + coeffs = _get_perspective_coeffs(startpoints, endpoints) + return img.transform(img.size, Image.PERSPECTIVE, coeffs, interpolation)
    + + +
    [docs]def vflip(img): + """Vertically flip the given PIL Image. + + Args: + img (PIL Image): Image to be flipped. + + Returns: + PIL Image: Vertically flipped image. + """ + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + + return img.transpose(Image.FLIP_TOP_BOTTOM)
    + + +
[docs]def five_crop(img, size): + """Crop the given PIL Image into four corners and the central crop. + + .. Note:: + This transform returns a tuple of images and there may be a + mismatch in the number of inputs and targets your ``Dataset`` returns. + + Args: + img (PIL Image): Image to be cropped. + size (sequence or int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. + + Returns: + tuple: tuple (tl, tr, bl, br, center) + Corresponding top left, top right, bottom left, bottom right and center crop. + """ + if isinstance(size, numbers.Number): + size = (int(size), int(size)) + else: + assert len(size) == 2, "Please provide only two dimensions (h, w) for size." + + w, h = img.size + crop_h, crop_w = size + if crop_w > w or crop_h > h: + raise ValueError("Requested crop size {} is bigger than input size {}".format(size, + (h, w))) + tl = img.crop((0, 0, crop_w, crop_h)) + tr = img.crop((w - crop_w, 0, w, crop_h)) + bl = img.crop((0, h - crop_h, crop_w, h)) + br = img.crop((w - crop_w, h - crop_h, w, h)) + center = center_crop(img, (crop_h, crop_w)) + return (tl, tr, bl, br, center)
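A minimal usage sketch on a blank image:

>>> from PIL import Image
>>> crops = five_crop(Image.new('RGB', (64, 64)), 32)
>>> len(crops), crops[0].size
(5, (32, 32))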
    + + +
[docs]def ten_crop(img, size, vertical_flip=False): + r"""Crop the given PIL Image into four corners and the central crop plus the + flipped version of these (horizontal flipping is used by default). + + .. Note:: + This transform returns a tuple of images and there may be a + mismatch in the number of inputs and targets your ``Dataset`` returns. + + Args: + img (PIL Image): Image to be cropped. + size (sequence or int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. + vertical_flip (bool): Use vertical flipping instead of horizontal + + Returns: + tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip) + Corresponding top left, top right, bottom left, bottom right and center crop + and same for the flipped image. + """ + if isinstance(size, numbers.Number): + size = (int(size), int(size)) + else: + assert len(size) == 2, "Please provide only two dimensions (h, w) for size." + + first_five = five_crop(img, size) + + if vertical_flip: + img = vflip(img) + else: + img = hflip(img) + + second_five = five_crop(img, size) + return first_five + second_five
    + + +
    [docs]def adjust_brightness(img, brightness_factor): + """Adjust brightness of an Image. + + Args: + img (PIL Image): PIL Image to be adjusted. + brightness_factor (float): How much to adjust the brightness. Can be + any non negative number. 0 gives a black image, 1 gives the + original image while 2 increases the brightness by a factor of 2. + + Returns: + PIL Image: Brightness adjusted image. + """ + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + + enhancer = ImageEnhance.Brightness(img) + img = enhancer.enhance(brightness_factor) + return img
    + + +
    [docs]def adjust_contrast(img, contrast_factor): + """Adjust contrast of an Image. + + Args: + img (PIL Image): PIL Image to be adjusted. + contrast_factor (float): How much to adjust the contrast. Can be any + non negative number. 0 gives a solid gray image, 1 gives the + original image while 2 increases the contrast by a factor of 2. + + Returns: + PIL Image: Contrast adjusted image. + """ + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + + enhancer = ImageEnhance.Contrast(img) + img = enhancer.enhance(contrast_factor) + return img
    + + +
    [docs]def adjust_saturation(img, saturation_factor): + """Adjust color saturation of an image. + + Args: + img (PIL Image): PIL Image to be adjusted. + saturation_factor (float): How much to adjust the saturation. 0 will + give a black and white image, 1 will give the original image while + 2 will enhance the saturation by a factor of 2. + + Returns: + PIL Image: Saturation adjusted image. + """ + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + + enhancer = ImageEnhance.Color(img) + img = enhancer.enhance(saturation_factor) + return img
    + + +
[docs]def adjust_hue(img, hue_factor): + """Adjust hue of an image. + + The image hue is adjusted by converting the image to HSV and + cyclically shifting the intensities in the hue channel (H). + The image is then converted back to original image mode. + + `hue_factor` is the amount of shift in H channel and must be in the + interval `[-0.5, 0.5]`. + + See `Hue`_ for more details. + + .. _Hue: https://en.wikipedia.org/wiki/Hue + + Args: + img (PIL Image): PIL Image to be adjusted. + hue_factor (float): How much to shift the hue channel. Should be in + [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in + HSV space in positive and negative direction respectively. + 0 means no shift. Therefore, both -0.5 and 0.5 will give an image + with complementary colors while 0 gives the original image. + + Returns: + PIL Image: Hue adjusted image. + """ + if not(-0.5 <= hue_factor <= 0.5): + raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor)) + + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + + input_mode = img.mode + if input_mode in {'L', '1', 'I', 'F'}: + return img + + h, s, v = img.convert('HSV').split() + + np_h = np.array(h, dtype=np.uint8) + # uint8 addition takes care of rotation across boundaries + with np.errstate(over='ignore'): + np_h += np.uint8(hue_factor * 255) + h = Image.fromarray(np_h, 'L') + + img = Image.merge('HSV', (h, s, v)).convert(input_mode) + return img
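The reason plain uint8 addition is enough here is that overflow wraps modulo 256, which matches the cyclic nature of hue. A small demonstration:

>>> import numpy as np
>>> h = np.array([250], dtype=np.uint8)
>>> with np.errstate(over='ignore'):
...     h += np.uint8(10)
>>> h
array([4], dtype=uint8)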
    + + +
[docs]def adjust_gamma(img, gamma, gain=1): + r"""Perform gamma correction on an image. + + Also known as Power Law Transform. Intensities in RGB mode are adjusted + based on the following equation: + + .. math:: + I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma} + + See `Gamma Correction`_ for more details. + + .. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction + + Args: + img (PIL Image): PIL Image to be adjusted. + gamma (float): Non negative real number, same as :math:`\gamma` in the equation. + gamma larger than 1 makes the shadows darker, + while gamma smaller than 1 makes dark regions lighter. + gain (float): The constant multiplier. + """ + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + + if gamma < 0: + raise ValueError('Gamma should be a non-negative real number') + + input_mode = img.mode + img = img.convert('RGB') + + gamma_map = [255 * gain * pow(ele / 255., gamma) for ele in range(256)] * 3 + img = img.point(gamma_map) # use PIL's point-function to accelerate this part + + img = img.convert(input_mode) + return img
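Plugging a mid-gray value into the equation above shows the darkening effect of gamma > 1 (a worked example, not part of the library):

>>> gamma, gain = 2.2, 1
>>> round(255 * gain * (128 / 255.) ** gamma)   # mid-gray in, much darker out
56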
    + + +
[docs]def rotate(img, angle, resample=False, expand=False, center=None): + """Rotate the image by angle. + + Args: + img (PIL Image): PIL Image to be rotated. + angle (float or int): Rotation angle in degrees, counter-clockwise. + resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional): + An optional resampling filter. See `filters`_ for more information. + If omitted, or if the image has mode "1" or "P", it is set to ``PIL.Image.NEAREST``. + expand (bool, optional): Optional expansion flag. + If true, expands the output image to make it large enough to hold the entire rotated image. + If false or omitted, make the output image the same size as the input image. + Note that the expand flag assumes rotation around the center and no translation. + center (2-tuple, optional): Optional center of rotation. + Origin is the upper left corner. + Default is the center of the image. + + .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters + + """ + + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + + return img.rotate(angle, resample, expand, center)
+ + +def _get_inverse_affine_matrix(center, angle, translate, scale, shear): + # Helper method to compute the inverse matrix for an affine transformation + + # As explained in PIL.Image.rotate, + # we need to compute the INVERSE of the affine transformation matrix: M = T * C * RSS * C^-1 + # where T is the translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1] + # C is the translation matrix to keep the center: [1, 0, cx | 0, 1, cy | 0, 0, 1] + # RSS is the rotation with scale and shear matrix + # RSS(a, scale, shear) = [ cos(a + shear_y)*scale -sin(a + shear_x)*scale 0] + # [ sin(a + shear_y)*scale cos(a + shear_x)*scale 0] + # [ 0 0 1] + # Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1 + + angle = math.radians(angle) + if isinstance(shear, (tuple, list)) and len(shear) == 2: + shear = [math.radians(s) for s in shear] + elif isinstance(shear, numbers.Number): + shear = math.radians(shear) + shear = [shear, 0] + else: + raise ValueError( + "Shear should be a single value or a tuple/list containing " + + "two values. Got {}".format(shear)) + scale = 1.0 / scale + + # Inverted rotation matrix with scale and shear + d = math.cos(angle + shear[0]) * math.cos(angle + shear[1]) + \ + math.sin(angle + shear[0]) * math.sin(angle + shear[1]) + matrix = [ + math.cos(angle + shear[0]), math.sin(angle + shear[0]), 0, + -math.sin(angle + shear[1]), math.cos(angle + shear[1]), 0 + ] + matrix = [scale / d * m for m in matrix] + + # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1 + matrix[2] += matrix[0] * (-center[0] - translate[0]) + matrix[1] * (-center[1] - translate[1]) + matrix[5] += matrix[3] * (-center[0] - translate[0]) + matrix[4] * (-center[1] - translate[1]) + + # Apply center translation: C * RSS^-1 * C^-1 * T^-1 + matrix[2] += center[0] + matrix[5] += center[1] + return matrix + + +
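With no rotation, translation, rescaling, or shear, the helper should return the identity in the flat 2x3 layout [a, b, c, d, e, f] (a sketch; signed zeros may appear in the exact output):

>>> m = _get_inverse_affine_matrix((0, 0), angle=0, translate=(0, 0), scale=1.0, shear=0)
>>> # expect approximately [1, 0, 0, 0, 1, 0]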
[docs]def affine(img, angle, translate, scale, shear, resample=0, fillcolor=None): + """Apply affine transformation on the image keeping image center invariant + + Args: + img (PIL Image): PIL Image to be rotated. + angle (float or int): rotation angle in degrees between -180 and 180, clockwise direction. + translate (list or tuple of integers): horizontal and vertical translations (post-rotation translation) + scale (float): overall scale + shear (float or tuple or list): shear angle value in degrees between -180 and 180, clockwise direction. + If a tuple or list is specified, the first value corresponds to a shear parallel to the x axis, while + the second value corresponds to a shear parallel to the y axis. + resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional): + An optional resampling filter. + See `filters`_ for more information. + If omitted, or if the image has mode "1" or "P", it is set to ``PIL.Image.NEAREST``. + fillcolor (int): Optional fill color for the area outside the transform in the output image. (Pillow>=5.0.0) + """ + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + + assert isinstance(translate, (tuple, list)) and len(translate) == 2, \ + "Argument translate should be a list or tuple of length 2" + + assert scale > 0.0, "Argument scale should be positive" + + output_size = img.size + center = (img.size[0] * 0.5 + 0.5, img.size[1] * 0.5 + 0.5) + matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear) + kwargs = {"fillcolor": fillcolor} if PILLOW_VERSION[0] >= '5' else {} + return img.transform(output_size, Image.AFFINE, matrix, resample, **kwargs)
    + + +
[docs]def to_grayscale(img, num_output_channels=1): + """Convert an image to its grayscale version. + + Args: + img (PIL Image): Image to be converted to grayscale. + + Returns: + PIL Image: Grayscale version of the image. + if num_output_channels = 1 : returned image is single channel + + if num_output_channels = 3 : returned image is 3 channel with r = g = b + """ + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + + if num_output_channels == 1: + img = img.convert('L') + elif num_output_channels == 3: + img = img.convert('L') + np_img = np.array(img, dtype=np.uint8) + np_img = np.dstack([np_img, np_img, np_img]) + img = Image.fromarray(np_img, 'RGB') + else: + raise ValueError('num_output_channels should be either 1 or 3') + + return img
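A minimal sketch of the two output modes:

>>> from PIL import Image
>>> img = Image.new('RGB', (8, 8))
>>> to_grayscale(img).mode
'L'
>>> to_grayscale(img, num_output_channels=3).mode
'RGB'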
    + + +
[docs]def erase(img, i, j, h, w, v, inplace=False): + """Erase the input Tensor Image with given value. + + Args: + img (Tensor Image): Tensor image of size (C, H, W) to be erased + i (int): Vertical coordinate of the upper left corner of the erased region. + j (int): Horizontal coordinate of the upper left corner of the erased region. + h (int): Height of the erased region. + w (int): Width of the erased region. + v: Erasing value. + inplace (bool, optional): Perform the erase in place. Default is False. + + Returns: + Tensor Image: Erased image. + """ + if not isinstance(img, torch.Tensor): + raise TypeError('img should be Tensor Image. Got {}'.format(type(img))) + + if not inplace: + img = img.clone() + + img[:, i:i + h, j:j + w] = v + return img
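A minimal sketch on a tensor image; with the default ``inplace=False`` the input is left untouched:

>>> import torch
>>> img = torch.ones(3, 8, 8)
>>> out = erase(img, 2, 2, 4, 4, v=0)
>>> out[:, 2:6, 2:6].sum().item()
0.0
>>> img.sum().item()
192.0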
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/transforms/transforms.html b/docs/stable/_modules/torchvision/transforms/transforms.html
new file mode 100644
index 000000000000..fd75d31d9dcb
--- /dev/null
+++ b/docs/stable/_modules/torchvision/transforms/transforms.html
@@ -0,0 +1,1802 @@
+ torchvision.transforms.transforms — PyTorch master documentation

    Source code for torchvision.transforms.transforms

    +from __future__ import division
    +import torch
    +import math
    +import sys
    +import random
    +from PIL import Image
    +try:
    +    import accimage
    +except ImportError:
    +    accimage = None
    +import numpy as np
    +import numbers
    +import types
    +import collections
    +import warnings
    +
    +from . import functional as F
    +
    +if sys.version_info < (3, 3):
    +    Sequence = collections.Sequence
    +    Iterable = collections.Iterable
    +else:
    +    Sequence = collections.abc.Sequence
    +    Iterable = collections.abc.Iterable
    +
    +
    +__all__ = ["Compose", "ToTensor", "ToPILImage", "Normalize", "Resize", "Scale", "CenterCrop", "Pad",
    +           "Lambda", "RandomApply", "RandomChoice", "RandomOrder", "RandomCrop", "RandomHorizontalFlip",
    +           "RandomVerticalFlip", "RandomResizedCrop", "RandomSizedCrop", "FiveCrop", "TenCrop", "LinearTransformation",
    +           "ColorJitter", "RandomRotation", "RandomAffine", "Grayscale", "RandomGrayscale",
    +           "RandomPerspective", "RandomErasing"]
    +
    +_pil_interpolation_to_str = {
    +    Image.NEAREST: 'PIL.Image.NEAREST',
    +    Image.BILINEAR: 'PIL.Image.BILINEAR',
    +    Image.BICUBIC: 'PIL.Image.BICUBIC',
    +    Image.LANCZOS: 'PIL.Image.LANCZOS',
    +    Image.HAMMING: 'PIL.Image.HAMMING',
    +    Image.BOX: 'PIL.Image.BOX',
    +}
    +
    +
    +
    [docs]class Compose(object): + """Composes several transforms together. + + Args: + transforms (list of ``Transform`` objects): list of transforms to compose. + + Example: + >>> transforms.Compose([ + >>> transforms.CenterCrop(10), + >>> transforms.ToTensor(), + >>> ]) + """ + + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, img): + for t in self.transforms: + img = t(img) + return img + + def __repr__(self): + format_string = self.__class__.__name__ + '(' + for t in self.transforms: + format_string += '\n' + format_string += ' {0}'.format(t) + format_string += '\n)' + return format_string
    + + +
    [docs]class ToTensor(object): + """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor. + + Converts a PIL Image or numpy.ndarray (H x W x C) in the range + [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] + if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1) + or if the numpy.ndarray has dtype = np.uint8 + + In the other cases, tensors are returned without scaling. + """ + +
    [docs] def __call__(self, pic): + """ + Args: + pic (PIL Image or numpy.ndarray): Image to be converted to tensor. + + Returns: + Tensor: Converted image. + """ + return F.to_tensor(pic)
    + + def __repr__(self): + return self.__class__.__name__ + '()'
    + + +
    [docs]class ToPILImage(object): + """Convert a tensor or an ndarray to PIL Image. + + Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape + H x W x C to a PIL Image while preserving the value range. + + Args: + mode (`PIL.Image mode`_): color space and pixel depth of input data (optional). + If ``mode`` is ``None`` (default) there are some assumptions made about the input data: + - If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``. + - If the input has 3 channels, the ``mode`` is assumed to be ``RGB``. + - If the input has 2 channels, the ``mode`` is assumed to be ``LA``. + - If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``, + ``short``). + + .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes + """ + def __init__(self, mode=None): + self.mode = mode + +
    [docs] def __call__(self, pic): + """ + Args: + pic (Tensor or numpy.ndarray): Image to be converted to PIL Image. + + Returns: + PIL Image: Image converted to PIL Image. + + """ + return F.to_pil_image(pic, self.mode)
    + + def __repr__(self): + format_string = self.__class__.__name__ + '(' + if self.mode is not None: + format_string += 'mode={0}'.format(self.mode) + format_string += ')' + return format_string
    + + +
[docs]class Normalize(object): + """Normalize a tensor image with mean and standard deviation. + Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform + will normalize each channel of the input ``torch.*Tensor`` i.e. + ``input[channel] = (input[channel] - mean[channel]) / std[channel]`` + + .. note:: + This transform acts out of place, i.e., it does not mutate the input tensor. + + Args: + mean (sequence): Sequence of means for each channel. + std (sequence): Sequence of standard deviations for each channel. + inplace (bool, optional): Bool to make this operation in-place. + + """ + + def __init__(self, mean, std, inplace=False): + self.mean = mean + self.std = std + self.inplace = inplace + +
    [docs] def __call__(self, tensor): + """ + Args: + tensor (Tensor): Tensor image of size (C, H, W) to be normalized. + + Returns: + Tensor: Normalized Tensor image. + """ + return F.normalize(tensor, self.mean, self.std, self.inplace)
    + + def __repr__(self): + return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
    + + +
[docs]class Resize(object): + """Resize the input PIL Image to the given size. + + Args: + size (sequence or int): Desired output size. If size is a sequence like + (h, w), output size will be matched to this. If size is an int, + smaller edge of the image will be matched to this number, + i.e., if height > width, then image will be rescaled to + (size * height / width, size) + interpolation (int, optional): Desired interpolation. Default is + ``PIL.Image.BILINEAR`` + """ + + def __init__(self, size, interpolation=Image.BILINEAR): + assert isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2) + self.size = size + self.interpolation = interpolation + + def __call__(self, img): + """ + Args: + img (PIL Image): Image to be scaled. + + Returns: + PIL Image: Rescaled image. + """ + return F.resize(img, self.size, self.interpolation) + + def __repr__(self): + interpolate_str = _pil_interpolation_to_str[self.interpolation] + return self.__class__.__name__ + '(size={0}, interpolation={1})'.format(self.size, interpolate_str)
    + + +
    [docs]class Scale(Resize): + """ + Note: This transform is deprecated in favor of Resize. + """ + def __init__(self, *args, **kwargs): + warnings.warn("The use of the transforms.Scale transform is deprecated, " + + "please use transforms.Resize instead.") + super(Scale, self).__init__(*args, **kwargs)
    + + +
    [docs]class CenterCrop(object): + """Crops the given PIL Image at the center. + + Args: + size (sequence or int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. + """ + + def __init__(self, size): + if isinstance(size, numbers.Number): + self.size = (int(size), int(size)) + else: + self.size = size + + def __call__(self, img): + """ + Args: + img (PIL Image): Image to be cropped. + + Returns: + PIL Image: Cropped image. + """ + return F.center_crop(img, self.size) + + def __repr__(self): + return self.__class__.__name__ + '(size={0})'.format(self.size)
    + + +
    [docs]class Pad(object): + """Pad the given PIL Image on all sides with the given "pad" value. + + Args: + padding (int or tuple): Padding on each border. If a single int is provided this + is used to pad all borders. If tuple of length 2 is provided this is the padding + on left/right and top/bottom respectively. If a tuple of length 4 is provided + this is the padding for the left, top, right and bottom borders + respectively. + fill (int or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of + length 3, it is used to fill R, G, B channels respectively. + This value is only used when the padding_mode is constant + padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric. + Default is constant. + + - constant: pads with a constant value, this value is specified with fill + + - edge: pads with the last value at the edge of the image + + - reflect: pads with reflection of image without repeating the last value on the edge + + For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode + will result in [3, 2, 1, 2, 3, 4, 3, 2] + + - symmetric: pads with reflection of image repeating the last value on the edge + + For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode + will result in [2, 1, 1, 2, 3, 4, 4, 3] + """ + + def __init__(self, padding, fill=0, padding_mode='constant'): + assert isinstance(padding, (numbers.Number, tuple)) + assert isinstance(fill, (numbers.Number, str, tuple)) + assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'] + if isinstance(padding, Sequence) and len(padding) not in [2, 4]: + raise ValueError("Padding must be an int or a 2, or 4 element tuple, not a " + + "{} element tuple".format(len(padding))) + + self.padding = padding + self.fill = fill + self.padding_mode = padding_mode + + def __call__(self, img): + """ + Args: + img (PIL Image): Image to be padded. + + Returns: + PIL Image: Padded image. + """ + return F.pad(img, self.padding, self.fill, self.padding_mode) + + def __repr__(self): + return self.__class__.__name__ + '(padding={0}, fill={1}, padding_mode={2})'.\ + format(self.padding, self.fill, self.padding_mode)
    + + +
    [docs]class Lambda(object): + """Apply a user-defined lambda as a transform. + + Args: + lambd (function): Lambda/function to be used for transform. + """ + + def __init__(self, lambd): + assert callable(lambd), repr(type(lambd).__name__) + " object is not callable" + self.lambd = lambd + + def __call__(self, img): + return self.lambd(img) + + def __repr__(self): + return self.__class__.__name__ + '()'
    + + +class RandomTransforms(object): + """Base class for a list of transformations with randomness + + Args: + transforms (list or tuple): list of transformations + """ + + def __init__(self, transforms): + assert isinstance(transforms, (list, tuple)) + self.transforms = transforms + + def __call__(self, *args, **kwargs): + raise NotImplementedError() + + def __repr__(self): + format_string = self.__class__.__name__ + '(' + for t in self.transforms: + format_string += '\n' + format_string += ' {0}'.format(t) + format_string += '\n)' + return format_string + + +
[docs]class RandomApply(RandomTransforms): + """Randomly apply a list of transformations with a given probability + + Args: + transforms (list or tuple): list of transformations + p (float): probability + """ + + def __init__(self, transforms, p=0.5): + super(RandomApply, self).__init__(transforms) + self.p = p + + def __call__(self, img): + if self.p < random.random(): + return img + for t in self.transforms: + img = t(img) + return img + + def __repr__(self): + format_string = self.__class__.__name__ + '(' + format_string += '\n    p={}'.format(self.p) + for t in self.transforms: + format_string += '\n' + format_string += '    {0}'.format(t) + format_string += '\n)' + return format_string
    + + +
    [docs]class RandomOrder(RandomTransforms): + """Apply a list of transformations in a random order + """ + def __call__(self, img): + order = list(range(len(self.transforms))) + random.shuffle(order) + for i in order: + img = self.transforms[i](img) + return img
    + + +
[docs]class RandomChoice(RandomTransforms): + """Apply a single transformation randomly picked from a list + """ + def __call__(self, img): + t = random.choice(self.transforms) + return t(img)
    + + +
    [docs]class RandomCrop(object): + """Crop the given PIL Image at a random location. + + Args: + size (sequence or int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. + padding (int or sequence, optional): Optional padding on each border + of the image. Default is None, i.e no padding. If a sequence of length + 4 is provided, it is used to pad left, top, right, bottom borders + respectively. If a sequence of length 2 is provided, it is used to + pad left/right, top/bottom borders, respectively. + pad_if_needed (boolean): It will pad the image if smaller than the + desired size to avoid raising an exception. Since cropping is done + after padding, the padding seems to be done at a random offset. + fill: Pixel fill value for constant fill. Default is 0. If a tuple of + length 3, it is used to fill R, G, B channels respectively. + This value is only used when the padding_mode is constant + padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant. + + - constant: pads with a constant value, this value is specified with fill + + - edge: pads with the last value on the edge of the image + + - reflect: pads with reflection of image (without repeating the last value on the edge) + + padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode + will result in [3, 2, 1, 2, 3, 4, 3, 2] + + - symmetric: pads with reflection of image (repeating the last value on the edge) + + padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode + will result in [2, 1, 1, 2, 3, 4, 4, 3] + + """ + + def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode='constant'): + if isinstance(size, numbers.Number): + self.size = (int(size), int(size)) + else: + self.size = size + self.padding = padding + self.pad_if_needed = pad_if_needed + self.fill = fill + self.padding_mode = padding_mode + + @staticmethod + def get_params(img, output_size): + """Get parameters for ``crop`` for a random crop. + + Args: + img (PIL Image): Image to be cropped. + output_size (tuple): Expected output size of the crop. + + Returns: + tuple: params (i, j, h, w) to be passed to ``crop`` for random crop. + """ + w, h = img.size + th, tw = output_size + if w == tw and h == th: + return 0, 0, h, w + + i = random.randint(0, h - th) + j = random.randint(0, w - tw) + return i, j, th, tw + + def __call__(self, img): + """ + Args: + img (PIL Image): Image to be cropped. + + Returns: + PIL Image: Cropped image. + """ + if self.padding is not None: + img = F.pad(img, self.padding, self.fill, self.padding_mode) + + # pad the width if needed + if self.pad_if_needed and img.size[0] < self.size[1]: + img = F.pad(img, (self.size[1] - img.size[0], 0), self.fill, self.padding_mode) + # pad the height if needed + if self.pad_if_needed and img.size[1] < self.size[0]: + img = F.pad(img, (0, self.size[0] - img.size[1]), self.fill, self.padding_mode) + + i, j, h, w = self.get_params(img, self.size) + + return F.crop(img, i, j, h, w) + + def __repr__(self): + return self.__class__.__name__ + '(size={0}, padding={1})'.format(self.size, self.padding)
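A common recipe (e.g. CIFAR-style training) pads by 4 pixels and then crops back to the original size, so the crop offset is effectively random around the original frame. A usage sketch:

>>> from PIL import Image
>>> t = RandomCrop(32, padding=4)
>>> t(Image.new('RGB', (32, 32))).size
(32, 32)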
    + + +
    [docs]class RandomHorizontalFlip(object): + """Horizontally flip the given PIL Image randomly with a given probability. + + Args: + p (float): probability of the image being flipped. Default value is 0.5 + """ + + def __init__(self, p=0.5): + self.p = p + + def __call__(self, img): + """ + Args: + img (PIL Image): Image to be flipped. + + Returns: + PIL Image: Randomly flipped image. + """ + if random.random() < self.p: + return F.hflip(img) + return img + + def __repr__(self): + return self.__class__.__name__ + '(p={})'.format(self.p)
    + + +
    [docs]class RandomVerticalFlip(object): + """Vertically flip the given PIL Image randomly with a given probability. + + Args: + p (float): probability of the image being flipped. Default value is 0.5 + """ + + def __init__(self, p=0.5): + self.p = p + + def __call__(self, img): + """ + Args: + img (PIL Image): Image to be flipped. + + Returns: + PIL Image: Randomly flipped image. + """ + if random.random() < self.p: + return F.vflip(img) + return img + + def __repr__(self): + return self.__class__.__name__ + '(p={})'.format(self.p)
    + + +
[docs]class RandomPerspective(object): + """Performs a random perspective transformation of the given PIL Image with a given probability. + + Args: + interpolation: Desired interpolation. Default is ``Image.BICUBIC`` + + p (float): probability of the image being perspectively transformed. Default value is 0.5 + + distortion_scale (float): controls the degree of distortion and ranges from 0 to 1. Default value is 0.5. + + """ + + def __init__(self, distortion_scale=0.5, p=0.5, interpolation=Image.BICUBIC): + self.p = p + self.interpolation = interpolation + self.distortion_scale = distortion_scale + + def __call__(self, img): + """ + Args: + img (PIL Image): Image to be perspectively transformed. + + Returns: + PIL Image: Randomly perspectively transformed image. + """ + if not F._is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + + if random.random() < self.p: + width, height = img.size + startpoints, endpoints = self.get_params(width, height, self.distortion_scale) + return F.perspective(img, startpoints, endpoints, self.interpolation) + return img + + @staticmethod + def get_params(width, height, distortion_scale): + """Get parameters for ``perspective`` for a random perspective transform. + + Args: + width : width of the image. + height : height of the image. + distortion_scale : controls the degree of distortion. + + Returns: + List containing [top-left, top-right, bottom-right, bottom-left] of the original image, + List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image. + """ + half_height = int(height / 2) + half_width = int(width / 2) + topleft = (random.randint(0, int(distortion_scale * half_width)), + random.randint(0, int(distortion_scale * half_height))) + topright = (random.randint(width - int(distortion_scale * half_width) - 1, width - 1), + random.randint(0, int(distortion_scale * half_height))) + botright = (random.randint(width - int(distortion_scale * half_width) - 1, width - 1), + random.randint(height - int(distortion_scale * half_height) - 1, height - 1)) + botleft = (random.randint(0, int(distortion_scale * half_width)), + random.randint(height - int(distortion_scale * half_height) - 1, height - 1)) + startpoints = [(0, 0), (width - 1, 0), (width - 1, height - 1), (0, height - 1)] + endpoints = [topleft, topright, botright, botleft] + return startpoints, endpoints + + def __repr__(self): + return self.__class__.__name__ + '(p={})'.format(self.p)
    + + +
[docs]class RandomResizedCrop(object): + """Crop the given PIL Image to random size and aspect ratio. + + A crop of random size (default: 0.08 to 1.0) of the original size and a random + aspect ratio (default: 3/4 to 4/3) of the original aspect ratio is made. This crop + is finally resized to given size. + This is popularly used to train the Inception networks. + + Args: + size: expected output size of each edge + scale: range of size of the original size cropped + ratio: range of aspect ratio of the original aspect ratio cropped + interpolation: Default: PIL.Image.BILINEAR + """ + + def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=Image.BILINEAR): + if isinstance(size, tuple): + self.size = size + else: + self.size = (size, size) + if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): + warnings.warn("range should be of kind (min, max)") + + self.interpolation = interpolation + self.scale = scale + self.ratio = ratio + + @staticmethod + def get_params(img, scale, ratio): + """Get parameters for ``crop`` for a random sized crop. + + Args: + img (PIL Image): Image to be cropped. + scale (tuple): range of size of the original size cropped + ratio (tuple): range of aspect ratio of the original aspect ratio cropped + + Returns: + tuple: params (i, j, h, w) to be passed to ``crop`` for a random + sized crop. + """ + area = img.size[0] * img.size[1] + + for attempt in range(10): + target_area = random.uniform(*scale) * area + log_ratio = (math.log(ratio[0]), math.log(ratio[1])) + aspect_ratio = math.exp(random.uniform(*log_ratio)) + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if w <= img.size[0] and h <= img.size[1]: + i = random.randint(0, img.size[1] - h) + j = random.randint(0, img.size[0] - w) + return i, j, h, w + + # Fallback to central crop + in_ratio = img.size[0] / img.size[1] + if (in_ratio < min(ratio)): + w = img.size[0] + h = int(round(w / min(ratio))) + elif (in_ratio > max(ratio)): + h = img.size[1] + w = int(round(h * max(ratio))) + else: # whole image + w = img.size[0] + h = img.size[1] + i = (img.size[1] - h) // 2 + j = (img.size[0] - w) // 2 + return i, j, h, w + + def __call__(self, img): + """ + Args: + img (PIL Image): Image to be cropped and resized. + + Returns: + PIL Image: Randomly cropped and resized image. + """ + i, j, h, w = self.get_params(img, self.scale, self.ratio) + return F.resized_crop(img, i, j, h, w, self.size, self.interpolation) + + def __repr__(self): + interpolate_str = _pil_interpolation_to_str[self.interpolation] + format_string = self.__class__.__name__ + '(size={0}'.format(self.size) + format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale)) + format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio)) + format_string += ', interpolation={0})'.format(interpolate_str) + return format_string
    + + +
    [docs]class RandomSizedCrop(RandomResizedCrop): + """ + Note: This transform is deprecated in favor of RandomResizedCrop. + """ + def __init__(self, *args, **kwargs): + warnings.warn("The use of the transforms.RandomSizedCrop transform is deprecated, " + + "please use transforms.RandomResizedCrop instead.") + super(RandomSizedCrop, self).__init__(*args, **kwargs)
    + + +
[docs]class FiveCrop(object): + """Crop the given PIL Image into four corners and the central crop + + .. Note:: + This transform returns a tuple of images and there may be a mismatch in the number of + inputs and targets your Dataset returns. See below for an example of how to deal with + this. + + Args: + size (sequence or int): Desired output size of the crop. If size is an ``int`` + instead of sequence like (h, w), a square crop of size (size, size) is made. + + Example: + >>> transform = Compose([ + >>> FiveCrop(size), # this is a list of PIL Images + >>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor + >>> ]) + >>> #In your test loop you can do the following: + >>> input, target = batch # input is a 5d tensor, target is 2d + >>> bs, ncrops, c, h, w = input.size() + >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops + >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops + """ + + def __init__(self, size): + if isinstance(size, numbers.Number): + self.size = (int(size), int(size)) + else: + assert len(size) == 2, "Please provide only two dimensions (h, w) for size." + self.size = size + + def __call__(self, img): + return F.five_crop(img, self.size) + + def __repr__(self): + return self.__class__.__name__ + '(size={0})'.format(self.size)
    + + +
[docs]class TenCrop(object): + """Crop the given PIL Image into four corners and the central crop plus the flipped version of + these (horizontal flipping is used by default) + + .. Note:: + This transform returns a tuple of images and there may be a mismatch in the number of + inputs and targets your Dataset returns. See below for an example of how to deal with + this. + + Args: + size (sequence or int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. + vertical_flip (bool): Use vertical flipping instead of horizontal + + Example: + >>> transform = Compose([ + >>> TenCrop(size), # this is a list of PIL Images + >>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor + >>> ]) + >>> #In your test loop you can do the following: + >>> input, target = batch # input is a 5d tensor, target is 2d + >>> bs, ncrops, c, h, w = input.size() + >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops + >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops + """ + + def __init__(self, size, vertical_flip=False): + if isinstance(size, numbers.Number): + self.size = (int(size), int(size)) + else: + assert len(size) == 2, "Please provide only two dimensions (h, w) for size." + self.size = size + self.vertical_flip = vertical_flip + + def __call__(self, img): + return F.ten_crop(img, self.size, self.vertical_flip) + + def __repr__(self): + return self.__class__.__name__ + '(size={0}, vertical_flip={1})'.format(self.size, self.vertical_flip)
    + + +
[docs]class LinearTransformation(object): + """Transform a tensor image with a square transformation matrix and a mean_vector computed + offline. + Given transformation_matrix and mean_vector, this transform flattens the torch.*Tensor, + subtracts mean_vector from it, computes the dot product with the transformation matrix, + and reshapes the tensor back to its original shape. + + Applications: + whitening transformation: Suppose X is a zero-centered data matrix. + Then compute the data covariance matrix [D x D] with torch.mm(X.t(), X), + perform SVD on this matrix and pass it as transformation_matrix. + + Args: + transformation_matrix (Tensor): tensor [D x D], D = C x H x W + mean_vector (Tensor): tensor [D], D = C x H x W + """ + + def __init__(self, transformation_matrix, mean_vector): + if transformation_matrix.size(0) != transformation_matrix.size(1): + raise ValueError("transformation_matrix should be square. Got " + + "[{} x {}] rectangular matrix.".format(*transformation_matrix.size())) + + if mean_vector.size(0) != transformation_matrix.size(0): + raise ValueError("mean_vector should have the same length {}".format(mean_vector.size(0)) + + " as any one of the dimensions of the transformation_matrix [{} x {}]" + .format(*transformation_matrix.size())) + + self.transformation_matrix = transformation_matrix + self.mean_vector = mean_vector + + def __call__(self, tensor): + """ + Args: + tensor (Tensor): Tensor image of size (C, H, W) to be whitened. + + Returns: + Tensor: Transformed image. + """ + if tensor.size(0) * tensor.size(1) * tensor.size(2) != self.transformation_matrix.size(0): + raise ValueError("tensor and transformation matrix have incompatible shape. " + + "[{} x {} x {}] != ".format(*tensor.size()) + + "{}".format(self.transformation_matrix.size(0))) + flat_tensor = tensor.view(1, -1) - self.mean_vector + transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix) + tensor = transformed_tensor.view(tensor.size()) + return tensor + + def __repr__(self): + format_string = self.__class__.__name__ + '(transformation_matrix=' + format_string += (str(self.transformation_matrix.tolist()) + ')') + format_string += (", (mean_vector=" + str(self.mean_vector.tolist()) + ')') + return format_string
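A minimal ZCA-whitening sketch following the recipe in the docstring; the 1000-sample data matrix ``X`` here is synthetic, standing in for flattened training images:

>>> import torch
>>> X = torch.randn(1000, 12)                      # 1000 flattened 3x2x2 images
>>> mean = X.mean(0)
>>> cov = (X - mean).t().mm(X - mean) / X.size(0)  # [D x D] covariance
>>> U, S, _ = torch.svd(cov)
>>> W = U.mm(torch.diag(S.clamp(min=1e-8).rsqrt())).mm(U.t())
>>> whiten = LinearTransformation(W, mean)
>>> whiten(torch.randn(3, 2, 2)).shape
torch.Size([3, 2, 2])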
    + + +
[docs]class ColorJitter(object): + """Randomly change the brightness, contrast, saturation and hue of an image. + + Args: + brightness (float or tuple of float (min, max)): How much to jitter brightness. + brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness] + or the given [min, max]. Should be non negative numbers. + contrast (float or tuple of float (min, max)): How much to jitter contrast. + contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast] + or the given [min, max]. Should be non negative numbers. + saturation (float or tuple of float (min, max)): How much to jitter saturation. + saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation] + or the given [min, max]. Should be non negative numbers. + hue (float or tuple of float (min, max)): How much to jitter hue. + hue_factor is chosen uniformly from [-hue, hue] or the given [min, max]. + Should have 0 <= hue <= 0.5 or -0.5 <= min <= max <= 0.5. + """ + def __init__(self, brightness=0, contrast=0, saturation=0, hue=0): + self.brightness = self._check_input(brightness, 'brightness') + self.contrast = self._check_input(contrast, 'contrast') + self.saturation = self._check_input(saturation, 'saturation') + self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5), + clip_first_on_zero=False) + + def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True): + if isinstance(value, numbers.Number): + if value < 0: + raise ValueError("If {} is a single number, it must be non negative.".format(name)) + value = [center - value, center + value] + if clip_first_on_zero: + value[0] = max(value[0], 0) + elif isinstance(value, (tuple, list)) and len(value) == 2: + if not bound[0] <= value[0] <= value[1] <= bound[1]: + raise ValueError("{} values should be between {}".format(name, bound)) + else: + raise TypeError("{} should be a single number or a list/tuple with length 2.".format(name)) + + # if value is 0 or (1., 1.) for brightness/contrast/saturation + # or (0., 0.) for hue, do nothing + if value[0] == value[1] == center: + value = None + return value + + @staticmethod + def get_params(brightness, contrast, saturation, hue): + """Get a randomized transform to be applied on image. + + Arguments are the same as those of __init__. + + Returns: + Transform which randomly adjusts brightness, contrast and + saturation in a random order. + """ + transforms = [] + + if brightness is not None: + brightness_factor = random.uniform(brightness[0], brightness[1]) + transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor))) + + if contrast is not None: + contrast_factor = random.uniform(contrast[0], contrast[1]) + transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor))) + + if saturation is not None: + saturation_factor = random.uniform(saturation[0], saturation[1]) + transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor))) + + if hue is not None: + hue_factor = random.uniform(hue[0], hue[1]) + transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor))) + + random.shuffle(transforms) + transform = Compose(transforms) + + return transform + + def __call__(self, img): + """ + Args: + img (PIL Image): Input image. + + Returns: + PIL Image: Color jittered image. 
+ """ + transform = self.get_params(self.brightness, self.contrast, + self.saturation, self.hue) + return transform(img) + + def __repr__(self): + format_string = self.__class__.__name__ + '(' + format_string += 'brightness={0}'.format(self.brightness) + format_string += ', contrast={0}'.format(self.contrast) + format_string += ', saturation={0}'.format(self.saturation) + format_string += ', hue={0})'.format(self.hue) + return format_string
    + + +
    [docs]class RandomRotation(object): + """Rotate the image by angle. + + Args: + degrees (sequence or float or int): Range of degrees to select from. + If degrees is a number instead of sequence like (min, max), the range of degrees + will be (-degrees, +degrees). + resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional): + An optional resampling filter. See `filters`_ for more information. + If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST. + expand (bool, optional): Optional expansion flag. + If true, expands the output to make it large enough to hold the entire rotated image. + If false or omitted, make the output image the same size as the input image. + Note that the expand flag assumes rotation around the center and no translation. + center (2-tuple, optional): Optional center of rotation. + Origin is the upper left corner. + Default is the center of the image. + + .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters + + """ + + def __init__(self, degrees, resample=False, expand=False, center=None): + if isinstance(degrees, numbers.Number): + if degrees < 0: + raise ValueError("If degrees is a single number, it must be positive.") + self.degrees = (-degrees, degrees) + else: + if len(degrees) != 2: + raise ValueError("If degrees is a sequence, it must be of len 2.") + self.degrees = degrees + + self.resample = resample + self.expand = expand + self.center = center + + @staticmethod + def get_params(degrees): + """Get parameters for ``rotate`` for a random rotation. + + Returns: + sequence: params to be passed to ``rotate`` for random rotation. + """ + angle = random.uniform(degrees[0], degrees[1]) + + return angle + + def __call__(self, img): + """ + Args: + img (PIL Image): Image to be rotated. + + Returns: + PIL Image: Rotated image. + """ + + angle = self.get_params(self.degrees) + + return F.rotate(img, angle, self.resample, self.expand, self.center) + + def __repr__(self): + format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees) + format_string += ', resample={0}'.format(self.resample) + format_string += ', expand={0}'.format(self.expand) + if self.center is not None: + format_string += ', center={0}'.format(self.center) + format_string += ')' + return format_string
    + + +
[docs]class RandomAffine(object): + """Random affine transformation of the image keeping center invariant + + Args: + degrees (sequence or float or int): Range of degrees to select from. + If degrees is a number instead of sequence like (min, max), the range of degrees + will be (-degrees, +degrees). Set to 0 to deactivate rotations. + translate (tuple, optional): tuple of maximum absolute fraction for horizontal + and vertical translations. For example translate=(a, b), then horizontal shift + is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is + randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default. + scale (tuple, optional): scaling factor interval, e.g (a, b), then scale is + randomly sampled from the range a <= scale <= b. Will keep original scale by default. + shear (sequence or float or int, optional): Range of degrees to select from. + If shear is a number, a shear parallel to the x axis in the range (-shear, +shear) + will be applied. Else if shear is a tuple or list of 2 values a shear parallel to the x axis in the + range (shear[0], shear[1]) will be applied. Else if shear is a tuple or list of 4 values, + an x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3]) will be applied. + Will not apply shear by default + resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional): + An optional resampling filter. See `filters`_ for more information. + If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST. + fillcolor (tuple or int): Optional fill color (tuple for RGB Image and int for grayscale) for the area + outside the transform in the output image. (Pillow>=5.0.0) + + .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters + + """ + + def __init__(self, degrees, translate=None, scale=None, shear=None, resample=False, fillcolor=0): + if isinstance(degrees, numbers.Number): + if degrees < 0: + raise ValueError("If degrees is a single number, it must be positive.") + self.degrees = (-degrees, degrees) + else: + assert isinstance(degrees, (tuple, list)) and len(degrees) == 2, \ + "degrees should be a list or tuple and it must be of length 2." + self.degrees = degrees + + if translate is not None: + assert isinstance(translate, (tuple, list)) and len(translate) == 2, \ + "translate should be a list or tuple and it must be of length 2." + for t in translate: + if not (0.0 <= t <= 1.0): + raise ValueError("translation values should be between 0 and 1") + self.translate = translate + + if scale is not None: + assert isinstance(scale, (tuple, list)) and len(scale) == 2, \ + "scale should be a list or tuple and it must be of length 2." + for s in scale: + if s <= 0: + raise ValueError("scale values should be positive") + self.scale = scale + + if shear is not None: + if isinstance(shear, numbers.Number): + if shear < 0: + raise ValueError("If shear is a single number, it must be positive.") + self.shear = (-shear, shear) + else: + assert isinstance(shear, (tuple, list)) and \ + (len(shear) == 2 or len(shear) == 4), \ + "shear should be a list or tuple and it must be of length 2 or 4." + # X-Axis shear with [min, max] + if len(shear) == 2: + self.shear = [shear[0], shear[1], 0., 0.]
+ elif len(shear) == 4: + self.shear = [s for s in shear] + else: + self.shear = shear + + self.resample = resample + self.fillcolor = fillcolor + + @staticmethod + def get_params(degrees, translate, scale_ranges, shears, img_size): + """Get parameters for affine transformation + + Returns: + sequence: params to be passed to the affine transformation + """ + angle = random.uniform(degrees[0], degrees[1]) + if translate is not None: + max_dx = translate[0] * img_size[0] + max_dy = translate[1] * img_size[1] + translations = (np.round(random.uniform(-max_dx, max_dx)), + np.round(random.uniform(-max_dy, max_dy))) + else: + translations = (0, 0) + + if scale_ranges is not None: + scale = random.uniform(scale_ranges[0], scale_ranges[1]) + else: + scale = 1.0 + + if shears is not None: + if len(shears) == 2: + shear = [random.uniform(shears[0], shears[1]), 0.] + elif len(shears) == 4: + shear = [random.uniform(shears[0], shears[1]), + random.uniform(shears[2], shears[3])] + else: + shear = 0.0 + + return angle, translations, scale, shear + + def __call__(self, img): + """ + img (PIL Image): Image to be transformed. + + Returns: + PIL Image: Affine transformed image. + """ + ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img.size) + return F.affine(img, *ret, resample=self.resample, fillcolor=self.fillcolor) + + def __repr__(self): + s = '{name}(degrees={degrees}' + if self.translate is not None: + s += ', translate={translate}' + if self.scale is not None: + s += ', scale={scale}' + if self.shear is not None: + s += ', shear={shear}' + if self.resample > 0: + s += ', resample={resample}' + if self.fillcolor != 0: + s += ', fillcolor={fillcolor}' + s += ')' + d = dict(self.__dict__) + d['resample'] = _pil_interpolation_to_str[d['resample']] + return s.format(name=self.__class__.__name__, **d)
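A usage sketch; a fresh set of affine parameters is drawn per image and the output keeps the input size:

>>> from PIL import Image
>>> t = RandomAffine(degrees=10, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=5)
>>> t(Image.new('RGB', (64, 64))).size
(64, 64)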
    + + +
[docs]class Grayscale(object): + """Convert image to grayscale. + + Args: + num_output_channels (int): (1 or 3) number of channels desired for the output image + + Returns: + PIL Image: Grayscale version of the input. + - If num_output_channels == 1 : returned image is single channel + - If num_output_channels == 3 : returned image is 3 channel with r == g == b + + """ + + def __init__(self, num_output_channels=1): + self.num_output_channels = num_output_channels + + def __call__(self, img): + """ + Args: + img (PIL Image): Image to be converted to grayscale. + + Returns: + PIL Image: Grayscaled image. + """ + return F.to_grayscale(img, num_output_channels=self.num_output_channels) + + def __repr__(self): + return self.__class__.__name__ + '(num_output_channels={0})'.format(self.num_output_channels)
    + + +
    [docs]class RandomGrayscale(object): + """Randomly convert image to grayscale with a probability of p (default 0.1). + + Args: + p (float): probability that image should be converted to grayscale. + + Returns: + PIL Image: Grayscale version of the input image with probability p and unchanged + with probability (1-p). + - If input image is 1 channel: grayscale version is 1 channel + - If input image is 3 channel: grayscale version is 3 channel with r == g == b + + """ + + def __init__(self, p=0.1): + self.p = p + + def __call__(self, img): + """ + Args: + img (PIL Image): Image to be converted to grayscale. + + Returns: + PIL Image: Randomly grayscaled image. + """ + num_output_channels = 1 if img.mode == 'L' else 3 + if random.random() < self.p: + return F.to_grayscale(img, num_output_channels=num_output_channels) + return img + + def __repr__(self): + return self.__class__.__name__ + '(p={0})'.format(self.p)
    + + +
[docs]class RandomErasing(object): + """Randomly selects a rectangle region in an image and erases its pixels. + 'Random Erasing Data Augmentation' by Zhong et al. + See https://arxiv.org/pdf/1708.04896.pdf + + Args: + p: probability that the random erasing operation will be performed. + scale: range of proportion of erased area against input image. + ratio: range of aspect ratio of erased area. + value: erasing value. Default is 0. If a single int, it is used to + erase all pixels. If a tuple of length 3, it is used to erase + R, G, B channels respectively. + If the str 'random', each pixel is erased with random values. + inplace: boolean to make this transform inplace. Default is False. + + Returns: + Erased Image. + + Example: + >>> transform = transforms.Compose([ + >>> transforms.RandomHorizontalFlip(), + >>> transforms.ToTensor(), + >>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), + >>> transforms.RandomErasing(), + >>> ]) + """ + + def __init__(self, p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False): + assert isinstance(value, (numbers.Number, str, tuple, list)) + if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): + warnings.warn("range should be of kind (min, max)") + if scale[0] < 0 or scale[1] > 1: + raise ValueError("range of scale should be between 0 and 1") + if p < 0 or p > 1: + raise ValueError("range of random erasing probability should be between 0 and 1") + + self.p = p + self.scale = scale + self.ratio = ratio + self.value = value + self.inplace = inplace + + @staticmethod + def get_params(img, scale, ratio, value=0): + """Get parameters for ``erase`` for a random erasing. + + Args: + img (Tensor): Tensor image of size (C, H, W) to be erased. + scale: range of proportion of erased area against input image. + ratio: range of aspect ratio of erased area. + + Returns: + tuple: params (i, j, h, w, v) to be passed to ``erase`` for random erasing. + """ + img_c, img_h, img_w = img.shape + area = img_h * img_w + + for attempt in range(10): + erase_area = random.uniform(scale[0], scale[1]) * area + aspect_ratio = random.uniform(ratio[0], ratio[1]) + + h = int(round(math.sqrt(erase_area * aspect_ratio))) + w = int(round(math.sqrt(erase_area / aspect_ratio))) + + if h < img_h and w < img_w: + i = random.randint(0, img_h - h) + j = random.randint(0, img_w - w) + if isinstance(value, numbers.Number): + v = value + elif isinstance(value, torch._six.string_classes): + v = torch.empty([img_c, h, w], dtype=torch.float32).normal_() + elif isinstance(value, (list, tuple)): + v = torch.tensor(value, dtype=torch.float32).view(-1, 1, 1).expand(-1, h, w) + return i, j, h, w, v + + # Return original image + return 0, 0, img_h, img_w, img + + def __call__(self, img): + """ + Args: + img (Tensor): Tensor image of size (C, H, W) to be erased. + + Returns: + img (Tensor): Erased Tensor image. + """ + if random.uniform(0, 1) < self.p: + i, j, h, w, v = self.get_params(img, scale=self.scale, ratio=self.ratio, value=self.value) + return F.erase(img, i, j, h, w, v, self.inplace) + return img
\ No newline at end of file diff --git a/docs/stable/_modules/torchvision/utils.html b/docs/stable/_modules/torchvision/utils.html new file mode 100644 index 000000000000..c2876ea214af --- /dev/null +++ b/docs/stable/_modules/torchvision/utils.html @@ -0,0 +1,619 @@ + torchvision.utils — PyTorch master documentation

    Source code for torchvision.utils

    +import torch
    +import math
    +irange = range
    +
    +
    +
    [docs]def make_grid(tensor, nrow=8, padding=2, + normalize=False, range=None, scale_each=False, pad_value=0): + """Make a grid of images. + + Args: + tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W) + or a list of images all of the same size. + nrow (int, optional): Number of images displayed in each row of the grid. + The final grid size is ``(B / nrow, nrow)``. Default: ``8``. + padding (int, optional): amount of padding. Default: ``2``. + normalize (bool, optional): If True, shift the image to the range (0, 1), + by the min and max values specified by :attr:`range`. Default: ``False``. + range (tuple, optional): tuple (min, max) where min and max are numbers, + then these numbers are used to normalize the image. By default, min and max + are computed from the tensor. + scale_each (bool, optional): If ``True``, scale each image in the batch of + images separately rather than the (min, max) over all images. Default: ``False``. + pad_value (float, optional): Value for the padded pixels. Default: ``0``. + + Example: + See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_ + + """ + if not (torch.is_tensor(tensor) or + (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))): + raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor))) + + # if list of tensors, convert to a 4D mini-batch Tensor + if isinstance(tensor, list): + tensor = torch.stack(tensor, dim=0) + + if tensor.dim() == 2: # single image H x W + tensor = tensor.unsqueeze(0) + if tensor.dim() == 3: # single image + if tensor.size(0) == 1: # if single-channel, convert to 3-channel + tensor = torch.cat((tensor, tensor, tensor), 0) + tensor = tensor.unsqueeze(0) + + if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images + tensor = torch.cat((tensor, tensor, tensor), 1) + + if normalize is True: + tensor = tensor.clone() # avoid modifying tensor in-place + if range is not None: + assert isinstance(range, tuple), \ + "range has to be a tuple (min, max) if specified. min and max are numbers" + + def norm_ip(img, min, max): + img.clamp_(min=min, max=max) + img.add_(-min).div_(max - min + 1e-5) + + def norm_range(t, range): + if range is not None: + norm_ip(t, range[0], range[1]) + else: + norm_ip(t, float(t.min()), float(t.max())) + + if scale_each is True: + for t in tensor: # loop over mini-batch dimension + norm_range(t, range) + else: + norm_range(tensor, range) + + if tensor.size(0) == 1: + return tensor.squeeze(0) + + # make the mini-batch of images into a grid + nmaps = tensor.size(0) + xmaps = min(nrow, nmaps) + ymaps = int(math.ceil(float(nmaps) / xmaps)) + height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding) + grid = tensor.new_full((3, height * ymaps + padding, width * xmaps + padding), pad_value) + k = 0 + for y in irange(ymaps): + for x in irange(xmaps): + if k >= nmaps: + break + grid.narrow(1, y * height + padding, height - padding)\ + .narrow(2, x * width + padding, width - padding)\ + .copy_(tensor[k]) + k = k + 1 + return grid
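A rough usage sketch (an illustrative aside, not part of the archived source; the batch below is random data):

# Hypothetical sketch: tile a batch of 16 fake RGB images into a 4-wide grid.
import torch
from torchvision.utils import make_grid

batch = torch.rand(16, 3, 32, 32)           # B x C x H x W, values already in [0, 1]
grid = make_grid(batch, nrow=4, padding=2)  # single 3 x H x W tensor holding the grid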
    + + +
    [docs]def save_image(tensor, filename, nrow=8, padding=2, + normalize=False, range=None, scale_each=False, pad_value=0): + """Save a given Tensor into an image file. + + Args: + tensor (Tensor or list): Image to be saved. If given a mini-batch tensor, + saves the tensor as a grid of images by calling ``make_grid``. + **kwargs: Other arguments are documented in ``make_grid``. + """ + from PIL import Image + grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value, + normalize=normalize, range=range, scale_each=scale_each) + # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer + ndarr = grid.mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy() + im = Image.fromarray(ndarr) + im.save(filename)
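And a companion sketch for ``save_image`` (again illustrative only; the file name is a placeholder):

# Hypothetical sketch: write a batch straight to disk as one grid image.
import torch
from torchvision.utils import save_image

batch = torch.rand(8, 3, 64, 64)
save_image(batch, 'samples.png', nrow=4, normalize=True)  # grids, rescales to [0, 1], saves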
\ No newline at end of file diff --git a/docs/stable/_sources/__config__.rst.txt b/docs/stable/_sources/__config__.rst.txt new file mode 100644 index 000000000000..e4a6ac890493 --- /dev/null +++ b/docs/stable/_sources/__config__.rst.txt @@ -0,0 +1,7 @@ +torch.__config__ +=================================== + +.. automodule:: torch.__config__ + +.. autofunction:: show +.. autofunction:: parallel_info diff --git a/docs/stable/_sources/autograd.rst.txt b/docs/stable/_sources/autograd.rst.txt new file mode 100644 index 000000000000..135cfcf3393a --- /dev/null +++ b/docs/stable/_sources/autograd.rst.txt @@ -0,0 +1,107 @@ +.. role:: hidden + :class: hidden-section + +Automatic differentiation package - torch.autograd +================================================== + +.. automodule:: torch.autograd +.. currentmodule:: torch.autograd + +.. autofunction:: backward + +.. autofunction:: grad + +.. _locally-disable-grad: + +Locally disabling gradient computation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: no_grad + +.. autoclass:: enable_grad + +.. autoclass:: set_grad_enabled + +In-place operations on Tensors +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Supporting in-place operations in autograd is a hard matter, and we discourage +their use in most cases. Autograd's aggressive buffer freeing and reuse makes +it very efficient and there are very few occasions when in-place operations +actually lower memory usage by any significant amount. Unless you're operating +under heavy memory pressure, you might never need to use them. + +In-place correctness checks +--------------------------- + +All :class:`Tensor` s keep track of in-place operations applied to them, and +if the implementation detects that a tensor was saved for backward in one of +the functions, but it was modified in-place afterwards, an error will be raised +once the backward pass is started. This ensures that if you're using in-place +functions and not seeing any errors, you can be sure that the computed +gradients are correct. + +Variable (deprecated) +^^^^^^^^^^^^^^^^^^^^^ + +.. warning:: + The Variable API has been deprecated: Variables are no longer necessary to + use autograd with tensors. Autograd automatically supports Tensors with + ``requires_grad`` set to ``True``. Below please find a quick guide on what + has changed: + + - ``Variable(tensor)`` and ``Variable(tensor, requires_grad)`` still work as expected, + but they return Tensors instead of Variables. + - ``var.data`` is the same thing as ``tensor.data``. + - Methods such as ``var.backward(), var.detach(), var.register_hook()`` now work on tensors + with the same method names. + + In addition, one can now create tensors with ``requires_grad=True`` using factory + methods such as :func:`torch.randn`, :func:`torch.zeros`, :func:`torch.ones`, and others + like the following: + + ``autograd_tensor = torch.randn((2, 3, 4), requires_grad=True)`` + +Tensor autograd functions +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: torch.Tensor + :members: grad, requires_grad, is_leaf, backward, detach, detach_, register_hook, retain_grad + +:hidden:`Function` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: Function + :members: + +.. _grad-check: + +Numerical gradient checking +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autofunction:: gradcheck + +.. autofunction:: gradgradcheck + +Profiler +^^^^^^^^ + +Autograd includes a profiler that lets you inspect the cost of different +operators inside your model - both on the CPU and GPU.
There are two modes +implemented at the moment - CPU-only using :class:`~torch.autograd.profiler.profile`, +and nvprof-based (registers both CPU and GPU activity) using +:class:`~torch.autograd.profiler.emit_nvtx`. + +.. autoclass:: torch.autograd.profiler.profile + :members: + +.. autoclass:: torch.autograd.profiler.emit_nvtx + :members: + +.. autofunction:: torch.autograd.profiler.load_nvprof + +Anomaly detection +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: detect_anomaly + +.. autoclass:: set_detect_anomaly diff --git a/docs/stable/_sources/bottleneck.rst.txt b/docs/stable/_sources/bottleneck.rst.txt new file mode 100644 index 000000000000..d6ce122234fb --- /dev/null +++ b/docs/stable/_sources/bottleneck.rst.txt @@ -0,0 +1,59 @@ +torch.utils.bottleneck +====================== + +.. currentmodule:: torch.utils.bottleneck + +`torch.utils.bottleneck` is a tool that can be used as an initial step for +debugging bottlenecks in your program. It summarizes runs of your script with +the Python profiler and PyTorch's autograd profiler. + +Run it on the command line with + +:: + + python -m torch.utils.bottleneck /path/to/source/script.py [args] + +where [args] are any number of arguments to `script.py`, or run +``python -m torch.utils.bottleneck -h`` for more usage instructions. + +.. warning:: + Because your script will be profiled, please ensure that it exits in a + finite amount of time. + +.. warning:: + Due to the asynchronous nature of CUDA kernels, when running against + CUDA code, the cProfile output and CPU-mode autograd profilers may + not show correct timings: the reported CPU time reflects the amount of time + used to launch the kernels but does not include the time the kernel + spent executing on a GPU unless the operation does a synchronize. + Ops that do synchronize appear to be extremely expensive under regular + CPU-mode profilers. + In these cases where timings are incorrect, the CUDA-mode autograd profiler + may be helpful. + +.. note:: + To decide which (CPU-only-mode or CUDA-mode) autograd profiler output to + look at, you should first check if your script is CPU-bound + ("CPU total time is much greater than CUDA total time"). + If it is CPU-bound, looking at the results of the CPU-mode autograd + profiler will help. If on the other hand your script spends most of its + time executing on the GPU, then it makes sense to start + looking for responsible CUDA operators in the output of the CUDA-mode + autograd profiler. + + Of course the reality is much more complicated and your script might not be + in one of those two extremes depending on the part of the model you're + evaluating. If the profiler outputs don't help, you could try looking at + the result of :func:`torch.autograd.profiler.emit_nvtx()` with ``nvprof``. + However, please take into account that the NVTX overhead is very high and + often gives a heavily skewed timeline. + +.. warning:: + If you are profiling CUDA code, the first profiler that ``bottleneck`` runs + (cProfile) will include the CUDA startup time (CUDA buffer allocation cost) + in its time reporting. This should not matter if your bottlenecks result + in code much slower than the CUDA startup time. + +For more complicated uses of the profilers (like in a multi-GPU case), +please see https://docs.python.org/3/library/profile.html +or :func:`torch.autograd.profiler.profile()` for more information.
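As a small illustrative aside (a sketch, not part of the shipped docs), the autograd profiler referenced above can be used like this; the module and input are placeholders::

    import torch

    x = torch.randn(8, 16)
    linear = torch.nn.Linear(16, 4)
    with torch.autograd.profiler.profile() as prof:
        y = linear(x)  # ops executed inside the context are recorded
    print(prof.key_averages().table(sort_by="cpu_time_total"))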
diff --git a/docs/stable/_sources/checkpoint.rst.txt b/docs/stable/_sources/checkpoint.rst.txt new file mode 100644 index 000000000000..3affd71d9d3b --- /dev/null +++ b/docs/stable/_sources/checkpoint.rst.txt @@ -0,0 +1,28 @@ +torch.utils.checkpoint +====================== + +.. note:: + Checkpointing is implemented by rerunning a forward-pass segment for + each checkpointed segment during backward. This can cause persistent + states like the RNG state to be advanced further than they would be without + checkpointing. By default, checkpointing includes logic to juggle + the RNG state such that checkpointed passes making use of RNG + (through dropout for example) have deterministic output as + compared to non-checkpointed passes. The logic to stash and restore + RNG states can incur a moderate performance hit depending on the runtime + of checkpointed operations. If deterministic output compared to + non-checkpointed passes is not required, supply ``preserve_rng_state=False`` + to ``checkpoint`` or ``checkpoint_sequential`` to omit stashing and + restoring the RNG state during each checkpoint. + + The stashing logic saves and restores the RNG state for the current device + and the device of all cuda Tensor arguments to the ``run_fn``. + However, the logic has no way to anticipate if the user will move + Tensors to a new device within the ``run_fn`` itself. Therefore, if you move + Tensors to a new device ("new" meaning not belonging to the set of + [current device + devices of Tensor arguments]) within ``run_fn``, deterministic + output compared to non-checkpointed passes is never guaranteed. + +.. currentmodule:: torch.utils.checkpoint +.. autofunction:: checkpoint +.. autofunction:: checkpoint_sequential diff --git a/docs/stable/_sources/community/contribution_guide.rst.txt b/docs/stable/_sources/community/contribution_guide.rst.txt new file mode 100644 index 000000000000..a09a149ae359 --- /dev/null +++ b/docs/stable/_sources/community/contribution_guide.rst.txt @@ -0,0 +1,357 @@ +PyTorch Contribution Guide +========================== + +PyTorch is a GPU-accelerated Python tensor computation package for +building deep neural networks on a tape-based autograd system. + +The PyTorch Contribution Process +-------------------------------- + +The PyTorch organization is governed by `PyTorch +Governance `__. + +The PyTorch development process involves a healthy amount of open +discussions between the core development team and the community. + +PyTorch operates similarly to most open source projects on GitHub. +However, if you've never contributed to an open source project before, +here is the basic process. + +- **Figure out what you're going to work on.** The majority of open + source contributions come from people scratching their own itches. + However, if you don't know what you want to work on, or are just + looking to get more acquainted with the project, here are some tips + for how to find appropriate tasks: + + - Look through the `issue + tracker `__ and see if + there are any issues you know how to fix. Issues that are + confirmed by other contributors tend to be better to investigate. + We also maintain some labels for issues which are likely to be + good for new people, e.g., **bootcamp** and **1hr**, although + these labels are less well maintained. + - Join us on Slack and let us know you're interested in getting to + know PyTorch. We're very happy to help out researchers and + partners get up to speed with the codebase.
+ +- **Figure out the scope of your change and reach out for design + comments on a GitHub issue if it's large.** The majority of pull + requests are small; in that case, no need to let us know about what + you want to do, just get cracking. But if the change is going to be + large, it's usually a good idea to get some design comments about it + first. + + - If you don't know how big a change is going to be, we can help you + figure it out! Just post about it on issues or Slack. + - Some feature additions are very standardized; for example, lots of + people add new operators or optimizers to PyTorch. Design + discussion in these cases boils down mostly to, “Do we want this + operator/optimizer?” Giving evidence for its utility, e.g., usage + in peer-reviewed papers, or existence in other frameworks, helps a + bit when making this case. + - **Adding operators / algorithms from recently-released research** + is generally not accepted, unless there is overwhelming evidence that + this newly published work has ground-breaking results and will eventually + become a standard in the field. If you are not sure where your method falls, + open an issue first before implementing a PR. + - Core changes and refactors can be quite difficult to coordinate, + as the pace of development on PyTorch master is quite fast. + Definitely reach out about fundamental or cross-cutting changes; + we can often give guidance about how to stage such changes into + more easily reviewable pieces. + +- **Code it out!** + + - See the technical guide for advice on working with PyTorch. + +- **Open a pull request.** + + - If you are not ready for the pull request to be reviewed, tag it + with [WIP]. We will ignore it when doing review passes. If you are + working on a complex change, it's good to start things off as WIP, + because you will need to spend time looking at CI results to see + if things worked out or not. + - Find an appropriate reviewer for your change. We have some folks + who regularly go through the PR queue and try to review + everything, but if you happen to know who the maintainer for a + given subsystem affected by your patch is, feel free to include + them directly on the pull request. You can learn more about this + structure at PyTorch Subsystem Ownership. + +- **Iterate on the pull request until it's accepted!** + + - We'll try our best to minimize the number of review roundtrips and + block PRs only when there are major issues. For the most common + issues in pull requests, take a look at `Common Mistakes <#common-mistakes-to-avoid>`__. + - Once a pull request is accepted and CI is passing, there is + nothing else you need to do; we will merge the PR for you. + +Getting Started +--------------- + +Proposing new features +~~~~~~~~~~~~~~~~~~~~~~ + +New feature ideas are best discussed on a specific issue. Please include +as much information as you can, any accompanying data, and your proposed +solution. The PyTorch team and community frequently review new issues +and comment where they think they can help. If you feel confident in +your solution, go ahead and implement it. + +Reporting Issues +~~~~~~~~~~~~~~~~ + +If you've identified an issue, first search through the `list of +existing issues `__ on the +repo. If you are unable to find a similar issue, then create a new one. +Supply as much information as you can to reproduce the problematic +behavior. Also, include any additional insights like the behavior you +expect.
+ +Implementing Features or Fixing Bugs +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If you want to fix a specific issue, it's best to comment on the +individual issue with your intent. However, we do not lock or assign +issues except in cases where we have worked with the developer before. +It's best to strike up a conversation on the issue and discuss your +proposed solution. The PyTorch team can provide guidance that saves you +time. + +Issues that are labeled first-new-issue, low, or medium priority provide +the best entry points and are great places to start. + +Adding Tutorials +~~~~~~~~~~~~~~~~ + +A great deal of the tutorials on `pytorch.org `__ +come from the community itself and we welcome additional contributions. +To learn more about how to contribute a new tutorial, see the `PyTorch.org Tutorial Contribution Guide on +Github `__ + +Improving Documentation & Tutorials +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +We aim to produce high-quality documentation and tutorials. On rare +occasions, that content includes typos or bugs. If you find something you +can fix, send us a pull request for consideration. + +Take a look at the `Documentation <#on-documentation>`__ section to learn how our system +works. + +Participating in online discussions +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You can find active discussions happening on the PyTorch Discussion +`forum `__. + +Submitting pull requests to fix open issues +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You can view a list of all open issues +`here `__. Commenting on an +issue is a great way to get the attention of the team. From here you can +share your ideas and how you plan to resolve the issue. + +For more challenging issues, the team will provide feedback and +direction for how to best solve the issue. + +If you're not able to fix the issue itself, commenting and sharing +whether you can reproduce the issue can be useful for helping the team +identify problem areas. + +Reviewing open pull requests +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +We appreciate your help reviewing and commenting on pull requests. Our +team strives to keep the number of open pull requests at a manageable +size; we respond quickly when we need more information, and we +merge PRs that we think are useful. However, due to the high level of +interest, additional eyes on pull requests are appreciated. + +Improving code readability +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Improving code readability helps everyone. It is often better to submit a +small number of pull requests that touch few files versus a large pull +request that touches many files. Starting a discussion in the PyTorch +forum `here `__ or on an issue related to +your improvement is the best way to get started. + +Adding test cases to make the codebase more robust +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Additional test coverage is appreciated. + +Promoting PyTorch +~~~~~~~~~~~~~~~~~ + +Your use of PyTorch in your projects, research papers, write-ups, blogs, +or general discussions around the internet helps to raise awareness for +PyTorch and our growing community. Please reach out to +`pytorch-marketing@fb.com `__ +for marketing support. + +Triaging issues +~~~~~~~~~~~~~~~ + +If you feel that an issue could benefit from a particular tag or level +of complexity, comment on the issue and share your opinion. If you +feel an issue isn't categorized properly, comment and let the team know.
+ +About open source development +----------------------------- + +If this is your first time contributing to an open source project, some +aspects of the development process may seem unusual to you. + +- **There is no way to “claim” issues.** People often want to “claim” + an issue when they decide to work on it, to ensure that there isn't + wasted work when someone else ends up working on it. This doesn't + really work too well in open source, since someone may decide to work + on something, and end up not having time to do it. Feel free to give + information in an advisory fashion, but at the end of the day, we + will take running code and rough consensus. +- **There is a high bar for new functionality that is added.** Unlike + in a corporate environment, where the person who wrote code + implicitly “owns” it and can be expected to take care of it in the + beginning of its lifetime, once a pull request is merged into an open + source project, it immediately becomes the collective responsibility + of all maintainers on the project. When we merge code, we are saying + that we, the maintainers, are able to review subsequent changes and + make a bugfix to the code. This naturally leads to a higher standard + of contribution. + +Common Mistakes To Avoid +------------------------ + +- **Did you add tests?** (Or if the change is hard to test, did you + describe how you tested your change?) + + - We have a few motivations for why we ask for tests: + + 1. to help us tell if we break it later + 2. to help us tell if the patch is correct in the first place + (yes, we did review it, but as Knuth says, “beware of the + following code, for I have not run it, merely proven it + correct”) + + - When is it OK not to add a test? Sometimes a change can't be + conveniently tested, or the change is so obviously correct (and + unlikely to be broken) that it's OK not to test it. Conversely, + if a change seems likely (or is known to be likely) + to be accidentally broken, it's important to put in the time to + work out a testing strategy. + +- **Is your PR too long?** + + - It's easier for us to review and merge small PRs. Difficulty of + reviewing a PR scales nonlinearly with its size. + - When is it OK to submit a large PR? It helps a lot if there was a + corresponding design discussion in an issue, with sign off from + the people who are going to review your diff. We can also help + give advice about how to split up a large change into individually + shippable parts. Similarly, it helps if there is a complete + description of the contents of the PR: it's easier to review code + if we know what's inside! + +- **Comments for subtle things?** In cases where behavior of your code + is nuanced, please include extra comments and documentation to allow + us to better understand the intention of your code. +- **Did you add a hack?** Sometimes a hack is the right answer. But + usually we will have to discuss it. +- **Do you want to touch a very core component?** In order to prevent + major regressions, pull requests that touch core components receive + extra scrutiny. Make sure you've discussed your changes with the team + before undertaking major changes. +- **Want to add a new feature?** If you want to add new features, + comment your intention on the related issue. Our team tries to + comment on and provide feedback to the community. It's better to have + an open discussion with the team and the rest of the community prior + to building new features.
This helps us stay aware of what you're + working on and increases the chance that it'll be merged. +- **Did you touch code unrelated to the PR?** To aid in code review, + please only include files in your pull request that are directly + related to your changes. + +Frequently asked questions +--------------------------- + +- **How can I contribute as a reviewer?** There is lots of value when + community developers reproduce issues, try out new functionality, or + otherwise help us identify or troubleshoot issues. Commenting on + tasks or pull requests with your environment details is helpful and + appreciated. +- **CI tests failed, what does it mean?** Maybe you need to merge with + master or rebase onto the latest changes. Pushing your changes should + re-trigger CI tests. If the failures persist, you'll want to trace + through the error messages and resolve the related issues. +- **What are the highest risk changes?** Anything that touches build + configuration is a risky area. Please avoid changing these unless + you've had a discussion with the team beforehand. +- **Hey, a commit showed up on my branch, what's up with that?** + Sometimes another community member will provide a patch or fix to + your pull request or branch. This is often needed for getting CI tests + to pass. + +On Documentation +---------------- + +Python Docs +~~~~~~~~~~~ + +PyTorch documentation is generated from Python source using +`Sphinx `__. Generated HTML is +copied to the docs folder in the master branch of +`pytorch.github.io `__, +and is served via GitHub pages. + +- Site: http://pytorch.org/docs +- GitHub: https://github.com/pytorch/pytorch/tree/master/docs +- Served from: + `https://github.com/pytorch/pytorch.github.io/tree/master/doc `__ + +C++ Docs +~~~~~~~~ + +For C++ code we use Doxygen to generate the content files. The C++ docs +are built on a special server and the resulting files are copied to the +https://github.com/pytorch/cppdocs repo, and are served from GitHub +pages. + +- Site: http://pytorch.org/cppdocs +- GitHub: https://github.com/pytorch/pytorch/tree/master/docs/cpp +- Served from: https://github.com/pytorch/cppdocs + +Tutorials +--------- + +PyTorch tutorials are documents used to help users understand how to use +PyTorch to accomplish specific tasks or to understand more holistic concepts. +Tutorials are built using +`Sphinx-Gallery `__ +from executable Python source files, or from reStructuredText (rst) +files. + +- Site: http://pytorch.org/tutorials +- GitHub: http://github.com/pytorch/tutorials + +Tutorials Build Overview +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +For tutorials, `pull +requests `__ trigger a +rebuild of the entire site using CircleCI to test the effects of the +change. This build is sharded into 9 worker builds and takes around 40 +minutes total. At the same time, we do a Netlify build using *make +html-noplot*, which builds the site without rendering the notebook +output into pages for quick review. + +After a PR is accepted, the site is rebuilt and deployed from CircleCI.
+ +Contributing a new Tutorial +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +`PyTorch.org Tutorial Contribution +Guide `__ diff --git a/docs/stable/_sources/community/governance.rst.txt b/docs/stable/_sources/community/governance.rst.txt new file mode 100644 index 000000000000..74bd59393ae2 --- /dev/null +++ b/docs/stable/_sources/community/governance.rst.txt @@ -0,0 +1,154 @@ +PyTorch Governance +========================== + +Governance Philosophy and Guiding Tenets +----------------------------------------- + +PyTorch adopts a governance structure with a small set of maintainers +driving the overall project direction with a strong bias towards +PyTorch's design philosophy where design and code contributions are +valued. Beyond the core maintainers, there is also a slightly broader +set of core developers that have the ability to directly merge pull +requests and own various parts of the core code base. + +Beyond the maintainers and core devs, the community is encouraged to +contribute, file issues, make proposals, review pull requests and be +present in the community. Given contributions and willingness to +invest, anyone can be provided write access or ownership of parts of +the codebase. + +Based on this governance structure, the project has the following core +operating tenets by which decisions are made and overall culture is +derived: + +1. **Code contributions** matter much more than corporate sponsorship + and independent developers are highly valued. +2. **Project influence** is gained through contributions (whether PRs, + forum answers, code reviews or otherwise) + +Key people and their functions +------------------------------ + +Project Maintainers +~~~~~~~~~~~~~~~~~~~ + +Project maintainers provide leadership and direction for the PyTorch +project. Specifics include: + +- Articulate a cohesive long-term vision for the project +- Possess a deep understanding of the PyTorch code base +- Negotiate and resolve contentious issues in ways acceptable to all + parties involved + +PyTorch Maintainers: + +- Adam Paszke (`apaszke `__) +- Soumith Chintala (`soumith `__) +- Edward Yang (`ezyang `__) +- Greg Chanan (`gchanan `__) +- Dmytro Dzhulgakov (`dzhulgakov `__) +- (sunsetting) Sam Gross (`colesbury `__) + +Core Developers +~~~~~~~~~~~~~~~ + +The PyTorch project is developed by a team of core developers. You can +find the list of core developers at `PyTorch Governance \| Persons of +Interest `__. + +While membership is determined by presence in the "PyTorch core" team in +the "PyTorch" +`organization `__ on +GitHub, contribution takes many forms: + +- committing changes to the repository; +- reviewing pull requests by others; +- triaging bug reports on the issue tracker; +- discussing topics on official PyTorch communication channels. + +Moderators +~~~~~~~~~~ + +There is a group of people, some of which are not core developers, +responsible for ensuring that discussions on official communication +channels adhere to the Code of Conduct. They take action in view of +violations and help to support a healthy community. You can find the +list of moderators `here `__. + +Decision Making +--------------- + +Uncontroversial Changes +~~~~~~~~~~~~~~~~~~~~~~~ + +Primary work happens through bug tracker issues and pull requests on +GitHub. Core developers should avoid pushing their changes directly to +the PyTorch repository, instead relying on pull requests. Approving a +pull request by a core developer allows it to be merged without further +process. 
Core Developers and Project Maintainers ultimately approve +these changes. + +Notifying relevant experts about a bug tracker issue or a pull request +is important. Reviews from experts in the given interest area are +strongly preferred, especially on pull request approvals. Failure to do +so might result in the change being reverted by the relevant expert. + +Controversial decision process +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Substantial changes in a given interest area require a GitHub issue to +be opened for discussion. This includes: + +- Any semantic or syntactic change to the framework. +- Backwards-incompatible changes to the Python or C++ API. +- Additions to the core framework, including substantial new + functionality within an existing library. +- Removing core features. + +Project Maintainers ultimately approve these changes. + +FAQ +--- + +**Q: What if I would like to own (or partly own) a part of the project +such as a domain API (e.g., TorchVision)?** This is absolutely possible. +The first step is to start contributing to the existing project area and +contributing to its health and success. In addition to this, you can +make a proposal through a GitHub issue for new functionality or changes +to improve the project area. + +**Q: What if I am a company looking to use PyTorch internally for +development, can I be granted or purchase a board seat to drive the +project direction?** No, the PyTorch project is strictly governed by the +maintainer-driven project philosophy and does not have a board or +vehicle to take financial contributions relating to gaining influence +over technical direction. + +**Q: Does the PyTorch project support grants or ways to support +independent developers using or contributing to the project?** No, not +at this point. We are however looking at ways to better support the +community of independent developers around PyTorch. If you have +suggestions or input, please reach out on the PyTorch forums to +discuss. + +**Q: How do I contribute code to the project?** If the change is +relatively minor, a pull request on GitHub can be opened up immediately +for review and merge by the project committers. For larger changes, +please open an issue with a proposal to discuss first. Please also +see the `PyTorch Contributor +Guide `__ for contribution +guidelines. + +**Q: Can I become a committer on the project?** Unfortunately, the +current commit process to PyTorch involves an interaction with Facebook +infrastructure that can only be triggered by Facebook employees. We are +however looking at ways to expand the committer base to individuals +outside of Facebook and will provide an update when the tooling exists +to allow this. + +**Q: What if I would like to deliver a PyTorch tutorial at a conference +or otherwise? Do I need to be 'officially' a committer to do this?** No, +we encourage community members to showcase their work wherever and +whenever they can. Please reach out to +`pytorch-marketing@fb.com `__ +for marketing support.
diff --git a/docs/stable/_sources/community/persons_of_interest.rst.txt b/docs/stable/_sources/community/persons_of_interest.rst.txt new file mode 100644 index 000000000000..19474a9a7ba2 --- /dev/null +++ b/docs/stable/_sources/community/persons_of_interest.rst.txt @@ -0,0 +1,130 @@ +PyTorch Governance | Persons of Interest +========================================= + +General Maintainers +------------------- + +- Adam Paszke (`apaszke `__) +- Soumith Chintala (`soumith `__) +- Edward Yang (`ezyang `__) +- Greg Chanan (`gchanan `__) +- Dmytro Dzhulgakov (`dzhulgakov `__) +- (sunsetting) Sam Gross + (`colesbury `__) + +Module-level maintainers +------------------------ + +JIT +~~~ + +- Zach Devito (`zdevito `__) +- Michael Suo (`suo `__) + +Distributed +~~~~~~~~~~~ + +- Pieter Noordhuis (`pietern `__) +- Shen Li (`mrshenli `__) +- (sunsetting) Teng Li (`teng-li `__) + +Autograd Engine +~~~~~~~~~~~~~~~ + +- Alban Desmaison (`alband `__) +- Adam Paszke (`apaszke `__) + +Multiprocessing and DataLoaders +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- Simon Wang (`SsnL `__) +- Adam Paszke (`apaszke `__) +- (proposed) Vitaly Fedyunin + (`VitalyFedyunin `__) + +CUDA +~~~~ + +- Edward Yang (`ezyang `__) +- Natalia Gimelshein (`ngimel `__) + +C++ +~~~ + +- Will Feng (`yf225 `__) +- (sunsetting) Peter Goldsborough + (`goldsborough `__) + +Build + CI +~~~~~~~~~~ + +- Will Feng (`yf225 `__) +- Edward Yang (`ezyang `__) +- Jesse Hellemn (`pjh5 `__) +- Soumith Chintala (`soumith `__) +- (sunsetting) Orion Reblitz-Richardson + (`orionr `__) + +Distributions & RNG +~~~~~~~~~~~~~~~~~~~ + +- Fritz Obermeyer (`fritzo `__) +- Neeraj Pradhan (`neerajprad `__) +- Alican Bozkurt (`alicanb `__) +- Vishwak Srinivasan (`vishwakftw `__) + +C10 +~~~ + +- Sebastian Messmer (`smessmer `__) +- Edward Yang (`ezyang `__) + +ONNX <-> PyTorch +~~~~~~~~~~~~~~~~ + +- Lu Fang (`houseroad `__) + +torch.nn +~~~~~~~~ + +- Thomas Viehmann (`t-vi `__) +- Adam Paszke (`apaszke `__) +- Greg Chanan (`gchanan `__) +- Soumith Chintala (`soumith `__) +- Sam Gross (`colesbury `__) + +CPU Performance / SIMD +~~~~~~~~~~~~~~~~~~~~~~ + +- Christian Puhrsch (`cpuhrsch `__) +- Sam Gross (`colesbury `__) +- Richard Zou (`zou3519 `__) + +AMD/ROCm/HIP +~~~~~~~~~~~~ + +- Junjie Bai (`bddppq `__) +- Johannes M. Dieterich (`iotamudelta `__) + +Windows +~~~~~~~ + +- Peter Johnson (`peterjc123 `__) + +MKLDNN +~~~~~~ + +- Yinghai Lu (`yinghai `__) + +XLA +~~~ + +- Ailing Zhang (`ailzhang `__) +- Gregory Chanan (`gchanan `__) +- Davide Libenzi (`dlibenzi `__) +- Alex Suhan (`asuhan `__) + +PPC +~~~ + +- Alfredo Mendoza (`avmgithub `__) diff --git a/docs/stable/_sources/cpp_extension.rst.txt b/docs/stable/_sources/cpp_extension.rst.txt new file mode 100644 index 000000000000..d355aeeb7806 --- /dev/null +++ b/docs/stable/_sources/cpp_extension.rst.txt @@ -0,0 +1,12 @@ +torch.utils.cpp_extension +========================= + +.. currentmodule:: torch.utils.cpp_extension +.. autofunction:: CppExtension +.. autofunction:: CUDAExtension +.. autofunction:: BuildExtension +.. autofunction:: load +.. autofunction:: load_inline +.. autofunction:: include_paths +.. autofunction:: check_compiler_abi_compatibility +.. autofunction:: verify_ninja_availability diff --git a/docs/stable/_sources/cuda.rst.txt b/docs/stable/_sources/cuda.rst.txt new file mode 100644 index 000000000000..462967461cba --- /dev/null +++ b/docs/stable/_sources/cuda.rst.txt @@ -0,0 +1,59 @@ +torch.cuda +=================================== + +.. currentmodule:: torch.cuda + +.. 
automodule:: torch.cuda + :members: + +Random Number Generator +------------------------- +.. autofunction:: get_rng_state +.. autofunction:: get_rng_state_all +.. autofunction:: set_rng_state +.. autofunction:: set_rng_state_all +.. autofunction:: manual_seed +.. autofunction:: manual_seed_all +.. autofunction:: seed +.. autofunction:: seed_all +.. autofunction:: initial_seed + + +Communication collectives +------------------------- + +.. autofunction:: torch.cuda.comm.broadcast + +.. autofunction:: torch.cuda.comm.broadcast_coalesced + +.. autofunction:: torch.cuda.comm.reduce_add + +.. autofunction:: torch.cuda.comm.scatter + +.. autofunction:: torch.cuda.comm.gather + +Streams and events +------------------ + +.. autoclass:: Stream + :members: + +.. autoclass:: Event + :members: + +Memory management +----------------- +.. autofunction:: empty_cache +.. autofunction:: memory_allocated +.. autofunction:: max_memory_allocated +.. autofunction:: reset_max_memory_allocated +.. autofunction:: memory_cached +.. autofunction:: max_memory_cached +.. autofunction:: reset_max_memory_cached + +NVIDIA Tools Extension (NVTX) +----------------------------- + +.. autofunction:: torch.cuda.nvtx.mark +.. autofunction:: torch.cuda.nvtx.range_push +.. autofunction:: torch.cuda.nvtx.range_pop diff --git a/docs/stable/_sources/cuda_deterministic.rst.txt b/docs/stable/_sources/cuda_deterministic.rst.txt new file mode 100644 index 000000000000..ca8a8cd23c40 --- /dev/null +++ b/docs/stable/_sources/cuda_deterministic.rst.txt @@ -0,0 +1,5 @@ +.. note:: + + When using the CUDA backend, this operation may induce nondeterministic + behaviour that is not easily switched off. + Please see the notes on :doc:`/notes/randomness` for background. diff --git a/docs/stable/_sources/cuda_deterministic_backward.rst.txt b/docs/stable/_sources/cuda_deterministic_backward.rst.txt new file mode 100644 index 000000000000..30e80ebffb6e --- /dev/null +++ b/docs/stable/_sources/cuda_deterministic_backward.rst.txt @@ -0,0 +1,5 @@ +.. note:: + + When using the CUDA backend, this operation may induce nondeterministic + behaviour in the backward pass that is not easily switched off. + Please see the notes on :doc:`/notes/randomness` for background. diff --git a/docs/stable/_sources/cudnn_deterministic.rst.txt b/docs/stable/_sources/cudnn_deterministic.rst.txt new file mode 100644 index 000000000000..a82686d5b62e --- /dev/null +++ b/docs/stable/_sources/cudnn_deterministic.rst.txt @@ -0,0 +1,8 @@ +.. note:: + + In some circumstances when using the CUDA backend with CuDNN, this operator + may select a nondeterministic algorithm to increase performance. If this is + undesirable, you can try to make the operation deterministic (potentially at + a performance cost) by setting ``torch.backends.cudnn.deterministic = + True``. + Please see the notes on :doc:`/notes/randomness` for background. diff --git a/docs/stable/_sources/cudnn_persistent_rnn.rst.txt b/docs/stable/_sources/cudnn_persistent_rnn.rst.txt new file mode 100644 index 000000000000..31938121cd71 --- /dev/null +++ b/docs/stable/_sources/cudnn_persistent_rnn.rst.txt @@ -0,0 +1,9 @@ +.. note:: + + If the following conditions are satisfied: + 1) cudnn is enabled, + 2) input data is on the GPU, + 3) input data has dtype ``torch.float16``, + 4) a V100 GPU is used, + 5) input data is not in ``PackedSequence`` format, + then the persistent algorithm can be selected to improve performance.
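As an illustrative aside for the cuDNN determinism note above (a sketch, not part of the shipped files; pairing ``benchmark = False`` with it is a common convention rather than a requirement)::

    import torch

    # Trade performance for reproducibility, per the note above.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False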
diff --git a/docs/stable/_sources/data.rst.txt b/docs/stable/_sources/data.rst.txt new file mode 100644 index 000000000000..e074943fe544 --- /dev/null +++ b/docs/stable/_sources/data.rst.txt @@ -0,0 +1,413 @@ +torch.utils.data +=================================== + +.. automodule:: torch.utils.data + +At the heart of the PyTorch data loading utility is the :class:`torch.utils.data.DataLoader` +class. It represents a Python iterable over a dataset, with support for + +* `map-style and iterable-style datasets `_, + +* `customizing data loading order `_, + +* `automatic batching `_, + +* `single- and multi-process data loading `_, + +* `automatic memory pinning `_. + +These options are configured by the constructor arguments of a +:class:`~torch.utils.data.DataLoader`, which has signature:: + + DataLoader(dataset, batch_size=1, shuffle=False, sampler=None, + batch_sampler=None, num_workers=0, collate_fn=None, + pin_memory=False, drop_last=False, timeout=0, + worker_init_fn=None) + +The sections below describe in detail the effects and usage of these options. + +Dataset Types +------------- + +The most important argument of the :class:`~torch.utils.data.DataLoader` +constructor is :attr:`dataset`, which indicates a dataset object to load data +from. PyTorch supports two different types of datasets: + +* `map-style datasets `_, + +* `iterable-style datasets `_. + +Map-style datasets +^^^^^^^^^^^^^^^^^^ + +A map-style dataset is one that implements the :meth:`__getitem__` and +:meth:`__len__` protocols, and represents a map from (possibly non-integral) +indices/keys to data samples. + +For example, such a dataset, when accessed with ``dataset[idx]``, could read +the ``idx``-th image and its corresponding label from a folder on the disk. + +See :class:`~torch.utils.data.Dataset` for more details. + +Iterable-style datasets +^^^^^^^^^^^^^^^^^^^^^^^ + +An iterable-style dataset is an instance of a subclass of :class:`~torch.utils.data.IterableDataset` +that implements the :meth:`__iter__` protocol, and represents an iterable over +data samples. This type of dataset is particularly suitable for cases where +random reads are expensive or even improbable, and where the batch size depends +on the fetched data. + +For example, such a dataset, when ``iter(dataset)`` is called, could return a +stream of data reading from a database, a remote server, or even logs generated +in real time. + +See :class:`~torch.utils.data.IterableDataset` for more details. + +.. note:: When using an :class:`~torch.utils.data.IterableDataset` with + `multi-process data loading `_, the same + dataset object is replicated on each worker process, and thus the + replicas must be configured differently to avoid duplicated data. See + :class:`~torch.utils.data.IterableDataset` documentation for how to + achieve this. + +Data Loading Order and :class:`~torch.utils.data.Sampler` +--------------------------------------------------------- + +For `iterable-style datasets `_, data loading order +is entirely controlled by the user-defined iterable. This allows easier +implementations of chunk-reading and dynamic batch size (e.g., by yielding a +batched sample each time). + +The rest of this section concerns the case with +`map-style datasets `_. :class:`torch.utils.data.Sampler` +classes are used to specify the sequence of indices/keys used in data loading. +They represent iterable objects over the indices to datasets.
E.g., in the +common case with stochastic gradient descent (SGD), a +:class:`~torch.utils.data.Sampler` could randomly permute a list of indices +and yield each one at a time, or yield a small number of them for mini-batch +SGD. + +A sequential or shuffled sampler will be automatically constructed based on the :attr:`shuffle` argument to a :class:`~torch.utils.data.DataLoader`. +Alternatively, users may use the :attr:`sampler` argument to specify a +custom :class:`~torch.utils.data.Sampler` object that at each time yields +the next index/key to fetch. + +A custom :class:`~torch.utils.data.Sampler` that yields a list of batch +indices at a time can be passed as the :attr:`batch_sampler` argument. +Automatic batching can also be enabled via the :attr:`batch_size` and +:attr:`drop_last` arguments. See +`the next section `_ for more details +on this. + +.. note:: + Neither :attr:`sampler` nor :attr:`batch_sampler` is compatible with + iterable-style datasets, since such datasets have no notion of a key or an + index. + +Loading Batched and Non-Batched Data +------------------------------------ + +:class:`~torch.utils.data.DataLoader` supports automatically collating +individual fetched data samples into batches via arguments +:attr:`batch_size`, :attr:`drop_last`, and :attr:`batch_sampler`. + + +Automatic batching (default) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +This is the most common case, and corresponds to fetching a minibatch of +data and collating them into batched samples, i.e., containing Tensors with +one dimension being the batch dimension (usually the first). + +When :attr:`batch_size` (default ``1``) is not ``None``, the data loader yields +batched samples instead of individual samples. The :attr:`batch_size` and +:attr:`drop_last` arguments are used to specify how the data loader obtains +batches of dataset keys. For map-style datasets, users can alternatively +specify :attr:`batch_sampler`, which yields a list of keys at a time. + +.. note:: + The :attr:`batch_size` and :attr:`drop_last` arguments essentially are used + to construct a :attr:`batch_sampler` from :attr:`sampler`. For map-style + datasets, the :attr:`sampler` is either provided by the user or constructed + based on the :attr:`shuffle` argument. For iterable-style datasets, the + :attr:`sampler` is a dummy infinite one. See + `this section `_ for more details on + samplers. + +.. note:: + When fetching from + `iterable-style datasets `_ with + `multi-processing `_, the :attr:`drop_last` + argument drops the last non-full batch of each worker's dataset replica. + +After fetching a list of samples using the indices from the sampler, the function +passed as the :attr:`collate_fn` argument is used to collate lists of samples +into batches. + +In this case, loading from a map-style dataset is roughly equivalent with:: + + for indices in batch_sampler: + yield collate_fn([dataset[i] for i in indices]) + +and loading from an iterable-style dataset is roughly equivalent with:: + + dataset_iter = iter(dataset) + for indices in batch_sampler: + yield collate_fn([next(dataset_iter) for _ in indices]) + +A custom :attr:`collate_fn` can be used to customize collation, e.g., padding +sequential data to the max length of a batch. See +`this section `_ for more about :attr:`collate_fn`. + +Disable automatic batching +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In certain cases, users may want to handle batching manually in dataset code, +or simply load individual samples.
For example, it could be cheaper to directly +load batched data (e.g., bulk reads from a database or reading continuous +chunks of memory), or the batch size is data dependent, or the program is +designed to work on individual samples. Under these scenarios, it's likely +better to not use automatic batching (where :attr:`collate_fn` is used to +collate the samples), but let the data loader directly return each member of +the :attr:`dataset` object. + +When both :attr:`batch_size` and :attr:`batch_sampler` are ``None``, automatic +batching is disabled. Each sample obtained from the :attr:`dataset` is +processed with the function passed as the :attr:`collate_fn` argument. + +**When automatic batching is disabled**, the default :attr:`collate_fn` simply +converts NumPy arrays into PyTorch Tensors, and keeps everything else untouched. + +In this case, loading from a map-style dataset is roughly equivalent with:: + + for index in sampler: + yield collate_fn(dataset[index]) + +and loading from an iterable-style dataset is roughly equivalent with:: + + for data in iter(dataset): + yield collate_fn(data) + +See `this section `_ for more about :attr:`collate_fn`. + +.. _dataloader-collate_fn: + +Working with :attr:`collate_fn` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The use of :attr:`collate_fn` is slightly different when automatic batching is +enabled or disabled. + +**When automatic batching is disabled**, :attr:`collate_fn` is called with +each individual data sample, and the output is yielded from the data loader +iterator. In this case, the default :attr:`collate_fn` simply converts NumPy +arrays into PyTorch Tensors. + +**When automatic batching is enabled**, :attr:`collate_fn` is called with a list +of data samples each time. It is expected to collate the input samples into +a batch for yielding from the data loader iterator. The rest of this section +describes the behavior of the default :attr:`collate_fn` in this case. + +For instance, if each data sample consists of a 3-channel image and an integral +class label, i.e., each element of the dataset returns a tuple +``(image, class_index)``, the default :attr:`collate_fn` collates a list of +such tuples into a single tuple of a batched image Tensor and a batched class +label Tensor. In particular, the default :attr:`collate_fn` has the following +properties: + +* It always prepends a new dimension as the batch dimension. + +* It automatically converts NumPy arrays and Python numerical values into + PyTorch Tensors. + +* It preserves the data structure, e.g., if each sample is a dictionary, it + outputs a dictionary with the same set of keys but batched Tensors as values + (or lists if the values cannot be converted into Tensors). Same + for ``list`` s, ``tuple`` s, ``namedtuple`` s, etc. + +Users may use customized :attr:`collate_fn` to achieve custom batching, e.g., +collating along a dimension other than the first, padding sequences of +various lengths, or adding support for custom data types. + +Single- and Multi-process Data Loading +-------------------------------------- + +A :class:`~torch.utils.data.DataLoader` uses single-process data loading by +default. + +Within a Python process, the +`Global Interpreter Lock (GIL) `_ +prevents fully parallelizing Python code across threads. To avoid blocking +computation code with data loading, PyTorch provides an easy switch to perform +multi-process data loading by simply setting the argument :attr:`num_workers` +to a positive integer.
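A minimal sketch tying these options together (illustrative only; the random tensors below stand in for a real dataset)::

    import torch
    from torch.utils.data import DataLoader, TensorDataset

    dataset = TensorDataset(torch.randn(100, 3), torch.randint(0, 2, (100,)))
    loader = DataLoader(dataset, batch_size=4, shuffle=True,
                        num_workers=2, pin_memory=True)
    for features, labels in loader:
        pass  # each iteration yields batched Tensors of shape (4, 3) and (4,)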
+
+Single- and Multi-process Data Loading
+--------------------------------------
+
+A :class:`~torch.utils.data.DataLoader` uses single-process data loading by
+default.
+
+Within a Python process, the
+`Global Interpreter Lock (GIL) `_
+prevents fully parallelizing Python code across threads. To avoid blocking
+computation code with data loading, PyTorch provides an easy switch to perform
+multi-process data loading by simply setting the argument :attr:`num_workers`
+to a positive integer.
+
+Single-process data loading (default)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In this mode, data fetching is done in the same process in which the
+:class:`~torch.utils.data.DataLoader` is initialized. Therefore, data loading
+may block computation. However, this mode may be preferred when the resources
+used for sharing data among processes (e.g., shared memory, file descriptors)
+are limited, or when the entire dataset is small and can be loaded entirely in
+memory. Additionally, single-process loading often shows more readable error
+traces and thus is useful for debugging.
+
+
+Multi-process data loading
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Setting the argument :attr:`num_workers` to a positive integer will
+turn on multi-process data loading with the specified number of loader worker
+processes.
+
+In this mode, each time an iterator of a :class:`~torch.utils.data.DataLoader`
+is created (e.g., when you call ``enumerate(dataloader)``), :attr:`num_workers`
+worker processes are created. At this point, the :attr:`dataset`,
+:attr:`collate_fn`, and :attr:`worker_init_fn` are passed to each
+worker, where they are used to initialize and fetch data. This means that
+dataset access, together with its internal IO and transforms
+(including :attr:`collate_fn`), runs in the worker process.
+
+:func:`torch.utils.data.get_worker_info()` returns various useful information
+in a worker process (including the worker id, dataset replica, initial seed,
+etc.), and returns ``None`` in the main process. Users may use this function in
+dataset code and/or :attr:`worker_init_fn` to individually configure each
+dataset replica, and to determine whether the code is running in a worker
+process. For example, this can be particularly helpful in sharding the dataset.
+
+For map-style datasets, the main process generates the indices using
+:attr:`sampler` and sends them to the workers. Hence, any shuffle
+randomization is done in the main process, which guides loading by assigning
+the indices to load.
+
+For iterable-style datasets, since each worker process gets a replica of the
+:attr:`dataset` object, naive multi-process loading will often result in
+duplicated data. Using :func:`torch.utils.data.get_worker_info()` and/or
+:attr:`worker_init_fn`, users may configure each replica independently. (See
+the :class:`~torch.utils.data.IterableDataset` documentation for how to
+achieve this.) For similar reasons, in multi-process loading, the
+:attr:`drop_last` argument drops the last non-full batch of each worker's
+iterable-style dataset replica.
+
+Workers are shut down once the end of the iteration is reached, or when the
+iterator is garbage collected.
+
+.. warning::
+  It is generally not recommended to return CUDA tensors in multi-process
+  loading because of many subtleties in using CUDA and sharing CUDA tensors in
+  multiprocessing (see :ref:`multiprocessing-cuda-note`). Instead, we recommend
+  using `automatic memory pinning `_ (i.e., setting
+  :attr:`pin_memory=True`), which enables fast data transfer to CUDA-enabled
+  GPUs.
+
+Platform-specific behaviors
+"""""""""""""""""""""""""""
+
+Since workers rely on Python :py:mod:`multiprocessing`, worker launch behavior
+differs on Windows compared to Unix.
+
+* On Unix, :func:`fork` is the default :py:mod:`multiprocessing` start method.
+  Using :func:`fork`, child workers typically can access the :attr:`dataset` and
+  Python argument functions directly through the cloned address space.
+
+* On Windows, :func:`spawn` is the default :py:mod:`multiprocessing` start method.
+  Using :func:`spawn`, another interpreter is launched which runs your main script,
+  followed by the internal worker function that receives the :attr:`dataset`,
+  :attr:`collate_fn` and other arguments through :py:mod:`pickle` serialization.
+
+This separate serialization means that you should take two steps (sketched
+below) to ensure you are compatible with Windows while using multi-process
+data loading:
+
+- Wrap most of your main script's code in an ``if __name__ == '__main__':``
+  block, to make sure it doesn't run again (most likely causing errors) when
+  each worker process is launched. You can place your dataset and
+  :class:`~torch.utils.data.DataLoader` instance creation logic here, as it
+  doesn't need to be re-executed in workers.
+
+- Make sure that any custom :attr:`collate_fn`, :attr:`worker_init_fn`
+  or :attr:`dataset` code is declared as a top-level definition, outside of the
+  ``__main__`` check. This ensures that they are available in worker processes.
+  (This is needed since functions are pickled as references only, not as
+  bytecode.)
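+
+Putting these two steps together, a minimal Windows-compatible skeleton (the
+dataset, collate function, and worker-init function below are hypothetical)
+could look like::
+
+    import torch
+    from torch.utils.data import DataLoader, TensorDataset
+
+    # top-level definitions, so workers can pickle them by reference
+    def my_collate(batch):
+        inputs, targets = zip(*batch)
+        return torch.stack(inputs, 0), torch.stack(targets, 0)
+
+    def my_worker_init(worker_id):
+        print('worker {} started'.format(worker_id))
+
+    if __name__ == '__main__':
+        # dataset and DataLoader creation stay inside the __main__ guard
+        inps = torch.randn(100, 5)
+        tgts = torch.randint(0, 2, (100,))
+        loader = DataLoader(TensorDataset(inps, tgts), batch_size=10,
+                            num_workers=2, collate_fn=my_collate,
+                            worker_init_fn=my_worker_init)
+        for xb, yb in loader:
+            pass  # training step would go here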
+
+Randomness in multi-process data loading
+""""""""""""""""""""""""""""""""""""""""""
+
+By default, each worker will have its PyTorch seed set to
+``base_seed + worker_id``, where ``base_seed`` is a long generated by the main
+process using its RNG (thereby consuming an RNG state). However, seeds for
+other libraries (e.g., NumPy) may be duplicated upon initializing workers,
+causing each worker to return identical random numbers. (See
+:ref:`this section ` in the FAQ.)
+
+In :attr:`worker_init_fn`, you may access the PyTorch seed set for each worker
+with either :func:`torch.utils.data.get_worker_info().seed `
+or :func:`torch.initial_seed()`, and use it to seed other libraries before data
+loading.
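+
+For instance, a sketch of a :attr:`worker_init_fn` that seeds NumPy from the
+worker's PyTorch seed (NumPy seeds must fit in 32 bits, hence the modulo)
+could look like::
+
+    import numpy as np
+    import torch
+
+    def worker_init_fn(worker_id):
+        # derive a distinct per-worker NumPy seed from the PyTorch seed
+        worker_seed = torch.initial_seed() % 2**32
+        np.random.seed(worker_seed)
+
+    # then pass it to the loader, e.g.:
+    # loader = DataLoader(dataset, num_workers=4, worker_init_fn=worker_init_fn)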
+
+Memory Pinning
+--------------
+
+Host to GPU copies are much faster when they originate from pinned (page-locked)
+memory. See :ref:`cuda-memory-pinning` for more details on when and how to use
+pinned memory generally.
+
+For data loading, passing :attr:`pin_memory=True` to a
+:class:`~torch.utils.data.DataLoader` will automatically put the fetched data
+Tensors in pinned memory, and thus enable faster data transfer to CUDA-enabled
+GPUs.
+
+The default memory pinning logic only recognizes Tensors, and maps and
+iterables containing Tensors. By default, if the pinning logic sees a batch
+that is a custom type (which will occur if you have a :attr:`collate_fn` that
+returns a custom batch type), or if each element of your batch is a custom
+type, the pinning logic will not recognize them, and it will return that batch
+(or those elements) without pinning the memory. To enable memory pinning for
+custom batch or data type(s), define a :meth:`pin_memory` method on your
+custom type(s).
+
+See the example below.
+
+Example::
+
+    import torch
+    from torch.utils.data import DataLoader, TensorDataset
+
+    class SimpleCustomBatch:
+        def __init__(self, data):
+            transposed_data = list(zip(*data))
+            self.inp = torch.stack(transposed_data[0], 0)
+            self.tgt = torch.stack(transposed_data[1], 0)
+
+        # custom memory pinning method on custom type
+        def pin_memory(self):
+            self.inp = self.inp.pin_memory()
+            self.tgt = self.tgt.pin_memory()
+            return self
+
+    def collate_wrapper(batch):
+        return SimpleCustomBatch(batch)
+
+    inps = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
+    tgts = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
+    dataset = TensorDataset(inps, tgts)
+
+    loader = DataLoader(dataset, batch_size=2, collate_fn=collate_wrapper,
+                        pin_memory=True)
+
+    for batch_ndx, sample in enumerate(loader):
+        print(sample.inp.is_pinned())
+        print(sample.tgt.is_pinned())
+
+
+.. autoclass:: DataLoader
+.. autoclass:: Dataset
+.. autoclass:: IterableDataset
+.. autoclass:: TensorDataset
+.. autoclass:: ConcatDataset
+.. autoclass:: ChainDataset
+.. autoclass:: Subset
+.. autofunction:: torch.utils.data.get_worker_info
+.. autofunction:: torch.utils.data.random_split
+.. autoclass:: torch.utils.data.Sampler
+.. autoclass:: torch.utils.data.SequentialSampler
+.. autoclass:: torch.utils.data.RandomSampler
+.. autoclass:: torch.utils.data.SubsetRandomSampler
+.. autoclass:: torch.utils.data.WeightedRandomSampler
+.. autoclass:: torch.utils.data.BatchSampler
+.. autoclass:: torch.utils.data.distributed.DistributedSampler
diff --git a/docs/stable/_sources/distributed.rst.txt b/docs/stable/_sources/distributed.rst.txt
new file mode 100644
index 000000000000..f0d35df46eae
--- /dev/null
+++ b/docs/stable/_sources/distributed.rst.txt
@@ -0,0 +1,422 @@
+.. role:: hidden
+    :class: hidden-section
+
+Distributed communication package - torch.distributed
+=====================================================
+
+.. automodule:: torch.distributed
+.. currentmodule:: torch.distributed
+
+Backends
+--------
+
+``torch.distributed`` supports three backends, each with
+different capabilities. The table below shows which functions are available
+for use with CPU / CUDA tensors.
+MPI supports CUDA only if the implementation used to build PyTorch supports it.
+
+
++------------+-----------+-----------+-----------+
+| Backend    | ``gloo``  | ``mpi``   | ``nccl``  |
++------------+-----+-----+-----+-----+-----+-----+
+| Device     | CPU | GPU | CPU | GPU | CPU | GPU |
++============+=====+=====+=====+=====+=====+=====+
+| send       | ✓   | ✘   | ✓   | ?   | ✘   | ✘   |
++------------+-----+-----+-----+-----+-----+-----+
+| recv       | ✓   | ✘   | ✓   | ?   | ✘   | ✘   |
++------------+-----+-----+-----+-----+-----+-----+
+| broadcast  | ✓   | ✓   | ✓   | ?   | ✘   | ✓   |
++------------+-----+-----+-----+-----+-----+-----+
+| all_reduce | ✓   | ✓   | ✓   | ?   | ✘   | ✓   |
++------------+-----+-----+-----+-----+-----+-----+
+| reduce     | ✓   | ✘   | ✓   | ?   | ✘   | ✓   |
++------------+-----+-----+-----+-----+-----+-----+
+| all_gather | ✓   | ✘   | ✓   | ?   | ✘   | ✓   |
++------------+-----+-----+-----+-----+-----+-----+
+| gather     | ✓   | ✘   | ✓   | ?   | ✘   | ✘   |
++------------+-----+-----+-----+-----+-----+-----+
+| scatter    | ✓   | ✘   | ✓   | ?   | ✘   | ✘   |
++------------+-----+-----+-----+-----+-----+-----+
+| barrier    | ✓   | ✘   | ✓   | ?   | ✘   | ✓   |
++------------+-----+-----+-----+-----+-----+-----+
+
+
+Backends that come with PyTorch
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+PyTorch distributed currently only supports Linux. By default, the Gloo and NCCL backends
+are built and included in PyTorch distributed (NCCL only when building with CUDA).
+MPI is an
+optional backend that can only be included if you build PyTorch from source
+(e.g., building PyTorch on a host that has MPI installed).
+
+
+Which backend to use?
+^^^^^^^^^^^^^^^^^^^^^
+
+In the past, we were often asked: "which backend should I use?".
+
+- Rule of thumb
+
+  - Use the NCCL backend for distributed **GPU** training.
+  - Use the Gloo backend for distributed **CPU** training.
+
+- GPU hosts with InfiniBand interconnect
+
+  - Use NCCL, since it's the only backend that currently supports
+    InfiniBand and GPUDirect.
+
+- GPU hosts with Ethernet interconnect
+
+  - Use NCCL, since it currently provides the best distributed GPU
+    training performance, especially for multiprocess single-node or
+    multi-node distributed training. If you encounter any problem with
+    NCCL, use Gloo as the fallback option. (Note that Gloo currently
+    runs slower than NCCL for GPUs.)
+
+- CPU hosts with InfiniBand interconnect
+
+  - If IP over IB is enabled on your InfiniBand network, use Gloo;
+    otherwise, use MPI instead. We are planning on adding InfiniBand
+    support for Gloo in upcoming releases.
+
+- CPU hosts with Ethernet interconnect
+
+  - Use Gloo, unless you have specific reasons to use MPI.
+
+Common environment variables
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Choosing the network interface to use
+"""""""""""""""""""""""""""""""""""""
+
+By default, both the NCCL and Gloo backends will try to find the right network interface to use.
+If the automatically detected interface is not correct, you can override it using the following
+environment variables (applicable to the respective backend):
+
+* **NCCL_SOCKET_IFNAME**, for example ``export NCCL_SOCKET_IFNAME=eth0``
+* **GLOO_SOCKET_IFNAME**, for example ``export GLOO_SOCKET_IFNAME=eth0``
+
+If you're using the Gloo backend, you can specify multiple interfaces by separating
+them with a comma, like this: ``export GLOO_SOCKET_IFNAME=eth0,eth1,eth2,eth3``.
+The backend will dispatch operations in a round-robin fashion across these interfaces.
+It is imperative that all processes specify the same number of interfaces in this variable.
+
+Other NCCL environment variables
+""""""""""""""""""""""""""""""""
+
+NCCL also provides a number of environment variables for fine-tuning purposes.
+
+Commonly used ones include the following for debugging purposes:
+
+- ``export NCCL_DEBUG=INFO``
+- ``export NCCL_DEBUG_SUBSYS=ALL``
+
+For the full list of NCCL environment variables, please refer to
+`NVIDIA NCCL's official documentation `_.
+
+
+.. _distributed-basics:
+
+Basics
+------
+
+The `torch.distributed` package provides PyTorch support and communication primitives
+for multiprocess parallelism across several computation nodes running on one or more
+machines. The class :func:`torch.nn.parallel.DistributedDataParallel` builds on this
+functionality to provide synchronous distributed training as a wrapper around any
+PyTorch model. This differs from the kinds of parallelism provided by
+:doc:`multiprocessing` and :func:`torch.nn.DataParallel` in that it supports
+multiple network-connected machines and in that the user must explicitly launch a separate
+copy of the main training script for each process.
+
+In the single-machine synchronous case, `torch.distributed` or the
+:func:`torch.nn.parallel.DistributedDataParallel` wrapper may still have advantages over other
+approaches to data-parallelism, including :func:`torch.nn.DataParallel`:
+
+* Each process maintains its own optimizer and performs a complete optimization step with each
+  iteration.
+  While this may appear redundant, since the gradients have already been gathered
+  together and averaged across processes and are thus the same for every process, this means
+  that no parameter broadcast step is needed, reducing the time spent transferring tensors
+  between nodes.
+* Each process contains an independent Python interpreter, eliminating the extra interpreter
+  overhead and "GIL-thrashing" that comes from driving several execution threads, model
+  replicas, or GPUs from a single Python process. This is especially important for models that
+  make heavy use of the Python runtime, including models with recurrent layers or many small
+  components.
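+
+As a schematic sketch of the wrapper (not a complete recipe: the backend,
+address, and sizes below are placeholder values, ``args.rank`` is assumed to
+come from your launcher, and real multi-process training launches one copy of
+this script per process)::
+
+    import torch.distributed as dist
+    import torch.nn as nn
+
+    # placeholder rendezvous settings; see Initialization below
+    dist.init_process_group(backend='gloo',
+                            init_method='tcp://10.1.1.20:23456',
+                            world_size=4, rank=args.rank)
+
+    model = nn.Linear(10, 10)
+    # gradients are synchronized across all processes during backward()
+    ddp_model = nn.parallel.DistributedDataParallel(model)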
+
+Initialization
+--------------
+
+The package needs to be initialized using the :func:`torch.distributed.init_process_group`
+function before calling any other methods. This blocks until all processes have
+joined.
+
+.. autofunction:: init_process_group
+
+.. autoclass:: Backend
+
+.. autofunction:: get_backend
+
+.. autofunction:: get_rank
+
+.. autofunction:: get_world_size
+
+.. autofunction:: is_initialized
+
+.. autofunction:: is_mpi_available
+
+.. autofunction:: is_nccl_available
+
+--------------------------------------------------------------------------------
+
+Currently three initialization methods are supported:
+
+TCP initialization
+^^^^^^^^^^^^^^^^^^
+
+There are two ways to initialize using TCP, both requiring a network address
+reachable from all processes and a desired ``world_size``. The first way
+requires specifying an address that belongs to the rank 0 process. This
+initialization method requires that all processes have manually specified ranks.
+
+Note that multicast addresses are not supported anymore in the latest distributed
+package. ``group_name`` is deprecated as well.
+
+::
+
+    import torch.distributed as dist
+
+    # Use the address of one of the machines
+    dist.init_process_group(backend, init_method='tcp://10.1.1.20:23456',
+                            rank=args.rank, world_size=4)
+
+Shared file-system initialization
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Another initialization method makes use of a file system that is shared and
+visible from all machines in a group, along with a desired ``world_size``. The URL should start
+with ``file://`` and contain a path to a non-existent file (in an existing
+directory) on a shared file system. File-system initialization will automatically
+create that file if it doesn't exist, but will not delete the file. Therefore, it
+is your responsibility to make sure that the file is cleaned up before the next
+:func:`init_process_group` call on the same file path/name.
+
+Note that automatic rank assignment is not supported anymore in the latest
+distributed package, and ``group_name`` is deprecated as well.
+
+.. warning::
+    This method assumes that the file system supports locking using ``fcntl`` - most
+    local systems and NFS support it.
+
+.. warning::
+    This method will always create the file and try its best to clean up and remove
+    the file at the end of the program. In other words, each initialization with
+    the file init method needs a brand new empty file in order to succeed. If the
+    same file used by the previous initialization (which happens not to get
+    cleaned up) is used again, this is unexpected behavior and can often cause
+    deadlocks and failures. Therefore, even though this method will try its best
+    to clean up the file, if the auto-delete happens to be unsuccessful, it is
+    your responsibility to ensure that the file is removed at the end of training
+    to prevent the same file from being reused during the next run. This is
+    especially important if you plan to call :func:`init_process_group` multiple
+    times on the same file name. In other words, if the file is not
+    removed/cleaned up and you call :func:`init_process_group` again on that
+    file, failures are expected. The rule of thumb is to make sure that the file
+    is non-existent or empty every time :func:`init_process_group` is called.
+
+::
+
+    import torch.distributed as dist
+
+    # rank should always be specified
+    dist.init_process_group(backend, init_method='file:///mnt/nfs/sharedfile',
+                            world_size=4, rank=args.rank)
+
+Environment variable initialization
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This method will read the configuration from environment variables, allowing
+one to fully customize how the information is obtained. The variables to be set
+are:
+
+* ``MASTER_PORT`` - required; has to be a free port on the machine with rank 0
+* ``MASTER_ADDR`` - required (except for rank 0); address of the rank 0 node
+* ``WORLD_SIZE`` - required; can be set either here, or in a call to the init function
+* ``RANK`` - required; can be set either here, or in a call to the init function
+
+The machine with rank 0 will be used to set up all connections.
+
+This is the default method, meaning that ``init_method`` does not have to be specified (or
+can be ``env://``).
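+
+For example (a sketch; the address and port are placeholder values, and
+``backend`` and ``args.rank`` are assumed to be defined as in the examples
+above), the variables can be set from Python before initialization::
+
+    import os
+    import torch.distributed as dist
+
+    # must agree across all processes
+    os.environ['MASTER_ADDR'] = '10.1.1.20'
+    os.environ['MASTER_PORT'] = '23456'
+
+    # world_size and rank can be passed here instead of setting
+    # the WORLD_SIZE and RANK environment variables
+    dist.init_process_group(backend, init_method='env://',
+                            world_size=4, rank=args.rank)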
+
+Groups
+------
+
+By default collectives operate on the default group (also called the world) and
+require all processes to enter the distributed function call. However, some workloads can benefit
+from more fine-grained communication. This is where distributed groups come
+into play. The :func:`~torch.distributed.new_group` function can be
+used to create new groups, with arbitrary subsets of all processes. It returns
+an opaque group handle that can be given as a ``group`` argument to all collectives
+(collectives are distributed functions to exchange information in certain well-known programming patterns).
+
+
+.. autofunction:: new_group
+
+Point-to-point communication
+----------------------------
+
+.. autofunction:: send
+
+.. autofunction:: recv
+
+:func:`~torch.distributed.isend` and :func:`~torch.distributed.irecv`
+return distributed request objects when used. In general, the type of this object is unspecified
+as they should never be created manually, but they are guaranteed to support two methods:
+
+* ``is_completed()`` - returns True if the operation has finished
+* ``wait()`` - will block the process until the operation is finished.
+  ``is_completed()`` is guaranteed to return True once it returns.
+
+.. autofunction:: isend
+
+.. autofunction:: irecv
+
+Synchronous and asynchronous collective operations
+--------------------------------------------------
+Every collective operation function supports the following two kinds of operations:
+
+Synchronous operation - the default mode, when ``async_op`` is set to ``False``.
+When the function returns, it is guaranteed that
+the collective operation is performed (not necessarily completed if it's a CUDA op, since all
+CUDA ops are asynchronous), and any further function calls depending on the data of the
+collective operation can be made. In the synchronous mode, the collective function does not
+return anything.
+
+Asynchronous operation - when ``async_op`` is set to ``True``. The collective operation function
+returns a distributed request object. In general, you don't need to create it manually and it
+is guaranteed to support two methods:
+
+* ``is_completed()`` - returns True if the operation has finished
+* ``wait()`` - will block the process until the operation is finished.
+
+
+Collective functions
+--------------------
+
+.. autofunction:: broadcast
+
+.. autofunction:: all_reduce
+
+.. autofunction:: reduce
+
+.. autofunction:: all_gather
+
+.. autofunction:: gather
+
+.. autofunction:: scatter
+
+.. autofunction:: barrier
+
+.. autoclass:: ReduceOp
+
+.. class:: reduce_op
+
+    Deprecated enum-like class for reduction operations: ``SUM``, ``PRODUCT``,
+    ``MIN``, and ``MAX``.
+
+    :class:`~torch.distributed.ReduceOp` is recommended to use instead.
+
+
+Multi-GPU collective functions
+------------------------------
+
+If you have more than one GPU on each node, when using the NCCL and Gloo backends,
+:func:`~torch.distributed.broadcast_multigpu`,
+:func:`~torch.distributed.all_reduce_multigpu`,
+:func:`~torch.distributed.reduce_multigpu` and
+:func:`~torch.distributed.all_gather_multigpu` support distributed collective
+operations among multiple GPUs within each node. These functions can potentially
+improve the overall distributed training performance and are easily used by
+passing a list of tensors. Each Tensor in the passed tensor list needs
+to be on a separate GPU device of the host where the function is called. Note
+that the length of the tensor list needs to be identical among all the
+distributed processes. Also note that currently the multi-GPU collective
+functions are only supported by the NCCL backend.
+
+For example, suppose that the system we use for distributed training has 2 nodes, each
+of which has 8 GPUs. On each of the 16 GPUs, there is a tensor that we would
+like to all-reduce. The following code can serve as a reference:
+
+Code running on Node 0
+
+::
+
+    import torch
+    import torch.distributed as dist
+
+    dist.init_process_group(backend="nccl",
+                            init_method="file:///distributed_test",
+                            world_size=2,
+                            rank=0)
+    tensor_list = []
+    for dev_idx in range(torch.cuda.device_count()):
+        tensor_list.append(torch.FloatTensor([1]).cuda(dev_idx))
+
+    dist.all_reduce_multigpu(tensor_list)
+
+Code running on Node 1
+
+::
+
+    import torch
+    import torch.distributed as dist
+
+    dist.init_process_group(backend="nccl",
+                            init_method="file:///distributed_test",
+                            world_size=2,
+                            rank=1)
+    tensor_list = []
+    for dev_idx in range(torch.cuda.device_count()):
+        tensor_list.append(torch.FloatTensor([1]).cuda(dev_idx))
+
+    dist.all_reduce_multigpu(tensor_list)
+
+After the call, all 16 tensors on the two nodes will have the all-reduced value
+of 16.
+
+.. autofunction:: broadcast_multigpu
+
+.. autofunction:: all_reduce_multigpu
+
+.. autofunction:: reduce_multigpu
+
+.. autofunction:: all_gather_multigpu
+
+
+Launch utility
+--------------
+
+The `torch.distributed` package also provides a launch utility in
+`torch.distributed.launch`. This helper utility can be used to launch
+multiple processes per node for distributed training. This utility also supports
+both Python 2 and Python 3.
+
+
+.. automodule:: torch.distributed.launch
+
+
+Spawn utility
+-------------
+
+The :doc:`torch.multiprocessing` package also provides a ``spawn``
+function in :func:`torch.multiprocessing.spawn`.
This helper function +can be used to spawn multiple processes. It works by passing in the +function that you want to run and spawns N processes to run it. This +can be used for multiprocess distributed training as well. + +For references on how to use it, please refer to `PyTorch example - ImageNet +implementation `_ + +Note that this function requires Python 3.4 or higher. diff --git a/docs/stable/_sources/distributions.rst.txt b/docs/stable/_sources/distributions.rst.txt new file mode 100644 index 000000000000..d8f84acd20e5 --- /dev/null +++ b/docs/stable/_sources/distributions.rst.txt @@ -0,0 +1,342 @@ +.. role:: hidden + :class: hidden-section + +Probability distributions - torch.distributions +================================================== + +.. automodule:: torch.distributions +.. currentmodule:: torch.distributions + +:hidden:`Distribution` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.distribution +.. autoclass:: Distribution + :members: + :show-inheritance: + +:hidden:`ExponentialFamily` +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.exp_family +.. autoclass:: ExponentialFamily + :members: + :show-inheritance: + +:hidden:`Bernoulli` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.bernoulli +.. autoclass:: Bernoulli + :members: + :undoc-members: + :show-inheritance: + +:hidden:`Beta` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.beta +.. autoclass:: Beta + :members: + :undoc-members: + :show-inheritance: + +:hidden:`Binomial` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.binomial +.. autoclass:: Binomial + :members: + :undoc-members: + :show-inheritance: + +:hidden:`Categorical` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.categorical +.. autoclass:: Categorical + :members: + :undoc-members: + :show-inheritance: + +:hidden:`Cauchy` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.cauchy +.. autoclass:: Cauchy + :members: + :undoc-members: + :show-inheritance: + +:hidden:`Chi2` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.chi2 +.. autoclass:: Chi2 + :members: + :undoc-members: + :show-inheritance: + +:hidden:`Dirichlet` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.dirichlet +.. autoclass:: Dirichlet + :members: + :undoc-members: + :show-inheritance: + +:hidden:`Exponential` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.exponential +.. autoclass:: Exponential + :members: + :undoc-members: + :show-inheritance: + +:hidden:`FisherSnedecor` +~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.fishersnedecor +.. autoclass:: FisherSnedecor + :members: + :undoc-members: + :show-inheritance: + +:hidden:`Gamma` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.gamma +.. autoclass:: Gamma + :members: + :undoc-members: + :show-inheritance: + +:hidden:`Geometric` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.geometric +.. autoclass:: Geometric + :members: + :undoc-members: + :show-inheritance: + +:hidden:`Gumbel` +~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.gumbel +.. autoclass:: Gumbel + :members: + :undoc-members: + :show-inheritance: + +:hidden:`HalfCauchy` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.half_cauchy +.. autoclass:: HalfCauchy + :members: + :undoc-members: + :show-inheritance: + +:hidden:`HalfNormal` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
currentmodule:: torch.distributions.half_normal +.. autoclass:: HalfNormal + :members: + :undoc-members: + :show-inheritance: + +:hidden:`Independent` +~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.independent +.. autoclass:: Independent + :members: + :undoc-members: + :show-inheritance: + +:hidden:`Laplace` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.laplace +.. autoclass:: Laplace + :members: + :undoc-members: + :show-inheritance: + +:hidden:`LogNormal` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.log_normal +.. autoclass:: LogNormal + :members: + :undoc-members: + :show-inheritance: + +:hidden:`LowRankMultivariateNormal` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.lowrank_multivariate_normal +.. autoclass:: LowRankMultivariateNormal + :members: + :undoc-members: + :show-inheritance: + +:hidden:`Multinomial` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.multinomial +.. autoclass:: Multinomial + :members: + :undoc-members: + :show-inheritance: + +:hidden:`MultivariateNormal` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.multivariate_normal +.. autoclass:: MultivariateNormal + :members: + :undoc-members: + :show-inheritance: + +:hidden:`NegativeBinomial` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.negative_binomial +.. autoclass:: NegativeBinomial + :members: + :undoc-members: + :show-inheritance: + +:hidden:`Normal` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.normal +.. autoclass:: Normal + :members: + :undoc-members: + :show-inheritance: + +:hidden:`OneHotCategorical` +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.one_hot_categorical +.. autoclass:: OneHotCategorical + :members: + :undoc-members: + :show-inheritance: + +:hidden:`Pareto` +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.pareto +.. autoclass:: Pareto + :members: + :undoc-members: + :show-inheritance: + +:hidden:`Poisson` +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.poisson +.. autoclass:: Poisson + :members: + :undoc-members: + :show-inheritance: + +:hidden:`RelaxedBernoulli` +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.relaxed_bernoulli +.. autoclass:: RelaxedBernoulli + :members: + :undoc-members: + :show-inheritance: + +:hidden:`LogitRelaxedBernoulli` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.relaxed_bernoulli +.. autoclass:: LogitRelaxedBernoulli + :members: + :undoc-members: + :show-inheritance: + +:hidden:`RelaxedOneHotCategorical` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.relaxed_categorical +.. autoclass:: RelaxedOneHotCategorical + :members: + :undoc-members: + :show-inheritance: + +:hidden:`StudentT` +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.studentT +.. autoclass:: StudentT + :members: + :undoc-members: + :show-inheritance: + +:hidden:`TransformedDistribution` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.transformed_distribution +.. autoclass:: TransformedDistribution + :members: + :undoc-members: + :show-inheritance: + +:hidden:`Uniform` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.uniform +.. autoclass:: Uniform + :members: + :undoc-members: + :show-inheritance: + +:hidden:`Weibull` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: torch.distributions.weibull +.. 
autoclass:: Weibull
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+`KL Divergence`
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. automodule:: torch.distributions.kl
+.. currentmodule:: torch.distributions.kl
+
+.. autofunction:: kl_divergence
+.. autofunction:: register_kl
+
+`Transforms`
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. automodule:: torch.distributions.transforms
+    :members:
+    :member-order: bysource
+
+`Constraints`
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. automodule:: torch.distributions.constraints
+    :members:
+    :member-order: bysource
+
+`Constraint Registry`
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. automodule:: torch.distributions.constraint_registry
+    :members:
+    :member-order: bysource
diff --git a/docs/stable/_sources/dlpack.rst.txt b/docs/stable/_sources/dlpack.rst.txt
new file mode 100644
index 000000000000..869285de792d
--- /dev/null
+++ b/docs/stable/_sources/dlpack.rst.txt
@@ -0,0 +1,8 @@
+torch.utils.dlpack
+==================
+
+.. currentmodule:: torch.utils.dlpack
+
+.. autofunction:: from_dlpack
+.. autofunction:: to_dlpack
+
diff --git a/docs/stable/_sources/hub.rst.txt b/docs/stable/_sources/hub.rst.txt
new file mode 100644
index 000000000000..41331ed230a5
--- /dev/null
+++ b/docs/stable/_sources/hub.rst.txt
@@ -0,0 +1,137 @@
+torch.hub
+===================================
+PyTorch Hub is a pre-trained model repository designed to facilitate research reproducibility.
+
+Publishing models
+-----------------
+
+PyTorch Hub supports publishing pre-trained models (model definitions and pre-trained weights)
+to a GitHub repository by adding a simple ``hubconf.py`` file.
+
+``hubconf.py`` can have multiple entrypoints. Each entrypoint is defined as a Python function
+(for example: a pre-trained model you want to publish).
+
+::
+
+    def entrypoint_name(*args, **kwargs):
+        # args & kwargs are optional, for models which take positional/keyword arguments.
+        ...
+
+How to implement an entrypoint?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Here is a code snippet that specifies an entrypoint for the ``resnet18`` model, expanding
+the implementation in ``pytorch/vision/hubconf.py``.
+In most cases, importing the right function in ``hubconf.py`` is sufficient. Here we
+just use the expanded version as an example to show how it works.
+You can see the full script in the
+`pytorch/vision repo `_
+
+::
+
+    dependencies = ['torch']
+    from torchvision.models.resnet import resnet18 as _resnet18
+
+    # resnet18 is the name of the entrypoint
+    def resnet18(pretrained=False, **kwargs):
+        """ # This docstring shows up in hub.help()
+        Resnet18 model
+        pretrained (bool): kwargs, load pretrained weights into the model
+        """
+        # Call the model, load pretrained weights
+        model = _resnet18(pretrained=pretrained, **kwargs)
+        return model
+
+
+- The ``dependencies`` variable is a **list** of package names required to **load** the model. Note this might
+  be slightly different from the dependencies required to train the model.
+- ``args`` and ``kwargs`` are passed along to the real callable function.
+- The docstring of the function works as a help message. It explains what the model does and what
+  the allowed positional/keyword arguments are. It's highly recommended to add a few examples here.
+- An entrypoint function can either return a model (``nn.Module``), or auxiliary tools to make the user workflow smoother, e.g. tokenizers.
+- Callables prefixed with an underscore are considered helper functions which won't show up in ``torch.hub.list()``.
+- Pretrained weights can either be stored locally in the GitHub repo, or be loadable by
+  ``torch.hub.load_state_dict_from_url()``. If the weights are smaller than 2GB, it's recommended to attach them to a `project release `_
+  and use the URL from the release.
+  In the example above, ``torchvision.models.resnet.resnet18`` handles ``pretrained``; alternatively, you can put the following logic in the entrypoint definition.
+
+::
+
+    if pretrained:
+        # For a checkpoint saved in the local github repo, e.g. <RELATIVE_PATH_TO_CHECKPOINT>=weights/save.pth
+        dirname = os.path.dirname(__file__)
+        checkpoint = os.path.join(dirname, <RELATIVE_PATH_TO_CHECKPOINT>)
+        state_dict = torch.load(checkpoint)
+        model.load_state_dict(state_dict)
+
+        # For a checkpoint saved elsewhere
+        checkpoint = 'https://download.pytorch.org/models/resnet18-5c106cde.pth'
+        model.load_state_dict(torch.hub.load_state_dict_from_url(checkpoint, progress=False))
+
+
+Important Notice
+^^^^^^^^^^^^^^^^
+
+- The published models should be at least in a branch/tag. They can't be a random commit.
+
+
+Loading models from Hub
+-----------------------
+
+PyTorch Hub provides convenient APIs to explore all available models in hub through ``torch.hub.list()``,
+to show the docstring and examples through ``torch.hub.help()``, and to load pre-trained models using ``torch.hub.load()``.
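+
+For example, a quick sketch of this workflow (network access and the
+``pytorch/vision`` repo are assumed to be available)::
+
+    import torch
+
+    # list entrypoints exposed by the repo's hubconf.py
+    entrypoints = torch.hub.list('pytorch/vision')
+
+    # show the docstring of the resnet18 entrypoint
+    print(torch.hub.help('pytorch/vision', 'resnet18'))
+
+    # load the pre-trained resnet18 model
+    model = torch.hub.load('pytorch/vision', 'resnet18', pretrained=True)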
+
+
+.. automodule:: torch.hub
+
+.. autofunction:: list
+
+.. autofunction:: help
+
+.. autofunction:: load
+
+Running a loaded model:
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Note that ``*args, **kwargs`` in ``torch.hub.load()`` are used to **instantiate** a model.
+After you have loaded a model, how can you find out what you can do with it?
+A suggested workflow is
+
+- ``dir(model)`` to see all available methods of the model.
+- ``help(model.foo)`` to check what arguments ``model.foo`` takes to run
+
+To help users explore without referring back and forth to the documentation, we strongly
+recommend that repo owners make function help messages clear and succinct. It's also helpful
+to include a minimal working example.
+
+Where are my downloaded models saved?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The locations are used in the order of
+
+- Calling ``hub.set_dir()``
+- ``$TORCH_HOME/hub``, if environment variable ``TORCH_HOME`` is set.
+- ``$XDG_CACHE_HOME/torch/hub``, if environment variable ``XDG_CACHE_HOME`` is set.
+- ``~/.cache/torch/hub``
+
+.. autofunction:: set_dir
+
+Caching logic
+^^^^^^^^^^^^^
+
+By default, we don't clean up files after loading them. Hub uses the cache by default if it already exists in ``hub_dir``.
+
+Users can force a reload by calling ``hub.load(..., force_reload=True)``. This will delete
+the existing github folder and downloaded weights, and reinitialize a fresh download. This is useful
+when updates are published to the same branch, so that users can keep up with the latest release.
+
+
+Known limitations:
+^^^^^^^^^^^^^^^^^^
+Torch hub works by importing the package as if it was installed. There are some side effects
+introduced by importing in Python. For example, you can see new items in the Python caches
+``sys.modules`` and ``sys.path_importer_cache``, which is normal Python behavior.
+
+A known limitation worth mentioning here is that users **CANNOT** load two different branches of
+the same repo in the **same Python process**. It's just like installing two packages with the
+same name in Python, which is not good. The cache might join the party and give you surprises if you
+actually try that. Of course, it's totally fine to load them in separate processes.
diff --git a/docs/stable/_sources/index.rst.txt b/docs/stable/_sources/index.rst.txt
new file mode 100644
index 000000000000..b22cb913d816
--- /dev/null
+++ b/docs/stable/_sources/index.rst.txt
@@ -0,0 +1,70 @@
+.. PyTorch documentation master file, created by
+   sphinx-quickstart on Fri Dec 23 13:31:47 2016.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+:github_url: https://github.com/pytorch/pytorch
+
+PyTorch documentation
+===================================
+
+PyTorch is an optimized tensor library for deep learning using GPUs and CPUs.
+
+.. toctree::
+   :glob:
+   :maxdepth: 1
+   :caption: Notes
+
+   notes/*
+
+.. toctree::
+   :glob:
+   :maxdepth: 1
+   :caption: Community
+
+   community/*
+
+.. toctree::
+   :maxdepth: 1
+   :caption: Package Reference
+
+   torch
+   tensors
+   tensor_attributes
+   type_info
+   sparse
+   cuda
+   storage
+   nn
+   nn.functional
+   nn.init
+   optim
+   torch.autograd
+   torch.distributed
+   torch.distributions
+   torch.hub
+   torch.jit
+   torch.multiprocessing
+   torch.utils.bottleneck
+   torch.utils.checkpoint
+   torch.utils.cpp_extension
+   torch.utils.data
+   torch.utils.dlpack
+   torch.utils.model_zoo
+   torch.utils.tensorboard
+   onnx
+   torch.__config__ <__config__>
+
+.. toctree::
+   :glob:
+   :maxdepth: 2
+   :caption: torchvision Reference
+
+   torchvision/index
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
diff --git a/docs/stable/_sources/jit.rst.txt b/docs/stable/_sources/jit.rst.txt
new file mode 100644
index 000000000000..8d15ffa37bd2
--- /dev/null
+++ b/docs/stable/_sources/jit.rst.txt
@@ -0,0 +1,1169 @@
+TorchScript
+============
+
+.. contents:: :local:
+
+.. automodule:: torch.jit
+.. currentmodule:: torch.jit
+
+TorchScript is a way to create serializable and optimizable models from PyTorch code.
+Any TorchScript program can be saved from a Python
+process and loaded in a process where there is no Python dependency.
+
+We provide tools to incrementally transition a model from a pure Python program
+to a TorchScript program that can be run independently from Python, such as in a standalone C++ program.
+This makes it possible to train models in PyTorch using familiar tools in Python and then export
+the model via TorchScript to a production environment where Python programs may be disadvantageous
+for performance and multi-threading reasons.
+
+Creating TorchScript Code
+--------------------------
+
+.. autofunction:: script
+
+.. autofunction:: trace
+
+.. autoclass:: ScriptModule
+   :members:
+
+.. autofunction:: save
+
+.. autofunction:: load
+
+
+
+Mixing Tracing and Scripting
+----------------------------
+
+In many cases either tracing or scripting is an easier approach for converting a model to TorchScript.
+Tracing and scripting can be composed to suit the particular requirements
+of a part of a model.
+
+Scripted functions can call traced functions. This is particularly useful when you need
+to use control-flow around a simple feed-forward model. For instance, the beam search
+of a sequence-to-sequence model will typically be written in script but can call an
+encoder module generated using tracing.
+
+Example::
+
+    import torch
+
+    def foo(x, y):
+        return 2 * x + y
+    traced_foo = torch.jit.trace(foo, (torch.rand(3), torch.rand(3)))
+
+    @torch.jit.script
+    def bar(x):
+        return traced_foo(x, x)
+
+Traced functions can call script functions.
+This is useful when a small part of
+a model requires some control-flow even though most of the model is just a feed-forward
+network. Control-flow inside of a script function called by a traced function is
+preserved correctly:
+
+Example::
+
+    import torch
+
+    @torch.jit.script
+    def foo(x, y):
+        if x.max() > y.max():
+            r = x
+        else:
+            r = y
+        return r
+
+
+    def bar(x, y, z):
+        return foo(x, y) + z
+
+    traced_bar = torch.jit.trace(bar, (torch.rand(3), torch.rand(3), torch.rand(3)))
+
+This composition also works for ``nn.Module``\s, where it can be used to generate
+a submodule using tracing that can be called from the methods of a script module:
+
+Example::
+
+    import torch
+    import torchvision
+
+    class MyScriptModule(torch.nn.Module):
+        def __init__(self):
+            super(MyScriptModule, self).__init__()
+            self.means = torch.nn.Parameter(torch.tensor([103.939, 116.779, 123.68])
+                                            .resize_(1, 3, 1, 1))
+            self.resnet = torch.jit.trace(torchvision.models.resnet18(),
+                                          torch.rand(1, 3, 224, 224))
+
+        def forward(self, input):
+            return self.resnet(input - self.means)
+
+    my_script_module = torch.jit.script(MyScriptModule())
+
+
+TorchScript Language Reference
+-------------------------------
+
+TorchScript is a statically typed subset of Python that can either be written directly (using
+the ``@torch.jit.script`` decorator) or generated automatically from Python code via
+tracing. When using tracing, code is automatically converted into this subset of
+Python by recording only the actual operators on tensors and simply executing and
+discarding the other surrounding Python code.
+
+When writing TorchScript directly using the ``@torch.jit.script`` decorator, the programmer must
+only use the subset of Python supported in TorchScript. This section documents
+what is supported in TorchScript as if it were a language reference for a standalone
+language. Any features of Python not mentioned in this reference are not
+part of TorchScript.
+
+As a subset of Python, any valid TorchScript function is also a valid Python
+function. This makes it possible to remove the ``@torch.jit.script`` decorator and debug the
+function using standard Python tools like ``pdb``. The reverse is not true: there
+are many valid Python programs that are not valid TorchScript programs.
+Instead, TorchScript focuses specifically on the features of Python that are
+needed to represent neural network models in Torch.
+
+.. envvar:: PYTORCH_JIT=1
+
+    Setting the environment variable ``PYTORCH_JIT=0`` will disable all script
+    and tracing annotations. If there is a hard-to-debug error in one of your
+    ScriptModules, you can use this flag to force everything to run using native
+    Python. This allows the use of tools like ``pdb`` to debug code.
+
+
+Types
+~~~~~
+
+The largest difference between TorchScript and the full Python language is that
+TorchScript only supports a small set of types that are needed to express neural
+net models. In particular, TorchScript supports:
+
+.. csv-table::
+    :header: "Type", "Description"
+
+    "``Tensor``", "A PyTorch tensor of any dtype, dimension, or backend"
+    "``Tuple[T0, T1, ...]``", "A tuple containing subtypes ``T0``, ``T1``, etc. (e.g. ``Tuple[Tensor, Tensor]``)"
+    "``bool``", "A boolean value"
+    "``int``", "A scalar integer"
+    "``float``", "A scalar floating point number"
+    "``List[T]``", "A list of which all members are type ``T``"
+    "``Optional[T]``", "A value which is either None or type ``T``"
+    "``Dict[K, V]``", "A dict with key type ``K`` and value type ``V``. Only ``str``, ``int``, and ``float`` are allowed as key types."
+
+
+Unlike Python, each variable in a TorchScript function must have a single static type.
+This makes it easier to optimize TorchScript functions.
+
+Example (a type mismatch)::
+
+    @torch.jit.script
+    def an_error(x):
+        if x:
+            r = torch.rand(1)
+        else:
+            r = 4
+        return r  # Type mismatch: r is set to type Tensor in the true branch
+                  # and type int in the false branch
+
+
+Default Types
+^^^^^^^^^^^^^
+
+By default, all parameters to a TorchScript function are assumed to be Tensor.
+To specify that an argument to a TorchScript function is another type, it is possible to use
+MyPy-style type annotations using the types listed above:
+
+Example::
+
+    @torch.jit.script
+    def foo(x, tup):
+        # type: (int, Tuple[Tensor, Tensor]) -> Tensor
+        t0, t1 = tup
+        return t0 + t1 + x
+
+    print(foo(3, (torch.rand(3), torch.rand(3))))
+
+.. note::
+    It is also possible to annotate types with Python 3 type annotations.
+    In our examples, we use comment-based annotations to ensure Python 2
+    compatibility as well.
+
+An empty list is assumed to be ``List[Tensor]`` and empty dicts
+``Dict[str, Tensor]``. To instantiate an empty list or dict of other types,
+use ``torch.jit.annotate``.
+
+Example::
+
+    import torch
+    from torch import Tensor
+    from typing import Dict, List, Tuple
+
+    class EmptyDataStructures(torch.jit.ScriptModule):
+        def __init__(self):
+            super(EmptyDataStructures, self).__init__()
+
+        @torch.jit.script_method
+        def forward(self, x):
+            # type: (Tensor) -> Tuple[List[Tuple[int, float]], Dict[str, int]]
+
+            # This annotates the list to be a `List[Tuple[int, float]]`
+            my_list = torch.jit.annotate(List[Tuple[int, float]], [])
+            for i in range(10):
+                # (i, float(i)) matches the annotated element type
+                my_list.append((i, float(i)))
+
+            my_dict = torch.jit.annotate(Dict[str, int], {})
+            return my_list, my_dict
+
+
+Optional Type Refinement
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+TorchScript will refine the type of a variable of type ``Optional[T]`` when
+a comparison to ``None`` is made inside the conditional of an if-statement.
+The compiler can reason about multiple ``None`` checks that are combined with
+``and``, ``or``, and ``not``. Refinement will also occur for else blocks of if-statements
+that are not explicitly written.
+
+The ``None`` check must be written within the conditional itself; assigning
+a ``None`` check to a variable and using it in the conditional will not refine types.
+An attribute like ``self.x`` will not be refined, but assigning ``self.x`` to a local
+variable first will work.
+
+
+Example::
+
+    @torch.jit.script_method
+    def optional_unwrap(self, x, y):
+        # type: (Optional[int], Optional[int]) -> int
+        if x is None:
+            x = 1
+        x = x + 1
+
+        z = self.z
+        if y is not None and z is not None:
+            x = y + z
+        return x
+
+
+User Defined Types
+^^^^^^^^^^^^^^^^^^^^^^^^
+Python classes can be used in TorchScript if they are annotated with ``@torch.jit.script``,
+similar to how you would declare a TorchScript function: ::
+
+    @torch.jit.script
+    class Foo:
+        def __init__(self, x, y):
+            self.x = x
+
+        def aug_add_x(self, inc):
+            self.x += inc
+
+
+This subset is restricted:
+
+* All functions must be valid TorchScript functions (including ``__init__()``)
+* Classes must be new-style classes, as we use ``__new__()`` to construct them with pybind11
+* TorchScript classes are statically typed.
+  Members are declared by assigning to
+  self in the ``__init__()`` method.
+
+  For example, assigning outside of the ``__init__()`` method: ::
+
+    @torch.jit.script
+    class Foo:
+        def assign_x(self):
+            self.x = torch.rand(2, 3)
+
+  Will result in: ::
+
+    RuntimeError:
+    Tried to set nonexistent attribute: x. Did you forget to initialize it in __init__()?:
+    def assign_x(self):
+        self.x = torch.rand(2, 3)
+        ~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
+
+* No expressions except method definitions are allowed in the body of the class
+* No support for inheritance or any other polymorphism strategy, except for inheriting
+  from object to specify a new-style class
+
+After a class is defined, it can be used in both TorchScript and Python interchangeably
+like any other TorchScript type:
+
+::
+
+    @torch.jit.script
+    class Pair:
+        def __init__(self, first, second):
+            self.first = first
+            self.second = second
+
+    @torch.jit.script
+    def sum_pair(p):
+        # type: (Pair) -> Tensor
+        return p.first + p.second
+
+    p = Pair(torch.rand(2, 3), torch.rand(2, 3))
+    print(sum_pair(p))
+
+
+Expressions
+~~~~~~~~~~~
+
+The following Python expressions are supported:
+
+Literals
+^^^^^^^^
+    ``True``, ``False``, ``None``, ``'string literals'``, ``"string literals"``,
+    and number literals such as ``3`` (interpreted as int) and ``3.4`` (interpreted as a float)
+
+List Construction
+"""""""""""""""""
+    ``[3, 4]``, ``[]``, ``[torch.rand(3), torch.rand(4)]``
+
+    .. note::
+        An empty list is assumed to have type ``List[Tensor]``.
+        The types of other list literals are derived from the type of the members.
+        To denote an empty list of another type, use ``torch.jit.annotate``.
+
+Tuple Construction
+""""""""""""""""""
+    ``(3, 4)``, ``(3,)``
+
+
+Dict Construction
+"""""""""""""""""
+    ``{'hello': 3}``, ``{}``, ``{'a': torch.rand(3), 'b': torch.rand(4)}``
+
+    .. note::
+        An empty dict is assumed to have type ``Dict[str, Tensor]``.
+        The types of other dict literals are derived from the type of the members.
+        To denote an empty dict of another type, use ``torch.jit.annotate``.
+
+Variables
+^^^^^^^^^
+    ``my_variable_name``
+
+    .. note::
+        See `Variable Resolution`_ for how variables are resolved.
+
+
+Arithmetic Operators
+^^^^^^^^^^^^^^^^^^^^
+    ``a + b``
+
+    ``a - b``
+
+    ``a * b``
+
+    ``a / b``
+
+    ``a ^ b``
+
+    ``a @ b``
+
+Comparison Operators
+^^^^^^^^^^^^^^^^^^^^
+    ``a == b``
+
+    ``a != b``
+
+    ``a < b``
+
+    ``a > b``
+
+    ``a <= b``
+
+    ``a >= b``
+
+Logical Operators
+^^^^^^^^^^^^^^^^^
+    ``a and b``
+
+    ``a or b``
+
+    ``not b``
+
+Subscripts
+^^^^^^^^^^
+    ``t[0]``
+
+    ``t[-1]``
+
+    ``t[0:2]``
+
+    ``t[1:]``
+
+    ``t[:1]``
+
+    ``t[:]``
+
+    ``t[0, 1]``
+
+    ``t[0, 1:2]``
+
+    ``t[0, :1]``
+
+    ``t[-1, 1:, 0]``
+
+    ``t[1:, -1, 0]``
+
+    ``t[i:j, i]``
+
+Function Calls
+^^^^^^^^^^^^^^
+    Calls to built-in functions: ``torch.rand(3, dtype=torch.int)``
+
+    Calls to other script functions:
+
+    ::
+
+        import torch
+
+        @torch.jit.script
+        def foo(x):
+            return x + 1
+
+        @torch.jit.script
+        def bar(x):
+            return foo(x)
+
+Method Calls
+^^^^^^^^^^^^
+    Calls to methods of builtin types like tensor: ``x.mm(y)``
+
+
+    When defining a Script method inside of a ScriptModule, the ``@script_method``
+    annotation is used. Inside of these methods it is possible to call other methods
+    of this class or access methods on the submodules.
+
+    Calling a submodule directly (e.g. ``self.resnet(input)``) is equivalent to
+    calling its ``forward`` method (e.g.
+    ``self.resnet.forward(input)``)
+
+    ::
+
+        import torch
+
+        class MyScriptModule(torch.jit.ScriptModule):
+            def __init__(self):
+                super(MyScriptModule, self).__init__()
+                self.means = torch.nn.Parameter(torch.tensor([103.939, 116.779, 123.68])
+                                                .resize_(1, 3, 1, 1))
+                self.resnet = torch.jit.trace(torchvision.models.resnet18(),
+                                              torch.rand(1, 3, 224, 224))
+
+            @torch.jit.script_method
+            def helper(self, input):
+                return self.resnet(input - self.means)
+
+            @torch.jit.script_method
+            def forward(self, input):
+                return self.helper(input)
+
+Ternary Expressions
+^^^^^^^^^^^^^^^^^^^
+    ``x if x > y else y``
+
+Casts
+^^^^^
+    ``float(ten)``
+
+    ``int(3.5)``
+
+    ``bool(ten)``
+
+Accessing Module Parameters
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+    ``self.my_parameter``
+
+    ``self.my_submodule.my_parameter``
+
+
+Statements
+~~~~~~~~~~
+
+TorchScript supports the following types of statements:
+
+Simple Assignments
+    ::
+
+        a = b
+        a += b  # short-hand for a = a + b, does not operate in-place on a
+        a -= b
+
+Pattern Matching Assignments
+    ::
+
+        a, b = tuple_or_list
+        a, b, *c = a_tuple
+
+Print Statements
+
+    ``print("the result of an add:", a + b)``
+
+If Statements
+
+    ::
+
+        if a < 4:
+            r = -a
+        elif a < 3:
+            r = a + a
+        else:
+            r = 3 * a
+
+In addition to bools, floats, ints, and Tensors can be used in a conditional
+and will be implicitly cast to a boolean.
+
+While Loops
+
+    ::
+
+        a = 0
+        while a < 4:
+            print(a)
+            a += 1
+
+
+For loops with ``range``
+
+    ::
+
+        x = 0
+        for i in range(10):
+            x *= i
+
+For loops over tuples:
+
+    ::
+
+        tup = (3, torch.rand(4))
+        for x in tup:
+            print(x)
+
+    .. note::
+        For loops over tuples will unroll the loop, generating a body for
+        each member of the tuple. The body must type-check correctly for each member.
+
+For loops over constant ``torch.nn.ModuleList``
+
+    ::
+
+        class SubModule(torch.jit.ScriptModule):
+            def __init__(self):
+                super(SubModule, self).__init__()
+                self.weight = nn.Parameter(torch.randn(2))
+
+            @torch.jit.script_method
+            def forward(self, input):
+                return self.weight + input
+
+        class MyModule(torch.jit.ScriptModule):
+            __constants__ = ['mods']
+
+            def __init__(self):
+                super(MyModule, self).__init__()
+                self.mods = torch.nn.ModuleList([SubModule() for i in range(10)])
+
+            @torch.jit.script_method
+            def forward(self, v):
+                for module in self.mods:
+                    v = module(v)
+                return v
+
+    .. note::
+        To use a ``nn.ModuleList`` inside a ``@script_method`` it must be marked
+        constant by adding the name of the attribute to the ``__constants__``
+        list for the type. For loops over a ``nn.ModuleList`` will unroll the body of the
+        loop at compile time, once for each member of the constant module list.
+
+Break and Continue
+
+    ::
+
+        for i in range(5):
+            if i == 1:
+                continue
+            if i == 3:
+                break
+            print(i)
+
+Return
+    ``return a, b``
+
+    .. note::
+        TorchScript allows returns in the following circumstances:
+
+        1. At the end of a function
+        2. In an if-statement where both the true and the false branch return
+        3. In an if-statement where the true branch returns and the false branch
+           is empty (an early return)
+
+Variable Resolution
+~~~~~~~~~~~~~~~~~~~
+
+TorchScript supports a subset of Python's variable resolution (i.e. scoping)
+rules. Local variables behave the same as in Python, except for the restriction
+that a variable must have the same type along all paths through a function.
+If a variable has a different type on different sides of an if statement, it
+is an error to use it after the end of the if statement.
+
+Similarly, a variable is not allowed to be used if it is only *defined* along some
+paths through the function.
+
+Example::
+
+    @torch.jit.script
+    def foo(x):
+        if x < 0:
+            y = 4
+        print(y)  # Error: undefined value y
+
+Non-local variables are resolved to Python values at compile time when the
+function is defined. These values are then converted into TorchScript values using
+the rules described in `Use of Python Values`_.
+
+Use of Python Values
+~~~~~~~~~~~~~~~~~~~~
+
+To make writing TorchScript more convenient, we allow script code to refer
+to Python values in the surrounding scope. For instance, any time there is a
+reference to ``torch``, the TorchScript compiler is actually resolving it to the
+``torch`` Python module when the function is declared. These Python values are
+not a first-class part of TorchScript. Instead they are de-sugared at compile time
+into the primitive types that TorchScript supports. This depends
+on the dynamic type of the Python value referenced when compilation occurs.
+This section describes the rules that are used when accessing Python values in TorchScript.
+
+Functions
+^^^^^^^^^
+
+    TorchScript can call Python functions. This functionality is very useful when
+    incrementally converting a model to TorchScript. The model can be moved function-by-function
+    to TorchScript, leaving calls to Python functions in place. This way you can incrementally
+    check the correctness of the model as you go.
+
+    Example::
+
+        def foo(x):
+            print("I am called with {}".format(x))
+            import pdb; pdb.set_trace()
+            return x
+
+        @torch.jit.script
+        def bar(x):
+            return foo(x + 1)
+
+    Attempting to call ``save`` on a ScriptModule that contains calls to Python
+    functions will fail. The intention is that this pathway is used for debugging
+    and the calls removed or turned into script functions before saving. If you
+    want to export a module with a Python function, add the ``@torch.jit.ignore``
+    decorator to the function, which will replace these function calls with an
+    exception when the model is saved: ::
+
+        class M(torch.jit.ScriptModule):
+            def __init__(self):
+                super(M, self).__init__()
+
+            @torch.jit.script_method
+            def forward(self, x):
+                self.ignored_code(x)
+                return x + 2
+
+            @torch.jit.ignore
+            def ignored_code(self, x):
+                # non-TorchScript code
+                import pdb; pdb.set_trace()
+
+        m = M()
+        # Runs, makes upcall to Python to run `ignored_code`
+        m(torch.ones(2, 2))
+
+        # Replaces all calls to `ignored_code` with a `raise`
+        m.save("m.pt")
+        loaded = torch.jit.load("m.pt")
+
+        # This runs `ignored_code` after saving, which will raise an Exception!
+        loaded(torch.ones(2, 2))
+
+
+Attribute Lookup On Python Modules
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+    TorchScript can look up attributes on modules. Builtin functions like ``torch.add``
+    are accessed this way. This allows TorchScript to call functions defined in
+    other modules.
+
+Python-defined Constants
+^^^^^^^^^^^^^^^^^^^^^^^^
+    TorchScript also provides a way to use constants that are defined in Python.
+    These can be used to hard-code hyper-parameters into the function, or to
+    define universal constants. There are two ways of specifying that a Python
+    value should be treated as a constant.
+
+    1. Values looked up as attributes of a module are assumed to be constant.
+       Example: ``math.pi``
Attributes of a ScriptModule can be marked constant by listing them + as a member of the ``__constants__`` property of the class: + + Example:: + + class Foo(torch.jit.ScriptModule): + __constants__ = ['a'] + + def __init__(self): + super(Foo, self).__init__(False) + self.a = 1 + 4 + + @torch.jit.script_method + def forward(self, input): + return self.a + input + + Supported constant Python Values are + + * ``int`` + * ``float`` + * ``bool`` + * ``torch.device`` + * ``torch.layout`` + * ``torch.dtype`` + * tuples containing supported types + * ``torch.nn.ModuleList`` which can be used in a TorchScript for loop + + +Module Attributes +^^^^^^^^^^^^^^^^^ + +The ``torch.nn.Parameter`` wrapper and ``register_buffer`` can be used to assign +tensors to a ``ScriptModule``. In a similar vein, attributes of any type can be +assign on a ``ScriptModule`` by wrapping them with ``torch.jit.Attribute`` and +specifying the type. All types available in TorchScript are supported. These +attributes are mutable and are saved in a separate archive in the serialized +model binary. Tensor attributes are semantically the same as buffers. + +Example:: + + class Foo(torch.jit.ScriptModule): + def __init__(self, a_dict): + super(Foo, self).__init__(False) + self.words = torch.jit.Attribute([], List[str]) + self.some_dict = torch.jit.Attribute(a_dict, Dict[str, int]) + + @torch.jit.script_method + def forward(self, input): + # type: (str) -> int + self.words.append(input) + return self.some_dict[input] + + +Debugging +~~~~~~~~~ + +Disable JIT for Debugging +^^^^^^^^^^^^^^^^^^^^^^^^^ + If you want to disable all JIT modes (tracing and scripting) so you can + debug your program in raw Python, you can use the ``PYTORCH_JIT`` environment + variable. ``PYTORCH_JIT`` can be used to globally disable the + JIT by setting its value to ``0``. Given an example script:: + + @torch.jit.script + def scripted_fn(x : torch.Tensor): + for i in range(12): + x = x + x + return x + + + def fn(x): + x = torch.neg(x) + import pdb; pdb.set_trace() + return scripted_fn(x) + + traced_fn = torch.jit.trace(fn, (torch.rand(4, 5),)) + + traced_fn(torch.rand(3, 4)) + + Debugging this script with PDB works except for when we invoke the ``@torch.jit.script`` + function. We can globally disable JIT, so that we can call the ``@torch.jit.script`` + function as a normal python function and not compile it. If the above script + is called ``disable_jit_example.py``, we can invoke it like so:: + + $ PYTORCH_JIT=0 python disable_jit_example.py + + and we will be able to step into the ``@torch.jit.script`` function as a normal Python + function. + + +Inspecting Code +^^^^^^^^^^^^^^^ + + TorchScript provides a code pretty-printer for all ``ScriptModule`` instances. This + pretty-printer gives an interpretation of the script method's code as valid + Python syntax. For example:: + + @torch.jit.script + def foo(len): + # type: (int) -> torch.Tensor + rv = torch.zeros(3, 4) + for i in range(len): + if i < 10: + rv = rv - 1.0 + else: + rv = rv + 1.0 + return rv + + print(foo.code) + + A ``ScriptModule`` with a single ``forward`` method will have an attribute + ``code``, which you can use to inspect the ``ScriptModule``'s code. + If the ``ScriptModule`` has more than one method, you will need to access + ``.code`` on the method itself and not the module. We can inspect the + code of a method named ``bar`` on a ScriptModule by accessing ``.bar.code``. 
+
+    The example script above produces the code::
+
+        def forward(self,
+                    len: int) -> Tensor:
+            rv = torch.zeros([3, 4], dtype=None, layout=None, device=None)
+            rv0 = rv
+            for i in range(len):
+                if torch.lt(i, 10):
+                    rv1 = torch.sub(rv0, 1., 1)
+                else:
+                    rv1 = torch.add(rv0, 1., 1)
+                rv0 = rv1
+            return rv0
+
+    This is TorchScript's compilation of the code for the ``forward`` method.
+    You can use this to ensure TorchScript (tracing or scripting) has captured
+    your model code correctly.
+
+
+Interpreting Graphs
+^^^^^^^^^^^^^^^^^^^
+    TorchScript also has a representation at a lower level than the code
+    pretty-printer, in the form of IR graphs.
+
+    TorchScript uses a static single assignment (SSA) intermediate representation
+    (IR) to represent computation. The instructions in this format consist of
+    ATen (the C++ backend of PyTorch) operators and other primitive operators,
+    including control flow operators for loops and conditionals. As an example::
+
+        @torch.jit.script
+        def foo(len):
+            # type: (int) -> torch.Tensor
+            rv = torch.zeros(3, 4)
+            for i in range(len):
+                if i < 10:
+                    rv = rv - 1.0
+                else:
+                    rv = rv + 1.0
+            return rv
+
+        print(foo.graph)
+
+    ``.graph`` follows the same rules described in the `Inspecting Code`_ section
+    with regard to ``forward`` method lookup.
+
+    The example script above produces the graph::
+
+        graph(%len : int) {
+          %15 : int = prim::Constant[value=1]()
+          %9 : bool = prim::Constant[value=1]()
+          %7 : Device = prim::Constant[value="cpu"]()
+          %6 : int = prim::Constant[value=0]()
+          %5 : int = prim::Constant[value=6]()
+          %1 : int = prim::Constant[value=3]()
+          %2 : int = prim::Constant[value=4]()
+          %11 : int = prim::Constant[value=10]()
+          %14 : float = prim::Constant[value=1]()
+          %4 : int[] = prim::ListConstruct(%1, %2)
+          %rv.1 : Tensor = aten::zeros(%4, %5, %6, %7)
+          %rv : Tensor = prim::Loop(%len, %9, %rv.1)
+            block0(%i : int, %13 : Tensor) {
+              %12 : bool = aten::lt(%i, %11)
+              %rv.4 : Tensor = prim::If(%12)
+                block0() {
+                  %rv.2 : Tensor = aten::sub(%13, %14, %15)
+                  -> (%rv.2)
+                }
+                block1() {
+                  %rv.3 : Tensor = aten::add(%13, %14, %15)
+                  -> (%rv.3)
+                }
+              -> (%9, %rv.4)
+            }
+          return (%rv);
+        }
+
+
+    Take the instruction ``%rv.1 : Tensor = aten::zeros(%4, %5, %6, %7)`` for
+    example. ``%rv.1 : Tensor`` means we assign the output to a (unique)
+    value named ``rv.1``, and that value is of ``Tensor`` type, i.e. we do
+    not know its concrete shape. ``aten::zeros`` is the operator (equivalent
+    to ``torch.zeros``) and the input list ``(%4, %5, %6, %7)`` specifies which
+    values in scope should be passed as inputs. The schema for built-in functions
+    like ``aten::zeros`` can be found at `Builtin Functions`_.
+
+    Notice that operators can also have associated ``blocks``, namely the
+    ``prim::Loop`` and ``prim::If`` operators. In the graph print-out, these
+    operators are formatted to reflect their equivalent source code forms
+    to facilitate easy debugging.
+
+    Graphs can be inspected as shown to confirm that the computation described
+    by a ``ScriptModule`` is correct, in both an automated and a manual fashion, as
+    described below.
+
+
+Tracing Edge Cases
+^^^^^^^^^^^^^^^^^^
+    There are some edge cases where the trace of a given Python
+    function/module will not be representative of the underlying code. These
+    cases can include:
+
+    * Tracing of control flow that is dependent on inputs (e.g. tensor shapes)
+    * Tracing of in-place operations on tensor views (e.g. indexing on the
+      left-hand side of an assignment)
+
+    Note that these cases may in fact be traceable in the future.
+
+
+Automatic Trace Checking
+^^^^^^^^^^^^^^^^^^^^^^^^
+    One way to automatically catch many errors in traces is by using ``check_inputs``
+    on the ``torch.jit.trace()`` API. ``check_inputs`` takes a list of tuples
+    of inputs that will be used to re-trace the computation and verify the
+    results. For example::
+
+        def loop_in_traced_fn(x):
+            result = x[0]
+            for i in range(x.size(0)):
+                result = result * x[i]
+            return result
+
+        inputs = (torch.rand(3, 4, 5),)
+        check_inputs = [(torch.rand(4, 5, 6),), (torch.rand(2, 3, 4),)]
+
+        traced = torch.jit.trace(loop_in_traced_fn, inputs, check_inputs=check_inputs)
+
+    This gives us the following diagnostic information::
+
+        ERROR: Graphs differed across invocations!
+        Graph diff::
+
+            graph(%x : Tensor) {
+                  %1 : int = prim::Constant[value=0]()
+                  %2 : int = prim::Constant[value=0]()
+                  %result.1 : Tensor = aten::select(%x, %1, %2)
+                  %4 : int = prim::Constant[value=0]()
+                  %5 : int = prim::Constant[value=0]()
+                  %6 : Tensor = aten::select(%x, %4, %5)
+                  %result.2 : Tensor = aten::mul(%result.1, %6)
+                  %8 : int = prim::Constant[value=0]()
+                  %9 : int = prim::Constant[value=1]()
+                  %10 : Tensor = aten::select(%x, %8, %9)
+                - %result : Tensor = aten::mul(%result.2, %10)
+                + %result.3 : Tensor = aten::mul(%result.2, %10)
+                ?        ++
+                + %12 : int = prim::Constant[value=0]()
+                + %13 : int = prim::Constant[value=2]()
+                + %14 : Tensor = aten::select(%x, %12, %13)
+                + %result : Tensor = aten::mul(%result.3, %14)
+                + %16 : int = prim::Constant[value=0]()
+                + %17 : int = prim::Constant[value=3]()
+                + %18 : Tensor = aten::select(%x, %16, %17)
+                - %15 : Tensor = aten::mul(%result, %14)
+                ?   ^                                ^
+                + %19 : Tensor = aten::mul(%result, %18)
+                ?   ^                                ^
+                - return (%15);
+                ?          ^
+                + return (%19);
+                ?          ^
+                }
+
+    This message indicates to us that the computation differed between when
+    we first traced it and when we traced it with the ``check_inputs``. Indeed,
+    the loop within the body of ``loop_in_traced_fn`` depends on the shape
+    of the input ``x``, and thus when we try another ``x`` with a different
+    shape, the trace differs.
+
+    In this case, data-dependent control flow like this can be captured using
+    script instead::
+
+        def fn(x):
+            result = x[0]
+            for i in range(x.size(0)):
+                result = result * x[i]
+            return result
+
+        inputs = (torch.rand(3, 4, 5),)
+        check_inputs = [(torch.rand(4, 5, 6),), (torch.rand(2, 3, 4),)]
+
+        scripted_fn = torch.jit.script(fn)
+        print(scripted_fn.graph)
+
+        for input_tuple in [inputs] + check_inputs:
+            torch.testing.assert_allclose(fn(*input_tuple), scripted_fn(*input_tuple))
+
+    This produces::
+
+        graph(%x : Tensor) {
+          %5 : bool = prim::Constant[value=1]()
+          %1 : int = prim::Constant[value=0]()
+          %result.1 : Tensor = aten::select(%x, %1, %1)
+          %4 : int = aten::size(%x, %1)
+          %result : Tensor = prim::Loop(%4, %5, %result.1)
+            block0(%i : int, %7 : Tensor) {
+              %10 : Tensor = aten::select(%x, %1, %i)
+              %result.2 : Tensor = aten::mul(%7, %10)
+              -> (%5, %result.2)
+            }
+          return (%result);
+        }
+
+Tracer Warnings
+^^^^^^^^^^^^^^^
+    The tracer produces warnings for several problematic patterns in traced
+    computation.
+    As an example, take a trace of a function that contains an
+    in-place assignment on a slice (a view) of a Tensor::
+
+        def fill_row_zero(x):
+            x[0] = torch.rand(*x.shape[1:2])
+            return x
+
+        traced = torch.jit.trace(fill_row_zero, (torch.rand(3, 4),))
+        print(traced.graph)
+
+    This produces several warnings and a graph which simply returns the input::
+
+        fill_row_zero.py:4: TracerWarning: There are 2 live references to the data region being modified when tracing in-place operator copy_ (possibly due to an assignment). This might cause the trace to be incorrect, because all other views that also reference this data will not reflect this change in the trace! On the other hand, if all other views use the same memory chunk, but are disjoint (e.g. are outputs of torch.split), this might still be safe.
+          x[0] = torch.rand(*x.shape[1:2])
+        fill_row_zero.py:6: TracerWarning: Output nr 1. of the traced function does not match the corresponding output of the Python function. Detailed error:
+        Not within tolerance rtol=1e-05 atol=1e-05 at input[0, 1] (0.09115803241729736 vs. 0.6782537698745728) and 3 other locations (33.00%)
+          traced = torch.jit.trace(fill_row_zero, (torch.rand(3, 4),))
+        graph(%0 : Float(3, 4)) {
+          return (%0);
+        }
+
+    We can fix this by modifying the code to not use the in-place update, but
+    rather build up the result tensor out-of-place with ``torch.cat``::
+
+        def fill_row_zero(x):
+            x = torch.cat((torch.rand(1, *x.shape[1:2]), x[1:]), dim=0)
+            return x
+
+        traced = torch.jit.trace(fill_row_zero, (torch.rand(3, 4),))
+        print(traced.graph)
+
+
+Frequently Asked Questions
+--------------------------
+
+Q: I would like to train a model on GPU and do inference on CPU. What are the
+best practices?
+
+    First convert your model from GPU to CPU and then save it, like so: ::
+
+        cpu_model = gpu_model.cpu()
+        sample_input_cpu = sample_input_gpu.cpu()
+        traced_cpu = torch.jit.trace(cpu_model, sample_input_cpu)
+        torch.jit.save(traced_cpu, "cpu.pth")
+
+        traced_gpu = torch.jit.trace(gpu_model, sample_input_gpu)
+        torch.jit.save(traced_gpu, "gpu.pth")
+
+        # ... later, when using the model:
+
+        if use_gpu:
+            model = torch.jit.load("gpu.pth")
+        else:
+            model = torch.jit.load("cpu.pth")
+
+        model(input)
+
+    This is recommended because the tracer may witness tensor creation on a
+    specific device, so casting an already-loaded model may have unexpected
+    effects. Casting the model *before* saving it ensures that the tracer has
+    the correct device information.
+
+
+Q: How do I store attributes on a ``ScriptModule``?
+
+    Say we have a model like: ::
+
+        class Model(torch.jit.ScriptModule):
+            def __init__(self):
+                super(Model, self).__init__()
+                self.x = 2
+
+            @torch.jit.script_method
+            def forward(self):
+                return self.x
+
+    If ``Model`` is instantiated it will result in a compilation error
+    since the compiler doesn't know about ``x``. There are four ways to inform the
+    compiler of attributes on ``ScriptModule``:
+
+    1. ``nn.Parameter`` - values wrapped in ``nn.Parameter`` will work as they
+       do on ``nn.Module``\s
+
+    2. ``register_buffer`` - values wrapped in ``register_buffer`` will work as
+       they do on ``nn.Module``\s
+
+    3. ``__constants__`` - adding a list called ``__constants__`` at the
+       class definition level will mark the contained names as constants. Constants
+       are saved directly in the code of the model. See
+       `Python-defined Constants`_.
+
+    4. ``torch.jit.Attribute`` - values wrapped in ``torch.jit.Attribute`` can
+       be of any TorchScript type, can be mutated, and are saved outside of the code of
+       the model. See `Module Attributes`_.
+
+
+
+Q: I would like to trace a module's method but I keep getting this error:
+
+``RuntimeError: Cannot insert a Tensor that requires grad as a constant. Consider making it a parameter or input, or detaching the gradient``
+
+    This error usually means that the method you are tracing uses the module's parameters
+    and that you are passing the module's method instead of the module instance (e.g. ``my_module_instance.forward`` vs ``my_module_instance``).
+
+    - Invoking ``trace`` with a module's method captures the module's parameters (which may require gradients) as **constants**.
+    - On the other hand, invoking ``trace`` with a module instance (e.g. ``my_module``) creates a new module and correctly copies the parameters into the new module, so they can accumulate gradients if required.
+
+    Given that ``trace`` treats ``my_module_instance.forward`` as a standalone function, there is currently **no** way to trace
+    a method other than ``forward`` if that method uses the module's parameters.
+    Version **1.1.1** will add a new API, ``trace_module``, that will allow users to trace any method on a module, and more than one method: ::
+
+        class Net(nn.Module):
+            def __init__(self):
+                super(Net, self).__init__()
+                self.conv = nn.Conv2d(1, 1, 3)
+
+            def forward(self, x):
+                return self.conv(x)
+
+            def weighted_kernel_sum(self, weight):
+                return weight * self.conv.weight
+
+        example_weight = torch.rand(1, 1, 3, 3)
+        example_forward_input = torch.rand(1, 1, 3, 3)
+        n = Net()
+        inputs = {'forward' : example_forward_input, 'weighted_kernel_sum' : example_weight}
+        module = torch.jit.trace_module(n, inputs)
+
+
+Builtin Functions
+~~~~~~~~~~~~~~~~~
+
+TorchScript supports a subset of the builtin tensor and neural network
+functions that PyTorch provides. Most methods on Tensor, as well as functions in
+the ``torch`` namespace, all functions in ``torch.nn.functional``, and all
+modules from ``torch.nn``, are supported in TorchScript, excluding those in the
+list below. For unsupported modules, we suggest using :meth:`torch.jit.trace`.
+
+Unsupported ``torch.nn`` Modules ::
+
+    torch.nn.modules.adaptive.AdaptiveLogSoftmaxWithLoss
+    torch.nn.modules.normalization.CrossMapLRN2d
+    torch.nn.modules.fold.Fold
+    torch.nn.modules.fold.Unfold
+    torch.nn.modules.rnn.GRU
+    torch.nn.modules.rnn.RNN
+
+
+.. automodule:: torch.jit.supported_ops
diff --git a/docs/stable/_sources/model_zoo.rst.txt b/docs/stable/_sources/model_zoo.rst.txt
new file mode 100644
index 000000000000..a2a8dec43519
--- /dev/null
+++ b/docs/stable/_sources/model_zoo.rst.txt
@@ -0,0 +1,7 @@
+torch.utils.model_zoo
+===================================
+
+Moved to `torch.hub`.
+
+.. automodule:: torch.utils.model_zoo
+.. autofunction:: load_url
diff --git a/docs/stable/_sources/multiprocessing.rst.txt b/docs/stable/_sources/multiprocessing.rst.txt
new file mode 100644
index 000000000000..f45563e23b67
--- /dev/null
+++ b/docs/stable/_sources/multiprocessing.rst.txt
@@ -0,0 +1,180 @@
+Multiprocessing package - torch.multiprocessing
+===============================================
+
+.. automodule:: torch.multiprocessing
+.. currentmodule:: torch.multiprocessing
+
+.. warning::
+
+    If the main process exits abruptly (e.g. because of an incoming signal),
+    Python's ``multiprocessing`` sometimes fails to clean up its children.
+
+    It's a known caveat, so if you're seeing any resource leaks after
+    interrupting the interpreter, it probably means that this has just happened
+    to you.
+
+Strategy management
+-------------------
+
+.. autofunction:: get_all_sharing_strategies
+.. autofunction:: get_sharing_strategy
+.. autofunction:: set_sharing_strategy
+
+
+.. _multiprocessing-cuda-sharing-details:
+
+Sharing CUDA tensors
+--------------------
+
+Sharing CUDA tensors between processes is supported only in Python 3, using
+the ``spawn`` or ``forkserver`` start method. :mod:`python:multiprocessing` in
+Python 2 can only create subprocesses using ``fork``, and that's not supported
+by the CUDA runtime.
+
+Unlike CPU tensors, the sending process is required to keep the original tensor
+as long as the receiving process retains a copy of the tensor. The refcounting is
+implemented under the hood, but it requires users to follow these best practices.
+
+.. warning::
+    If the consumer process dies abnormally due to a fatal signal, the shared tensor
+    could be forever kept in memory as long as the sending process is running.
+
+
+1. Release memory ASAP in the consumer.
+
+::
+
+    ## Good
+    x = queue.get()
+    # do something with x
+    del x
+
+::
+
+    ## Bad
+    x = queue.get()
+    # do something with x
+    # do everything else (the producer has to keep x in memory)
+
+2. Keep the producer process running until all consumers exit. This prevents
+the situation in which the producer releases memory that is still in use
+by the consumer.
+
+::
+
+    ## producer
+    # send tensors, do something
+    event.wait()
+
+
+::
+
+    ## consumer
+    # receive tensors and use them
+    event.set()
+
+3. Don't pass received tensors on to other processes.
+
+::
+
+    # not going to work
+    x = queue.get()
+    queue_2.put(x)
+
+
+::
+
+    # you need to create a process-local copy
+    x = queue.get()
+    x_clone = x.clone()
+    queue_2.put(x_clone)
+
+
+::
+
+    # putting and getting from the same queue in the same process will likely end up with segfault
+    queue.put(tensor)
+    x = queue.get()
+
+
+Sharing strategies
+------------------
+
+This section provides a brief overview of how different sharing strategies
+work. Note that it applies only to CPU tensors - CUDA tensors will always use
+the CUDA API, as that's the only way they can be shared.
+
+File descriptor - ``file_descriptor``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
+.. note::
+
+    This is the default strategy (except for macOS, where it's not
+    supported).
+
+This strategy will use file descriptors as shared memory handles. Whenever a
+storage is moved to shared memory, a file descriptor obtained from ``shm_open``
+is cached with the object, and when it's going to be sent to other processes,
+the file descriptor will be transferred (e.g. via UNIX sockets) to it. The
+receiver will also cache the file descriptor and ``mmap`` it, to obtain a shared
+view onto the storage data.
+
+Note that if a lot of tensors are shared, this strategy will keep a
+large number of file descriptors open most of the time. If your system has low
+limits for the number of open file descriptors, and you can't raise them, you
+should use the ``file_system`` strategy.
+
+File system - ``file_system``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This strategy will use file names given to ``shm_open`` to identify the shared
+memory regions. This has the benefit of not requiring the implementation to cache
+the file descriptors obtained from it, but at the same time is prone to shared
+memory leaks.
The file can't be deleted right after its creation, because other
+processes need to access it to open their views. If the processes fatally
+crash, or are killed, and don't call the storage destructors, the files will
+remain in the system. This is very serious, because the files keep using up
+memory until the system is restarted or they're freed manually.
+
+To counter the problem of shared memory file leaks, :mod:`torch.multiprocessing`
+will spawn a daemon named ``torch_shm_manager`` that will isolate itself from
+the current process group, and will keep track of all shared memory allocations.
+Once all processes connected to it exit, it will wait a moment to ensure there
+will be no new connections, and will iterate over all shared memory files
+allocated by the group. If it finds that any of them still exist, they will be
+deallocated. We've tested this method and it proved to be robust to various
+failures. Still, if your system has high enough limits, and ``file_descriptor``
+is a supported strategy, we do not recommend switching to this one.
+
+Spawning subprocesses
+---------------------
+
+.. note::
+
+    Available for Python >= 3.4.
+
+    This depends on the ``spawn`` start method in Python's
+    ``multiprocessing`` package.
+
+Spawning a number of subprocesses to perform some function can be done
+by creating ``Process`` instances and calling ``join`` to wait for
+their completion. This approach works fine when dealing with a single
+subprocess but presents potential issues when dealing with multiple
+processes.
+
+Namely, joining processes sequentially implies waiting for them to terminate
+in order. If they terminate out of order, and the first process does not
+terminate, the termination of the later processes will go unnoticed. Also,
+there are no native facilities for error propagation.
+
+The ``spawn`` function below addresses these concerns, handles error
+propagation and out-of-order termination, and actively terminates
+processes upon detecting an error in one of them.
+
+.. autofunction:: spawn
+
+.. class:: SpawnContext
+
+    Returned by :func:`~spawn` when called with ``join=False``.
+
+    .. automethod:: join
diff --git a/docs/stable/_sources/nn.functional.rst.txt b/docs/stable/_sources/nn.functional.rst.txt
new file mode 100644
index 000000000000..93a595d598e3
--- /dev/null
+++ b/docs/stable/_sources/nn.functional.rst.txt
@@ -0,0 +1,516 @@
+.. role:: hidden
+    :class: hidden-section
+
+torch.nn.functional
+===================
+
+.. currentmodule:: torch.nn.functional
+
+Convolution functions
+----------------------------------
+
+:hidden:`conv1d`
+~~~~~~~~~~~~~~~~
+
+.. autofunction:: conv1d
+
+:hidden:`conv2d`
+~~~~~~~~~~~~~~~~
+
+.. autofunction:: conv2d
+
+:hidden:`conv3d`
+~~~~~~~~~~~~~~~~
+
+.. autofunction:: conv3d
+
+:hidden:`conv_transpose1d`
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: conv_transpose1d
+
+:hidden:`conv_transpose2d`
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: conv_transpose2d
+
+:hidden:`conv_transpose3d`
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: conv_transpose3d
+
+:hidden:`unfold`
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: unfold
+
+:hidden:`fold`
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: fold
+
+Pooling functions
+----------------------------------
+
+:hidden:`avg_pool1d`
+~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: avg_pool1d
+
+:hidden:`avg_pool2d`
+~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: avg_pool2d
+
+:hidden:`avg_pool3d`
+~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: avg_pool3d
+
+:hidden:`max_pool1d`
+~~~~~~~~~~~~~~~~~~~~
+
+..
autofunction:: max_pool1d + +:hidden:`max_pool2d` +~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: max_pool2d + +:hidden:`max_pool3d` +~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: max_pool3d + +:hidden:`max_unpool1d` +~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: max_unpool1d + +:hidden:`max_unpool2d` +~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: max_unpool2d + +:hidden:`max_unpool3d` +~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: max_unpool3d + +:hidden:`lp_pool1d` +~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: lp_pool1d + +:hidden:`lp_pool2d` +~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: lp_pool2d + +:hidden:`adaptive_max_pool1d` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: adaptive_max_pool1d + +:hidden:`adaptive_max_pool2d` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: adaptive_max_pool2d + +:hidden:`adaptive_max_pool3d` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: adaptive_max_pool3d + +:hidden:`adaptive_avg_pool1d` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: adaptive_avg_pool1d + +:hidden:`adaptive_avg_pool2d` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: adaptive_avg_pool2d + +:hidden:`adaptive_avg_pool3d` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: adaptive_avg_pool3d + + +Non-linear activation functions +------------------------------- + +:hidden:`threshold` +~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: threshold +.. autofunction:: threshold_ + + +:hidden:`relu` +~~~~~~~~~~~~~~ + +.. autofunction:: relu +.. autofunction:: relu_ + +:hidden:`hardtanh` +~~~~~~~~~~~~~~~~~~ + +.. autofunction:: hardtanh +.. autofunction:: hardtanh_ + +:hidden:`relu6` +~~~~~~~~~~~~~~~ + +.. autofunction:: relu6 + +:hidden:`elu` +~~~~~~~~~~~~~ + +.. autofunction:: elu +.. autofunction:: elu_ + +:hidden:`selu` +~~~~~~~~~~~~~~ + +.. autofunction:: selu + +:hidden:`celu` +~~~~~~~~~~~~~~ + +.. autofunction:: celu + +:hidden:`leaky_relu` +~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: leaky_relu +.. autofunction:: leaky_relu_ + +:hidden:`prelu` +~~~~~~~~~~~~~~~ + +.. autofunction:: prelu + +:hidden:`rrelu` +~~~~~~~~~~~~~~~ + +.. autofunction:: rrelu +.. autofunction:: rrelu_ + +:hidden:`glu` +~~~~~~~~~~~~~~~ + +.. autofunction:: glu + +:hidden:`gelu` +~~~~~~~~~~~~~~~ + +.. autofunction:: gelu + +:hidden:`logsigmoid` +~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: logsigmoid + +:hidden:`hardshrink` +~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: hardshrink + +:hidden:`tanhshrink` +~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: tanhshrink + +:hidden:`softsign` +~~~~~~~~~~~~~~~~~~ + +.. autofunction:: softsign + +:hidden:`softplus` +~~~~~~~~~~~~~~~~~~ + +.. autofunction:: softplus + +:hidden:`softmin` +~~~~~~~~~~~~~~~~~ + +.. autofunction:: softmin + +:hidden:`softmax` +~~~~~~~~~~~~~~~~~ + +.. autofunction:: softmax + +:hidden:`softshrink` +~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: softshrink + +:hidden:`gumbel_softmax` +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: gumbel_softmax + +:hidden:`log_softmax` +~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: log_softmax + +:hidden:`tanh` +~~~~~~~~~~~~~~ + +.. autofunction:: tanh + +:hidden:`sigmoid` +~~~~~~~~~~~~~~~~~ + +.. autofunction:: sigmoid + +Normalization functions +----------------------- + +:hidden:`batch_norm` +~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: batch_norm + +:hidden:`instance_norm` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: instance_norm + +:hidden:`layer_norm` +~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: layer_norm + +:hidden:`local_response_norm` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
autofunction:: local_response_norm + +:hidden:`normalize` +~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: normalize + +Linear functions +---------------- + +:hidden:`linear` +~~~~~~~~~~~~~~~~ + +.. autofunction:: linear + +:hidden:`bilinear` +~~~~~~~~~~~~~~~~~~ + +.. autofunction:: bilinear + +Dropout functions +----------------- + +:hidden:`dropout` +~~~~~~~~~~~~~~~~~ + +.. autofunction:: dropout + +:hidden:`alpha_dropout` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: alpha_dropout + +:hidden:`dropout2d` +~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: dropout2d + +:hidden:`dropout3d` +~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: dropout3d + +Sparse functions +---------------------------------- + +:hidden:`embedding` +~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: embedding + +:hidden:`embedding_bag` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: embedding_bag + +:hidden:`one_hot` +~~~~~~~~~~~~~~~~~ + +.. autofunction:: one_hot + +Distance functions +---------------------------------- + +:hidden:`pairwise_distance` +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: pairwise_distance + +:hidden:`cosine_similarity` +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: cosine_similarity + +:hidden:`pdist` +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: pdist + + +Loss functions +-------------- + +:hidden:`binary_cross_entropy` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: binary_cross_entropy + +:hidden:`binary_cross_entropy_with_logits` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: binary_cross_entropy_with_logits + +:hidden:`poisson_nll_loss` +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: poisson_nll_loss + +:hidden:`cosine_embedding_loss` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: cosine_embedding_loss + +:hidden:`cross_entropy` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: cross_entropy + +:hidden:`ctc_loss` +~~~~~~~~~~~~~~~~~~ + +.. autofunction:: ctc_loss + +:hidden:`hinge_embedding_loss` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: hinge_embedding_loss + +:hidden:`kl_div` +~~~~~~~~~~~~~~~~ + +.. autofunction:: kl_div + +:hidden:`l1_loss` +~~~~~~~~~~~~~~~~~ + +.. autofunction:: l1_loss + +:hidden:`mse_loss` +~~~~~~~~~~~~~~~~~~ + +.. autofunction:: mse_loss + +:hidden:`margin_ranking_loss` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: margin_ranking_loss + +:hidden:`multilabel_margin_loss` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: multilabel_margin_loss + +:hidden:`multilabel_soft_margin_loss` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: multilabel_soft_margin_loss + +:hidden:`multi_margin_loss` +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: multi_margin_loss + +:hidden:`nll_loss` +~~~~~~~~~~~~~~~~~~ + +.. autofunction:: nll_loss + +:hidden:`smooth_l1_loss` +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: smooth_l1_loss + +:hidden:`soft_margin_loss` +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: soft_margin_loss + +:hidden:`triplet_margin_loss` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: triplet_margin_loss + +Vision functions +---------------- + +:hidden:`pixel_shuffle` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: pixel_shuffle + +:hidden:`pad` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: pad + +:hidden:`interpolate` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: interpolate + +:hidden:`upsample` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: upsample + +:hidden:`upsample_nearest` +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: upsample_nearest + +:hidden:`upsample_bilinear` +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
autofunction:: upsample_bilinear + +:hidden:`grid_sample` +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: grid_sample + +:hidden:`affine_grid` +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: affine_grid + +DataParallel functions (multi-GPU, distributed) +----------------------------------------------- + +:hidden:`data_parallel` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: torch.nn.parallel.data_parallel + + diff --git a/docs/stable/_sources/nn.init.rst.txt b/docs/stable/_sources/nn.init.rst.txt new file mode 100644 index 000000000000..099c107e57a7 --- /dev/null +++ b/docs/stable/_sources/nn.init.rst.txt @@ -0,0 +1,21 @@ +.. role:: hidden + :class: hidden-section + +torch.nn.init +============= + +.. currentmodule:: torch.nn.init +.. autofunction:: calculate_gain +.. autofunction:: uniform_ +.. autofunction:: normal_ +.. autofunction:: constant_ +.. autofunction:: ones_ +.. autofunction:: zeros_ +.. autofunction:: eye_ +.. autofunction:: dirac_ +.. autofunction:: xavier_uniform_ +.. autofunction:: xavier_normal_ +.. autofunction:: kaiming_uniform_ +.. autofunction:: kaiming_normal_ +.. autofunction:: orthogonal_ +.. autofunction:: sparse_ diff --git a/docs/stable/_sources/nn.rst.txt b/docs/stable/_sources/nn.rst.txt new file mode 100644 index 000000000000..3a6a4b1c8cf7 --- /dev/null +++ b/docs/stable/_sources/nn.rst.txt @@ -0,0 +1,877 @@ +.. role:: hidden + :class: hidden-section + +torch.nn +=================================== + +.. automodule:: torch.nn +.. currentmodule:: torch.nn + +Parameters +---------- + +.. autoclass:: Parameter + :members: + +Containers +---------------------------------- + +:hidden:`Module` +~~~~~~~~~~~~~~~~ + +.. autoclass:: Module + :members: + +:hidden:`Sequential` +~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: Sequential + :members: + +:hidden:`ModuleList` +~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: ModuleList + :members: + +:hidden:`ModuleDict` +~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: ModuleDict + :members: + +:hidden:`ParameterList` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: ParameterList + :members: + +:hidden:`ParameterDict` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: ParameterDict + :members: + +Convolution layers +---------------------------------- + +:hidden:`Conv1d` +~~~~~~~~~~~~~~~~ + +.. autoclass:: Conv1d + :members: + +:hidden:`Conv2d` +~~~~~~~~~~~~~~~~ + +.. autoclass:: Conv2d + :members: + +:hidden:`Conv3d` +~~~~~~~~~~~~~~~~ + +.. autoclass:: Conv3d + :members: + +:hidden:`ConvTranspose1d` +~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: ConvTranspose1d + :members: + +:hidden:`ConvTranspose2d` +~~~~~~~~~~~~~~~~~~~~~~~~~ + + +.. autoclass:: ConvTranspose2d + :members: + +:hidden:`ConvTranspose3d` +~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: ConvTranspose3d + :members: + +:hidden:`Unfold` +~~~~~~~~~~~~~~~~ + +.. autoclass:: Unfold + :members: + +:hidden:`Fold` +~~~~~~~~~~~~~~ + +.. autoclass:: Fold + :members: + + +Pooling layers +---------------------------------- + +:hidden:`MaxPool1d` +~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: MaxPool1d + :members: + +:hidden:`MaxPool2d` +~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: MaxPool2d + :members: + +:hidden:`MaxPool3d` +~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: MaxPool3d + :members: + +:hidden:`MaxUnpool1d` +~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: MaxUnpool1d + :members: + +:hidden:`MaxUnpool2d` +~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: MaxUnpool2d + :members: + +:hidden:`MaxUnpool3d` +~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: MaxUnpool3d + :members: + +:hidden:`AvgPool1d` +~~~~~~~~~~~~~~~~~~~ + +.. 
autoclass:: AvgPool1d + :members: + +:hidden:`AvgPool2d` +~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: AvgPool2d + :members: + +:hidden:`AvgPool3d` +~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: AvgPool3d + :members: + +:hidden:`FractionalMaxPool2d` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: FractionalMaxPool2d + :members: + +:hidden:`LPPool1d` +~~~~~~~~~~~~~~~~~~ + +.. autoclass:: LPPool1d + :members: + +:hidden:`LPPool2d` +~~~~~~~~~~~~~~~~~~ + +.. autoclass:: LPPool2d + :members: + +:hidden:`AdaptiveMaxPool1d` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: AdaptiveMaxPool1d + :members: + +:hidden:`AdaptiveMaxPool2d` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: AdaptiveMaxPool2d + :members: + +:hidden:`AdaptiveMaxPool3d` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: AdaptiveMaxPool3d + :members: + +:hidden:`AdaptiveAvgPool1d` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: AdaptiveAvgPool1d + :members: + +:hidden:`AdaptiveAvgPool2d` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: AdaptiveAvgPool2d + :members: + +:hidden:`AdaptiveAvgPool3d` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: AdaptiveAvgPool3d + :members: + + +Padding layers +-------------- + +:hidden:`ReflectionPad1d` +~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: ReflectionPad1d + :members: + +:hidden:`ReflectionPad2d` +~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: ReflectionPad2d + :members: + +:hidden:`ReplicationPad1d` +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: ReplicationPad1d + :members: + +:hidden:`ReplicationPad2d` +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: ReplicationPad2d + :members: + +:hidden:`ReplicationPad3d` +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: ReplicationPad3d + :members: + +:hidden:`ZeroPad2d` +~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: ZeroPad2d + :members: + +:hidden:`ConstantPad1d` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: ConstantPad1d + :members: + +:hidden:`ConstantPad2d` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: ConstantPad2d + :members: + +:hidden:`ConstantPad3d` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: ConstantPad3d + :members: + + +Non-linear activations (weighted sum, nonlinearity) +--------------------------------------------------- + +:hidden:`ELU` +~~~~~~~~~~~~~ + +.. autoclass:: ELU + :members: + +:hidden:`Hardshrink` +~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: Hardshrink + :members: + +:hidden:`Hardtanh` +~~~~~~~~~~~~~~~~~~ + +.. autoclass:: Hardtanh + :members: + +:hidden:`LeakyReLU` +~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: LeakyReLU + :members: + +:hidden:`LogSigmoid` +~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: LogSigmoid + :members: + +:hidden:`MultiheadAttention` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. autoclass:: MultiheadAttention + :members: + +:hidden:`PReLU` +~~~~~~~~~~~~~~~ + +.. autoclass:: PReLU + :members: + +:hidden:`ReLU` +~~~~~~~~~~~~~~ + +.. autoclass:: ReLU + :members: + +:hidden:`ReLU6` +~~~~~~~~~~~~~~~ + +.. autoclass:: ReLU6 + :members: + +:hidden:`RReLU` +~~~~~~~~~~~~~~~ + +.. autoclass:: RReLU + :members: + +:hidden:`SELU` +~~~~~~~~~~~~~~ + +.. autoclass:: SELU + :members: + +:hidden:`CELU` +~~~~~~~~~~~~~~ + +.. autoclass:: CELU + :members: + +:hidden:`Sigmoid` +~~~~~~~~~~~~~~~~~ + +.. autoclass:: Sigmoid + :members: + +:hidden:`Softplus` +~~~~~~~~~~~~~~~~~~ + +.. autoclass:: Softplus + :members: + +:hidden:`Softshrink` +~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: Softshrink + :members: + +:hidden:`Softsign` +~~~~~~~~~~~~~~~~~~ + +.. autoclass:: Softsign + :members: + +:hidden:`Tanh` +~~~~~~~~~~~~~~ + +.. 
autoclass:: Tanh + :members: + +:hidden:`Tanhshrink` +~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: Tanhshrink + :members: + +:hidden:`Threshold` +~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: Threshold + :members: + +Non-linear activations (other) +------------------------------ + +:hidden:`Softmin` +~~~~~~~~~~~~~~~~~ + +.. autoclass:: Softmin + :members: + +:hidden:`Softmax` +~~~~~~~~~~~~~~~~~ + +.. autoclass:: Softmax + :members: + +:hidden:`Softmax2d` +~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: Softmax2d + :members: + +:hidden:`LogSoftmax` +~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: LogSoftmax + :members: + +:hidden:`AdaptiveLogSoftmaxWithLoss` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: AdaptiveLogSoftmaxWithLoss + :members: + +Normalization layers +---------------------------------- + +:hidden:`BatchNorm1d` +~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: BatchNorm1d + :members: + +:hidden:`BatchNorm2d` +~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: BatchNorm2d + :members: + +:hidden:`BatchNorm3d` +~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: BatchNorm3d + :members: + +:hidden:`GroupNorm` +~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: GroupNorm + :members: + +:hidden:`SyncBatchNorm` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: SyncBatchNorm + :members: + +:hidden:`InstanceNorm1d` +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: InstanceNorm1d + :members: + +:hidden:`InstanceNorm2d` +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: InstanceNorm2d + :members: + +:hidden:`InstanceNorm3d` +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: InstanceNorm3d + :members: + +:hidden:`LayerNorm` +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: LayerNorm + :members: + +:hidden:`LocalResponseNorm` +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: LocalResponseNorm + :members: + +Recurrent layers +---------------------------------- + +:hidden:`RNN` +~~~~~~~~~~~~~ + +.. autoclass:: RNN + :members: + +:hidden:`LSTM` +~~~~~~~~~~~~~~ + +.. autoclass:: LSTM + :members: + +:hidden:`GRU` +~~~~~~~~~~~~~ + +.. autoclass:: GRU + :members: + +:hidden:`RNNCell` +~~~~~~~~~~~~~~~~~ + +.. autoclass:: RNNCell + :members: + +:hidden:`LSTMCell` +~~~~~~~~~~~~~~~~~~ + +.. autoclass:: LSTMCell + :members: + +:hidden:`GRUCell` +~~~~~~~~~~~~~~~~~ + +.. autoclass:: GRUCell + :members: + +Transformer layers +---------------------------------- + +:hidden:`Transformer` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: Transformer + :members: + +:hidden:`TransformerEncoder` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: TransformerEncoder + :members: + +:hidden:`TransformerDecoder` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: TransformerDecoder + :members: + +:hidden:`TransformerEncoderLayer` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: TransformerEncoderLayer + :members: + +:hidden:`TransformerDecoderLayer` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: TransformerDecoderLayer + :members: + +Linear layers +---------------------------------- + +:hidden:`Identity` +~~~~~~~~~~~~~~~~~~ + +.. autoclass:: Identity + :members: + +:hidden:`Linear` +~~~~~~~~~~~~~~~~ + +.. autoclass:: Linear + :members: + +:hidden:`Bilinear` +~~~~~~~~~~~~~~~~~~ + +.. autoclass:: Bilinear + :members: + +Dropout layers +---------------------------------- + +:hidden:`Dropout` +~~~~~~~~~~~~~~~~~ + +.. autoclass:: Dropout + :members: + +:hidden:`Dropout2d` +~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: Dropout2d + :members: + +:hidden:`Dropout3d` +~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: Dropout3d + :members: + +:hidden:`AlphaDropout` +~~~~~~~~~~~~~~~~~~~~~~ + +.. 
autoclass:: AlphaDropout + :members: + + +Sparse layers +---------------------------------- + +:hidden:`Embedding` +~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: Embedding + :members: + +:hidden:`EmbeddingBag` +~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: EmbeddingBag + :members: + +Distance functions +---------------------------------- + +:hidden:`CosineSimilarity` +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: CosineSimilarity + :members: + +:hidden:`PairwiseDistance` +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: PairwiseDistance + :members: + + +Loss functions +---------------------------------- + +:hidden:`L1Loss` +~~~~~~~~~~~~~~~~ + +.. autoclass:: L1Loss + :members: + +:hidden:`MSELoss` +~~~~~~~~~~~~~~~~~ + +.. autoclass:: MSELoss + :members: + +:hidden:`CrossEntropyLoss` +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: CrossEntropyLoss + :members: + +:hidden:`CTCLoss` +~~~~~~~~~~~~~~~~~ + +.. autoclass:: CTCLoss + :members: + +:hidden:`NLLLoss` +~~~~~~~~~~~~~~~~~ + +.. autoclass:: NLLLoss + :members: + +:hidden:`PoissonNLLLoss` +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: PoissonNLLLoss + :members: + +:hidden:`KLDivLoss` +~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: KLDivLoss + :members: + +:hidden:`BCELoss` +~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: BCELoss + :members: + +:hidden:`BCEWithLogitsLoss` +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: BCEWithLogitsLoss + :members: + +:hidden:`MarginRankingLoss` +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: MarginRankingLoss + :members: + +:hidden:`HingeEmbeddingLoss` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: HingeEmbeddingLoss + :members: + +:hidden:`MultiLabelMarginLoss` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: MultiLabelMarginLoss + :members: + +:hidden:`SmoothL1Loss` +~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: SmoothL1Loss + :members: + +:hidden:`SoftMarginLoss` +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: SoftMarginLoss + :members: + +:hidden:`MultiLabelSoftMarginLoss` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: MultiLabelSoftMarginLoss + :members: + +:hidden:`CosineEmbeddingLoss` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: CosineEmbeddingLoss + :members: + +:hidden:`MultiMarginLoss` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: MultiMarginLoss + :members: + +:hidden:`TripletMarginLoss` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: TripletMarginLoss + :members: + + +Vision layers +---------------- + +:hidden:`PixelShuffle` +~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: PixelShuffle + :members: + +:hidden:`Upsample` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: Upsample + :members: + +:hidden:`UpsamplingNearest2d` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: UpsamplingNearest2d + :members: + +:hidden:`UpsamplingBilinear2d` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: UpsamplingBilinear2d + :members: + + +DataParallel layers (multi-GPU, distributed) +-------------------------------------------- + +:hidden:`DataParallel` +~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: DataParallel + :members: + +:hidden:`DistributedDataParallel` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: torch.nn.parallel.DistributedDataParallel + :members: + + +Utilities +--------- + +:hidden:`clip_grad_norm_` +~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: torch.nn.utils.clip_grad_norm_ + +:hidden:`clip_grad_value_` +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: torch.nn.utils.clip_grad_value_ + +:hidden:`parameters_to_vector` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
autofunction:: torch.nn.utils.parameters_to_vector
+
+:hidden:`vector_to_parameters`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: torch.nn.utils.vector_to_parameters
+
+:hidden:`weight_norm`
+~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: torch.nn.utils.weight_norm
+
+:hidden:`remove_weight_norm`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: torch.nn.utils.remove_weight_norm
+
+:hidden:`spectral_norm`
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: torch.nn.utils.spectral_norm
+
+:hidden:`remove_spectral_norm`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: torch.nn.utils.remove_spectral_norm
+
+
+.. currentmodule:: torch.nn.utils.rnn
+
+:hidden:`PackedSequence`
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: torch.nn.utils.rnn.PackedSequence
+
+
+:hidden:`pack_padded_sequence`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: torch.nn.utils.rnn.pack_padded_sequence
+
+
+:hidden:`pad_packed_sequence`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: torch.nn.utils.rnn.pad_packed_sequence
+
+
+:hidden:`pad_sequence`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: torch.nn.utils.rnn.pad_sequence
+
+
+:hidden:`pack_sequence`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: torch.nn.utils.rnn.pack_sequence
+
+:hidden:`Flatten`
+~~~~~~~~~~~~~~~~~
+
+.. autoclass:: Flatten
+    :members:
diff --git a/docs/stable/_sources/notes/autograd.rst.txt b/docs/stable/_sources/notes/autograd.rst.txt
new file mode 100644
index 000000000000..3a7d610b05d1
--- /dev/null
+++ b/docs/stable/_sources/notes/autograd.rst.txt
@@ -0,0 +1,117 @@
+Autograd mechanics
+==================
+
+This note will present an overview of how autograd works and records the
+operations. It's not strictly necessary to understand all of this, but we recommend
+getting familiar with it, as it will help you write more efficient, cleaner
+programs, and can aid you in debugging.
+
+.. _excluding-subgraphs:
+
+Excluding subgraphs from backward
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Every Tensor has a flag, :attr:`requires_grad`, that allows for fine-grained
+exclusion of subgraphs from gradient computation and can increase efficiency.
+
+.. _excluding-requires_grad:
+
+``requires_grad``
+~~~~~~~~~~~~~~~~~
+
+If there's a single input to an operation that requires gradient, its output
+will also require gradient. Conversely, the output won't require gradient
+only if no input requires it. Backward computation is never performed in
+subgraphs where no Tensor requires gradients.
+
+.. code::
+
+    >>> x = torch.randn(5, 5)  # requires_grad=False by default
+    >>> y = torch.randn(5, 5)  # requires_grad=False by default
+    >>> z = torch.randn((5, 5), requires_grad=True)
+    >>> a = x + y
+    >>> a.requires_grad
+    False
+    >>> b = a + z
+    >>> b.requires_grad
+    True
+
+This is especially useful when you want to freeze part of your model, or you
+know in advance that you're not going to use gradients w.r.t. some parameters.
+For example, if you want to finetune a pretrained CNN, it's enough to switch the
+:attr:`requires_grad` flags in the frozen base, and no intermediate buffers will
+be saved, until the computation gets to the last layer, where the affine
+transform will use weights that require gradient, and the output of the network
+will also require them.
+
+.. code::
+
+    model = torchvision.models.resnet18(pretrained=True)
+    for param in model.parameters():
+        param.requires_grad = False
+    # Replace the last fully-connected layer
+    # Parameters of newly constructed modules have requires_grad=True by default
+    model.fc = nn.Linear(512, 100)
+
+    # Optimize only the classifier
+    optimizer = optim.SGD(model.fc.parameters(), lr=1e-2, momentum=0.9)
+
+How autograd encodes the history
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Autograd is a reverse automatic differentiation system. Conceptually,
+autograd records a graph of all of the operations that created
+the data as you execute operations, giving you a directed acyclic graph
+whose leaves are the input tensors and roots are the output tensors.
+By tracing this graph from roots to leaves, you can automatically
+compute the gradients using the chain rule.
+
+Internally, autograd represents this graph as a graph of
+:class:`Function` objects (really expressions), which can be
+:meth:`~torch.autograd.Function.apply` ed to compute the result of
+evaluating the graph. When computing the forwards pass, autograd
+simultaneously performs the requested computations and builds up a graph
+representing the function that computes the gradient (the ``.grad_fn``
+attribute of each :class:`torch.Tensor` is an entry point into this graph).
+When the forwards pass is completed, we evaluate this graph in the
+backwards pass to compute the gradients.
+
+An important thing to note is that the graph is recreated from scratch at every
+iteration, and this is exactly what allows for using arbitrary Python control
+flow statements, which can change the overall shape and size of the graph at
+every iteration. You don't have to encode all possible paths before you
+launch the training - what you run is what you differentiate.
+
+In-place operations with autograd
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Supporting in-place operations in autograd is a hard matter, and we discourage
+their use in most cases. Autograd's aggressive buffer freeing and reuse makes
+it very efficient, and there are very few occasions when in-place operations
+actually lower memory usage by any significant amount. Unless you're operating
+under heavy memory pressure, you might never need to use them.
+
+There are two main reasons that limit the applicability of in-place operations:
+
+1. In-place operations can potentially overwrite values required to compute
+   gradients.
+
+2. Every in-place operation actually requires the implementation to rewrite the
+   computational graph. Out-of-place versions simply allocate new objects and
+   keep references to the old graph, while in-place operations require
+   changing the creator of all inputs to the :class:`Function` representing
+   this operation. This can be tricky, especially if there are many Tensors
+   that reference the same storage (e.g. created by indexing or transposing),
+   and in-place functions will actually raise an error if the storage of
+   modified inputs is referenced by any other :class:`Tensor`.
+
+In-place correctness checks
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Every tensor keeps a version counter that is incremented every time it is
+marked dirty in any operation. When a Function saves any tensors for backward,
+the version counter of each containing Tensor is saved as well. Once you access
+``self.saved_tensors``, the check is performed, and if the current counter is
+greater than the saved value, an error is raised.
This ensures that if you're using in-place +functions and not seeing any errors, you can be sure that the computed +gradients are correct. diff --git a/docs/stable/_sources/notes/broadcasting.rst.txt b/docs/stable/_sources/notes/broadcasting.rst.txt new file mode 100644 index 000000000000..40e0adc73b19 --- /dev/null +++ b/docs/stable/_sources/notes/broadcasting.rst.txt @@ -0,0 +1,113 @@ +.. _broadcasting-semantics: + +Broadcasting semantics +====================== + +Many PyTorch operations support :any:`NumPy Broadcasting Semantics `. + +In short, if a PyTorch operation supports broadcast, then its Tensor arguments can be +automatically expanded to be of equal sizes (without making copies of the data). + +General semantics +----------------- +Two tensors are "broadcastable" if the following rules hold: + +- Each tensor has at least one dimension. +- When iterating over the dimension sizes, starting at the trailing dimension, + the dimension sizes must either be equal, one of them is 1, or one of them + does not exist. + +For Example:: + + >>> x=torch.empty(5,7,3) + >>> y=torch.empty(5,7,3) + # same shapes are always broadcastable (i.e. the above rules always hold) + + >>> x=torch.empty((0,)) + >>> y=torch.empty(2,2) + # x and y are not broadcastable, because x does not have at least 1 dimension + + # can line up trailing dimensions + >>> x=torch.empty(5,3,4,1) + >>> y=torch.empty( 3,1,1) + # x and y are broadcastable. + # 1st trailing dimension: both have size 1 + # 2nd trailing dimension: y has size 1 + # 3rd trailing dimension: x size == y size + # 4th trailing dimension: y dimension doesn't exist + + # but: + >>> x=torch.empty(5,2,4,1) + >>> y=torch.empty( 3,1,1) + # x and y are not broadcastable, because in the 3rd trailing dimension 2 != 3 + +If two tensors :attr:`x`, :attr:`y` are "broadcastable", the resulting tensor size +is calculated as follows: + +- If the number of dimensions of :attr:`x` and :attr:`y` are not equal, prepend 1 + to the dimensions of the tensor with fewer dimensions to make them equal length. +- Then, for each dimension size, the resulting dimension size is the max of the sizes of + :attr:`x` and :attr:`y` along that dimension. + +For Example:: + + # can line up trailing dimensions to make reading easier + >>> x=torch.empty(5,1,4,1) + >>> y=torch.empty( 3,1,1) + >>> (x+y).size() + torch.Size([5, 3, 4, 1]) + + # but not necessary: + >>> x=torch.empty(1) + >>> y=torch.empty(3,1,7) + >>> (x+y).size() + torch.Size([3, 1, 7]) + + >>> x=torch.empty(5,2,4,1) + >>> y=torch.empty(3,1,1) + >>> (x+y).size() + RuntimeError: The size of tensor a (2) must match the size of tensor b (3) at non-singleton dimension 1 + +In-place semantics +------------------ +One complication is that in-place operations do not allow the in-place tensor to change shape +as a result of the broadcast. + +For Example:: + + >>> x=torch.empty(5,3,4,1) + >>> y=torch.empty(3,1,1) + >>> (x.add_(y)).size() + torch.Size([5, 3, 4, 1]) + + # but: + >>> x=torch.empty(1,3,1) + >>> y=torch.empty(3,1,7) + >>> (x.add_(y)).size() + RuntimeError: The expanded size of the tensor (1) must match the existing size (7) at non-singleton dimension 2. + +Backwards compatibility +----------------------- +Prior versions of PyTorch allowed certain pointwise functions to execute on tensors with different shapes, +as long as the number of elements in each tensor was equal. The pointwise operation would then be carried +out by viewing each tensor as 1-dimensional. 
PyTorch now supports broadcasting and the "1-dimensional"
+pointwise behavior is considered deprecated and will generate a Python warning in cases where tensors are
+not broadcastable, but have the same number of elements.
+
+Note that the introduction of broadcasting can cause backwards incompatible changes in the case where
+two tensors do not have the same shape, but are broadcastable and have the same number of elements.
+For Example::
+
+    >>> torch.add(torch.ones(4,1), torch.randn(4))
+
+would previously produce a Tensor with size: torch.Size([4,1]), but now produces a Tensor with size: torch.Size([4,4]).
+In order to help identify cases in your code where backwards incompatibilities introduced by broadcasting may exist,
+you may set ``torch.utils.backcompat.broadcast_warning.enabled`` to ``True``, which will generate a Python warning
+in such cases.
+
+For Example::
+
+    >>> torch.utils.backcompat.broadcast_warning.enabled=True
+    >>> torch.add(torch.ones(4,1), torch.ones(4))
+    __main__:1: UserWarning: self and other do not have the same shape, but are broadcastable, and have the same number of elements.
+    Changing behavior in a backwards incompatible manner to broadcasting rather than viewing as 1-dimensional.
diff --git a/docs/stable/_sources/notes/cpu_threading_torchscript_inference.rst.txt b/docs/stable/_sources/notes/cpu_threading_torchscript_inference.rst.txt
new file mode 100644
index 000000000000..600f6813770d
--- /dev/null
+++ b/docs/stable/_sources/notes/cpu_threading_torchscript_inference.rst.txt
@@ -0,0 +1,124 @@
+.. _cpu-threading-torchscript-inference:
+
+CPU threading and TorchScript inference
+=================================================
+
+PyTorch allows using multiple CPU threads during TorchScript model inference.
+The following figure shows different levels of parallelism one would find in a
+typical application:
+
+.. image:: cpu_threading_torchscript_inference.svg
+   :width: 75%
+
+One or more inference threads execute a model's forward pass on the given inputs.
+Each inference thread invokes a JIT interpreter that executes the ops
+of a model inline, one by one. A model can utilize a ``fork`` TorchScript
+primitive to launch an asynchronous task. Forking several operations at once
+results in tasks that are executed in parallel. The ``fork`` operator returns a
+``future`` object which can be used to synchronize on later, for example
+(the weights are passed in explicitly here so that the scripted functions are
+self-contained):
+
+.. code-block:: python
+
+    @torch.jit.script
+    def compute_z(x, w_z):
+        return torch.mm(x, w_z)
+
+    @torch.jit.script
+    def forward(x, w_y, w_z):
+        # launch compute_z asynchronously:
+        fut = torch.jit._fork(compute_z, x, w_z)
+        # execute the next operation in parallel to compute_z:
+        y = torch.mm(x, w_y)
+        # wait for the result of compute_z:
+        z = torch.jit._wait(fut)
+        return y + z
+
+
+PyTorch uses a single thread pool for the inter-op parallelism; this thread pool
+is shared by all inference tasks that are forked within the application process.
+
+In addition to the inter-op parallelism, PyTorch can also utilize multiple threads
+within the ops (`intra-op parallelism`). This can be useful in many cases,
+including element-wise ops on large tensors, convolutions, GEMMs, embedding
+lookups and others.
+
+
+Build options
+-------------
+
+PyTorch uses an internal ATen library to implement ops. In addition to that,
+PyTorch can also be built with support for external libraries, such as MKL_ and MKL-DNN_,
+to speed up computations on CPU.
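+
+As a quick way to see how a given build was configured and which threading
+backends are active, one can use the ``torch.__config__.parallel_info()``
+utility mentioned in the Runtime API section below (a minimal sketch; the
+exact output depends on the build)::
+
+    import torch
+
+    # prints thread-pool settings and the parallelization backends the
+    # build was compiled with (e.g. OpenMP, MKL, MKL-DNN)
+    print(torch.__config__.parallel_info())
+
+    # the intra-op thread pool size can be adjusted at runtime
+    torch.set_num_threads(4)
+    print(torch.get_num_threads())  # -> 4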
+
+ATen, MKL and MKL-DNN support intra-op parallelism and depend on the
+following parallelization libraries to implement it:
+
+ * OpenMP_ - a standard (and a library, usually shipped with a compiler), widely used in external libraries;
+ * TBB_ - a newer parallelization library optimized for task-based parallelism and concurrent environments.
+
+OpenMP has historically been used by a large number of libraries. It is known
+for its relative ease of use and its support for loop-based parallelism and other primitives.
+At the same time, OpenMP is not known for good interoperability with other threading
+libraries used by the application. In particular, OpenMP does not guarantee that a single per-process intra-op thread
+pool is going to be used in the application. On the contrary, two different inter-op
+threads will likely use different OpenMP thread pools for intra-op work.
+This might result in a large number of threads used by the application.
+
+TBB is used to a lesser extent in external libraries, but, at the same time,
+is optimized for concurrent environments. PyTorch's TBB backend guarantees that
+there's a separate, single, per-process intra-op thread pool used by all of the
+ops running in the application.
+
+Depending on the use case, one might find one or the other parallelization
+library a better choice for their application.
+
+PyTorch allows selecting the parallelization backend used by ATen and other
+libraries at build time with the following build options:
+
++------------+-----------------------+-----------------------------+----------------------------------------+
+| Library    | Build Option          | Values                      | Notes                                  |
++============+=======================+=============================+========================================+
+| ATen       | ``ATEN_THREADING``    | ``OMP`` (default), ``TBB``  |                                        |
++------------+-----------------------+-----------------------------+----------------------------------------+
+| MKL        | ``MKL_THREADING``     | (same)                      | To enable MKL use ``BLAS=MKL``         |
++------------+-----------------------+-----------------------------+----------------------------------------+
+| MKL-DNN    | ``MKLDNN_THREADING``  | (same)                      | To enable MKL-DNN use ``USE_MKLDNN=1`` |
++------------+-----------------------+-----------------------------+----------------------------------------+
+
+It is strongly recommended not to mix OpenMP and TBB within one build.
+
+Any of the ``TBB`` values above requires the ``USE_TBB=1`` build setting (default: OFF).
+A separate setting ``USE_OPENMP=1`` (default: ON) is required for OpenMP parallelism.
+
+Runtime API
+-----------
+
+The following API is used to control thread settings:
+
++------------------------+-----------------------------------------------------------+---------------------------------------------------------+
+| Type of parallelism    | Settings                                                  | Notes                                                   |
++========================+===========================================================+=========================================================+
+| Inter-op parallelism   | ``at::set_num_interop_threads``,                          | ``set*`` functions can only be called once and only     |
+|                        | ``at::get_num_interop_threads`` (C++)                     | during the startup, before the actual operators running;|
+|                        |                                                           |                                                         |
+|                        | ``set_num_interop_threads``,                              | Default number of threads: number of CPU cores.         |
+|                        | ``get_num_interop_threads`` (Python, :mod:`torch` module) |                                                         |
++------------------------+-----------------------------------------------------------+                                                         |
+| Intra-op parallelism   | ``at::set_num_threads``,                                  |                                                         |
+|                        | ``at::get_num_threads`` (C++)                             |                                                         |
+|                        | ``set_num_threads``,                                      |                                                         |
+|                        | ``get_num_threads`` (Python, :mod:`torch` module)         |                                                         |
+|                        |                                                           |                                                         |
+|                        | Environment variables:                                    |                                                         |
+|                        | ``OMP_NUM_THREADS`` and ``MKL_NUM_THREADS``               |                                                         |
++------------------------+-----------------------------------------------------------+---------------------------------------------------------+
+
+For the intra-op parallelism settings, ``at::set_num_threads`` and ``torch.set_num_threads`` always take precedence
+over environment variables, and the ``MKL_NUM_THREADS`` variable takes precedence over ``OMP_NUM_THREADS``.
+
+.. note::
+    The ``parallel_info`` utility prints information about thread settings and can be used for debugging.
+    Similar output can also be obtained in Python with a ``torch.__config__.parallel_info()`` call.
+
+.. _OpenMP: https://www.openmp.org/
+.. _TBB: https://github.com/intel/tbb
+.. _MKL: https://software.intel.com/en-us/mkl
+.. _MKL-DNN: https://github.com/intel/mkl-dnn
diff --git a/docs/stable/_sources/notes/cuda.rst.txt b/docs/stable/_sources/notes/cuda.rst.txt
new file mode 100644
index 000000000000..e0a9005653bd
--- /dev/null
+++ b/docs/stable/_sources/notes/cuda.rst.txt
@@ -0,0 +1,304 @@
+.. _cuda-semantics:
+
+CUDA semantics
+==============
+
+:mod:`torch.cuda` is used to set up and run CUDA operations. It keeps track of
+the currently selected GPU, and all CUDA tensors you allocate will by default be
+created on that device. The selected device can be changed with a
+:any:`torch.cuda.device` context manager.
+
+However, once a tensor is allocated, you can do operations on it irrespective
+of the selected device, and the results will always be placed on the same
+device as the tensor.
+
+Cross-GPU operations are not allowed by default, with the exception of
+:meth:`~torch.Tensor.copy_` and other methods with copy-like functionality
+such as :meth:`~torch.Tensor.to` and :meth:`~torch.Tensor.cuda`.
+Unless you enable peer-to-peer memory access, any attempts to launch ops on
+tensors spread across different devices will raise an error.
+
+Below you can find a small example showcasing this::
+
+    cuda = torch.device('cuda')     # Default CUDA device
+    cuda0 = torch.device('cuda:0')
+    cuda2 = torch.device('cuda:2')  # GPU 2 (these are 0-indexed)
+
+    x = torch.tensor([1., 2.], device=cuda0)
+    # x.device is device(type='cuda', index=0)
+    y = torch.tensor([1., 2.]).cuda()
+    # y.device is device(type='cuda', index=0)
+
+    with torch.cuda.device(1):
+        # allocates a tensor on GPU 1
+        a = torch.tensor([1., 2.], device=cuda)
+
+        # transfers a tensor from CPU to GPU 1
+        b = torch.tensor([1., 2.]).cuda()
+        # a.device and b.device are device(type='cuda', index=1)
+
+        # You can also use ``Tensor.to`` to transfer a tensor:
+        b2 = torch.tensor([1., 2.]).to(device=cuda)
+        # b.device and b2.device are device(type='cuda', index=1)
+
+        c = a + b
+        # c.device is device(type='cuda', index=1)
+
+        z = x + y
+        # z.device is device(type='cuda', index=0)
+
+        # even within a context, you can specify the device
+        # (or give a GPU index to the .cuda call)
+        d = torch.randn(2, device=cuda2)
+        e = torch.randn(2).to(cuda2)
+        f = torch.randn(2).cuda(cuda2)
+        # d.device, e.device, and f.device are all device(type='cuda', index=2)
+
+Asynchronous execution
+----------------------
+
+By default, GPU operations are asynchronous. When you call a function that
+uses the GPU, the operations are *enqueued* to the particular device, but not
+necessarily executed until later. This allows us to execute more computations
+in parallel, including operations on CPU or other GPUs.
+
+In general, the effect of asynchronous computation is invisible to the caller,
+because (1) each device executes operations in the order they are queued, and
+(2) PyTorch automatically performs necessary synchronization when copying data
+between CPU and GPU or between two GPUs. Hence, computation will proceed as if
+every operation was executed synchronously.
+
+You can force synchronous computation by setting the environment variable
+``CUDA_LAUNCH_BLOCKING=1``. This can be handy when an error occurs on the GPU.
+(With asynchronous execution, such an error isn't reported until after the
+operation is actually executed, so the stack trace does not show where it was
+requested.)
+
+As an exception, several functions such as :meth:`~torch.Tensor.to` and
+:meth:`~torch.Tensor.copy_` admit an explicit :attr:`non_blocking` argument,
+which lets the caller bypass synchronization when it is unnecessary.
+Another exception is CUDA streams, explained below.
+
+CUDA streams
+^^^^^^^^^^^^
+
+A `CUDA stream`_ is a linear sequence of execution that belongs to a specific
+device. You normally do not need to create one explicitly: by default, each
+device uses its own "default" stream.
+
+Operations inside each stream are serialized in the order they are created,
+but operations from different streams can execute concurrently in any
+relative order, unless explicit synchronization functions (such as
+:meth:`~torch.cuda.synchronize` or :meth:`~torch.cuda.Stream.wait_stream`) are
+used. For example, the following code is incorrect::
+
+    cuda = torch.device('cuda')
+    s = torch.cuda.Stream()  # Create a new stream.
+    A = torch.empty((100, 100), device=cuda).normal_(0.0, 1.0)
+    with torch.cuda.stream(s):
+        # sum() may start execution before normal_() finishes!
+        B = torch.sum(A)
+
+When the "current stream" is the default stream, PyTorch automatically performs
+necessary synchronization when data is moved around, as explained above.
+However, when using non-default streams, it is the user's responsibility to
+ensure proper synchronization.
+
+.. _CUDA stream: http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#streams
+
+.. _cuda-memory-management:
+
+Memory management
+-----------------
+
+PyTorch uses a caching memory allocator to speed up memory allocations. This
+allows fast memory deallocation without device synchronizations. However, the
+unused memory managed by the allocator will still show as if used in
+``nvidia-smi``. You can use :meth:`~torch.cuda.memory_allocated` and
+:meth:`~torch.cuda.max_memory_allocated` to monitor memory occupied by
+tensors, and use :meth:`~torch.cuda.memory_cached` and
+:meth:`~torch.cuda.max_memory_cached` to monitor memory managed by the caching
+allocator. Calling :meth:`~torch.cuda.empty_cache` releases all **unused**
+cached memory from PyTorch so that it can be used by other GPU applications.
+However, GPU memory occupied by tensors will not be freed, so this cannot
+increase the amount of GPU memory available for PyTorch.
+
+.. _cufft-plan-cache:
+
+cuFFT plan cache
+----------------
+
+For each CUDA device, an LRU cache of cuFFT plans is used to speed up repeatedly
+running FFT methods (e.g., :func:`torch.fft`) on CUDA tensors of the same geometry
+with the same configuration.
Because some cuFFT plans may allocate GPU memory, +these caches have a maximum capacity. + +You may control and query the properties of the cache of current device with +the following APIs: + +* ``torch.backends.cuda.cufft_plan_cache.max_size`` gives the capacity of the + cache (default is 4096 on CUDA 10 and newer, and 1023 on older CUDA versions). + Setting this value directly modifies the capacity. + +* ``torch.backends.cuda.cufft_plan_cache.size`` gives the number of plans + currently residing in the cache. + +* ``torch.backends.cuda.cufft_plan_cache.clear()`` clears the cache. + +To control and query plan caches of a non-default device, you can index the +``torch.backends.cuda.cufft_plan_cache`` object with either a :class:`torch.device` +object or a device index, and access one of the above attributes. E.g., to set +the capacity of the cache for device ``1``, one can write +``torch.backends.cuda.cufft_plan_cache[1].max_size = 10``. + +Best practices +-------------- + +Device-agnostic code +^^^^^^^^^^^^^^^^^^^^ + +Due to the structure of PyTorch, you may need to explicitly write +device-agnostic (CPU or GPU) code; an example may be creating a new tensor as +the initial hidden state of a recurrent neural network. + +The first step is to determine whether the GPU should be used or not. A common +pattern is to use Python's ``argparse`` module to read in user arguments, and +have a flag that can be used to disable CUDA, in combination with +:meth:`~torch.cuda.is_available`. In the following, ``args.device`` results in a +:class:`torch.device` object that can be used to move tensors to CPU or CUDA. + +:: + + import argparse + import torch + + parser = argparse.ArgumentParser(description='PyTorch Example') + parser.add_argument('--disable-cuda', action='store_true', + help='Disable CUDA') + args = parser.parse_args() + args.device = None + if not args.disable_cuda and torch.cuda.is_available(): + args.device = torch.device('cuda') + else: + args.device = torch.device('cpu') + +Now that we have ``args.device``, we can use it to create a Tensor on the +desired device. + +:: + + x = torch.empty((8, 42), device=args.device) + net = Network().to(device=args.device) + +This can be used in a number of cases to produce device agnostic code. Below +is an example when using a dataloader: + +:: + + cuda0 = torch.device('cuda:0') # CUDA GPU 0 + for i, x in enumerate(train_loader): + x = x.to(cuda0) + +When working with multiple GPUs on a system, you can use the +``CUDA_VISIBLE_DEVICES`` environment flag to manage which GPUs are available to +PyTorch. As mentioned above, to manually control which GPU a tensor is created +on, the best practice is to use a :any:`torch.cuda.device` context manager. + +:: + + print("Outside device is 0") # On device 0 (default in most scenarios) + with torch.cuda.device(1): + print("Inside device is 1") # On device 1 + print("Outside device is still 0") # On device 0 + +If you have a tensor and would like to create a new tensor of the same type on +the same device, then you can use a ``torch.Tensor.new_*`` method +(see :class:`torch.Tensor`). +Whilst the previously mentioned ``torch.*`` factory functions +(:ref:`tensor-creation-ops`) depend on the current GPU context and +the attributes arguments you pass in, ``torch.Tensor.new_*`` methods preserve +the device and other attributes of the tensor. + +This is the recommended practice when creating modules in which new +tensors need to be created internally during the forward pass. 
+
+::
+
+    cuda = torch.device('cuda')
+    x_cpu = torch.empty(2)
+    x_gpu = torch.empty(2, device=cuda)
+    x_cpu_long = torch.empty(2, dtype=torch.int64)
+
+    y_cpu = x_cpu.new_full([3, 2], fill_value=0.3)
+    print(y_cpu)
+
+    tensor([[ 0.3000,  0.3000],
+            [ 0.3000,  0.3000],
+            [ 0.3000,  0.3000]])
+
+    y_gpu = x_gpu.new_full([3, 2], fill_value=-5)
+    print(y_gpu)
+
+    tensor([[-5.0000, -5.0000],
+            [-5.0000, -5.0000],
+            [-5.0000, -5.0000]], device='cuda:0')
+
+    y_cpu_long = x_cpu_long.new_tensor([[1, 2, 3]])
+    print(y_cpu_long)
+
+    tensor([[ 1,  2,  3]])
+
+
+If you want to create a tensor of the same type and size as another tensor, and
+fill it with either ones or zeros, :meth:`~torch.ones_like` or
+:meth:`~torch.zeros_like` are provided as convenient helper functions (which
+also preserve the :class:`torch.device` and :class:`torch.dtype` of a Tensor).
+
+::
+
+    x_cpu = torch.empty(2, 3)
+    x_gpu = torch.empty(2, 3)
+
+    y_cpu = torch.ones_like(x_cpu)
+    y_gpu = torch.zeros_like(x_gpu)
+
+.. _cuda-memory-pinning:
+
+Use pinned memory buffers
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. warning::
+
+    This is an advanced tip. Overuse of pinned memory can cause serious
+    problems when you are running low on RAM, and you should be aware that
+    pinning is often an expensive operation.
+
+Host to GPU copies are much faster when they originate from pinned (page-locked)
+memory. CPU tensors and storages expose a :meth:`~torch.Tensor.pin_memory`
+method that returns a copy of the object with its data put in a pinned region.
+
+Also, once you pin a tensor or storage, you can use asynchronous GPU copies.
+Just pass an additional ``non_blocking=True`` argument to a
+:meth:`~torch.Tensor.to` or a :meth:`~torch.Tensor.cuda` call. This can be used
+to overlap data transfers with computation.
+
+You can make the :class:`~torch.utils.data.DataLoader` return batches placed in
+pinned memory by passing ``pin_memory=True`` to its constructor.
+
+.. _cuda-nn-dataparallel-instead:
+
+Use nn.DataParallel instead of multiprocessing
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Most use cases involving batched inputs and multiple GPUs should default to
+using :class:`~torch.nn.DataParallel` to utilize more than one GPU. Even with
+the GIL, a single Python process can saturate multiple GPUs.
+
+As of version 0.1.9, large numbers of GPUs (8+) might not be fully utilized.
+This is a known issue that is under active development. As always,
+test your use case.
+
+There are significant caveats to using CUDA models with
+:mod:`~torch.multiprocessing`; unless care is taken to meet the data handling
+requirements exactly, it is likely that your program will have incorrect or
+undefined behavior.
diff --git a/docs/stable/_sources/notes/extending.rst.txt b/docs/stable/_sources/notes/extending.rst.txt
new file mode 100644
index 000000000000..78c5582c2bdc
--- /dev/null
+++ b/docs/stable/_sources/notes/extending.rst.txt
@@ -0,0 +1,209 @@
+Extending PyTorch
+=================
+
+In this note we'll cover ways of extending :mod:`torch.nn`,
+:mod:`torch.autograd`, and writing custom C extensions utilizing our C
+libraries.
+
+Extending :mod:`torch.autograd`
+-------------------------------
+
+.. currentmodule:: torch.autograd
+
+Adding operations to :mod:`~torch.autograd` requires implementing a new
+:class:`Function` subclass for each operation. Recall that :class:`Function` s
+are what :mod:`~torch.autograd` uses to compute the results and gradients, and
+encode the operation history.
Every new function requires you to implement 2 +methods: + +- :meth:`~Function.forward` - the code that performs the operation. It can take + as many arguments as you want, with some of them being optional, if you + specify the default values. All kinds of Python objects are accepted here. + :class:`Tensor` arguments that track history (i.e., with + ``requires_grad=True``) will be converted to ones that don't track history + before the call, and their use will be registered in the graph. Note that this + logic won't traverse lists/dicts/any other data structures and will only + consider :class:`Tensor` s that are direct arguments to the call. You can + return either a single :class:`Tensor` output, or a :class:`tuple` of + :class:`Tensor` s if there are multiple outputs. Also, please refer to the + docs of :class:`Function` to find descriptions of useful methods that can be + called only from :meth:`~Function.forward`. +- :meth:`~Function.backward` - gradient formula. It will be given + as many :class:`Tensor` arguments as there were outputs, with each of them + representing gradient w.r.t. that output. It should return as many + :class:`Tensor` s as there were inputs, with each of them containing the + gradient w.r.t. its corresponding input. If your inputs didn't require + gradient (:attr:`~ctx.needs_input_grad` is a tuple of booleans indicating + whether each input needs gradient computation), or were non-:class:`Tensor` + objects, you can return :class:`python:None`. Also, if you have optional + arguments to :meth:`~Function.forward` you can return more gradients than there + were inputs, as long as they're all :any:`python:None`. + +Below you can find code for a ``Linear`` function from :mod:`torch.nn`, with +additional comments:: + + # Inherit from Function + class LinearFunction(Function): + + # Note that both forward and backward are @staticmethods + @staticmethod + # bias is an optional argument + def forward(ctx, input, weight, bias=None): + ctx.save_for_backward(input, weight, bias) + output = input.mm(weight.t()) + if bias is not None: + output += bias.unsqueeze(0).expand_as(output) + return output + + # This function has only a single output, so it gets only one gradient + @staticmethod + def backward(ctx, grad_output): + # This is a pattern that is very convenient - at the top of backward + # unpack saved_tensors and initialize all gradients w.r.t. inputs to + # None. Thanks to the fact that additional trailing Nones are + # ignored, the return statement is simple even when the function has + # optional inputs. + input, weight, bias = ctx.saved_tensors + grad_input = grad_weight = grad_bias = None + + # These needs_input_grad checks are optional and there only to + # improve efficiency. If you want to make your code simpler, you can + # skip them. Returning gradients for inputs that don't require it is + # not an error. 
+        if ctx.needs_input_grad[0]:
+            grad_input = grad_output.mm(weight)
+        if ctx.needs_input_grad[1]:
+            grad_weight = grad_output.t().mm(input)
+        if bias is not None and ctx.needs_input_grad[2]:
+            grad_bias = grad_output.sum(0).squeeze(0)
+
+        return grad_input, grad_weight, grad_bias
+
+Now, to make it easier to use these custom ops, we recommend aliasing their
+``apply`` method::
+
+    linear = LinearFunction.apply
+
+Here, we give an additional example of a function that is parametrized by
+non-Tensor arguments::
+
+    class MulConstant(Function):
+        @staticmethod
+        def forward(ctx, tensor, constant):
+            # ctx is a context object that can be used to stash information
+            # for backward computation
+            ctx.constant = constant
+            return tensor * constant
+
+        @staticmethod
+        def backward(ctx, grad_output):
+            # We return as many input gradients as there were arguments.
+            # Gradients of non-Tensor arguments to forward must be None.
+            return grad_output * ctx.constant, None
+
+.. note::
+    Inputs to ``backward``, i.e., :attr:`grad_output`, can also be Tensors that
+    track history. So if ``backward`` is implemented with differentiable
+    operations (e.g., by invoking another custom
+    :class:`~torch.autograd.Function`), higher-order derivatives will work.
+
+You probably want to check whether the backward method you implemented actually
+computes the derivatives of your function. You can do so by comparing against
+numerical approximations using small finite differences::
+
+    from torch.autograd import gradcheck
+
+    # gradcheck takes a tuple of tensors as input, checks if your gradient
+    # evaluated with these tensors is close enough to the numerical
+    # approximations, and returns True if they all verify this condition.
+    input = (torch.randn(20,20,dtype=torch.double,requires_grad=True), torch.randn(30,20,dtype=torch.double,requires_grad=True))
+    test = gradcheck(linear, input, eps=1e-6, atol=1e-4)
+    print(test)
+
+See :ref:`grad-check` for more details on finite-difference gradient comparisons.
+
+Extending :mod:`torch.nn`
+-------------------------
+
+.. currentmodule:: torch.nn
+
+:mod:`~torch.nn` exports two kinds of interfaces - modules and their functional
+versions. You can extend it in both ways, but we recommend using modules for
+all kinds of layers that hold any parameters or buffers, and using the
+functional form for parameter-less operations like activation functions, pooling,
+etc.
+
+Adding a functional version of an operation is already fully covered in the
+section above.
+
+Adding a :class:`Module`
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Since :mod:`~torch.nn` heavily utilizes :mod:`~torch.autograd`, adding a new
+:class:`Module` requires implementing a :class:`~torch.autograd.Function`
+that performs the operation and can compute the gradient. From now on let's
+assume that we want to implement a ``Linear`` module and we have the function
+implemented as in the listing above. There's very little code required to
+add this. Now, there are two functions that need to be implemented:
+
+- ``__init__`` (*optional*) - takes in arguments such as kernel sizes, numbers
+  of features, etc. and initializes parameters and buffers.
+- :meth:`~Module.forward` - instantiates a :class:`~torch.autograd.Function` and
+  uses it to perform the operation. It's very similar to the functional wrapper
+  shown above.
+
+This is how a ``Linear`` module can be implemented::
+
+    class Linear(nn.Module):
+        def __init__(self, input_features, output_features, bias=True):
+            super(Linear, self).__init__()
+            self.input_features = input_features
+            self.output_features = output_features
+
+            # nn.Parameter is a special kind of Tensor, that will get
+            # automatically registered as Module's parameter once it's assigned
+            # as an attribute. Parameters and buffers need to be registered, or
+            # they won't appear in .parameters() (doesn't apply to buffers), and
+            # won't be converted when e.g. .cuda() is called. You can use
+            # .register_buffer() to register buffers.
+            # nn.Parameters require gradients by default.
+            self.weight = nn.Parameter(torch.Tensor(output_features, input_features))
+            if bias:
+                self.bias = nn.Parameter(torch.Tensor(output_features))
+            else:
+                # You should always register all possible parameters, but the
+                # optional ones can be None if you want.
+                self.register_parameter('bias', None)
+
+            # Not a very smart way to initialize weights
+            self.weight.data.uniform_(-0.1, 0.1)
+            if self.bias is not None:
+                self.bias.data.uniform_(-0.1, 0.1)
+
+        def forward(self, input):
+            # See the autograd section for explanation of what happens here.
+            return LinearFunction.apply(input, self.weight, self.bias)
+
+        def extra_repr(self):
+            # (Optional) Set the extra information about this module. You can test
+            # it by printing an object of this class.
+            return 'input_features={}, output_features={}, bias={}'.format(
+                self.input_features, self.output_features, self.bias is not None
+            )
+
+
+Writing custom C++ extensions
+-----------------------------
+
+See this
+`PyTorch tutorial `_
+for a detailed explanation and examples.
+
+Documentation is available at :doc:`../cpp_extension`.
+
+
+Writing custom C extensions
+---------------------------
+
+An example is available at
+`this GitHub repository `_.
diff --git a/docs/stable/_sources/notes/faq.rst.txt b/docs/stable/_sources/notes/faq.rst.txt
new file mode 100644
index 000000000000..11c6a6d5c584
--- /dev/null
+++ b/docs/stable/_sources/notes/faq.rst.txt
@@ -0,0 +1,150 @@
+Frequently Asked Questions
+==========================
+
+My model reports "cuda runtime error(2): out of memory"
+-------------------------------------------------------
+
+As the error message suggests, you have run out of memory on your
+GPU. Since we often deal with large amounts of data in PyTorch,
+small mistakes can rapidly cause your program to use up all of your
+GPU memory; fortunately, the fixes in these cases are often simple.
+Here are a few common things to check:
+
+**Don't accumulate history across your training loop.**
+By default, computations involving variables that require gradients
+will keep history. This means that you should avoid using such
+variables in computations which will live beyond your training loops,
+e.g., when tracking statistics. Instead, you should detach the variable
+or access its underlying data.
+
+Sometimes it can be non-obvious where differentiable variables occur.
+Consider the following training loop (abridged from `source
+`_):
+
+.. code-block:: python
+
+    total_loss = 0
+    for i in range(10000):
+        optimizer.zero_grad()
+        output = model(input)
+        loss = criterion(output)
+        loss.backward()
+        optimizer.step()
+        total_loss += loss
+
+Here, ``total_loss`` is accumulating history across your training loop, since
+``loss`` is a differentiable variable with autograd history. You can fix this by
+writing ``total_loss += float(loss)`` instead; a corrected version of the loop
+is sketched below.
+
+Other instances of this problem:
+`1 `_.
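+
+Here is what that fix looks like in context. This is a minimal sketch;
+``model``, ``criterion``, ``optimizer`` and ``input`` are assumed to be defined
+as in the loop above:
+
+.. code-block:: python
+
+    total_loss = 0.0
+    for i in range(10000):
+        optimizer.zero_grad()
+        output = model(input)
+        loss = criterion(output)
+        loss.backward()
+        optimizer.step()
+        # float(loss) (or loss.item()) detaches the value from the graph,
+        # so the graphs from earlier iterations can be freed.
+        total_loss += float(loss)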
+
+**Don't hold onto tensors and variables you don't need.**
+If you assign a Tensor or Variable to a local, Python will not
+deallocate it until the local goes out of scope. You can free
+this reference by using ``del x``. Similarly, if you assign
+a Tensor or Variable to a member variable of an object, it will
+not be deallocated until the object goes out of scope. You will
+get the best memory usage if you don't hold onto temporaries
+you don't need.
+
+The scopes of locals can be larger than you expect. For example:
+
+.. code-block:: python
+
+    for i in range(5):
+        intermediate = f(input[i])
+        result += g(intermediate)
+    output = h(result)
+    return output
+
+Here, ``intermediate`` remains live even while ``h`` is executing,
+because its scope extends past the end of the loop. To free it
+earlier, you should ``del intermediate`` when you are done with it.
+
+**Don't run RNNs on sequences that are too large.**
+The amount of memory required to backpropagate through an RNN scales
+linearly with the length of the RNN input; thus, you will run out of memory
+if you try to feed an RNN a sequence that is too long.
+
+The technical term for this phenomenon is `backpropagation through time
+`_,
+and there are plenty of references for how to implement truncated
+BPTT, including in the `word language model `_ example; truncation is handled by the
+``repackage`` function as described in
+`this forum post `_.
+
+**Don't use linear layers that are too large.**
+A linear layer ``nn.Linear(m, n)`` uses :math:`O(nm)` memory: that is to say,
+the memory requirements of the weights
+scale quadratically with the number of features. It is very easy
+to `blow through your memory `_
+this way (and remember that you will need at least twice the size of the
+weights, since you also need to store the gradients.)
+
+My GPU memory isn't freed properly
+-------------------------------------------------------
+PyTorch uses a caching memory allocator to speed up memory allocations. As a
+result, the values shown in ``nvidia-smi`` usually don't reflect the true
+memory usage. See :ref:`cuda-memory-management` for more details about GPU
+memory management.
+
+If your GPU memory isn't freed even after Python quits, it is very likely that
+some Python subprocesses are still alive. You may find them via
+``ps -elf | grep python`` and manually kill them with ``kill -9 [pid]``.
+
+.. _dataloader-workers-random-seed:
+
+My data loader workers return identical random numbers
+-------------------------------------------------------
+You are likely using other libraries to generate random numbers in the dataset.
+For example, NumPy's RNG is duplicated when worker subprocesses are started via
+``fork``. See :class:`torch.utils.data.DataLoader`'s documentation for how to
+properly set up random seeds in workers with its :attr:`worker_init_fn` option.
+
+.. _pack-rnn-unpack-with-data-parallelism:
+
+My recurrent network doesn't work with data parallelism
+-------------------------------------------------------
+There is a subtlety in using the
+``pack sequence -> recurrent network -> unpack sequence`` pattern in a
+:class:`~torch.nn.Module` with :class:`~torch.nn.DataParallel` or
+:func:`~torch.nn.parallel.data_parallel`. The input to :meth:`forward` on
+each device will only be part of the entire input. Because the unpack operation
+:func:`torch.nn.utils.rnn.pad_packed_sequence` by default only pads up to the
+longest input it sees, i.e., the longest on that particular device, size
+mismatches will happen when results are gathered together. Therefore, you can
+instead take advantage of the :attr:`total_length` argument of
+:func:`~torch.nn.utils.rnn.pad_packed_sequence` to make sure that the
+:meth:`forward` calls return sequences of the same length. For example, you can
+write::
+
+    from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
+
+    class MyModule(nn.Module):
+        # ... __init__, other methods, etc.
+
+        # padded_input is of shape [B x T x *] (batch_first mode) and contains
+        # the sequences sorted by lengths
+        # B is the batch size
+        # T is max sequence length
+        def forward(self, padded_input, input_lengths):
+            total_length = padded_input.size(1)  # get the max sequence length
+            packed_input = pack_padded_sequence(padded_input, input_lengths,
+                                                batch_first=True)
+            packed_output, _ = self.my_lstm(packed_input)
+            output, _ = pad_packed_sequence(packed_output, batch_first=True,
+                                            total_length=total_length)
+            return output
+
+
+    m = MyModule().cuda()
+    dp_m = nn.DataParallel(m)
+
+
+Additionally, extra care needs to be taken when the batch dimension is dim ``1``
+(i.e., ``batch_first=False``) with data parallelism. In this case, the first
+argument of ``pack_padded_sequence``, ``padded_input``, will be of shape
+``[T x B x *]`` and should be scattered along dim ``1``, but the second argument
+``input_lengths`` will be of shape ``[B]`` and should be scattered along dim
+``0``. Extra code to manipulate the tensor shapes will be needed.
diff --git a/docs/stable/_sources/notes/large_scale_deployments.rst.txt b/docs/stable/_sources/notes/large_scale_deployments.rst.txt
new file mode 100644
index 000000000000..b06ed62290c0
--- /dev/null
+++ b/docs/stable/_sources/notes/large_scale_deployments.rst.txt
@@ -0,0 +1,136 @@
+Features for large-scale deployments
+====================================
+
+.. contents:: :local:
+
+This note talks about several extension points and tricks that might be useful
+when running PyTorch within a larger system or operating multiple systems using
+PyTorch in a larger organization.
+
+It doesn't cover deploying models to production. For that, check
+:mod:`torch.jit` or one of the corresponding tutorials.
+
+The note assumes that you either build PyTorch from source in your
+organization or have the ability to statically link additional code to be loaded
+when PyTorch is used. Therefore, many of the hooks are exposed as C++ APIs that
+can be triggered once in a centralized place, e.g. in static initialization
+code.
+
+Fleet-wide operator profiling
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+PyTorch comes with :mod:`torch.autograd.profiler`, which is capable of measuring the
+time taken by individual operators on demand. One can use the same mechanism to do
+"always ON" measurements for any process running PyTorch. It might be useful for
+gathering information about PyTorch workloads running in a given process or
+across the entire set of machines.
+
+New callbacks for any operator invocation can be added with
+``torch::autograd::profiler::pushCallback``. Hooks will be called with a
+``torch::autograd::profiler::RecordFunction`` struct that describes the invocation
+context (e.g. `name`). If enabled, ``RecordFunction::inputs()`` contains the arguments
+of the function represented as ``torch::IValue`` variant types. Note that input
+logging is relatively expensive and thus has to be enabled explicitly.
+
+Invoking callbacks adds some overhead, so it's usually useful to just randomly
+sample operator invocations. This can be enabled on a per-callback basis with a
+global sampling rate specified by
+``torch::autograd::profiler::setSamplingProbability``.
+
+Note that ``pushCallback`` and ``setSamplingProbability`` are not thread-safe
+and can be called only when no PyTorch operator is running. Usually, it's a good
+idea to call them once during initialization.
+
+Here's an example:
+
+.. code-block:: cpp
+
+    // Called somewhere in the program beginning
+    void init() {
+        // Sample one in a hundred operator runs randomly
+        torch::autograd::profiler::setSamplingProbability(0.01);
+        pushCallback(
+            &onFunctionEnter,
+            &onFunctionExit,
+            /* needs_inputs */ true,
+            /* sampled */ true
+        );
+    }
+
+    void onFunctionEnter(const RecordFunction& fn) {
+        std::cerr << "Before function " << fn.name()
+                  << " with " << fn.inputs().size() << " inputs" << std::endl;
+    }
+
+    void onFunctionExit(const RecordFunction& fn) {
+        std::cerr << "After function " << fn.name();
+    }
+
+API usage logging
+^^^^^^^^^^^^^^^^^
+
+When running in a broader ecosystem, for example in a managed job scheduler, it's
+often useful to track which binaries invoke particular PyTorch APIs. Simple
+instrumentation is injected at several important API points and triggers a given
+callback. Because PyTorch is usually invoked in one-off python scripts, the
+callback fires only once per process for each of the APIs.
+
+``c10::SetAPIUsageLogger`` can be used to register an API usage instrumentation
+handler. The argument passed to the handler is an "api key" identifying the used
+point, for example ``python.import`` for PyTorch extension import or
+``torch.script.compile`` if TorchScript compilation was triggered.
+
+.. code-block:: cpp
+
+    SetAPIUsageLogger([](const std::string& event_name) {
+        std::cerr << "API was used: " << event_name << std::endl;
+    });
+
+Note for developers: new API trigger points can be added in code with
+``C10_LOG_API_USAGE_ONCE("my_api")`` in C++ or
+``torch._C._log_api_usage_once("my.api")`` in Python.
+
+Attaching metadata to saved TorchScript models
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+TorchScript modules can be saved as an archive file that bundles serialized
+parameters and module code as TorchScript (see :meth:`torch.jit.save`). It's
+often convenient to bundle additional information together with the model, for
+example, a description of the model producer or auxiliary artifacts.
+
+This can be achieved by passing the ``_extra_files`` argument to
+:meth:`torch.jit.save` and ``torch::jit::load`` to store and retrieve
+arbitrary binary blobs during the saving process. Since TorchScript files are
+regular ZIP archives, extra information gets stored as regular files inside the
+archive's ``extra/`` directory.
+
+There's also a global hook allowing you to attach extra files to any TorchScript
+archive produced in the current process. It might be useful to tag models with
+producer metadata, akin to JPEG metadata produced by digital cameras. Example
+usage might look like:
+
+.. code-block:: cpp
+
+    SetExportModuleExtraFilesHook([](const script::Module&) {
+        script::ExtraFilesMap files;
+        files["producer_info.json"] = std::string("{\"user\": \"") + getenv("USER") + "\"}";
+        return files;
+    });
+
+
+Build environment considerations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+TorchScript's compilation needs to have access to the original python files as
+it uses python's ``inspect.getsource`` call. In certain production environments
+it might require explicitly deploying ``.py`` files along with precompiled
+``.pyc`` files.
+
+Common extension points
+^^^^^^^^^^^^^^^^^^^^^^^
+
+PyTorch APIs are generally loosely coupled, and it's easy to replace a component
+with a specialized version. Common extension points include:
+
+* Custom operators implemented in C++ - see `tutorial for more details `_.
+* Custom data reading can often be integrated directly by invoking the corresponding python library. Existing functionality of :mod:`torch.utils.data` can be utilized by extending :class:`~torch.utils.data.Dataset` or :class:`~torch.utils.data.IterableDataset`.
\ No newline at end of file
diff --git a/docs/stable/_sources/notes/multiprocessing.rst.txt b/docs/stable/_sources/notes/multiprocessing.rst.txt
new file mode 100644
index 000000000000..0f4721da0848
--- /dev/null
+++ b/docs/stable/_sources/notes/multiprocessing.rst.txt
@@ -0,0 +1,136 @@
+.. _multiprocessing-best-practices:
+
+Multiprocessing best practices
+==============================
+
+:mod:`torch.multiprocessing` is a drop-in replacement for Python's
+:mod:`python:multiprocessing` module. It supports the exact same operations,
+but extends it so that all tensors sent through a
+:class:`python:multiprocessing.Queue` will have their data moved into shared
+memory, and only a handle to that memory is sent to the other process.
+
+.. note::
+
+    When a :class:`~torch.Tensor` is sent to another process, the
+    :class:`~torch.Tensor` data is shared. If :attr:`torch.Tensor.grad` is
+    not ``None``, it is also shared. After a :class:`~torch.Tensor` without
+    a :attr:`torch.Tensor.grad` field is sent to the other process, it
+    creates a standard process-specific ``.grad`` :class:`~torch.Tensor` that
+    is not automatically shared across all processes, unlike how the
+    :class:`~torch.Tensor`'s data has been shared.
+
+This makes it possible to implement various training methods, like Hogwild, A3C, or any
+others that require asynchronous operation.
+
+.. _multiprocessing-cuda-note:
+
+CUDA in multiprocessing
+-----------------------
+
+The CUDA runtime does not support the ``fork`` start method. However,
+:mod:`python:multiprocessing` in Python 2 can only create subprocesses using
+``fork``. So Python 3 and either the ``spawn`` or ``forkserver`` start method are
+required to use CUDA in subprocesses.
+
+.. note::
+    The start method can be set via either creating a context with
+    ``multiprocessing.get_context(...)`` or directly using
+    ``multiprocessing.set_start_method(...)``.
+
+Unlike CPU tensors, the sending process is required to keep the original tensor
+as long as the receiving process retains a copy of the tensor. This is implemented
+under the hood but requires users to follow the best practices for the program
+to run correctly. For example, the sending process must stay alive as long as
+the consumer process has references to the tensor, and the refcounting can not
+save you if the consumer process exits abnormally via a fatal signal. See
+:ref:`this section `.
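+
+Putting this together, a minimal sketch of sending a CUDA tensor to a worker
+process with the ``spawn`` start method might look as follows (the worker and
+queue usage here are illustrative only)::
+
+    import torch
+    import torch.multiprocessing as mp
+
+    def worker(queue):
+        tensor = queue.get()  # receives a handle to the shared CUDA tensor
+        print(tensor.sum().item())
+
+    if __name__ == '__main__':
+        ctx = mp.get_context('spawn')  # CUDA requires spawn or forkserver
+        queue = ctx.Queue()
+        p = ctx.Process(target=worker, args=(queue,))
+        p.start()
+        t = torch.zeros(4, device='cuda')
+        queue.put(t)
+        # The sender must keep ``t`` alive until the worker is done with it.
+        p.join()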
+
+See also: :ref:`cuda-nn-dataparallel-instead`
+
+
+Best practices and tips
+-----------------------
+
+Avoiding and fighting deadlocks
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+There are a lot of things that can go wrong when a new process is spawned, with
+the most common cause of deadlocks being background threads. If there's any
+thread that holds a lock or imports a module, and ``fork`` is called, it's very
+likely that the subprocess will be in a corrupted state and will deadlock or
+fail in a different way. Note that even if you don't do this, Python's built-in
+libraries do - no need to look further than :mod:`python:multiprocessing`.
+:class:`python:multiprocessing.Queue` is actually a very complex class that
+spawns multiple threads used to serialize, send and receive objects, and they
+can cause the aforementioned problems too. If you find yourself in such a
+situation, try using a :class:`~python:multiprocessing.queues.SimpleQueue`,
+which doesn't use any additional threads.
+
+We're trying our best to make it easy for you and ensure these deadlocks don't
+happen, but some things are out of our control. If you have any issues you can't
+cope with for a while, try reaching out on the forums, and we'll see if it's an
+issue we can fix.
+
+Reuse buffers passed through a Queue
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Remember that each time you put a :class:`~torch.Tensor` into a
+:class:`python:multiprocessing.Queue`, it has to be moved into shared memory.
+If it's already shared, this is a no-op; otherwise it will incur an additional
+memory copy that can slow down the whole process. Even if you have a pool of
+processes sending data to a single one, make it send the buffers back - this
+is nearly free and will let you avoid a copy when sending the next batch.
+
+Asynchronous multiprocess training (e.g. Hogwild)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Using :mod:`torch.multiprocessing`, it is possible to train a model
+asynchronously, with parameters either shared all the time, or being
+periodically synchronized. In the first case, we recommend sending over the whole
+model object, while in the latter, we advise sending only the
+:meth:`~torch.nn.Module.state_dict`.
+
+We recommend using :class:`python:multiprocessing.Queue` for passing all kinds
+of PyTorch objects between processes. It is possible to e.g. inherit the tensors
+and storages already in shared memory, when using the ``fork`` start method;
+however, it is very bug prone and should be used with care, and only by advanced
+users. Queues, even though they're sometimes a less elegant solution, will work
+properly in all cases.
+
+.. warning::
+
+    You should be careful about having global statements that are not guarded
+    with an ``if __name__ == '__main__'``. If a different start method than
+    ``fork`` is used, they will be executed in all subprocesses.
+
+Hogwild
+~~~~~~~
+
+A concrete Hogwild implementation can be found in the `examples repository`__,
+but to showcase the overall structure of the code, there's also a minimal
+example below::
+
+    import torch.multiprocessing as mp
+    from model import MyModel
+
+    def train(model):
+        # Construct data_loader, optimizer, etc.
+        for data, labels in data_loader:
+            optimizer.zero_grad()
+            loss_fn(model(data), labels).backward()
+            optimizer.step()  # This will update the shared parameters
+
+    if __name__ == '__main__':
+        num_processes = 4
+        model = MyModel()
+        # NOTE: this is required for the ``fork`` method to work
+        model.share_memory()
+        processes = []
+        for rank in range(num_processes):
+            p = mp.Process(target=train, args=(model,))
+            p.start()
+            processes.append(p)
+        for p in processes:
+            p.join()
+
+.. __: https://github.com/pytorch/examples/tree/master/mnist_hogwild
diff --git a/docs/stable/_sources/notes/randomness.rst.txt b/docs/stable/_sources/notes/randomness.rst.txt
new file mode 100644
index 000000000000..0648c4a2f614
--- /dev/null
+++ b/docs/stable/_sources/notes/randomness.rst.txt
@@ -0,0 +1,56 @@
+Reproducibility
+===============
+
+Completely reproducible results are not guaranteed across PyTorch releases,
+individual commits or different platforms. Furthermore, results need not be
+reproducible between CPU and GPU executions, even when using identical seeds.
+
+However, in order to make computations deterministic on your specific problem on
+one specific platform and PyTorch release, there are a couple of steps to take.
+
+There are two pseudorandom number generators involved in PyTorch, which you will
+need to seed manually to make runs reproducible. Furthermore, you should ensure
+that all other libraries your code relies on and which use random numbers also
+use a fixed seed.
+
+PyTorch
+.......
+You can use :meth:`torch.manual_seed()` to seed the RNG for all devices (both
+CPU and CUDA)::
+
+    import torch
+    torch.manual_seed(0)
+
+
+There are some PyTorch functions that use CUDA functions that can be a source
+of non-determinism. One class of such CUDA functions is atomic operations,
+in particular :attr:`atomicAdd`, where the order of parallel additions to the
+same value is undetermined and, for floating-point variables, a source of
+variance in the result. PyTorch functions that use :attr:`atomicAdd` in the forward
+pass include :meth:`torch.Tensor.index_add_`, :meth:`torch.Tensor.scatter_add_`, and
+:meth:`torch.bincount`.
+
+A number of operations have backward passes that use :attr:`atomicAdd`, in particular
+:meth:`torch.nn.functional.embedding_bag`,
+:meth:`torch.nn.functional.ctc_loss` and many forms of pooling, padding, and sampling.
+There is currently no simple way of avoiding non-determinism in these functions.
+
+
+CuDNN
+.....
+When running on the CuDNN backend, two further options must be set::
+
+    torch.backends.cudnn.deterministic = True
+    torch.backends.cudnn.benchmark = False
+
+.. warning::
+
+    Deterministic mode can have a performance impact, depending on your model:
+    the processing speed (i.e. processed batch items per second) can be lower
+    than when the model is non-deterministic.
+
+Numpy
+.....
+If you or any of the libraries you are using rely on Numpy, you should seed the
+Numpy RNG as well. This can be done with::
+
+    import numpy as np
+    np.random.seed(0)
diff --git a/docs/stable/_sources/notes/serialization.rst.txt b/docs/stable/_sources/notes/serialization.rst.txt
new file mode 100644
index 000000000000..46800314cf83
--- /dev/null
+++ b/docs/stable/_sources/notes/serialization.rst.txt
@@ -0,0 +1,34 @@
+
+Serialization semantics
+=======================
+
+Best practices
+--------------
+
+.. _recommend-saving-models:
+
+Recommended approach for saving a model
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+There are two main approaches for serializing and restoring a model.
+
+The first (recommended) saves and loads only the model parameters::
+
+    torch.save(the_model.state_dict(), PATH)
+
+Then later::
+
+    the_model = TheModelClass(*args, **kwargs)
+    the_model.load_state_dict(torch.load(PATH))
+
+The second saves and loads the entire model::
+
+    torch.save(the_model, PATH)
+
+Then later::
+
+    the_model = torch.load(PATH)
+
+However, in this case, the serialized data is bound to the specific classes
+and the exact directory structure used, so it can break in various ways when
+used in other projects, or after some serious refactors.
diff --git a/docs/stable/_sources/notes/windows.rst.txt b/docs/stable/_sources/notes/windows.rst.txt
new file mode 100644
index 000000000000..cfda0445a145
--- /dev/null
+++ b/docs/stable/_sources/notes/windows.rst.txt
@@ -0,0 +1,290 @@
+Windows FAQ
+==========================
+
+Building from source
+--------------------
+
+Include optional components
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+There are two supported components for Windows PyTorch:
+MKL and MAGMA. Here are the steps to build with them.
+
+.. code-block:: bat
+
+    REM Make sure you have 7z and curl installed.
+
+    REM Download MKL files
+    curl https://s3.amazonaws.com/ossci-windows/mkl_2018.2.185.7z -k -O
+    7z x -aoa mkl_2018.2.185.7z -omkl
+
+    REM Download MAGMA files
+    REM cuda100/cuda101 is also available for `CUDA_PREFIX`. There are also 2.4.0 binaries for cuda80/cuda92.
+    REM The configuration could be `debug` or `release` for 2.5.0. Only `release` is available for 2.4.0.
+    set CUDA_PREFIX=cuda90
+    set CONFIG=release
+    curl -k https://s3.amazonaws.com/ossci-windows/magma_2.5.0_%CUDA_PREFIX%_%CONFIG%.7z -o magma.7z
+    7z x -aoa magma.7z -omagma
+
+    REM Setting essential environment variables
+    set "CMAKE_INCLUDE_PATH=%cd%\\mkl\\include"
+    set "LIB=%cd%\\mkl\\lib;%LIB%"
+    set "MAGMA_HOME=%cd%\\magma"
+
+Speeding CUDA build for Windows
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Visual Studio doesn't currently support parallel custom tasks.
+As an alternative, we can use ``Ninja`` to parallelize CUDA
+build tasks. It can be used by typing only a few lines of code.
+
+.. code-block:: bat
+
+    REM Let's install ninja first.
+    pip install ninja
+
+    REM Set it as the cmake generator
+    set CMAKE_GENERATOR=Ninja
+
+
+One key install script
+^^^^^^^^^^^^^^^^^^^^^^
+
+You can take a look at `this set of scripts
+`_.
+It will lead the way for you.
+
+Extension
+---------
+
+CFFI Extension
+^^^^^^^^^^^^^^
+
+The support for CFFI Extension is very experimental. There are
+generally two steps to enable it under Windows.
+
+First, specify additional ``libraries`` in the ``Extension``
+object to make it build on Windows.
+
+.. code-block:: python
+
+    ffi = create_extension(
+        '_ext.my_lib',
+        headers=headers,
+        sources=sources,
+        define_macros=defines,
+        relative_to=__file__,
+        with_cuda=with_cuda,
+        extra_compile_args=["-std=c99"],
+        libraries=['ATen', '_C'] # Append cuda libraries when necessary, like cudart
+    )
+
+Second, here is a workaround for the "unresolved external symbol
+``state``" error caused by ``extern THCState *state;``.
+
+Change the source code from C to C++. An example is listed below.
+
+.. code-block:: cpp
+
+    #include
+    #include
+
+    THCState *state = at::globalContext().thc_state;
+
+    extern "C" int my_lib_add_forward_cuda(THCudaTensor *input1, THCudaTensor *input2,
+                                           THCudaTensor *output)
+    {
+        if (!THCudaTensor_isSameSizeAs(state, input1, input2))
+            return 0;
+        THCudaTensor_resizeAs(state, output, input1);
+        THCudaTensor_cadd(state, output, input1, 1.0, input2);
+        return 1;
+    }
+
+    extern "C" int my_lib_add_backward_cuda(THCudaTensor *grad_output, THCudaTensor *grad_input)
+    {
+        THCudaTensor_resizeAs(state, grad_input, grad_output);
+        THCudaTensor_fill(state, grad_input, 1);
+        return 1;
+    }
+
+Cpp Extension
+^^^^^^^^^^^^^
+
+This type of extension has better support compared with
+the previous one. However, it still needs some manual
+configuration. First, you should open the
+**x86_x64 Cross Tools Command Prompt for VS 2017**.
+Then you can start the compiling process.
+
+Installation
+------------
+
+Package not found in win-32 channel.
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: bat
+
+    Solving environment: failed
+
+    PackagesNotFoundError: The following packages are not available from current channels:
+
+    - pytorch
+
+    Current channels:
+    - https://conda.anaconda.org/pytorch/win-32
+    - https://conda.anaconda.org/pytorch/noarch
+    - https://repo.continuum.io/pkgs/main/win-32
+    - https://repo.continuum.io/pkgs/main/noarch
+    - https://repo.continuum.io/pkgs/free/win-32
+    - https://repo.continuum.io/pkgs/free/noarch
+    - https://repo.continuum.io/pkgs/r/win-32
+    - https://repo.continuum.io/pkgs/r/noarch
+    - https://repo.continuum.io/pkgs/pro/win-32
+    - https://repo.continuum.io/pkgs/pro/noarch
+    - https://repo.continuum.io/pkgs/msys2/win-32
+    - https://repo.continuum.io/pkgs/msys2/noarch
+
+PyTorch doesn't work on 32-bit systems. Please use 64-bit versions of
+Windows and Python.
+
+Why are there no Python 2 packages for Windows?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Because it's not stable enough. There are some issues that need to
+be solved before we officially release it. You can build it by yourself.
+
+Import error
+^^^^^^^^^^^^
+
+.. code-block:: py3tb
+
+    from torch._C import *
+
+    ImportError: DLL load failed: The specified module could not be found.
+
+
+The problem is caused by missing essential files. Actually,
+we include almost all the essential files that PyTorch needs for the conda
+package except VC2017 redistributable and some mkl libraries.
+You can resolve this by typing the following command.
+
+.. code-block:: bat
+
+    conda install -c peterjc123 vc vs2017_runtime
+    conda install mkl_fft intel_openmp numpy mkl
+
+As for the wheels package, since we didn't pack some libraries and VS2017
+redistributable files in, please make sure you install them manually.
+The `VS 2017 redistributable installer
+`_ can be downloaded.
+And you should also pay attention to your installation of Numpy. Make sure it
+uses MKL instead of OpenBLAS. You may type in the following command.
+
+.. code-block:: bat
+
+    pip install numpy mkl intel-openmp mkl_fft
+
+Another possible cause may be that you are using the GPU version without NVIDIA
+graphics cards. Please replace your GPU package with the CPU one.
+
+.. code-block:: py3tb
+
+    from torch._C import *
+
+    ImportError: DLL load failed: The operating system cannot run %1.
+
+
+This is actually an upstream issue of Anaconda. When you initialize your
+environment with the conda-forge channel, this issue will emerge. You may fix
+the intel-openmp libraries through this command.
+
+.. code-block:: bat
+
+    conda install -c defaults intel-openmp -f
+
+
+Usage (multiprocessing)
+-------------------------------------------------------
+
+Multiprocessing error without if-clause protection
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: py3tb
+
+    RuntimeError:
+        An attempt has been made to start a new process before the
+        current process has finished its bootstrapping phase.
+
+        This probably means that you are not using fork to start your
+        child processes and you have forgotten to use the proper idiom
+        in the main module:
+
+            if __name__ == '__main__':
+                freeze_support()
+                ...
+
+        The "freeze_support()" line can be omitted if the program
+        is not going to be frozen to produce an executable.
+
+The implementation of ``multiprocessing`` is different on Windows, which
+uses ``spawn`` instead of ``fork``. So we have to wrap the code with an
+if-clause to protect the code from executing multiple times. Refactor
+your code into the following structure.
+
+.. code-block:: python
+
+    import torch
+
+    def main():
+        for i, data in enumerate(dataloader):
+            # do something here
+            pass
+
+    if __name__ == '__main__':
+        main()
+
+
+Multiprocessing error "Broken pipe"
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: py3tb
+
+    ForkingPickler(file, protocol).dump(obj)
+
+    BrokenPipeError: [Errno 32] Broken pipe
+
+This issue happens when the child process ends before the parent process
+finishes sending data. There may be something wrong with your code. You
+can debug your code by reducing the ``num_workers`` of
+:class:`~torch.utils.data.DataLoader` to zero and seeing if the issue persists.
+
+Multiprocessing error "driver shut down"
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: py3tb
+
+    Couldn’t open shared file mapping: , error code: <1455> at torch\lib\TH\THAllocator.c:154
+
+    [windows] driver shut down
+
+Please update your graphics driver. If this persists, it may be that your
+graphics card is too old or the calculation is too heavy for your card. Please
+update the TDR settings according to this `post
+`_.
+
+CUDA IPC operations
+^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: py3tb
+
+    THCudaCheck FAIL file=torch\csrc\generic\StorageSharing.cpp line=252 error=63 : OS call failed or operation not supported on this OS
+
+They are not supported on Windows. Doing something like multiprocessing on CUDA
+tensors cannot succeed; there are two alternatives.
+
+1. Don't use ``multiprocessing``. Set the ``num_workers`` of
+:class:`~torch.utils.data.DataLoader` to zero.
+
+2. Share CPU tensors instead. Make sure your custom
+:class:`~torch.utils.data.DataSet` returns CPU tensors.
+
diff --git a/docs/stable/_sources/onnx.rst.txt b/docs/stable/_sources/onnx.rst.txt
new file mode 100644
index 000000000000..490c75b0c8ae
--- /dev/null
+++ b/docs/stable/_sources/onnx.rst.txt
@@ -0,0 +1,695 @@
+torch.onnx
+============
+
+.. contents:: :local:
+
+.. automodule:: torch.onnx
+
+Example: End-to-end AlexNet from PyTorch to ONNX
+------------------------------------------------
+
+Here is a simple script which exports a pretrained AlexNet as defined in
+torchvision into ONNX. It runs a single round of inference and then
+saves the resulting traced model to ``alexnet.onnx``::
+
+    import torch
+    import torchvision
+
+    dummy_input = torch.randn(10, 3, 224, 224, device='cuda')
+    model = torchvision.models.alexnet(pretrained=True).cuda()
+
+    # Providing input and output names sets the display names for values
+    # within the model's graph.
diff --git a/docs/stable/_sources/onnx.rst.txt b/docs/stable/_sources/onnx.rst.txt
new file mode 100644
index 000000000000..490c75b0c8ae
--- /dev/null
+++ b/docs/stable/_sources/onnx.rst.txt
@@ -0,0 +1,695 @@
+torch.onnx
+============
+
+.. contents:: :local:
+
+.. automodule:: torch.onnx
+
+Example: End-to-end AlexNet from PyTorch to ONNX
+------------------------------------------------
+
+Here is a simple script which exports a pretrained AlexNet, as defined in
+torchvision, into ONNX. It runs a single round of inference and then
+saves the resulting traced model to ``alexnet.onnx``::
+
+    import torch
+    import torchvision
+
+    dummy_input = torch.randn(10, 3, 224, 224, device='cuda')
+    model = torchvision.models.alexnet(pretrained=True).cuda()
+
+    # Providing input and output names sets the display names for values
+    # within the model's graph. Setting these does not change the semantics
+    # of the graph; it is only for readability.
+    #
+    # The inputs to the network consist of the flat list of inputs (i.e.
+    # the values you would pass to the forward() method) followed by the
+    # flat list of parameters. You can partially specify names, i.e. provide
+    # a list here shorter than the number of inputs to the model, and we will
+    # only set that subset of names, starting from the beginning.
+    input_names = [ "actual_input_1" ] + [ "learned_%d" % i for i in range(16) ]
+    output_names = [ "output1" ]
+
+    torch.onnx.export(model, dummy_input, "alexnet.onnx", verbose=True, input_names=input_names, output_names=output_names)
+
+The resulting ``alexnet.onnx`` is a binary protobuf file which contains both
+the network structure and the parameters of the model you exported
+(in this case, AlexNet). The keyword argument ``verbose=True`` causes the
+exporter to print out a human-readable representation of the network::
+
+    # These are the inputs and parameters to the network, which have taken on
+    # the names we specified earlier.
+    graph(%actual_input_1 : Float(10, 3, 224, 224)
+          %learned_0 : Float(64, 3, 11, 11)
+          %learned_1 : Float(64)
+          %learned_2 : Float(192, 64, 5, 5)
+          %learned_3 : Float(192)
+          # ---- omitted for brevity ----
+          %learned_14 : Float(1000, 4096)
+          %learned_15 : Float(1000)) {
+      # Every statement consists of some output tensors (and their types),
+      # the operator to be run (with its attributes, e.g., kernels, strides,
+      # etc.), its input tensors (%actual_input_1, %learned_0, %learned_1)
+      %17 : Float(10, 64, 55, 55) = onnx::Conv[dilations=[1, 1], group=1, kernel_shape=[11, 11], pads=[2, 2, 2, 2], strides=[4, 4]](%actual_input_1, %learned_0, %learned_1), scope: AlexNet/Sequential[features]/Conv2d[0]
+      %18 : Float(10, 64, 55, 55) = onnx::Relu(%17), scope: AlexNet/Sequential[features]/ReLU[1]
+      %19 : Float(10, 64, 27, 27) = onnx::MaxPool[kernel_shape=[3, 3], pads=[0, 0, 0, 0], strides=[2, 2]](%18), scope: AlexNet/Sequential[features]/MaxPool2d[2]
+      # ---- omitted for brevity ----
+      %29 : Float(10, 256, 6, 6) = onnx::MaxPool[kernel_shape=[3, 3], pads=[0, 0, 0, 0], strides=[2, 2]](%28), scope: AlexNet/Sequential[features]/MaxPool2d[12]
+      # Dynamic means that the shape is not known. This may be because of a
+      # limitation of our implementation (which we would like to fix in a
+      # future release) or shapes which are truly dynamic.
+      %30 : Dynamic = onnx::Shape(%29), scope: AlexNet
+      %31 : Dynamic = onnx::Slice[axes=[0], ends=[1], starts=[0]](%30), scope: AlexNet
+      %32 : Long() = onnx::Squeeze[axes=[0]](%31), scope: AlexNet
+      %33 : Long() = onnx::Constant[value={9216}](), scope: AlexNet
+      # ---- omitted for brevity ----
+      %output1 : Float(10, 1000) = onnx::Gemm[alpha=1, beta=1, broadcast=1, transB=1](%45, %learned_14, %learned_15), scope: AlexNet/Sequential[classifier]/Linear[6]
+      return (%output1);
+    }
+
+You can also verify the protobuf using the `onnx `_ library.
+You can install ``onnx`` with conda::
+
+    conda install -c conda-forge onnx
+
+Then, you can run::
+
+    import onnx
+
+    # Load the ONNX model
+    model = onnx.load("alexnet.onnx")
+
+    # Check that the IR is well formed
+    onnx.checker.check_model(model)
+
+    # Print a human readable representation of the graph
+    onnx.helper.printable_graph(model.graph)
+
+To run the exported model with `caffe2 `_, you will need to install Caffe2.
+If you don't have it already, please `follow the install instructions `_.
+
+Once these are installed, you can use the backend for Caffe2::
+
+    # ...continuing from above
+    import caffe2.python.onnx.backend as backend
+    import numpy as np
+
+    rep = backend.prepare(model, device="CUDA:0") # or "CPU"
+    # For the Caffe2 backend:
+    #     rep.predict_net is the Caffe2 protobuf for the network
+    #     rep.workspace is the Caffe2 workspace for the network
+    #       (see the class caffe2.python.onnx.backend.Workspace)
+    outputs = rep.run(np.random.randn(10, 3, 224, 224).astype(np.float32))
+    # To run networks with more than one input, pass a tuple
+    # rather than a single numpy ndarray.
+    print(outputs[0])
+
+You can also run the exported model with `ONNXRuntime `_.
+You will need to install `ONNXRuntime`; please `follow these instructions `_.
+
+Once it is installed, you can use the backend for ONNXRuntime::
+
+    # ...continuing from above
+    import onnxruntime as ort
+
+    ort_session = ort.InferenceSession('alexnet.onnx')
+
+    outputs = ort_session.run(None, {'actual_input_1': np.random.randn(10, 3, 224, 224).astype(np.float32)})
+
+    print(outputs[0])
+
+Here is another `tutorial of exporting the SuperResolution model to ONNX `_.
+
+In the future, there will be backends for other frameworks as well.
+
+Tracing vs Scripting
+--------------------
+
+The ONNX exporter can be used as both a *trace-based* and a *script-based* exporter.
+
+* *trace-based* means that it operates by executing your model once, and exporting the operators which
+  were actually run during this run. This means that if your model is
+  dynamic, e.g., changes behavior depending on input data, the export
+  won't be accurate. Similarly, a trace is likely to be valid only
+  for a specific input size (which is one reason why we require explicit inputs
+  on tracing). We recommend examining the model trace and making sure
+  the traced operators look reasonable. If your model contains control flow, such as
+  for loops and if conditions, the *trace-based* exporter will unroll them,
+  exporting a static graph that is exactly the same as this run. If you want
+  to export your model with dynamic control flow, you will need to use the *script-based* exporter.
+
+* *script-based* means that the model you are trying to export is a `ScriptModule <../jit.html>`_.
+  `ScriptModule` is the core data structure in `TorchScript`, and `TorchScript` is a subset of the Python
+  language that creates serializable and optimizable models from PyTorch code.
+
+We allow mixing tracing and scripting. You can compose tracing and scripting to suit the particular requirements
+of a part of a model.
Check out this example::
+
+    import torch
+
+    # Trace-based only
+
+    class LoopModel(torch.nn.Module):
+        def forward(self, x, y):
+            for i in range(y):
+                x = x + i
+            return x
+
+    model = LoopModel()
+    dummy_input = torch.ones(2, 3, dtype=torch.long)
+    loop_count = torch.tensor(5, dtype=torch.long)
+
+    torch.onnx.export(model, (dummy_input, loop_count), 'loop.onnx', verbose=True)
+
+With the *trace-based* exporter, we get an ONNX graph that unrolls the for loop::
+
+    graph(%0 : Long(2, 3),
+          %1 : Long()):
+      %2 : Tensor = onnx::Constant[value={1}]()
+      %3 : Tensor = onnx::Add(%0, %2)
+      %4 : Tensor = onnx::Constant[value={2}]()
+      %5 : Tensor = onnx::Add(%3, %4)
+      %6 : Tensor = onnx::Constant[value={3}]()
+      %7 : Tensor = onnx::Add(%5, %6)
+      %8 : Tensor = onnx::Constant[value={4}]()
+      %9 : Tensor = onnx::Add(%7, %8)
+      return (%9)
+
+To utilize the *script-based* exporter for capturing the dynamic loop,
+we can write the loop in script, and call it from the regular nn.Module::
+
+    # Mixing tracing and scripting
+
+    @torch.jit.script
+    def loop(x, y):
+        for i in range(int(y)):
+            x = x + i
+        return x
+
+    class LoopModel2(torch.nn.Module):
+        def forward(self, x, y):
+            return loop(x, y)
+
+    model = LoopModel2()
+    dummy_input = torch.ones(2, 3, dtype=torch.long)
+    loop_count = torch.tensor(5, dtype=torch.long)
+    torch.onnx.export(model, (dummy_input, loop_count), 'loop.onnx', verbose=True,
+                      input_names=['input_data', 'loop_range'])
+
+Now the exported ONNX graph becomes::
+
+    graph(%input_data : Long(2, 3),
+          %loop_range : Long()):
+      %2 : Long() = onnx::Constant[value={1}](), scope: LoopModel2/loop
+      %3 : Tensor = onnx::Cast[to=9](%2)
+      %4 : Long(2, 3) = onnx::Loop(%loop_range, %3, %input_data), scope: LoopModel2/loop # custom_loop.py:240:5
+        block0(%i.1 : Long(), %cond : bool, %x.6 : Long(2, 3)):
+          %8 : Long(2, 3) = onnx::Add(%x.6, %i.1), scope: LoopModel2/loop # custom_loop.py:241:13
+          %9 : Tensor = onnx::Cast[to=9](%2)
+          -> (%9, %8)
+      return (%4)
+
+The dynamic control flow is captured correctly. We can verify it in backends with a different loop range::
+
+    import caffe2.python.onnx.backend as backend
+    import numpy as np
+    import onnx
+    model = onnx.load('loop.onnx')
+
+    rep = backend.prepare(model)
+    outputs = rep.run((dummy_input.numpy(), np.array(9).astype(np.int64)))
+    print(outputs[0])
+    #[[37 37 37]
+    # [37 37 37]]
+
+
+    import onnxruntime as ort
+    ort_sess = ort.InferenceSession('loop.onnx')
+    outputs = ort_sess.run(None, {'input_data': dummy_input.numpy(),
+                                  'loop_range': np.array(9).astype(np.int64)})
+    print(outputs)
+    #[array([[37, 37, 37],
+    #        [37, 37, 37]], dtype=int64)]
+
+
+Limitations
+-----------
+
+* Tensor in-place indexed assignment like `data[index] = new_data` is currently not supported in exporting.
+  One way to resolve this kind of issue is to use the operator `scatter`, explicitly updating the original tensor. ::
+
+    data = torch.zeros(3, 4)
+    index = torch.tensor(1)
+    new_data = torch.arange(4).to(torch.float32)
+
+    # Assigning to the left-hand side by indexing is not supported in exporting.
+    # class InPlaceIndexedAssignment(torch.nn.Module):
+    #     def forward(self, data, index, new_data):
+    #         data[index] = new_data
+    #         return data
+
+    class InPlaceIndexedAssignmentONNX(torch.nn.Module):
+        def forward(self, data, index, new_data):
+            new_data = new_data.unsqueeze(0)
+            index = index.expand(1, new_data.size(1))
+            data.scatter_(0, index, new_data)
+            return data
+
+    out = InPlaceIndexedAssignmentONNX()(data, index, new_data)
+
+    torch.onnx.export(InPlaceIndexedAssignmentONNX(), (data, index, new_data), 'inplace_assign.onnx')
+
+    # caffe2
+    import caffe2.python.onnx.backend as backend
+    import onnx
+
+    onnx_model = onnx.load('inplace_assign.onnx')
+    rep = backend.prepare(onnx_model)
+    out_caffe2 = rep.run((torch.zeros(3, 4).numpy(), index.numpy(), new_data.numpy()))
+
+    assert torch.all(torch.eq(out, torch.tensor(out_caffe2)))
+
+    # onnxruntime
+    import onnxruntime
+    sess = onnxruntime.InferenceSession('inplace_assign.onnx')
+    out_ort = sess.run(None, {
+        sess.get_inputs()[0].name: torch.zeros(3, 4).numpy(),
+        sess.get_inputs()[1].name: index.numpy(),
+        sess.get_inputs()[2].name: new_data.numpy(),
+    })
+
+    assert torch.all(torch.eq(out, torch.tensor(out_ort)))
+
+* There is no concept of a tensor list in ONNX. Without this concept, it is very hard to export operators
+  that consume or produce a tensor list, especially when the length of the tensor list is not known at export time. ::
+
+    x = torch.tensor([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
+
+    # This is not exportable
+    class Model(torch.nn.Module):
+        def forward(self, x):
+            return x.unbind(0)
+
+    # This is exportable.
+    # Note that in this example we know the split operator will always produce exactly three outputs,
+    # so we can export to ONNX without using a tensor list.
+    class AnotherModel(torch.nn.Module):
+        def forward(self, x):
+            return [torch.squeeze(out, 0) for out in torch.split(x, [1,1,1], dim=0)]
+
+* PyTorch and ONNX backends (Caffe2, ONNXRuntime, etc.) often have implementations of operators with some
+  numeric differences. Depending on the model structure, these differences
+  may be negligible, but they can also cause major divergences in behavior
+  (especially on untrained models). We allow Caffe2 to call directly to Torch implementations of operators, to
+  help you smooth over these differences when precision is important,
+  and to also document these differences; a verification sketch follows this list.
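+
+Because such numeric differences are easy to overlook, a pragmatic check is to
+compare the PyTorch output with the backend output under an explicit tolerance.
+This is only a sketch: it assumes the ``alexnet.onnx`` file exported earlier,
+and the tolerances are arbitrary. ::
+
+    import numpy as np
+    import onnxruntime as ort
+    import torch
+    import torchvision
+
+    # Run the original PyTorch model on the same input.
+    model = torchvision.models.alexnet(pretrained=True).eval()
+    x = torch.randn(10, 3, 224, 224)
+    with torch.no_grad():
+        torch_out = model(x).numpy()
+
+    # Run the exported model through the ONNXRuntime backend.
+    ort_session = ort.InferenceSession('alexnet.onnx')
+    (ort_out,) = ort_session.run(None, {'actual_input_1': x.numpy()})
+
+    # Raises an AssertionError if the backend diverges beyond the tolerance.
+    np.testing.assert_allclose(torch_out, ort_out, rtol=1e-3, atol=1e-5)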
+
+Supported operators
+-------------------
+
+The following operators are supported:
+
+* BatchNorm
+* ConstantPadNd
+* Conv
+* Dropout
+* Embedding (no optional arguments supported)
+* FeatureDropout (training mode not supported)
+* Index
+* MaxPool1d
+* MaxPool2d
+* MaxPool3d
+* RNN
+* abs
+* acos
+* adaptive_avg_pool1d
+* adaptive_avg_pool2d
+* adaptive_avg_pool3d
+* adaptive_max_pool1d
+* adaptive_max_pool2d
+* adaptive_max_pool3d
+* add (nonzero alpha not supported)
+* addmm
+* and
+* arange
+* argmax
+* argmin
+* asin
+* atan
+* avg_pool1d
+* avg_pool2d
+* avg_pool3d
+* cat
+* ceil
+* clamp
+* clamp_max
+* clamp_min
+* concat
+* cos
+* dim_arange
+* div
+* dropout
+* elu
+* eq
+* erf
+* exp
+* expand
+* expand_as
+* flatten
+* floor
+* full
+* full_like
+* gather
+* ge
+* glu
+* gt
+* hardtanh
+* index_copy
+* index_fill
+* index_select
+* instance_norm
+* isnan
+* layer_norm
+* le
+* leaky_relu
+* log
+* log2
+* log_sigmoid
+* log_softmax
+* logsumexp
+* lt
+* masked_fill
+* max
+* mean
+* min
+* mm
+* mul
+* narrow
+* ne
+* neg
+* nonzero
+* norm
+* ones
+* ones_like
+* or
+* permute
+* pixel_shuffle
+* pow
+* prelu (single weight shared among input channels not supported)
+* prod
+* rand
+* randn
+* randn_like
+* reciprocal
+* reflection_pad
+* relu
+* repeat
+* replication_pad
+* reshape
+* reshape_as
+* rrelu
+* rsub
+* scatter
+* scatter_add
+* select
+* selu
+* sigmoid
+* sign
+* sin
+* size
+* slice
+* softmax (only dim=-1 supported)
+* softplus
+* split
+* sqrt
+* squeeze
+* stack
+* sub (nonzero alpha not supported)
+* sum
+* t
+* tan
+* tanh
+* threshold (non-zero threshold/non-zero value not supported)
+* to
+* topk
+* transpose
+* type_as
+* unfold (experimental support with ATen-Caffe2 integration)
+* unsqueeze
+* upsample_nearest1d
+* upsample_nearest2d
+* upsample_nearest3d
+* view
+* where
+* zeros
+* zeros_like
+
+The operator set above is sufficient to export the following models:
+
+* AlexNet
+* DCGAN
+* DenseNet
+* Inception (warning: this model is highly sensitive to changes in operator
+  implementation)
+* ResNet
+* SuperResolution
+* VGG
+* `word_language_model `_
+
+Adding support for operators
+----------------------------
+
+Adding export support for operators is *advanced usage*.
+To achieve this, developers need to touch the source code of PyTorch.
+Please follow the `instructions `_
+for installing PyTorch from source.
+If the wanted operator is standardized in ONNX, it should be easy to add
+support for exporting such an operator (by adding a symbolic function for the operator).
+To confirm whether the operator is standardized or not, please check the
+`ONNX operator list `_.
+
+ATen operators
+~~~~~~~~~~~~~~
+
+If the operator is an ATen operator, which means you can find the declaration
+of the function in ``torch/csrc/autograd/generated/VariableType.h``
+(available in the generated code in the PyTorch install dir), you should add the symbolic
+function in ``torch/onnx/symbolic_opset<version>.py`` and follow the instructions listed below:
+
+* Define the symbolic function in ``torch/onnx/symbolic_opset<version>.py``, for example
+  `torch/onnx/symbolic_opset9.py `_.
+  Make sure the function has the same name as the ATen operator/function
+  defined in ``VariableType.h``.
+* The first parameter is always the exported ONNX graph.
+  Parameter names must EXACTLY match the names in ``VariableType.h``,
+  because dispatch is done with keyword arguments.
+* Parameter ordering does NOT necessarily match what is in ``VariableType.h``, + tensors (inputs) are always first, then non-tensor arguments. +* In the symbolic function, if the operator is already standardized in ONNX, + we only need to create a node to represent the ONNX operator in the graph. +* If the input argument is a tensor, but ONNX asks for a scalar, we have to + explicitly do the conversion. The helper function ``_scalar`` can convert a + scalar tensor into a python scalar, and ``_if_scalar_type_as`` can turn a + Python scalar into a PyTorch tensor. + +Non-ATen operators +~~~~~~~~~~~~~~~~~~ + +If the operator is a non-ATen operator, the symbolic function has to be +added in the corresponding PyTorch Function class. Please read the following +instructions: + +* Create a symbolic function named ``symbolic`` in the corresponding Function class. +* The first parameter is always the exported ONNX graph. +* Parameter names except the first must EXACTLY match the names in ``forward``. +* The output tuple size must match the outputs of ``forward``. +* In the symbolic function, if the operator is already standardized in ONNX, + we just need to create a node to represent the ONNX operator in the graph. + +Symbolic functions should be implemented in Python. All of these functions interact +with Python methods which are implemented via C++-Python bindings, +but intuitively the interface they provide looks like this:: + + + def operator/symbolic(g, *inputs): + """ + Modifies Graph (e.g., using "op"), adding the ONNX operations representing + this PyTorch function, and returning a Value or tuple of Values specifying the + ONNX outputs whose values correspond to the original PyTorch return values + of the autograd Function (or None if an output is not supported by ONNX). + + Arguments: + g (Graph): graph to write the ONNX representation into + inputs (Value...): list of values representing the variables which contain + the inputs for this function + """ + + class Value(object): + """Represents an intermediate tensor value computed in ONNX.""" + def type(self): + """Returns the Type of the value.""" + + class Type(object): + def sizes(self): + """Returns a tuple of ints representing the shape of a tensor this describes.""" + + class Graph(object): + def op(self, opname, *inputs, **attrs): + """ + Create an ONNX operator 'opname', taking 'args' as inputs + and attributes 'kwargs' and add it as a node to the current graph, + returning the value representing the single output of this + operator (see the `outputs` keyword argument for multi-return + nodes). + + The set of operators and the inputs/attributes they take + is documented at https://github.com/onnx/onnx/blob/master/docs/Operators.md + + Arguments: + opname (string): The ONNX operator name, e.g., `Abs` or `Add`. + args (Value...): The inputs to the operator; usually provided + as arguments to the `symbolic` definition. + kwargs: The attributes of the ONNX operator, with keys named + according to the following convention: `alpha_f` indicates + the `alpha` attribute with type `f`. The valid type specifiers are + `f` (float), `i` (int), `s` (string) or `t` (Tensor). An attribute + specified with type float accepts either a single float, or a + list of floats (e.g., you would say `dims_i` for a `dims` attribute + that takes a list of integers). + outputs (int, optional): The number of outputs this operator returns; + by default an operator is assumed to return a single output. 
+            If `outputs` is greater than one, this function returns a tuple
+            of output `Value` objects, representing each output of the ONNX
+            operator in positional order.
+            """
+
+The ONNX graph C++ definition is in ``torch/csrc/jit/ir.h``.
+
+Here is an example of handling a missing symbolic function for the ``elu`` operator.
+We try to export the model and see the error message below::
+
+    UserWarning: ONNX export failed on elu because torch.onnx.symbolic_opset9.elu does not exist
+    RuntimeError: ONNX export failed: Couldn't export operator elu
+
+The export fails because PyTorch does not support exporting the ``elu`` operator.
+We find ``virtual Tensor elu(const Tensor & input, Scalar alpha, bool inplace) const override;``
+in ``VariableType.h``. This means ``elu`` is an ATen operator.
+We check the `ONNX operator list `_,
+and confirm that ``Elu`` is standardized in ONNX.
+We add the following lines to ``symbolic_opset9.py``::
+
+    def elu(g, input, alpha, inplace=False):
+        return g.op("Elu", input, alpha_f=_scalar(alpha))
+
+Now PyTorch is able to export the ``elu`` operator.
+
+There are more examples in
+`symbolic_opset9.py `_,
+`symbolic_opset10.py `_.
+
+
+The interface for specifying operator definitions is experimental;
+adventurous users should note that the APIs will probably
+change in a future release.
+
+Custom operators
+~~~~~~~~~~~~~~~~
+
+Following the tutorial `Extending TorchScript with Custom C++ Operators `_,
+you can create and register your own custom op implementation in PyTorch. Here's how to export such a model to ONNX::
+
+    # Create custom symbolic function
+    from torch.onnx.symbolic_helper import parse_args
+    @parse_args('v', 'v', 'f', 'i')
+    def symbolic_foo_forward(g, input1, input2, attr1, attr2):
+        return g.op("Foo", input1, input2, attr1_f=attr1, attr2_i=attr2)
+
+    # Register custom symbolic function
+    from torch.onnx import register_custom_op_symbolic
+    register_custom_op_symbolic('custom_ops::foo_forward', symbolic_foo_forward, 9)
+
+    class FooModel(torch.nn.Module):
+        def __init__(self, attr1, attr2):
+            super(FooModel, self).__init__()
+            self.attr1 = attr1
+            self.attr2 = attr2
+
+        def forward(self, input1, input2):
+            # Calling custom op
+            return torch.ops.custom_ops.foo_forward(input1, input2, self.attr1, self.attr2)
+
+    model = FooModel(attr1, attr2)
+    torch.onnx.export(model, (dummy_input1, dummy_input2), 'model.onnx')
+
+Depending on the custom operator, you can export it as one or a combination of existing ONNX ops.
+You can also export it as a custom op in ONNX. In that case, you will need to extend the backend of your choice
+with a matching custom op implementation, e.g. `Caffe2 custom ops `_,
+`ONNXRuntime custom ops `_.
+
+Frequently Asked Questions
+--------------------------
+Q: I have exported my LSTM model, but its input size seems to be fixed?
+
+    The tracer records the shapes of the example inputs in the graph. If the model should accept
+    inputs of dynamic shape, you can utilize the `dynamic_axes` parameter in the export API. ::
+
+        import torch
+        import torch.nn as nn
+        import onnx
+
+        layer_count = 4
+
+        model = nn.LSTM(10, 20, num_layers=layer_count, bidirectional=True)
+        model.eval()
+
+        with torch.no_grad():
+            input = torch.randn(5, 3, 10)
+            h0 = torch.randn(layer_count * 2, 3, 20)
+            c0 = torch.randn(layer_count * 2, 3, 20)
+            output, (hn, cn) = model(input, (h0, c0))
+
+            # default export
+            torch.onnx.export(model, (input, (h0, c0)), 'lstm.onnx')
+            onnx_model = onnx.load('lstm.onnx')
+            # input shape [5, 3, 10]
+            print(onnx_model.graph.input[0])
+
+            # export with `dynamic_axes`
+            torch.onnx.export(model, (input, (h0, c0)), 'lstm.onnx',
+                              input_names=['input', 'h0', 'c0'],
+                              output_names=['output', 'hn', 'cn'],
+                              dynamic_axes={'input': {0: 'sequence'}, 'output': {0: 'sequence'}})
+            onnx_model = onnx.load('lstm.onnx')
+            # input shape ['sequence', 3, 10]
+            print(onnx_model.graph.input[0])
+
+
+Q: How do I export models with loops in them?
+
+    Please check out `Tracing vs Scripting`_.
+
+Q: Does ONNX support implicit scalar datatype casting?
+
+    No, but the exporter will try to handle that part. Scalars are converted to constant tensors in ONNX.
+    The exporter will try to figure out the right datatype for scalars. However, in cases where it fails
+    to do so, you will need to provide the datatype information manually. We are trying to improve the datatype
+    propagation in the exporter such that manual changes are not required in the future. ::
+
+        class ImplicitCastType(torch.jit.ScriptModule):
+            @torch.jit.script_method
+            def forward(self, x):
+                # Exporter knows x is float32, will export '2' as float32 as well.
+                y = x + 2
+                # Without type propagation, exporter doesn't know the datatype of y.
+                # Thus '3' is exported as int64 by default.
+                return y + 3
+                # The following will export correctly.
+                # return y + torch.tensor([3], dtype=torch.float32)
+
+        x = torch.tensor([1.0], dtype=torch.float32)
+        torch.onnx.export(ImplicitCastType(), x, 'models/implicit_cast.onnx',
+                          example_outputs=ImplicitCastType()(x))
+
+Functions
+--------------------------
+.. autofunction:: export
+.. autofunction:: register_custom_op_symbolic
+.. autofunction:: torch.onnx.operators.shape_as_tensor
+.. autofunction:: set_training
+.. autofunction:: is_in_onnx_export
diff --git a/docs/stable/_sources/optim.rst.txt b/docs/stable/_sources/optim.rst.txt
new file mode 100644
index 000000000000..d6d89c915596
--- /dev/null
+++ b/docs/stable/_sources/optim.rst.txt
@@ -0,0 +1,169 @@
+torch.optim
+===================================
+
+.. automodule:: torch.optim
+
+How to use an optimizer
+-----------------------
+
+To use :mod:`torch.optim` you have to construct an optimizer object that will hold
+the current state and will update the parameters based on the computed gradients.
+
+Constructing it
+^^^^^^^^^^^^^^^
+
+To construct an :class:`Optimizer` you have to give it an iterable containing the
+parameters (all should be :class:`~torch.autograd.Variable` s) to optimize. Then,
+you can specify optimizer-specific options such as the learning rate, weight decay, etc.
+
+.. note::
+
+    If you need to move a model to GPU via ``.cuda()``, please do so before
+    constructing optimizers for it. Parameters of a model after ``.cuda()`` will
+    be different objects from those before the call.
+
+    In general, you should make sure that optimized parameters live in
+    consistent locations when optimizers are constructed and used.
+
+Example::
+
+    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
+    optimizer = optim.Adam([var1, var2], lr=0.0001)
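+
+As a concrete illustration of the note above (``MyModel`` is a stand-in for
+any ``nn.Module``; the snippet is only a sketch), move the model to the GPU
+first, and only then construct the optimizer, so that the optimizer holds
+references to the CUDA parameters::
+
+    model = MyModel()
+    model.cuda()  # move the parameters first ...
+    # ... then hand the (now CUDA) parameters to the optimizer
+    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)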
+
+Per-parameter options
+^^^^^^^^^^^^^^^^^^^^^
+
+:class:`Optimizer` s also support specifying per-parameter options. To do this, instead
+of passing an iterable of :class:`~torch.autograd.Variable` s, pass in an iterable of
+:class:`dict` s. Each of them will define a separate parameter group, and should contain
+a ``params`` key, containing a list of parameters belonging to it. Other keys
+should match the keyword arguments accepted by the optimizers, and will be used
+as optimization options for this group.
+
+.. note::
+
+    You can still pass options as keyword arguments. They will be used as
+    defaults, in the groups that didn't override them. This is useful when you
+    only want to vary a single option, while keeping all others consistent
+    between parameter groups.
+
+
+For example, this is very useful when one wants to specify per-layer learning rates::
+
+    optim.SGD([
+                {'params': model.base.parameters()},
+                {'params': model.classifier.parameters(), 'lr': 1e-3}
+            ], lr=1e-2, momentum=0.9)
+
+This means that ``model.base``'s parameters will use the default learning rate of ``1e-2``,
+``model.classifier``'s parameters will use a learning rate of ``1e-3``, and a momentum of
+``0.9`` will be used for all parameters.
+
+Taking an optimization step
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+All optimizers implement a :func:`~Optimizer.step` method that updates the
+parameters. It can be used in two ways:
+
+``optimizer.step()``
+~~~~~~~~~~~~~~~~~~~~
+
+This is a simplified version supported by most optimizers. The function can be
+called once the gradients are computed, e.g. using
+:func:`~torch.autograd.Variable.backward`.
+
+Example::
+
+    for input, target in dataset:
+        optimizer.zero_grad()
+        output = model(input)
+        loss = loss_fn(output, target)
+        loss.backward()
+        optimizer.step()
+
+``optimizer.step(closure)``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Some optimization algorithms such as Conjugate Gradient and LBFGS need to
+reevaluate the function multiple times, so you have to pass in a closure that
+allows them to recompute your model. The closure should clear the gradients,
+compute the loss, and return it.
+
+Example::
+
+    for input, target in dataset:
+        def closure():
+            optimizer.zero_grad()
+            output = model(input)
+            loss = loss_fn(output, target)
+            loss.backward()
+            return loss
+        optimizer.step(closure)
+
+Algorithms
+----------
+
+.. autoclass:: Optimizer
+    :members:
+.. autoclass:: Adadelta
+    :members:
+.. autoclass:: Adagrad
+    :members:
+.. autoclass:: Adam
+    :members:
+.. autoclass:: AdamW
+    :members:
+.. autoclass:: SparseAdam
+    :members:
+.. autoclass:: Adamax
+    :members:
+.. autoclass:: ASGD
+    :members:
+.. autoclass:: LBFGS
+    :members:
+.. autoclass:: RMSprop
+    :members:
+.. autoclass:: Rprop
+    :members:
+.. autoclass:: SGD
+    :members:
+
+How to adjust Learning Rate
+---------------------------
+
+:mod:`torch.optim.lr_scheduler` provides several methods to adjust the learning
+rate based on the number of epochs. :class:`torch.optim.lr_scheduler.ReduceLROnPlateau`
+allows dynamic learning rate reduction based on some validation measurements.
+
+Learning rate scheduling should be applied after the optimizer's update; e.g., you
+should write your code this way:
+
+    >>> scheduler = ...
+    >>> for epoch in range(100):
+    >>>     train(...)
+    >>>     validate(...)
+    >>>     scheduler.step()
+
+.. warning::
+    Prior to PyTorch 1.1.0, the learning rate scheduler was expected to be called before
+    the optimizer's update; 1.1.0 changed this behavior in a BC-breaking way. If you use
+    the learning rate scheduler (calling ``scheduler.step()``) before the optimizer's update
+    (calling ``optimizer.step()``), this will skip the first value of the learning rate schedule.
+    If you are unable to reproduce results after upgrading to PyTorch 1.1.0, please check
+    if you are calling ``scheduler.step()`` at the wrong time.
+
+
+.. autoclass:: torch.optim.lr_scheduler.LambdaLR
+    :members:
+.. autoclass:: torch.optim.lr_scheduler.StepLR
+    :members:
+.. autoclass:: torch.optim.lr_scheduler.MultiStepLR
+    :members:
+.. autoclass:: torch.optim.lr_scheduler.ExponentialLR
+    :members:
+.. autoclass:: torch.optim.lr_scheduler.CosineAnnealingLR
+    :members:
+.. autoclass:: torch.optim.lr_scheduler.ReduceLROnPlateau
+    :members:
+.. autoclass:: torch.optim.lr_scheduler.CyclicLR
+    :members:
diff --git a/docs/stable/_sources/sparse.rst.txt b/docs/stable/_sources/sparse.rst.txt
new file mode 100644
index 000000000000..b746af7f7fdc
--- /dev/null
+++ b/docs/stable/_sources/sparse.rst.txt
@@ -0,0 +1,145 @@
+.. currentmodule:: torch.sparse
+
+.. _sparse-docs:
+
+torch.sparse
+============
+
+.. warning::
+
+    This API is currently experimental and may change in the near future.
+
+Torch supports sparse tensors in COO(rdinate) format, which can
+efficiently store and process tensors for which the majority of elements
+are zeros.
+
+A sparse tensor is represented as a pair of dense tensors: a tensor
+of values and a 2D tensor of indices. A sparse tensor can be constructed
+by providing these two tensors, as well as the size of the sparse tensor
+(which cannot be inferred from these tensors!). Suppose we want to define
+a sparse tensor with the entry 3 at location (0, 2), entry 4 at
+location (1, 0), and entry 5 at location (1, 2). We would then write:
+
+    >>> i = torch.LongTensor([[0, 1, 1],
+                              [2, 0, 2]])
+    >>> v = torch.FloatTensor([3, 4, 5])
+    >>> torch.sparse.FloatTensor(i, v, torch.Size([2,3])).to_dense()
+     0  0  3
+     4  0  5
+    [torch.FloatTensor of size 2x3]
+
+Note that the input to LongTensor is NOT a list of index tuples. If you want
+to write your indices this way, you should transpose before passing them to
+the sparse constructor:
+
+    >>> i = torch.LongTensor([[0, 2], [1, 0], [1, 2]])
+    >>> v = torch.FloatTensor([3, 4, 5])
+    >>> torch.sparse.FloatTensor(i.t(), v, torch.Size([2,3])).to_dense()
+     0  0  3
+     4  0  5
+    [torch.FloatTensor of size 2x3]
+
+You can also construct hybrid sparse tensors, where only the first n
+dimensions are sparse, and the rest of the dimensions are dense:
+
+    >>> i = torch.LongTensor([[2, 4]])
+    >>> v = torch.FloatTensor([[1, 3], [5, 7]])
+    >>> torch.sparse.FloatTensor(i, v).to_dense()
+     0  0
+     0  0
+     1  3
+     0  0
+     5  7
+    [torch.FloatTensor of size 5x2]
+
+An empty sparse tensor can be constructed by specifying its size:
+
+    >>> torch.sparse.FloatTensor(2, 3)
+    SparseFloatTensor of size 2x3 with indices:
+    [torch.LongTensor with no dimension]
+    and values:
+    [torch.FloatTensor with no dimension]
+
+SparseTensor has the following invariants:
+
+  1. sparse_dim + dense_dim = len(SparseTensor.shape)
+  2. SparseTensor._indices().shape = (sparse_dim, nnz)
+  3. SparseTensor._values().shape = (nnz, SparseTensor.shape[sparse_dim:])
+
+Since SparseTensor._indices() is always a 2D tensor, the smallest sparse_dim is 1.
+Therefore, the representation of a SparseTensor of sparse_dim = 0 is simply a dense tensor.
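+
+These invariants can be checked directly on a small example; this snippet is
+only illustrative and uses the internal accessors discussed in the note below:
+
+    >>> i = torch.LongTensor([[0, 1, 1],
+                              [2, 0, 2]])
+    >>> v = torch.FloatTensor([3, 4, 5])
+    >>> s = torch.sparse.FloatTensor(i, v, torch.Size([2, 3]))
+    >>> s.sparse_dim() + s.dense_dim() == len(s.shape)
+    True
+    >>> s._indices().shape   # (sparse_dim, nnz)
+    torch.Size([2, 3])
+    >>> s._values().shape    # (nnz,) plus any dense dimensions
+    torch.Size([3])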
+
+.. note::
+
+    Our sparse tensor format permits *uncoalesced* sparse tensors, where
+    there may be duplicate coordinates in the indices; in this case,
+    the interpretation is that the value at that index is the sum of all
+    duplicate value entries. Uncoalesced tensors permit us to implement
+    certain operators more efficiently.
+
+    For the most part, you shouldn't have to care whether or not a
+    sparse tensor is coalesced, as most operations will work
+    identically given a coalesced or uncoalesced sparse tensor.
+    However, there are two cases in which you may need to care.
+
+    First, if you repeatedly perform an operation that can produce
+    duplicate entries (e.g., :func:`torch.sparse.FloatTensor.add`), you
+    should occasionally coalesce your sparse tensors to prevent
+    them from growing too large.
+
+    Second, some operators will produce different values depending on
+    whether or not they are coalesced (e.g.,
+    :func:`torch.sparse.FloatTensor._values` and
+    :func:`torch.sparse.FloatTensor._indices`, as well as
+    :func:`torch.Tensor.sparse_mask`). These operators are
+    prefixed by an underscore to indicate that they reveal internal
+    implementation details and should be used with care, since code
+    that works with coalesced sparse tensors may not work with
+    uncoalesced sparse tensors; generally speaking, it is safest
+    to explicitly coalesce before working with these operators.
+
+    For example, suppose that we wanted to implement an operator
+    by operating directly on :func:`torch.sparse.FloatTensor._values`.
+    Multiplication by a scalar can be implemented in the obvious way,
+    as multiplication distributes over addition; however, square root
+    cannot be implemented directly, since ``sqrt(a + b) != sqrt(a) +
+    sqrt(b)`` (which is what would be computed if you were given an
+    uncoalesced tensor).
+
+.. class:: FloatTensor()
+
+    .. method:: add
+    .. method:: add_
+    .. method:: clone
+    .. method:: dim
+    .. method:: div
+    .. method:: div_
+    .. method:: get_device
+    .. method:: hspmm
+    .. method:: mm
+    .. method:: mul
+    .. method:: mul_
+    .. method:: narrow_copy
+    .. method:: resizeAs_
+    .. method:: size
+    .. method:: spadd
+    .. method:: spmm
+    .. method:: sspaddmm
+    .. method:: sspmm
+    .. method:: sub
+    .. method:: sub_
+    .. method:: t_
+    .. method:: toDense
+    .. method:: transpose
+    .. method:: transpose_
+    .. method:: zero_
+    .. method:: coalesce
+    .. method:: is_coalesced
+    .. method:: _indices
+    .. method:: _values
+    .. method:: _nnz
+
+Functions
+----------------------------------
+
+.. autofunction:: torch.sparse.addmm
+.. autofunction:: torch.sparse.mm
+.. autofunction:: torch.sparse.sum
diff --git a/docs/stable/_sources/storage.rst.txt b/docs/stable/_sources/storage.rst.txt
new file mode 100644
index 000000000000..61148916884c
--- /dev/null
+++ b/docs/stable/_sources/storage.rst.txt
@@ -0,0 +1,12 @@
+torch.Storage
+===================================
+
+A :class:`torch.Storage` is a contiguous, one-dimensional array of a single
+data type.
+
+Every :class:`torch.Tensor` has a corresponding storage of the same data type.
+
+.. autoclass:: torch.FloatStorage
+   :members:
+   :undoc-members:
+   :inherited-members:
diff --git a/docs/stable/_sources/tensor_attributes.rst.txt b/docs/stable/_sources/tensor_attributes.rst.txt
new file mode 100644
index 000000000000..d9dfb8aae286
--- /dev/null
+++ b/docs/stable/_sources/tensor_attributes.rst.txt
@@ -0,0 +1,136 @@
+.. currentmodule:: torch
+
+..
_tensor-attributes-doc: + +Tensor Attributes +================= + +Each ``torch.Tensor`` has a :class:`torch.dtype`, :class:`torch.device`, and :class:`torch.layout`. + +.. _dtype-doc: + +torch.dtype +----------- + +.. class:: torch.dtype + +A :class:`torch.dtype` is an object that represents the data type of a +:class:`torch.Tensor`. PyTorch has nine different data types: + +======================== =========================================== =========================== +Data type dtype Tensor types +======================== =========================================== =========================== +32-bit floating point ``torch.float32`` or ``torch.float`` ``torch.*.FloatTensor`` +64-bit floating point ``torch.float64`` or ``torch.double`` ``torch.*.DoubleTensor`` +16-bit floating point ``torch.float16`` or ``torch.half`` ``torch.*.HalfTensor`` +8-bit integer (unsigned) ``torch.uint8`` ``torch.*.ByteTensor`` +8-bit integer (signed) ``torch.int8`` ``torch.*.CharTensor`` +16-bit integer (signed) ``torch.int16`` or ``torch.short`` ``torch.*.ShortTensor`` +32-bit integer (signed) ``torch.int32`` or ``torch.int`` ``torch.*.IntTensor`` +64-bit integer (signed) ``torch.int64`` or ``torch.long`` ``torch.*.LongTensor`` +Boolean ``torch.bool`` ``torch.*.BoolTensor`` +======================== =========================================== =========================== + +To find out if a :class:`torch.dtype` is a floating point data type, the property :attr:`is_floating_point` +can be used, which returns ``True`` if the data type is a floating point data type. + +.. _device-doc: + +torch.device +------------ + +.. class:: torch.device + +A :class:`torch.device` is an object representing the device on which a :class:`torch.Tensor` is +or will be allocated. + +The :class:`torch.device` contains a device type (``'cpu'`` or ``'cuda'``) and optional device +ordinal for the device type. If the device ordinal is not present, this object will always represent +the current device for the device type, even after :func:`torch.cuda.set_device()` is called; e.g., +a :class:`torch.Tensor` constructed with device ``'cuda'`` is equivalent to ``'cuda:X'`` where X is +the result of :func:`torch.cuda.current_device()`. + +A :class:`torch.Tensor`'s device can be accessed via the :attr:`Tensor.device` property. + +A :class:`torch.device` can be constructed via a string or via a string and device ordinal + +Via a string: +:: + + >>> torch.device('cuda:0') + device(type='cuda', index=0) + + >>> torch.device('cpu') + device(type='cpu') + + >>> torch.device('cuda') # current cuda device + device(type='cuda') + +Via a string and device ordinal: + +:: + + >>> torch.device('cuda', 0) + device(type='cuda', index=0) + + >>> torch.device('cpu', 0) + device(type='cpu', index=0) + +.. note:: + The :class:`torch.device` argument in functions can generally be substituted with a string. + This allows for fast prototyping of code. + + >>> # Example of a function that takes in a torch.device + >>> cuda1 = torch.device('cuda:1') + >>> torch.randn((2,3), device=cuda1) + + >>> # You can substitute the torch.device with a string + >>> torch.randn((2,3), device='cuda:1') + +.. note:: + For legacy reasons, a device can be constructed via a single device ordinal, which is treated + as a cuda device. This matches :meth:`Tensor.get_device`, which returns an ordinal for cuda + tensors and is not supported for cpu tensors. + + >>> torch.device(1) + device(type='cuda', index=1) + +.. 
note:: + Methods which take a device will generally accept a (properly formatted) string + or (legacy) integer device ordinal, i.e. the following are all equivalent: + + >>> torch.randn((2,3), device=torch.device('cuda:1')) + >>> torch.randn((2,3), device='cuda:1') + >>> torch.randn((2,3), device=1) # legacy + + +.. _layout-doc: + +torch.layout +------------ + +.. class:: torch.layout + +A :class:`torch.layout` is an object that represents the memory layout of a +:class:`torch.Tensor`. Currently, we support ``torch.strided`` (dense Tensors) +and have experimental support for ``torch.sparse_coo`` (sparse COO Tensors). + +``torch.strided`` represents dense Tensors and is the memory layout that +is most commonly used. Each strided tensor has an associated +:class:`torch.Storage`, which holds its data. These tensors provide +multi-dimensional, `strided `_ +view of a storage. Strides are a list of integers: the k-th stride +represents the jump in the memory necessary to go from one element to the +next one in the k-th dimension of the Tensor. This concept makes it possible +to perform many tensor operations efficiently. + +Example:: + + >>> x = torch.Tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]) + >>> x.stride() + (5, 1) + + >>> x.t().stride() + (1, 5) + +For more information on ``torch.sparse_coo`` tensors, see :ref:`sparse-docs`. diff --git a/docs/stable/_sources/tensorboard.rst.txt b/docs/stable/_sources/tensorboard.rst.txt new file mode 100644 index 000000000000..1a40a945f302 --- /dev/null +++ b/docs/stable/_sources/tensorboard.rst.txt @@ -0,0 +1,95 @@ +torch.utils.tensorboard +=================================== + +Before going further, more details on TensorBoard can be found at +https://www.tensorflow.org/tensorboard/ + +Once you've installed TensorBoard, these utilities let you log PyTorch models +and metrics into a directory for visualization within the TensorBoard UI. +Scalars, images, histograms, graphs, and embedding visualizations are all +supported for PyTorch models and tensors as well as Caffe2 nets and blobs. + +The SummaryWriter class is your main entry to log data for consumption +and visualization by TensorBoard. For example: + +.. code:: python + + + import torch + import torchvision + from torch.utils.tensorboard import SummaryWriter + from torchvision import datasets, transforms + + # Writer will output to ./runs/ directory by default + writer = SummaryWriter() + + transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]) + trainset = datasets.MNIST('mnist_train', train=True, download=True, transform=transform) + trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) + model = torchvision.models.resnet50(False) + # Have ResNet model take in grayscale rather than RGB + model.conv1 = torch.nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False) + images, labels = next(iter(trainloader)) + + grid = torchvision.utils.make_grid(images) + writer.add_image('images', grid, 0) + writer.add_graph(model, images) + writer.close() + +This can then be visualized with TensorBoard, which should be installable +and runnable with:: + + pip install tb-nightly # Until 1.14 moves to the release channel + tensorboard --logdir=runs + + +Lots of information can be logged for one experiment. To avoid cluttering +the UI and have better result clustering, we can group plots by naming them +hierarchically. 
For example, "Loss/train" and "Loss/test" will be grouped +together, while "Accuracy/train" and "Accuracy/test" will be grouped separately +in the TensorBoard interface. + +.. code:: python + + + from torch.utils.tensorboard import SummaryWriter + import numpy as np + + writer = SummaryWriter() + + for n_iter in range(100): + writer.add_scalar('Loss/train', np.random.random(), n_iter) + writer.add_scalar('Loss/test', np.random.random(), n_iter) + writer.add_scalar('Accuracy/train', np.random.random(), n_iter) + writer.add_scalar('Accuracy/test', np.random.random(), n_iter) + + +Expected result: + +.. image:: _static/img/tensorboard/hier_tags.png + :scale: 75 % + +| +| + +.. currentmodule:: torch.utils.tensorboard.writer + +.. autoclass:: SummaryWriter + + .. automethod:: __init__ + .. automethod:: add_scalar + .. automethod:: add_scalars + .. automethod:: add_histogram + .. automethod:: add_image + .. automethod:: add_images + .. automethod:: add_figure + .. automethod:: add_video + .. automethod:: add_audio + .. automethod:: add_text + .. automethod:: add_graph + .. automethod:: add_embedding + .. automethod:: add_pr_curve + .. automethod:: add_custom_scalars + .. automethod:: add_mesh + .. automethod:: flush + .. automethod:: close diff --git a/docs/stable/_sources/tensors.rst.txt b/docs/stable/_sources/tensors.rst.txt new file mode 100644 index 000000000000..55a911267038 --- /dev/null +++ b/docs/stable/_sources/tensors.rst.txt @@ -0,0 +1,477 @@ +.. currentmodule:: torch + +.. _tensor-doc: + +torch.Tensor +=================================== + +A :class:`torch.Tensor` is a multi-dimensional matrix containing elements of +a single data type. + +Torch defines nine CPU tensor types and nine GPU tensor types: + +======================== =========================================== =========================== ================================ +Data type dtype CPU tensor GPU tensor +======================== =========================================== =========================== ================================ +32-bit floating point ``torch.float32`` or ``torch.float`` :class:`torch.FloatTensor` :class:`torch.cuda.FloatTensor` +64-bit floating point ``torch.float64`` or ``torch.double`` :class:`torch.DoubleTensor` :class:`torch.cuda.DoubleTensor` +16-bit floating point ``torch.float16`` or ``torch.half`` :class:`torch.HalfTensor` :class:`torch.cuda.HalfTensor` +8-bit integer (unsigned) ``torch.uint8`` :class:`torch.ByteTensor` :class:`torch.cuda.ByteTensor` +8-bit integer (signed) ``torch.int8`` :class:`torch.CharTensor` :class:`torch.cuda.CharTensor` +16-bit integer (signed) ``torch.int16`` or ``torch.short`` :class:`torch.ShortTensor` :class:`torch.cuda.ShortTensor` +32-bit integer (signed) ``torch.int32`` or ``torch.int`` :class:`torch.IntTensor` :class:`torch.cuda.IntTensor` +64-bit integer (signed) ``torch.int64`` or ``torch.long`` :class:`torch.LongTensor` :class:`torch.cuda.LongTensor` +Boolean ``torch.bool`` :class:`torch.BoolTensor` :class:`torch.cuda.BoolTensor` +======================== =========================================== =========================== ================================ + +:class:`torch.Tensor` is an alias for the default tensor type (:class:`torch.FloatTensor`). 
+ +A tensor can be constructed from a Python :class:`list` or sequence using the +:func:`torch.tensor` constructor: + +:: + + >>> torch.tensor([[1., -1.], [1., -1.]]) + tensor([[ 1.0000, -1.0000], + [ 1.0000, -1.0000]]) + >>> torch.tensor(np.array([[1, 2, 3], [4, 5, 6]])) + tensor([[ 1, 2, 3], + [ 4, 5, 6]]) + +.. warning:: + + :func:`torch.tensor` always copies :attr:`data`. If you have a Tensor + :attr:`data` and just want to change its ``requires_grad`` flag, use + :meth:`~torch.Tensor.requires_grad_` or + :meth:`~torch.Tensor.detach` to avoid a copy. + If you have a numpy array and want to avoid a copy, use + :func:`torch.as_tensor`. + +A tensor of specific data type can be constructed by passing a +:class:`torch.dtype` and/or a :class:`torch.device` to a +constructor or tensor creation op: + +:: + + >>> torch.zeros([2, 4], dtype=torch.int32) + tensor([[ 0, 0, 0, 0], + [ 0, 0, 0, 0]], dtype=torch.int32) + >>> cuda0 = torch.device('cuda:0') + >>> torch.ones([2, 4], dtype=torch.float64, device=cuda0) + tensor([[ 1.0000, 1.0000, 1.0000, 1.0000], + [ 1.0000, 1.0000, 1.0000, 1.0000]], dtype=torch.float64, device='cuda:0') + +The contents of a tensor can be accessed and modified using Python's indexing +and slicing notation: + +:: + + >>> x = torch.tensor([[1, 2, 3], [4, 5, 6]]) + >>> print(x[1][2]) + tensor(6) + >>> x[0][1] = 8 + >>> print(x) + tensor([[ 1, 8, 3], + [ 4, 5, 6]]) + +Use :meth:`torch.Tensor.item` to get a Python number from a tensor containing a +single value: + +:: + + >>> x = torch.tensor([[1]]) + >>> x + tensor([[ 1]]) + >>> x.item() + 1 + >>> x = torch.tensor(2.5) + >>> x + tensor(2.5000) + >>> x.item() + 2.5 + +A tensor can be created with :attr:`requires_grad=True` so that +:mod:`torch.autograd` records operations on them for automatic differentiation. + +:: + + >>> x = torch.tensor([[1., -1.], [1., 1.]], requires_grad=True) + >>> out = x.pow(2).sum() + >>> out.backward() + >>> x.grad + tensor([[ 2.0000, -2.0000], + [ 2.0000, 2.0000]]) + +Each tensor has an associated :class:`torch.Storage`, which holds its data. +The tensor class provides multi-dimensional, `strided `_ +view of a storage and defines numeric operations on it. + +.. note:: + For more information on the :class:`torch.dtype`, :class:`torch.device`, and + :class:`torch.layout` attributes of a :class:`torch.Tensor`, see + :ref:`tensor-attributes-doc`. + +.. note:: + Methods which mutate a tensor are marked with an underscore suffix. + For example, :func:`torch.FloatTensor.abs_` computes the absolute value + in-place and returns the modified tensor, while :func:`torch.FloatTensor.abs` + computes the result in a new tensor. + +.. note:: + To change an existing tensor's :class:`torch.device` and/or :class:`torch.dtype`, consider using + :meth:`~torch.Tensor.to` method on the tensor. + +.. warning:: + Current implementation of :class:`torch.Tensor` introduces memory overhead, + thus it might lead to unexpectedly high memory usage in the applications with many tiny tensors. + If this is your case, consider using one large structure. + +.. class:: Tensor() + + There are a few main ways to create a tensor, depending on your use case. + + - To create a tensor with pre-existing data, use :func:`torch.tensor`. + - To create a tensor with specific size, use ``torch.*`` tensor creation + ops (see :ref:`tensor-creation-ops`). + - To create a tensor with the same size (and similar types) as another tensor, + use ``torch.*_like`` tensor creation ops + (see :ref:`tensor-creation-ops`). 
+ - To create a tensor with similar type but different size as another tensor, + use ``tensor.new_*`` creation ops. + + .. automethod:: new_tensor + .. automethod:: new_full + .. automethod:: new_empty + .. automethod:: new_ones + .. automethod:: new_zeros + + .. autoattribute:: is_cuda + .. autoattribute:: device + .. autoattribute:: grad + .. autoattribute:: ndim + .. autoattribute:: T + + .. automethod:: abs + .. automethod:: abs_ + .. automethod:: acos + .. automethod:: acos_ + .. automethod:: add + .. automethod:: add_ + .. automethod:: addbmm + .. automethod:: addbmm_ + .. automethod:: addcdiv + .. automethod:: addcdiv_ + .. automethod:: addcmul + .. automethod:: addcmul_ + .. automethod:: addmm + .. automethod:: addmm_ + .. automethod:: addmv + .. automethod:: addmv_ + .. automethod:: addr + .. automethod:: addr_ + .. automethod:: allclose + .. automethod:: apply_ + .. automethod:: argmax + .. automethod:: argmin + .. automethod:: argsort + .. automethod:: asin + .. automethod:: asin_ + .. automethod:: as_strided + .. automethod:: atan + .. automethod:: atan2 + .. automethod:: atan2_ + .. automethod:: atan_ + .. automethod:: backward + .. automethod:: baddbmm + .. automethod:: baddbmm_ + .. automethod:: bernoulli + .. automethod:: bernoulli_ + .. automethod:: bfloat16 + .. automethod:: bincount + .. automethod:: bitwise_not + .. automethod:: bitwise_not_ + .. automethod:: bmm + .. automethod:: bool + .. automethod:: byte + .. automethod:: cauchy_ + .. automethod:: ceil + .. automethod:: ceil_ + .. automethod:: char + .. automethod:: cholesky + .. automethod:: cholesky_inverse + .. automethod:: cholesky_solve + .. automethod:: chunk + .. automethod:: clamp + .. automethod:: clamp_ + .. automethod:: clone + .. automethod:: contiguous + .. automethod:: copy_ + .. automethod:: cos + .. automethod:: cos_ + .. automethod:: cosh + .. automethod:: cosh_ + .. automethod:: cpu + .. automethod:: cross + .. automethod:: cuda + .. automethod:: cumprod + .. automethod:: cumsum + .. automethod:: data_ptr + .. automethod:: dequantize + .. automethod:: det + .. automethod:: dense_dim + .. automethod:: detach + .. automethod:: detach_ + .. automethod:: diag + .. automethod:: diag_embed + .. automethod:: diagflat + .. automethod:: diagonal + .. automethod:: fill_diagonal_ + .. automethod:: digamma + .. automethod:: digamma_ + .. automethod:: dim + .. automethod:: dist + .. automethod:: div + .. automethod:: div_ + .. automethod:: dot + .. automethod:: double + .. automethod:: eig + .. automethod:: element_size + .. automethod:: eq + .. automethod:: eq_ + .. automethod:: equal + .. automethod:: erf + .. automethod:: erf_ + .. automethod:: erfc + .. automethod:: erfc_ + .. automethod:: erfinv + .. automethod:: erfinv_ + .. automethod:: exp + .. automethod:: exp_ + .. automethod:: expm1 + .. automethod:: expm1_ + .. automethod:: expand + .. automethod:: expand_as + .. automethod:: exponential_ + .. automethod:: fft + .. automethod:: fill_ + .. automethod:: flatten + .. automethod:: flip + .. automethod:: float + .. automethod:: floor + .. automethod:: floor_ + .. automethod:: fmod + .. automethod:: fmod_ + .. automethod:: frac + .. automethod:: frac_ + .. automethod:: gather + .. automethod:: ge + .. automethod:: ge_ + .. automethod:: gels + .. automethod:: geometric_ + .. automethod:: geqrf + .. automethod:: ger + .. automethod:: get_device + .. automethod:: gt + .. automethod:: gt_ + .. automethod:: half + .. automethod:: hardshrink + .. automethod:: histc + .. automethod:: ifft + .. 
automethod:: index_add_ + .. automethod:: index_add + .. automethod:: index_copy_ + .. automethod:: index_copy + .. automethod:: index_fill_ + .. automethod:: index_fill + .. automethod:: index_put_ + .. automethod:: index_put + .. automethod:: index_select + .. automethod:: indices + .. automethod:: int + .. automethod:: int_repr + .. automethod:: inverse + .. automethod:: irfft + .. automethod:: is_contiguous + .. automethod:: is_floating_point + .. automethod:: is_leaf + .. automethod:: is_pinned + .. automethod:: is_set_to + .. automethod:: is_shared + .. automethod:: is_signed + .. automethod:: is_sparse + .. automethod:: item + .. automethod:: kthvalue + .. automethod:: le + .. automethod:: le_ + .. automethod:: lerp + .. automethod:: lerp_ + .. automethod:: log + .. automethod:: log_ + .. automethod:: logdet + .. automethod:: log10 + .. automethod:: log10_ + .. automethod:: log1p + .. automethod:: log1p_ + .. automethod:: log2 + .. automethod:: log2_ + .. automethod:: log_normal_ + .. automethod:: logsumexp + .. automethod:: long + .. automethod:: lstsq + .. automethod:: lt + .. automethod:: lt_ + .. automethod:: lu + .. automethod:: lu_solve + .. automethod:: map_ + .. automethod:: masked_scatter_ + .. automethod:: masked_scatter + .. automethod:: masked_fill_ + .. automethod:: masked_fill + .. automethod:: masked_select + .. automethod:: matmul + .. automethod:: matrix_power + .. automethod:: max + .. automethod:: mean + .. automethod:: median + .. automethod:: min + .. automethod:: mm + .. automethod:: mode + .. automethod:: mul + .. automethod:: mul_ + .. automethod:: multinomial + .. automethod:: mv + .. automethod:: mvlgamma + .. automethod:: mvlgamma_ + .. automethod:: narrow + .. automethod:: narrow_copy + .. automethod:: ndimension + .. automethod:: ne + .. automethod:: ne_ + .. automethod:: neg + .. automethod:: neg_ + .. automethod:: nelement + .. automethod:: nonzero + .. automethod:: norm + .. automethod:: normal_ + .. automethod:: numel + .. automethod:: numpy + .. automethod:: orgqr + .. automethod:: ormqr + .. automethod:: permute + .. automethod:: pin_memory + .. automethod:: pinverse + .. automethod:: pow + .. automethod:: pow_ + .. automethod:: prod + .. automethod:: put_ + .. automethod:: qr + .. automethod:: qscheme + .. automethod:: q_scale + .. automethod:: q_zero_point + .. automethod:: random_ + .. automethod:: reciprocal + .. automethod:: reciprocal_ + .. automethod:: register_hook + .. automethod:: remainder + .. automethod:: remainder_ + .. automethod:: renorm + .. automethod:: renorm_ + .. automethod:: repeat + .. automethod:: repeat_interleave + .. automethod:: requires_grad + .. automethod:: requires_grad_ + .. automethod:: reshape + .. automethod:: reshape_as + .. automethod:: resize_ + .. automethod:: resize_as_ + .. automethod:: retain_grad + .. automethod:: rfft + .. automethod:: roll + .. automethod:: rot90 + .. automethod:: round + .. automethod:: round_ + .. automethod:: rsqrt + .. automethod:: rsqrt_ + .. automethod:: scatter + .. automethod:: scatter_ + .. automethod:: scatter_add_ + .. automethod:: scatter_add + .. automethod:: select + .. automethod:: set_ + .. automethod:: share_memory_ + .. automethod:: short + .. automethod:: sigmoid + .. automethod:: sigmoid_ + .. automethod:: sign + .. automethod:: sign_ + .. automethod:: sin + .. automethod:: sin_ + .. automethod:: sinh + .. automethod:: sinh_ + .. automethod:: size + .. automethod:: slogdet + .. automethod:: solve + .. automethod:: sort + .. automethod:: split + .. 
automethod:: sparse_mask + .. automethod:: sparse_dim + .. automethod:: sqrt + .. automethod:: sqrt_ + .. automethod:: squeeze + .. automethod:: squeeze_ + .. automethod:: std + .. automethod:: stft + .. automethod:: storage + .. automethod:: storage_offset + .. automethod:: storage_type + .. automethod:: stride + .. automethod:: sub + .. automethod:: sub_ + .. automethod:: sum + .. automethod:: sum_to_size + .. automethod:: svd + .. automethod:: symeig + .. automethod:: t + .. automethod:: t_ + .. automethod:: to + .. automethod:: to_mkldnn + .. automethod:: take + .. automethod:: tan + .. automethod:: tan_ + .. automethod:: tanh + .. automethod:: tanh_ + .. automethod:: tolist + .. automethod:: topk + .. automethod:: to_sparse + .. automethod:: trace + .. automethod:: transpose + .. automethod:: transpose_ + .. automethod:: triangular_solve + .. automethod:: tril + .. automethod:: tril_ + .. automethod:: triu + .. automethod:: triu_ + .. automethod:: trunc + .. automethod:: trunc_ + .. automethod:: type + .. automethod:: type_as + .. automethod:: unbind + .. automethod:: unfold + .. automethod:: uniform_ + .. automethod:: unique + .. automethod:: unique_consecutive + .. automethod:: unsqueeze + .. automethod:: unsqueeze_ + .. automethod:: values + .. automethod:: var + .. automethod:: view + .. automethod:: view_as + .. automethod:: where + .. automethod:: zero_ + +.. class:: ByteTensor() + + The following methods are unique to :class:`torch.ByteTensor`. + + .. automethod:: all + .. automethod:: any diff --git a/docs/stable/_sources/torch.rst.txt b/docs/stable/_sources/torch.rst.txt new file mode 100644 index 000000000000..ce376788ed7f --- /dev/null +++ b/docs/stable/_sources/torch.rst.txt @@ -0,0 +1,358 @@ +torch +=================================== +.. automodule:: torch + +Tensors +---------------------------------- +.. autofunction:: is_tensor +.. autofunction:: is_storage +.. autofunction:: is_floating_point +.. autofunction:: set_default_dtype +.. autofunction:: get_default_dtype +.. autofunction:: set_default_tensor_type +.. autofunction:: numel +.. autofunction:: set_printoptions +.. autofunction:: set_flush_denormal + +.. _tensor-creation-ops: + +Creation Ops +~~~~~~~~~~~~~~~~~~~~~~ + +.. note:: + Random sampling creation ops are listed under :ref:`random-sampling` and + include: + :func:`torch.rand` + :func:`torch.rand_like` + :func:`torch.randn` + :func:`torch.randn_like` + :func:`torch.randint` + :func:`torch.randint_like` + :func:`torch.randperm` + You may also use :func:`torch.empty` with the :ref:`inplace-random-sampling` + methods to create :class:`torch.Tensor` s with values sampled from a broader + range of distributions. + +.. autofunction:: tensor +.. autofunction:: sparse_coo_tensor +.. autofunction:: as_tensor +.. autofunction:: as_strided +.. autofunction:: from_numpy +.. autofunction:: zeros +.. autofunction:: zeros_like +.. autofunction:: ones +.. autofunction:: ones_like +.. autofunction:: arange +.. autofunction:: range +.. autofunction:: linspace +.. autofunction:: logspace +.. autofunction:: eye +.. autofunction:: empty +.. autofunction:: empty_like +.. autofunction:: full +.. autofunction:: full_like + +Indexing, Slicing, Joining, Mutating Ops +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. autofunction:: cat +.. autofunction:: chunk +.. autofunction:: gather +.. autofunction:: index_select +.. autofunction:: masked_select +.. autofunction:: narrow +.. autofunction:: nonzero +.. autofunction:: reshape +.. autofunction:: split +.. autofunction:: squeeze +.. 
autofunction:: stack
+.. autofunction:: t
+.. autofunction:: take
+.. autofunction:: transpose
+.. autofunction:: unbind
+.. autofunction:: unsqueeze
+.. autofunction:: where
+
+.. _generators:
+
+Generators
+----------------------------------
+.. autoclass:: torch._C.Generator
+    :members:
+
+.. _random-sampling:
+
+Random sampling
+----------------------------------
+.. autofunction:: seed
+.. autofunction:: manual_seed
+.. autofunction:: initial_seed
+.. autofunction:: get_rng_state
+.. autofunction:: set_rng_state
+.. autoattribute:: torch.default_generator
+    :annotation: Returns the default CPU torch.Generator
+.. autoattribute:: torch.cuda.default_generators
+    :annotation: If CUDA is available, returns a tuple of default CUDA torch.Generator objects.
+        The number of generators returned equals the number of GPUs available in the system.
+.. autofunction:: bernoulli
+.. autofunction:: multinomial
+.. autofunction:: normal
+.. autofunction:: rand
+.. autofunction:: rand_like
+.. autofunction:: randint
+.. autofunction:: randint_like
+.. autofunction:: randn
+.. autofunction:: randn_like
+.. autofunction:: randperm
+
+.. _inplace-random-sampling:
+
+In-place random sampling
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are a few more in-place random sampling functions defined on Tensors
+as well. Click through to refer to their documentation; a short example
+follows the list:
+
+- :func:`torch.Tensor.bernoulli_` - in-place version of :func:`torch.bernoulli`
+- :func:`torch.Tensor.cauchy_` - numbers drawn from the Cauchy distribution
+- :func:`torch.Tensor.exponential_` - numbers drawn from the exponential distribution
+- :func:`torch.Tensor.geometric_` - elements drawn from the geometric distribution
+- :func:`torch.Tensor.log_normal_` - samples from the log-normal distribution
+- :func:`torch.Tensor.normal_` - in-place version of :func:`torch.normal`
+- :func:`torch.Tensor.random_` - numbers sampled from the discrete uniform distribution
+- :func:`torch.Tensor.uniform_` - numbers sampled from the continuous uniform distribution
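+
+For example, an uninitialized tensor can be filled in place (a minimal
+sketch; the size and the distribution parameters are arbitrary)::
+
+    t = torch.empty(3)
+    t.uniform_()                  # fill in place with samples from U(0, 1)
+    t.normal_(mean=0.0, std=2.0)  # refill with samples from N(0, 2**2)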
+
+Quasi-random sampling
+~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: torch.quasirandom.SobolEngine
+    :members:
+    :exclude-members: MAXBIT, MAXDIM
+    :undoc-members:
+
+Serialization
+----------------------------------
+.. autofunction:: save
+.. autofunction:: load
+
+
+Parallelism
+----------------------------------
+.. autofunction:: get_num_threads
+.. autofunction:: set_num_threads
+.. autofunction:: get_num_interop_threads
+.. autofunction:: set_num_interop_threads
+
+Locally disabling gradient computation
+--------------------------------------
+The context managers :func:`torch.no_grad`, :func:`torch.enable_grad`, and
+:func:`torch.set_grad_enabled` are helpful for locally disabling and enabling
+gradient computation. See :ref:`locally-disable-grad` for more details on
+their usage. These context managers are thread local, so they won't
+work if you send work to another thread using the :mod:`threading`
+module, etc.
+
+Examples::
+
+    >>> x = torch.zeros(1, requires_grad=True)
+    >>> with torch.no_grad():
+    ...     y = x * 2
+    >>> y.requires_grad
+    False
+
+    >>> is_train = False
+    >>> with torch.set_grad_enabled(is_train):
+    ...     y = x * 2
+    >>> y.requires_grad
+    False
+
+    >>> torch.set_grad_enabled(True)  # this can also be used as a function
+    >>> y = x * 2
+    >>> y.requires_grad
+    True
+
+    >>> torch.set_grad_enabled(False)
+    >>> y = x * 2
+    >>> y.requires_grad
+    False
+
+
+Math operations
+----------------------------------
+
+Pointwise Ops
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: abs
+.. autofunction:: acos
+.. autofunction:: add
+.. autofunction:: addcdiv
+.. autofunction:: addcmul
+.. autofunction:: asin
+.. autofunction:: atan
+.. autofunction:: atan2
+.. autofunction:: ceil
+.. autofunction:: clamp
+.. autofunction:: cos
+.. autofunction:: cosh
+.. autofunction:: div
+.. autofunction:: digamma
+.. autofunction:: erf
+.. autofunction:: erfc
+.. autofunction:: erfinv
+.. autofunction:: exp
+.. autofunction:: expm1
+.. autofunction:: floor
+.. autofunction:: fmod
+.. autofunction:: frac
+.. autofunction:: lerp
+.. autofunction:: log
+.. autofunction:: log10
+.. autofunction:: log1p
+.. autofunction:: log2
+.. autofunction:: mul
+.. autofunction:: mvlgamma
+.. autofunction:: neg
+.. autofunction:: pow
+.. autofunction:: reciprocal
+.. autofunction:: remainder
+.. autofunction:: round
+.. autofunction:: rsqrt
+.. autofunction:: sigmoid
+.. autofunction:: sign
+.. autofunction:: sin
+.. autofunction:: sinh
+.. autofunction:: sqrt
+.. autofunction:: tan
+.. autofunction:: tanh
+.. autofunction:: trunc
+
+
+Reduction Ops
+~~~~~~~~~~~~~~~~~~~~~~
+.. autofunction:: argmax
+.. autofunction:: argmin
+.. autofunction:: cumprod
+.. autofunction:: cumsum
+.. autofunction:: dist
+.. autofunction:: logsumexp
+.. autofunction:: mean
+.. autofunction:: median
+.. autofunction:: mode
+.. autofunction:: norm
+.. autofunction:: prod
+.. autofunction:: std
+.. autofunction:: std_mean
+.. autofunction:: sum
+.. autofunction:: unique
+.. autofunction:: unique_consecutive
+.. autofunction:: var
+.. autofunction:: var_mean
+
+
+Comparison Ops
+~~~~~~~~~~~~~~~~~~~~~~
+.. autofunction:: allclose
+.. autofunction:: argsort
+.. autofunction:: eq
+.. autofunction:: equal
+.. autofunction:: ge
+.. autofunction:: gt
+.. autofunction:: isfinite
+.. autofunction:: isinf
+.. autofunction:: isnan
+.. autofunction:: kthvalue
+.. autofunction:: le
+.. autofunction:: lt
+.. autofunction:: max
+.. autofunction:: min
+.. autofunction:: ne
+.. autofunction:: sort
+.. autofunction:: topk
+
+
+Spectral Ops
+~~~~~~~~~~~~~~~~~~~~~~
+.. autofunction:: fft
+.. autofunction:: ifft
+.. autofunction:: rfft
+.. autofunction:: irfft
+.. autofunction:: stft
+.. autofunction:: bartlett_window
+.. autofunction:: blackman_window
+.. autofunction:: hamming_window
+.. autofunction:: hann_window
+
+
+Other Operations
+~~~~~~~~~~~~~~~~~~~~~~
+.. autofunction:: bincount
+.. autofunction:: broadcast_tensors
+.. autofunction:: cartesian_prod
+.. autofunction:: combinations
+.. autofunction:: cross
+.. autofunction:: diag
+.. autofunction:: diag_embed
+.. autofunction:: diagflat
+.. autofunction:: diagonal
+.. autofunction:: einsum
+.. autofunction:: flatten
+.. autofunction:: flip
+.. autofunction:: rot90
+.. autofunction:: histc
+.. autofunction:: meshgrid
+.. autofunction:: renorm
+.. autofunction:: repeat_interleave
+.. autofunction:: roll
+.. autofunction:: tensordot
+.. autofunction:: trace
+.. autofunction:: tril
+.. autofunction:: tril_indices
+.. autofunction:: triu
+.. autofunction:: triu_indices
+
+
+BLAS and LAPACK Operations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: addbmm
+.. autofunction:: addmm
+.. autofunction:: addmv
+.. autofunction:: addr
+.. autofunction:: baddbmm
+.. autofunction:: bmm
+.. autofunction:: bitwise_not
+.. autofunction:: chain_matmul
+.. autofunction:: cholesky
+.. autofunction:: cholesky_inverse
+.. autofunction:: cholesky_solve
+.. autofunction:: dot
+.. autofunction:: eig
+.. autofunction:: gels
+.. autofunction:: geqrf
+.. autofunction:: ger
+.. autofunction:: inverse
+.. autofunction:: det
+.. autofunction:: logdet
+.. autofunction:: slogdet
+.. autofunction:: lstsq
+.. autofunction:: lu
+.. autofunction:: lu_solve
+.. autofunction:: lu_unpack
+.. autofunction:: matmul
+.. autofunction:: matrix_power
+.. autofunction:: matrix_rank
+.. autofunction:: mm
+.. autofunction:: mv
+.. autofunction:: orgqr
+.. autofunction:: ormqr
+.. autofunction:: pinverse
+.. autofunction:: qr
+.. autofunction:: solve
+.. autofunction:: svd
+.. autofunction:: symeig
+.. autofunction:: trapz
+.. autofunction:: triangular_solve
+
+Utilities
+----------------------------------
+.. autofunction:: compiled_with_cxx11_abi
diff --git a/docs/stable/_sources/torchvision/datasets.rst.txt b/docs/stable/_sources/torchvision/datasets.rst.txt
new file mode 100644
index 000000000000..040962edc6ae
--- /dev/null
+++ b/docs/stable/_sources/torchvision/datasets.rst.txt
@@ -0,0 +1,226 @@
+torchvision.datasets
+====================
+
+All datasets are subclasses of :class:`torch.utils.data.Dataset`,
+i.e., they have ``__getitem__`` and ``__len__`` methods implemented.
+Hence, they can all be passed to a :class:`torch.utils.data.DataLoader`,
+which can load multiple samples in parallel using ``torch.multiprocessing``
+workers. For example: ::
+
+    imagenet_data = torchvision.datasets.ImageNet('path/to/imagenet_root/')
+    data_loader = torch.utils.data.DataLoader(imagenet_data,
+                                              batch_size=4,
+                                              shuffle=True,
+                                              num_workers=args.nThreads)
+
+The following datasets are available:
+
+.. contents:: Datasets
+    :local:
+
+All the datasets have almost identical APIs. They all have two common
+arguments: ``transform`` and ``target_transform``, to transform the input
+and the target respectively.
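+
+For instance, a transform can be attached when a dataset is constructed.
+A minimal sketch (``path/to/mnist_root/`` is a placeholder, and
+``download=True`` fetches the data if it is not already present): ::
+
+    from torchvision import datasets, transforms
+
+    mnist_data = datasets.MNIST('path/to/mnist_root/',
+                                train=True,
+                                download=True,
+                                transform=transforms.ToTensor())
+    image, label = mnist_data[0]  # image is already a torch.Tensor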
+
+
+.. currentmodule:: torchvision.datasets
+
+
+MNIST
+~~~~~
+
+.. autoclass:: MNIST
+
+Fashion-MNIST
+~~~~~~~~~~~~~
+
+.. autoclass:: FashionMNIST
+
+KMNIST
+~~~~~~~~~~~~~
+
+.. autoclass:: KMNIST
+
+EMNIST
+~~~~~~
+
+.. autoclass:: EMNIST
+
+QMNIST
+~~~~~~
+
+.. autoclass:: QMNIST
+
+FakeData
+~~~~~~~~
+
+.. autoclass:: FakeData
+
+COCO
+~~~~
+
+.. note ::
+    These require the `COCO API to be installed`_
+
+.. _COCO API to be installed: https://github.com/pdollar/coco/tree/master/PythonAPI
+
+
+Captions
+^^^^^^^^
+
+.. autoclass:: CocoCaptions
+    :members: __getitem__
+    :special-members:
+
+
+Detection
+^^^^^^^^^
+
+.. autoclass:: CocoDetection
+    :members: __getitem__
+    :special-members:
+
+LSUN
+~~~~
+
+.. autoclass:: LSUN
+    :members: __getitem__
+    :special-members:
+
+ImageFolder
+~~~~~~~~~~~
+
+.. autoclass:: ImageFolder
+    :members: __getitem__
+    :special-members:
+
+DatasetFolder
+~~~~~~~~~~~~~
+
+.. autoclass:: DatasetFolder
+    :members: __getitem__
+    :special-members:
+
+
+
+ImageNet
+~~~~~~~~~~~
+
+.. autoclass:: ImageNet
+
+.. note ::
+    This requires `scipy` to be installed
+
+
+CIFAR
+~~~~~
+
+.. autoclass:: CIFAR10
+    :members: __getitem__
+    :special-members:
+
+.. autoclass:: CIFAR100
+
+STL10
+~~~~~
+
+
+.. autoclass:: STL10
+    :members: __getitem__
+    :special-members:
+
+SVHN
+~~~~~
+
+
+.. autoclass:: SVHN
+    :members: __getitem__
+    :special-members:
+
+PhotoTour
+~~~~~~~~~
+
+
+.. autoclass:: PhotoTour
+    :members: __getitem__
+    :special-members:
+
+SBU
+~~~
+
+
+.. autoclass:: SBU
+    :members: __getitem__
+    :special-members:
+
+Flickr
+~~~~~~
+
+
+.. autoclass:: Flickr8k
+    :members: __getitem__
+    :special-members:
+
+.. autoclass:: Flickr30k
+    :members: __getitem__
+    :special-members:
+
+VOC
+~~~~~~
+
+
+.. autoclass:: VOCSegmentation
+    :members: __getitem__
+    :special-members:
+
+.. autoclass:: VOCDetection
+    :members: __getitem__
+    :special-members:
+
+Cityscapes
+~~~~~~~~~~
+
+.. note ::
+    Requires the Cityscapes dataset to be downloaded.
+
+.. autoclass:: Cityscapes
+    :members: __getitem__
+    :special-members:
+
+SBD
+~~~~~~
+
+
+.. autoclass:: SBDataset
+    :members: __getitem__
+    :special-members:
+
+USPS
+~~~~~
+
+.. autoclass:: USPS
+    :members: __getitem__
+    :special-members:
+
+
+Kinetics-400
+~~~~~~~~~~~~
+
+.. autoclass:: Kinetics400
+    :members: __getitem__
+    :special-members:
+
+
+HMDB51
+~~~~~~~
+
+.. autoclass:: HMDB51
+    :members: __getitem__
+    :special-members:
+
+
+UCF101
+~~~~~~~
+
+.. autoclass:: UCF101
+    :members: __getitem__
+    :special-members:
diff --git a/docs/stable/_sources/torchvision/index.rst.txt b/docs/stable/_sources/torchvision/index.rst.txt
new file mode 100644
index 000000000000..f8f89f92629b
--- /dev/null
+++ b/docs/stable/_sources/torchvision/index.rst.txt
@@ -0,0 +1,17 @@
+torchvision
+===========
+
+The :mod:`torchvision` package consists of popular datasets, model
+architectures, and common image transformations for computer vision.
+
+.. toctree::
+    :maxdepth: 2
+    :caption: Package Reference
+
+    datasets
+    models
+    transforms
+    utils
+
+.. automodule:: torchvision
+    :members:
diff --git a/docs/stable/_sources/torchvision/models.rst.txt b/docs/stable/_sources/torchvision/models.rst.txt
new file mode 100644
index 000000000000..dda7adf6aaaf
--- /dev/null
+++ b/docs/stable/_sources/torchvision/models.rst.txt
@@ -0,0 +1,397 @@
+torchvision.models
+##################
+
+
+The models subpackage contains definitions of models for addressing
+different tasks, including: image classification, pixelwise semantic
+segmentation, object detection, instance segmentation and person
+keypoint detection.
+
+
+Classification
+==============
+
+The models subpackage contains definitions for the following model
+architectures for image classification:
+
+- `AlexNet`_
+- `VGG`_
+- `ResNet`_
+- `SqueezeNet`_
+- `DenseNet`_
+- `Inception`_ v3
+- `GoogLeNet`_
+- `ShuffleNet`_ v2
+- `MobileNet`_ v2
+- `ResNeXt`_
+- `Wide ResNet`_
+- `MNASNet`_
+
+You can construct a model with random weights by calling its constructor:
+
+.. code:: python
+
+    import torchvision.models as models
+    resnet18 = models.resnet18()
+    alexnet = models.alexnet()
+    vgg16 = models.vgg16()
+    squeezenet = models.squeezenet1_0()
+    densenet = models.densenet161()
+    inception = models.inception_v3()
+    googlenet = models.googlenet()
+    shufflenet = models.shufflenet_v2_x1_0()
+    mobilenet = models.mobilenet_v2()
+    resnext50_32x4d = models.resnext50_32x4d()
+    wide_resnet50_2 = models.wide_resnet50_2()
+    mnasnet = models.mnasnet1_0()
+
+We provide pre-trained models, using the PyTorch :mod:`torch.utils.model_zoo`.
+These can be constructed by passing ``pretrained=True``:
+
+.. code:: python
+
+    import torchvision.models as models
+    resnet18 = models.resnet18(pretrained=True)
+    alexnet = models.alexnet(pretrained=True)
+    squeezenet = models.squeezenet1_0(pretrained=True)
+    vgg16 = models.vgg16(pretrained=True)
+    densenet = models.densenet161(pretrained=True)
+    inception = models.inception_v3(pretrained=True)
+    googlenet = models.googlenet(pretrained=True)
+    shufflenet = models.shufflenet_v2_x1_0(pretrained=True)
+    mobilenet = models.mobilenet_v2(pretrained=True)
+    resnext50_32x4d = models.resnext50_32x4d(pretrained=True)
+    wide_resnet50_2 = models.wide_resnet50_2(pretrained=True)
+    mnasnet = models.mnasnet1_0(pretrained=True)
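+
+A pre-trained model can then be used directly for inference. A minimal
+sketch (random data stands in for a properly normalized image batch;
+evaluation mode and input normalization are discussed below):
+
+.. code:: python
+
+    import torch
+    import torchvision.models as models
+
+    model = models.resnet18(pretrained=True)
+    model.eval()  # switch layers such as batch norm to evaluation behavior
+    with torch.no_grad():
+        batch = torch.rand(1, 3, 224, 224)  # placeholder for a normalized batch
+        scores = model(batch)               # (1, 1000) class scores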
+
+Instantiating a pre-trained model will download its weights to a cache directory.
+This directory can be set using the `TORCH_MODEL_ZOO` environment variable. See
+:func:`torch.utils.model_zoo.load_url` for details.
+
+Some models use modules which have different training and evaluation
+behavior, such as batch normalization. To switch between these modes, use
+``model.train()`` or ``model.eval()`` as appropriate. See
+:meth:`~torch.nn.Module.train` or :meth:`~torch.nn.Module.eval` for details.
+
+All pre-trained models expect input images normalized in the same way,
+i.e. mini-batches of 3-channel RGB images of shape (3 x H x W),
+where H and W are expected to be at least 224.
+The images have to be loaded into a range of [0, 1] and then normalized
+using ``mean = [0.485, 0.456, 0.406]`` and ``std = [0.229, 0.224, 0.225]``.
+You can use the following transform to normalize::
+
+    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
+                                     std=[0.229, 0.224, 0.225])
+
+An example of such normalization can be found in the imagenet example
+`here `_
+
+ImageNet 1-crop error rates (224x224)
+
+================================ ============= =============
+Network                          Top-1 error   Top-5 error
+================================ ============= =============
+AlexNet                          43.45         20.91
+VGG-11                           30.98         11.37
+VGG-13                           30.07         10.75
+VGG-16                           28.41         9.62
+VGG-19                           27.62         9.12
+VGG-11 with batch normalization  29.62         10.19
+VGG-13 with batch normalization  28.45         9.63
+VGG-16 with batch normalization  26.63         8.50
+VGG-19 with batch normalization  25.76         8.15
+ResNet-18                        30.24         10.92
+ResNet-34                        26.70         8.58
+ResNet-50                        23.85         7.13
+ResNet-101                       22.63         6.44
+ResNet-152                       21.69         5.94
+SqueezeNet 1.0                   41.90         19.58
+SqueezeNet 1.1                   41.81         19.38
+Densenet-121                     25.35         7.83
+Densenet-169                     24.00         7.00
+Densenet-201                     22.80         6.43
+Densenet-161                     22.35         6.20
+Inception v3                     22.55         6.44
+GoogLeNet                        30.22         10.47
+ShuffleNet V2                    30.64         11.68
+MobileNet V2                     28.12         9.71
+ResNeXt-50-32x4d                 22.38         6.30
+ResNeXt-101-32x8d                20.69         5.47
+Wide ResNet-50-2                 21.49         5.91
+Wide ResNet-101-2                21.16         5.72
+MNASNet 1.0                      26.49         8.456
+================================ ============= =============
+
+
+.. _AlexNet: https://arxiv.org/abs/1404.5997
+.. _VGG: https://arxiv.org/abs/1409.1556
+.. _ResNet: https://arxiv.org/abs/1512.03385
+.. _SqueezeNet: https://arxiv.org/abs/1602.07360
+.. _DenseNet: https://arxiv.org/abs/1608.06993
+.. _Inception: https://arxiv.org/abs/1512.00567
+.. _GoogLeNet: https://arxiv.org/abs/1409.4842
+.. _ShuffleNet: https://arxiv.org/abs/1807.11164
+.. _MobileNet: https://arxiv.org/abs/1801.04381
+.. _ResNeXt: https://arxiv.org/abs/1611.05431
+.. _MNASNet: https://arxiv.org/abs/1807.11626
+
+.. currentmodule:: torchvision.models
+
+Alexnet
+-------
+
+.. autofunction:: alexnet
+
+VGG
+---
+
+.. autofunction:: vgg11
+.. autofunction:: vgg11_bn
+.. autofunction:: vgg13
+.. autofunction:: vgg13_bn
+.. autofunction:: vgg16
+.. autofunction:: vgg16_bn
+.. autofunction:: vgg19
+.. autofunction:: vgg19_bn
+
+
+ResNet
+------
+
+.. autofunction:: resnet18
+.. autofunction:: resnet34
+.. autofunction:: resnet50
+.. autofunction:: resnet101
+.. autofunction:: resnet152
+
+SqueezeNet
+----------
+
+.. autofunction:: squeezenet1_0
+.. autofunction:: squeezenet1_1
+
+DenseNet
+---------
+
+.. autofunction:: densenet121
+.. autofunction:: densenet169
+.. autofunction:: densenet161
+.. autofunction:: densenet201
+
+Inception v3
+------------
+
+.. autofunction:: inception_v3
+
+GoogLeNet
+------------
+
+.. autofunction:: googlenet
+
+ShuffleNet v2
+-------------
+
+.. autofunction:: shufflenet_v2_x0_5
+.. autofunction:: shufflenet_v2_x1_0
+.. autofunction:: shufflenet_v2_x1_5
+.. autofunction:: shufflenet_v2_x2_0
+
+MobileNet v2
+-------------
+
+.. autofunction:: mobilenet_v2
+
+ResNext
+-------
+
+.. autofunction:: resnext50_32x4d
+.. autofunction:: resnext101_32x8d
+
+Wide ResNet
+-----------
+
+.. autofunction:: wide_resnet50_2
+.. autofunction:: wide_resnet101_2
+
+MNASNet
+--------
+
+.. autofunction:: mnasnet0_5
+.. autofunction:: mnasnet0_75
+.. autofunction:: mnasnet1_0
+.. autofunction:: mnasnet1_3
+
+
+Semantic Segmentation
+=====================
+
+The models subpackage contains definitions for the following model
+architectures for semantic segmentation:
+
+- `FCN ResNet101 `_
+- `DeepLabV3 ResNet101 `_
+
+As with image classification models, all pre-trained models expect input images normalized in the same way.
+The images have to be loaded into a range of ``[0, 1]`` and then normalized using
+``mean = [0.485, 0.456, 0.406]`` and ``std = [0.229, 0.224, 0.225]``.
+They have been trained on images resized such that their minimum size is 520.
+
+The pre-trained models have been trained on a subset of COCO train2017, on the 20 categories that are
+present in the Pascal VOC dataset. You can see more information on how the subset has been selected in
+``references/segmentation/coco_utils.py``. The classes that the pre-trained model outputs are the following,
+in order:
+
+  .. code-block:: python
+
+      ['__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
+       'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
+       'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
+
+The accuracies of the pre-trained models evaluated on COCO val2017 are as follows:
+
+================================ ============= ====================
+Network                          mean IoU      global pixelwise acc
+================================ ============= ====================
+FCN ResNet101                    63.7          91.9
+DeepLabV3 ResNet101              67.4          92.4
+================================ ============= ====================
+
+
+Fully Convolutional Networks
+----------------------------
+
+.. autofunction:: torchvision.models.segmentation.fcn_resnet50
+.. autofunction:: torchvision.models.segmentation.fcn_resnet101
+
+
+DeepLabV3
+---------
+
+.. autofunction:: torchvision.models.segmentation.deeplabv3_resnet50
+.. autofunction:: torchvision.models.segmentation.deeplabv3_resnet101
+
+
+Object Detection, Instance Segmentation and Person Keypoint Detection
+=====================================================================
+
+The models subpackage contains definitions for the following model
+architectures for detection:
+
+- `Faster R-CNN ResNet-50 FPN `_
+- `Mask R-CNN ResNet-50 FPN `_
+
+The pre-trained models for detection, instance segmentation and
+keypoint detection are initialized with the classification models
+in torchvision.
+
+The models expect a list of ``Tensor[C, H, W]``, in the range ``0-1``.
+The models internally resize the images so that they have a minimum size
+of ``800``. This option can be changed by passing the option ``min_size``
+to the constructor of the models.
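+
+For example, a detection model can be run on a list of dummy images. A
+minimal sketch (random tensors stand in for real images; in evaluation
+mode the model returns one dict per image, with ``boxes``, ``labels`` and
+``scores`` entries):
+
+.. code:: python
+
+    import torch
+    import torchvision
+
+    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
+    model.eval()
+    # two images of different sizes; real images follow the same path
+    images = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
+    predictions = model(images)  # list of dicts: boxes, labels, scores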
+
+
+For object detection and instance segmentation, the pre-trained
+models return the predictions of the following classes:
+
+  .. code-block:: python
+
+      COCO_INSTANCE_CATEGORY_NAMES = [
+          '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
+          'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
+          'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+          'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
+          'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
+          'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
+          'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
+          'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
+          'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
+          'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
+          'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
+          'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
+      ]
+
+
+Here is a summary of the accuracies for the models trained on
+the instances set of COCO train2017 and evaluated on COCO val2017:
+
+================================ ======= ======== ===========
+Network                          box AP  mask AP  keypoint AP
+================================ ======= ======== ===========
+Faster R-CNN ResNet-50 FPN       37.0    -        -
+Mask R-CNN ResNet-50 FPN         37.9    34.6     -
+================================ ======= ======== ===========
+
+For person keypoint detection, the accuracies for the pre-trained
+models are as follows:
+
+================================ ======= ======== ===========
+Network                          box AP  mask AP  keypoint AP
+================================ ======= ======== ===========
+Keypoint R-CNN ResNet-50 FPN     54.6    -        65.0
+================================ ======= ======== ===========
+
+For person keypoint detection, the pre-trained model returns the
+keypoints in the following order:
+
+  .. code-block:: python
+
+      COCO_PERSON_KEYPOINT_NAMES = [
+          'nose',
+          'left_eye',
+          'right_eye',
+          'left_ear',
+          'right_ear',
+          'left_shoulder',
+          'right_shoulder',
+          'left_elbow',
+          'right_elbow',
+          'left_wrist',
+          'right_wrist',
+          'left_hip',
+          'right_hip',
+          'left_knee',
+          'right_knee',
+          'left_ankle',
+          'right_ankle'
+      ]
+
+Runtime characteristics
+-----------------------
+
+The implementations of the models for object detection, instance segmentation
+and keypoint detection are efficient.
+
+The results in the following table were obtained on 8 V100 GPUs, with
+CUDA 10.0 and CUDNN 7.4. During training we use a batch size of 2 per GPU;
+during testing a batch size of 1 is used.
+
+For test time, we report the time for the model evaluation and postprocessing
+(including mask pasting in the image), but not the time for computing the
+precision-recall.
+
+============================== =================== ================== ===========
+Network                        train time (s / it) test time (s / it) memory (GB)
+============================== =================== ================== ===========
+Faster R-CNN ResNet-50 FPN     0.2288              0.0590             5.2
+Mask R-CNN ResNet-50 FPN       0.2728              0.0903             5.4
+Keypoint R-CNN ResNet-50 FPN   0.3789              0.1242             6.8
+============================== =================== ================== ===========
+
+
+Faster R-CNN
+------------
+
+.. autofunction:: torchvision.models.detection.fasterrcnn_resnet50_fpn
+
+
+Mask R-CNN
+----------
+
+.. autofunction:: torchvision.models.detection.maskrcnn_resnet50_fpn
+
+
+Keypoint R-CNN
+--------------
+
+.. autofunction:: torchvision.models.detection.keypointrcnn_resnet50_fpn
+
diff --git a/docs/stable/_sources/torchvision/transforms.rst.txt b/docs/stable/_sources/torchvision/transforms.rst.txt
new file mode 100644
index 000000000000..1ab8289d836d
--- /dev/null
+++ b/docs/stable/_sources/torchvision/transforms.rst.txt
@@ -0,0 +1,108 @@
+torchvision.transforms
+======================
+
+.. currentmodule:: torchvision.transforms
+
+Transforms are common image transformations. They can be chained together using :class:`Compose`.
+Additionally, there is the :mod:`torchvision.transforms.functional` module.
+Functional transforms give fine-grained control over the transformations.
+This is useful if you have to build a more complex transformation pipeline
+(e.g. in the case of segmentation tasks).
+
+.. autoclass:: Compose
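+
+For example, a typical preprocessing pipeline can be written as a single
+composed transform. A minimal sketch (the crop size and the normalization
+statistics are the ImageNet values used elsewhere in these docs):
+
+.. code:: python
+
+    from torchvision import transforms
+
+    preprocess = transforms.Compose([
+        transforms.Resize(256),
+        transforms.CenterCrop(224),
+        transforms.ToTensor(),
+        transforms.Normalize(mean=[0.485, 0.456, 0.406],
+                             std=[0.229, 0.224, 0.225]),
+    ])
+    # tensor_image = preprocess(pil_image) for any PIL image
+
+The composed object is itself a callable, so it can be passed directly as
+the ``transform`` argument of any dataset.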
+
+Transforms on PIL Image
+-----------------------
+
+.. autoclass:: CenterCrop
+
+.. autoclass:: ColorJitter
+
+.. autoclass:: FiveCrop
+
+.. autoclass:: Grayscale
+
+.. autoclass:: Pad
+
+.. autoclass:: RandomAffine
+
+.. autoclass:: RandomApply
+
+.. autoclass:: RandomChoice
+
+.. autoclass:: RandomCrop
+
+.. autoclass:: RandomGrayscale
+
+.. autoclass:: RandomHorizontalFlip
+
+.. autoclass:: RandomOrder
+
+.. autoclass:: RandomPerspective
+
+.. autoclass:: RandomResizedCrop
+
+.. autoclass:: RandomRotation
+
+.. autoclass:: RandomSizedCrop
+
+.. autoclass:: RandomVerticalFlip
+
+.. autoclass:: Resize
+
+.. autoclass:: Scale
+
+.. autoclass:: TenCrop
+
+Transforms on torch.\*Tensor
+----------------------------
+
+.. autoclass:: LinearTransformation
+
+.. autoclass:: Normalize
+    :members: __call__
+    :special-members:
+
+.. autoclass:: RandomErasing
+
+Conversion Transforms
+---------------------
+
+.. autoclass:: ToPILImage
+    :members: __call__
+    :special-members:
+
+.. autoclass:: ToTensor
+    :members: __call__
+    :special-members:
+
+Generic Transforms
+------------------
+
+.. autoclass:: Lambda
+
+
+Functional Transforms
+---------------------
+
+Functional transforms give you fine-grained control of the transformation pipeline.
+As opposed to the transformations above, functional transforms don't contain a random number
+generator for their parameters.
+That means you have to specify/generate all parameters, but you can reuse the functional transform.
+For example, you can apply a functional transform to multiple images like this:
+
+.. code:: python
+
+    import torchvision.transforms.functional as TF
+    import random
+
+    def my_segmentation_transforms(image, segmentation):
+        if random.random() > 0.5:
+            angle = random.randint(-30, 30)
+            image = TF.rotate(image, angle)
+            segmentation = TF.rotate(segmentation, angle)
+        # more transforms ...
+        return image, segmentation
+
+.. automodule:: torchvision.transforms.functional
+    :members:
diff --git a/docs/stable/_sources/torchvision/utils.rst.txt b/docs/stable/_sources/torchvision/utils.rst.txt
new file mode 100644
index 000000000000..ad2fc91c8974
--- /dev/null
+++ b/docs/stable/_sources/torchvision/utils.rst.txt
@@ -0,0 +1,9 @@
+torchvision.utils
+=================
+
+.. currentmodule:: torchvision.utils
+
+.. autofunction:: make_grid
+
+.. autofunction:: save_image
+
diff --git a/docs/stable/_sources/type_info.rst.txt b/docs/stable/_sources/type_info.rst.txt
new file mode 100644
index 000000000000..24effe8cc4c0
--- /dev/null
+++ b/docs/stable/_sources/type_info.rst.txt
@@ -0,0 +1,55 @@
+.. currentmodule:: torch
+
+.. _type-info-doc:
+
+Type Info
+=========
+
+The numerical properties of a :class:`torch.dtype` can be accessed through
+either :class:`torch.finfo` or :class:`torch.iinfo`.
+
+.. _finfo-doc:
+
+torch.finfo
+-----------
+
+.. class:: torch.finfo
+
+A :class:`torch.finfo` is an object that represents the numerical properties of a floating point
+:class:`torch.dtype` (i.e. ``torch.float32``, ``torch.float64``, and ``torch.float16``). This is similar to `numpy.finfo `_.
+
+A :class:`torch.finfo` provides the following attributes:
+
+========= ===== ========================================
+Name      Type  Description
+========= ===== ========================================
+bits      int   The number of bits occupied by the type.
+eps       float The smallest representable number such that ``1.0 + eps != 1.0``.
+max       float The largest representable number.
+min       float The smallest representable number (typically ``-max``).
+tiny      float The smallest positive representable number.
+========= ===== ========================================
+
+.. note::
+    The constructor of :class:`torch.finfo` can be called without an argument,
+    in which case the class is created for the PyTorch default dtype (as
+    returned by :func:`torch.get_default_dtype`).
+
+
+.. _iinfo-doc:
+
+torch.iinfo
+------------
+
+.. class:: torch.iinfo
+
+
+A :class:`torch.iinfo` is an object that represents the numerical properties of an integer
+:class:`torch.dtype` (i.e. ``torch.uint8``, ``torch.int8``, ``torch.int16``, ``torch.int32``, and ``torch.int64``). This is similar to `numpy.iinfo `_.
+
+A :class:`torch.iinfo` provides the following attributes:
+
+========= ===== ========================================
+Name      Type  Description
+========= ===== ========================================
+bits      int   The number of bits occupied by the type.
+max       int   The largest representable number.
+min       int   The smallest representable number.
+========= ===== ========================================
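+
+For example, the attributes can be queried directly (a minimal sketch;
+the values shown are those of the queried dtypes)::
+
+    >>> torch.finfo(torch.float32).eps
+    1.1920928955078125e-07
+    >>> torch.iinfo(torch.int32).max
+    2147483647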
diff --git a/docs/stable/_static/basic.css b/docs/stable/_static/basic.css
new file mode 100644
index 000000000000..c41d718e429a
--- /dev/null
+++ b/docs/stable/_static/basic.css
@@ -0,0 +1,763 @@
+/*
+ * basic.css
+ * ~~~~~~~~~
+ *
+ * Sphinx stylesheet -- basic theme.
+ *
+ * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
+ * :license: BSD, see LICENSE for details.
+ * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li div.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: 450px; + max-width: 800px; +} + +div.body p, 
div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +a.brackets:before, +span.brackets > a:before{ + content: "["; +} + +a.brackets:after, +span.brackets > a:after { + content: "]"; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px 7px 0 7px; + background-color: #ffe; + width: 40%; + float: right; +} + +p.sidebar-title { + font-weight: bold; +} + +/* -- topics ---------------------------------------------------------------- */ + +div.topic { + border: 1px solid #ccc; + padding: 7px 7px 0 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +div.admonition dl { + margin-bottom: 0; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +table.footnote td, table.footnote th { + border: 0 !important; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > p:first-child, +td > p:first-child { + margin-top: 0px; +} + +th > p:last-child, +td > p:last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption { + padding: 0.3em; +} + +div.figure 
p.caption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist td { + vertical-align: top; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +li > p:first-child { + margin-top: 0px; +} + +li > p:last-child { + margin-bottom: 0px; +} + +dl.footnote > dt, +dl.citation > dt { + float: left; +} + +dl.footnote > dd, +dl.citation > dd { + margin-bottom: 0em; +} + +dl.footnote > dd:after, +dl.citation > dd:after { + content: ""; + clear: both; +} + +dl.field-list { + display: flex; + flex-wrap: wrap; +} + +dl.field-list > dt { + flex-basis: 20%; + font-weight: bold; + word-break: break-word; +} + +dl.field-list > dt:after { + content: ":"; +} + +dl.field-list > dd { + flex-basis: 70%; + padding-left: 1em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > p:first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0.5em; + content: ":"; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; +} + +td.linenos pre { + padding: 5px 0px; + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + margin-left: 0.5em; +} + +table.highlighttable td { + padding: 0 0.5em 0 0.5em; +} + +div.code-block-caption { + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +div.code-block-caption + div > div.highlight > pre { + margin-top: 0; +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + padding: 1em 
1em 0; +} + +div.literal-block-wrapper div.highlight { + margin: 0; +} + +code.descname { + background-color: transparent; + font-weight: bold; + font-size: 1.2em; +} + +code.descclassname { + background-color: transparent; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: relative; + left: 0px; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/docs/stable/_static/css/theme.css b/docs/stable/_static/css/theme.css new file mode 100644 index 000000000000..fe2862f7ba6c --- /dev/null +++ b/docs/stable/_static/css/theme.css @@ -0,0 +1,8 @@ +/*! + * Bootstrap v4.0.0 (https://getbootstrap.com) + * Copyright 2011-2018 The Bootstrap Authors + * Copyright 2011-2018 Twitter, Inc. + * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) + */:root{--blue: #007bff;--indigo: #6610f2;--purple: #6f42c1;--pink: #e83e8c;--red: #dc3545;--orange: #fd7e14;--yellow: #ffc107;--green: #28a745;--teal: #20c997;--cyan: #17a2b8;--white: #fff;--gray: #6c757d;--gray-dark: #343a40;--primary: #007bff;--secondary: #6c757d;--success: #28a745;--info: #17a2b8;--warning: #ffc107;--danger: #dc3545;--light: #f8f9fa;--dark: #343a40;--breakpoint-xs: 0;--breakpoint-sm: 576px;--breakpoint-md: 768px;--breakpoint-lg: 992px;--breakpoint-xl: 1200px;--font-family-sans-serif: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol";--font-family-monospace: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace}*,*::before,*::after{-webkit-box-sizing:border-box;box-sizing:border-box}html{font-family:sans-serif;line-height:1.15;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%;-ms-overflow-style:scrollbar;-webkit-tap-highlight-color:transparent}@-ms-viewport{width:device-width}article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}body{margin:0;font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol";font-size:1rem;font-weight:400;line-height:1.5;color:#212529;text-align:left;background-color:#fff}[tabindex="-1"]:focus{outline:0 !important}hr{-webkit-box-sizing:content-box;box-sizing:content-box;height:0;overflow:visible}h1,h2,h3,h4,h5,h6{margin-top:0;margin-bottom:.5rem}p{margin-top:0;margin-bottom:1rem}abbr[title],abbr[data-original-title]{text-decoration:underline;-webkit-text-decoration:underline dotted;text-decoration:underline 
dotted;cursor:help;border-bottom:0}address{margin-bottom:1rem;font-style:normal;line-height:inherit}ol,ul,dl{margin-top:0;margin-bottom:1rem}ol ol,ul ul,ol ul,ul ol{margin-bottom:0}dt{font-weight:700}dd{margin-bottom:.5rem;margin-left:0}blockquote{margin:0 0 1rem}dfn{font-style:italic}b,strong{font-weight:bolder}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}a{color:#007bff;text-decoration:none;background-color:transparent;-webkit-text-decoration-skip:objects}a:hover{color:#0056b3;text-decoration:underline}a:not([href]):not([tabindex]){color:inherit;text-decoration:none}a:not([href]):not([tabindex]):hover,a:not([href]):not([tabindex]):focus{color:inherit;text-decoration:none}a:not([href]):not([tabindex]):focus{outline:0}pre,code,kbd,samp{font-family:monospace, monospace;font-size:1em}pre{margin-top:0;margin-bottom:1rem;overflow:auto;-ms-overflow-style:scrollbar}figure{margin:0 0 1rem}img{vertical-align:middle;border-style:none}svg:not(:root){overflow:hidden}table{border-collapse:collapse}caption{padding-top:.75rem;padding-bottom:.75rem;color:#6c757d;text-align:left;caption-side:bottom}th{text-align:inherit}label{display:inline-block;margin-bottom:.5rem}button{border-radius:0}button:focus{outline:1px dotted;outline:5px auto -webkit-focus-ring-color}input,button,select,optgroup,textarea{margin:0;font-family:inherit;font-size:inherit;line-height:inherit}button,input{overflow:visible}button,select{text-transform:none}button,html [type="button"],[type="reset"],[type="submit"]{-webkit-appearance:button}button::-moz-focus-inner,[type="button"]::-moz-focus-inner,[type="reset"]::-moz-focus-inner,[type="submit"]::-moz-focus-inner{padding:0;border-style:none}input[type="radio"],input[type="checkbox"]{-webkit-box-sizing:border-box;box-sizing:border-box;padding:0}input[type="date"],input[type="time"],input[type="datetime-local"],input[type="month"]{-webkit-appearance:listbox}textarea{overflow:auto;resize:vertical}fieldset{min-width:0;padding:0;margin:0;border:0}legend{display:block;width:100%;max-width:100%;padding:0;margin-bottom:.5rem;font-size:1.5rem;line-height:inherit;color:inherit;white-space:normal}progress{vertical-align:baseline}[type="number"]::-webkit-inner-spin-button,[type="number"]::-webkit-outer-spin-button{height:auto}[type="search"]{outline-offset:-2px;-webkit-appearance:none}[type="search"]::-webkit-search-cancel-button,[type="search"]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{font:inherit;-webkit-appearance:button}output{display:inline-block}summary{display:list-item;cursor:pointer}template{display:none}[hidden]{display:none !important}h1,h2,h3,h4,h5,h6,.h1,.h2,.h3,.h4,.h5,.h6{margin-bottom:.5rem;font-family:inherit;font-weight:500;line-height:1.2;color:inherit}h1,.h1{font-size:2.5rem}h2,.h2{font-size:2rem}h3,.h3{font-size:1.75rem}h4,.h4{font-size:1.5rem}h5,.h5{font-size:1.25rem}h6,.h6{font-size:1rem}.lead{font-size:1.25rem;font-weight:300}.display-1{font-size:6rem;font-weight:300;line-height:1.2}.display-2{font-size:5.5rem;font-weight:300;line-height:1.2}.display-3{font-size:4.5rem;font-weight:300;line-height:1.2}.display-4{font-size:3.5rem;font-weight:300;line-height:1.2}hr{margin-top:1rem;margin-bottom:1rem;border:0;border-top:1px solid 
rgba(0,0,0,0.1)}small,.small{font-size:80%;font-weight:400}mark,.mark{padding:.2em;background-color:#fcf8e3}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;list-style:none}.list-inline-item{display:inline-block}.list-inline-item:not(:last-child){margin-right:.5rem}.initialism{font-size:90%;text-transform:uppercase}.blockquote{margin-bottom:1rem;font-size:1.25rem}.blockquote-footer{display:block;font-size:80%;color:#6c757d}.blockquote-footer::before{content:"\2014 \00A0"}.img-fluid{max-width:100%;height:auto}.img-thumbnail{padding:.25rem;background-color:#fff;border:1px solid #dee2e6;border-radius:.25rem;max-width:100%;height:auto}.figure{display:inline-block}.figure-img{margin-bottom:.5rem;line-height:1}.figure-caption{font-size:90%;color:#6c757d}code,kbd,pre,samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace}code{font-size:87.5%;color:#e83e8c;word-break:break-word}a>code{color:inherit}kbd{padding:.2rem .4rem;font-size:87.5%;color:#fff;background-color:#212529;border-radius:.2rem}kbd kbd{padding:0;font-size:100%;font-weight:700}pre{display:block;font-size:87.5%;color:#212529}pre code{font-size:inherit;color:inherit;word-break:normal}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{width:100%;padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}@media (min-width: 576px){.container{max-width:540px}}@media (min-width: 768px){.container{max-width:720px}}@media (min-width: 992px){.container{max-width:960px}}@media (min-width: 1200px){.container{max-width:1140px}}.container-fluid{width:100%;padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}.row{display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;margin-right:-15px;margin-left:-15px}.no-gutters{margin-right:0;margin-left:0}.no-gutters>.col,.no-gutters>[class*="col-"]{padding-right:0;padding-left:0}.col-1,.col-2,.col-3,.col-4,.col-5,.col-6,.col-7,.col-8,.col-9,.col-10,.col-11,.col-12,.col,.col-auto,.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm,.col-sm-auto,.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,.col-md-12,.col-md,.col-md-auto,.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg,.col-lg-auto,.col-xl-1,.col-xl-2,.col-xl-3,.col-xl-4,.col-xl-5,.col-xl-6,.col-xl-7,.col-xl-8,.col-xl-9,.col-xl-10,.col-xl-11,.col-xl-12,.col-xl,.col-xl-auto{position:relative;width:100%;min-height:1px;padding-right:15px;padding-left:15px}.col{-ms-flex-preferred-size:0;flex-basis:0;-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-auto{-webkit-box-flex:0;-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-1{-webkit-box-flex:0;-ms-flex:0 0 8.3333333333%;flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-2{-webkit-box-flex:0;-ms-flex:0 0 16.6666666667%;flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-3{-webkit-box-flex:0;-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-4{-webkit-box-flex:0;-ms-flex:0 0 33.3333333333%;flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-5{-webkit-box-flex:0;-ms-flex:0 0 41.6666666667%;flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-6{-webkit-box-flex:0;-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-7{-webkit-box-flex:0;-ms-flex:0 0 58.3333333333%;flex:0 0 
58.3333333333%;max-width:58.3333333333%}.col-8{-webkit-box-flex:0;-ms-flex:0 0 66.6666666667%;flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-9{-webkit-box-flex:0;-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-10{-webkit-box-flex:0;-ms-flex:0 0 83.3333333333%;flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-11{-webkit-box-flex:0;-ms-flex:0 0 91.6666666667%;flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-12{-webkit-box-flex:0;-ms-flex:0 0 100%;flex:0 0 100%;max-width:100%}.order-first{-webkit-box-ordinal-group:0;-ms-flex-order:-1;order:-1}.order-last{-webkit-box-ordinal-group:14;-ms-flex-order:13;order:13}.order-0{-webkit-box-ordinal-group:1;-ms-flex-order:0;order:0}.order-1{-webkit-box-ordinal-group:2;-ms-flex-order:1;order:1}.order-2{-webkit-box-ordinal-group:3;-ms-flex-order:2;order:2}.order-3{-webkit-box-ordinal-group:4;-ms-flex-order:3;order:3}.order-4{-webkit-box-ordinal-group:5;-ms-flex-order:4;order:4}.order-5{-webkit-box-ordinal-group:6;-ms-flex-order:5;order:5}.order-6{-webkit-box-ordinal-group:7;-ms-flex-order:6;order:6}.order-7{-webkit-box-ordinal-group:8;-ms-flex-order:7;order:7}.order-8{-webkit-box-ordinal-group:9;-ms-flex-order:8;order:8}.order-9{-webkit-box-ordinal-group:10;-ms-flex-order:9;order:9}.order-10{-webkit-box-ordinal-group:11;-ms-flex-order:10;order:10}.order-11{-webkit-box-ordinal-group:12;-ms-flex-order:11;order:11}.order-12{-webkit-box-ordinal-group:13;-ms-flex-order:12;order:12}.offset-1{margin-left:8.3333333333%}.offset-2{margin-left:16.6666666667%}.offset-3{margin-left:25%}.offset-4{margin-left:33.3333333333%}.offset-5{margin-left:41.6666666667%}.offset-6{margin-left:50%}.offset-7{margin-left:58.3333333333%}.offset-8{margin-left:66.6666666667%}.offset-9{margin-left:75%}.offset-10{margin-left:83.3333333333%}.offset-11{margin-left:91.6666666667%}@media (min-width: 576px){.col-sm{-ms-flex-preferred-size:0;flex-basis:0;-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-sm-auto{-webkit-box-flex:0;-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-sm-1{-webkit-box-flex:0;-ms-flex:0 0 8.3333333333%;flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-sm-2{-webkit-box-flex:0;-ms-flex:0 0 16.6666666667%;flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-sm-3{-webkit-box-flex:0;-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-sm-4{-webkit-box-flex:0;-ms-flex:0 0 33.3333333333%;flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-sm-5{-webkit-box-flex:0;-ms-flex:0 0 41.6666666667%;flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-sm-6{-webkit-box-flex:0;-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-sm-7{-webkit-box-flex:0;-ms-flex:0 0 58.3333333333%;flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-sm-8{-webkit-box-flex:0;-ms-flex:0 0 66.6666666667%;flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-sm-9{-webkit-box-flex:0;-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-sm-10{-webkit-box-flex:0;-ms-flex:0 0 83.3333333333%;flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-sm-11{-webkit-box-flex:0;-ms-flex:0 0 91.6666666667%;flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-sm-12{-webkit-box-flex:0;-ms-flex:0 0 100%;flex:0 0 
100%;max-width:100%}.order-sm-first{-webkit-box-ordinal-group:0;-ms-flex-order:-1;order:-1}.order-sm-last{-webkit-box-ordinal-group:14;-ms-flex-order:13;order:13}.order-sm-0{-webkit-box-ordinal-group:1;-ms-flex-order:0;order:0}.order-sm-1{-webkit-box-ordinal-group:2;-ms-flex-order:1;order:1}.order-sm-2{-webkit-box-ordinal-group:3;-ms-flex-order:2;order:2}.order-sm-3{-webkit-box-ordinal-group:4;-ms-flex-order:3;order:3}.order-sm-4{-webkit-box-ordinal-group:5;-ms-flex-order:4;order:4}.order-sm-5{-webkit-box-ordinal-group:6;-ms-flex-order:5;order:5}.order-sm-6{-webkit-box-ordinal-group:7;-ms-flex-order:6;order:6}.order-sm-7{-webkit-box-ordinal-group:8;-ms-flex-order:7;order:7}.order-sm-8{-webkit-box-ordinal-group:9;-ms-flex-order:8;order:8}.order-sm-9{-webkit-box-ordinal-group:10;-ms-flex-order:9;order:9}.order-sm-10{-webkit-box-ordinal-group:11;-ms-flex-order:10;order:10}.order-sm-11{-webkit-box-ordinal-group:12;-ms-flex-order:11;order:11}.order-sm-12{-webkit-box-ordinal-group:13;-ms-flex-order:12;order:12}.offset-sm-0{margin-left:0}.offset-sm-1{margin-left:8.3333333333%}.offset-sm-2{margin-left:16.6666666667%}.offset-sm-3{margin-left:25%}.offset-sm-4{margin-left:33.3333333333%}.offset-sm-5{margin-left:41.6666666667%}.offset-sm-6{margin-left:50%}.offset-sm-7{margin-left:58.3333333333%}.offset-sm-8{margin-left:66.6666666667%}.offset-sm-9{margin-left:75%}.offset-sm-10{margin-left:83.3333333333%}.offset-sm-11{margin-left:91.6666666667%}}@media (min-width: 768px){.col-md{-ms-flex-preferred-size:0;flex-basis:0;-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-md-auto{-webkit-box-flex:0;-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-md-1{-webkit-box-flex:0;-ms-flex:0 0 8.3333333333%;flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-md-2{-webkit-box-flex:0;-ms-flex:0 0 16.6666666667%;flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-md-3{-webkit-box-flex:0;-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-md-4{-webkit-box-flex:0;-ms-flex:0 0 33.3333333333%;flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-md-5{-webkit-box-flex:0;-ms-flex:0 0 41.6666666667%;flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-md-6{-webkit-box-flex:0;-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-md-7{-webkit-box-flex:0;-ms-flex:0 0 58.3333333333%;flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-md-8{-webkit-box-flex:0;-ms-flex:0 0 66.6666666667%;flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-md-9{-webkit-box-flex:0;-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-md-10{-webkit-box-flex:0;-ms-flex:0 0 83.3333333333%;flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-md-11{-webkit-box-flex:0;-ms-flex:0 0 91.6666666667%;flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-md-12{-webkit-box-flex:0;-ms-flex:0 0 100%;flex:0 0 
100%;max-width:100%}.order-md-first{-webkit-box-ordinal-group:0;-ms-flex-order:-1;order:-1}.order-md-last{-webkit-box-ordinal-group:14;-ms-flex-order:13;order:13}.order-md-0{-webkit-box-ordinal-group:1;-ms-flex-order:0;order:0}.order-md-1{-webkit-box-ordinal-group:2;-ms-flex-order:1;order:1}.order-md-2{-webkit-box-ordinal-group:3;-ms-flex-order:2;order:2}.order-md-3{-webkit-box-ordinal-group:4;-ms-flex-order:3;order:3}.order-md-4{-webkit-box-ordinal-group:5;-ms-flex-order:4;order:4}.order-md-5{-webkit-box-ordinal-group:6;-ms-flex-order:5;order:5}.order-md-6{-webkit-box-ordinal-group:7;-ms-flex-order:6;order:6}.order-md-7{-webkit-box-ordinal-group:8;-ms-flex-order:7;order:7}.order-md-8{-webkit-box-ordinal-group:9;-ms-flex-order:8;order:8}.order-md-9{-webkit-box-ordinal-group:10;-ms-flex-order:9;order:9}.order-md-10{-webkit-box-ordinal-group:11;-ms-flex-order:10;order:10}.order-md-11{-webkit-box-ordinal-group:12;-ms-flex-order:11;order:11}.order-md-12{-webkit-box-ordinal-group:13;-ms-flex-order:12;order:12}.offset-md-0{margin-left:0}.offset-md-1{margin-left:8.3333333333%}.offset-md-2{margin-left:16.6666666667%}.offset-md-3{margin-left:25%}.offset-md-4{margin-left:33.3333333333%}.offset-md-5{margin-left:41.6666666667%}.offset-md-6{margin-left:50%}.offset-md-7{margin-left:58.3333333333%}.offset-md-8{margin-left:66.6666666667%}.offset-md-9{margin-left:75%}.offset-md-10{margin-left:83.3333333333%}.offset-md-11{margin-left:91.6666666667%}}@media (min-width: 992px){.col-lg{-ms-flex-preferred-size:0;flex-basis:0;-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-lg-auto{-webkit-box-flex:0;-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-lg-1{-webkit-box-flex:0;-ms-flex:0 0 8.3333333333%;flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-lg-2{-webkit-box-flex:0;-ms-flex:0 0 16.6666666667%;flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-lg-3{-webkit-box-flex:0;-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-lg-4{-webkit-box-flex:0;-ms-flex:0 0 33.3333333333%;flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-lg-5{-webkit-box-flex:0;-ms-flex:0 0 41.6666666667%;flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-lg-6{-webkit-box-flex:0;-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-lg-7{-webkit-box-flex:0;-ms-flex:0 0 58.3333333333%;flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-lg-8{-webkit-box-flex:0;-ms-flex:0 0 66.6666666667%;flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-lg-9{-webkit-box-flex:0;-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-lg-10{-webkit-box-flex:0;-ms-flex:0 0 83.3333333333%;flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-lg-11{-webkit-box-flex:0;-ms-flex:0 0 91.6666666667%;flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-lg-12{-webkit-box-flex:0;-ms-flex:0 0 100%;flex:0 0 
100%;max-width:100%}.order-lg-first{-webkit-box-ordinal-group:0;-ms-flex-order:-1;order:-1}.order-lg-last{-webkit-box-ordinal-group:14;-ms-flex-order:13;order:13}.order-lg-0{-webkit-box-ordinal-group:1;-ms-flex-order:0;order:0}.order-lg-1{-webkit-box-ordinal-group:2;-ms-flex-order:1;order:1}.order-lg-2{-webkit-box-ordinal-group:3;-ms-flex-order:2;order:2}.order-lg-3{-webkit-box-ordinal-group:4;-ms-flex-order:3;order:3}.order-lg-4{-webkit-box-ordinal-group:5;-ms-flex-order:4;order:4}.order-lg-5{-webkit-box-ordinal-group:6;-ms-flex-order:5;order:5}.order-lg-6{-webkit-box-ordinal-group:7;-ms-flex-order:6;order:6}.order-lg-7{-webkit-box-ordinal-group:8;-ms-flex-order:7;order:7}.order-lg-8{-webkit-box-ordinal-group:9;-ms-flex-order:8;order:8}.order-lg-9{-webkit-box-ordinal-group:10;-ms-flex-order:9;order:9}.order-lg-10{-webkit-box-ordinal-group:11;-ms-flex-order:10;order:10}.order-lg-11{-webkit-box-ordinal-group:12;-ms-flex-order:11;order:11}.order-lg-12{-webkit-box-ordinal-group:13;-ms-flex-order:12;order:12}.offset-lg-0{margin-left:0}.offset-lg-1{margin-left:8.3333333333%}.offset-lg-2{margin-left:16.6666666667%}.offset-lg-3{margin-left:25%}.offset-lg-4{margin-left:33.3333333333%}.offset-lg-5{margin-left:41.6666666667%}.offset-lg-6{margin-left:50%}.offset-lg-7{margin-left:58.3333333333%}.offset-lg-8{margin-left:66.6666666667%}.offset-lg-9{margin-left:75%}.offset-lg-10{margin-left:83.3333333333%}.offset-lg-11{margin-left:91.6666666667%}}@media (min-width: 1200px){.col-xl{-ms-flex-preferred-size:0;flex-basis:0;-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-xl-auto{-webkit-box-flex:0;-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-xl-1{-webkit-box-flex:0;-ms-flex:0 0 8.3333333333%;flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-xl-2{-webkit-box-flex:0;-ms-flex:0 0 16.6666666667%;flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-xl-3{-webkit-box-flex:0;-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-xl-4{-webkit-box-flex:0;-ms-flex:0 0 33.3333333333%;flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-xl-5{-webkit-box-flex:0;-ms-flex:0 0 41.6666666667%;flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-xl-6{-webkit-box-flex:0;-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-xl-7{-webkit-box-flex:0;-ms-flex:0 0 58.3333333333%;flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-xl-8{-webkit-box-flex:0;-ms-flex:0 0 66.6666666667%;flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-xl-9{-webkit-box-flex:0;-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-xl-10{-webkit-box-flex:0;-ms-flex:0 0 83.3333333333%;flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-xl-11{-webkit-box-flex:0;-ms-flex:0 0 91.6666666667%;flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-xl-12{-webkit-box-flex:0;-ms-flex:0 0 100%;flex:0 0 
100%;max-width:100%}.order-xl-first{-webkit-box-ordinal-group:0;-ms-flex-order:-1;order:-1}.order-xl-last{-webkit-box-ordinal-group:14;-ms-flex-order:13;order:13}.order-xl-0{-webkit-box-ordinal-group:1;-ms-flex-order:0;order:0}.order-xl-1{-webkit-box-ordinal-group:2;-ms-flex-order:1;order:1}.order-xl-2{-webkit-box-ordinal-group:3;-ms-flex-order:2;order:2}.order-xl-3{-webkit-box-ordinal-group:4;-ms-flex-order:3;order:3}.order-xl-4{-webkit-box-ordinal-group:5;-ms-flex-order:4;order:4}.order-xl-5{-webkit-box-ordinal-group:6;-ms-flex-order:5;order:5}.order-xl-6{-webkit-box-ordinal-group:7;-ms-flex-order:6;order:6}.order-xl-7{-webkit-box-ordinal-group:8;-ms-flex-order:7;order:7}.order-xl-8{-webkit-box-ordinal-group:9;-ms-flex-order:8;order:8}.order-xl-9{-webkit-box-ordinal-group:10;-ms-flex-order:9;order:9}.order-xl-10{-webkit-box-ordinal-group:11;-ms-flex-order:10;order:10}.order-xl-11{-webkit-box-ordinal-group:12;-ms-flex-order:11;order:11}.order-xl-12{-webkit-box-ordinal-group:13;-ms-flex-order:12;order:12}.offset-xl-0{margin-left:0}.offset-xl-1{margin-left:8.3333333333%}.offset-xl-2{margin-left:16.6666666667%}.offset-xl-3{margin-left:25%}.offset-xl-4{margin-left:33.3333333333%}.offset-xl-5{margin-left:41.6666666667%}.offset-xl-6{margin-left:50%}.offset-xl-7{margin-left:58.3333333333%}.offset-xl-8{margin-left:66.6666666667%}.offset-xl-9{margin-left:75%}.offset-xl-10{margin-left:83.3333333333%}.offset-xl-11{margin-left:91.6666666667%}}.table{width:100%;max-width:100%;margin-bottom:1rem;background-color:transparent}.table th,.table td{padding:.75rem;vertical-align:top;border-top:1px solid #dee2e6}.table thead th{vertical-align:bottom;border-bottom:2px solid #dee2e6}.table tbody+tbody{border-top:2px solid #dee2e6}.table .table{background-color:#fff}.table-sm th,.table-sm td{padding:.3rem}.table-bordered{border:1px solid #dee2e6}.table-bordered th,.table-bordered td{border:1px solid #dee2e6}.table-bordered thead th,.table-bordered thead td{border-bottom-width:2px}.table-striped tbody tr:nth-of-type(odd){background-color:rgba(0,0,0,0.05)}.table-hover tbody tr:hover{background-color:rgba(0,0,0,0.075)}.table-primary,.table-primary>th,.table-primary>td{background-color:#b8daff}.table-hover .table-primary:hover{background-color:#9fcdff}.table-hover .table-primary:hover>td,.table-hover .table-primary:hover>th{background-color:#9fcdff}.table-secondary,.table-secondary>th,.table-secondary>td{background-color:#d6d8db}.table-hover .table-secondary:hover{background-color:#c8cbcf}.table-hover .table-secondary:hover>td,.table-hover .table-secondary:hover>th{background-color:#c8cbcf}.table-success,.table-success>th,.table-success>td{background-color:#c3e6cb}.table-hover .table-success:hover{background-color:#b1dfbb}.table-hover .table-success:hover>td,.table-hover .table-success:hover>th{background-color:#b1dfbb}.table-info,.table-info>th,.table-info>td{background-color:#bee5eb}.table-hover .table-info:hover{background-color:#abdde5}.table-hover .table-info:hover>td,.table-hover .table-info:hover>th{background-color:#abdde5}.table-warning,.table-warning>th,.table-warning>td{background-color:#ffeeba}.table-hover .table-warning:hover{background-color:#ffe8a1}.table-hover .table-warning:hover>td,.table-hover .table-warning:hover>th{background-color:#ffe8a1}.table-danger,.table-danger>th,.table-danger>td{background-color:#f5c6cb}.table-hover .table-danger:hover{background-color:#f1b0b7}.table-hover .table-danger:hover>td,.table-hover 
.table-danger:hover>th{background-color:#f1b0b7}.table-light,.table-light>th,.table-light>td{background-color:#fdfdfe}.table-hover .table-light:hover{background-color:#ececf6}.table-hover .table-light:hover>td,.table-hover .table-light:hover>th{background-color:#ececf6}.table-dark,.table-dark>th,.table-dark>td{background-color:#c6c8ca}.table-hover .table-dark:hover{background-color:#b9bbbe}.table-hover .table-dark:hover>td,.table-hover .table-dark:hover>th{background-color:#b9bbbe}.table-active,.table-active>th,.table-active>td{background-color:rgba(0,0,0,0.075)}.table-hover .table-active:hover{background-color:rgba(0,0,0,0.075)}.table-hover .table-active:hover>td,.table-hover .table-active:hover>th{background-color:rgba(0,0,0,0.075)}.table .thead-dark th{color:#fff;background-color:#212529;border-color:#32383e}.table .thead-light th{color:#495057;background-color:#e9ecef;border-color:#dee2e6}.table-dark{color:#fff;background-color:#212529}.table-dark th,.table-dark td,.table-dark thead th{border-color:#32383e}.table-dark.table-bordered{border:0}.table-dark.table-striped tbody tr:nth-of-type(odd){background-color:rgba(255,255,255,0.05)}.table-dark.table-hover tbody tr:hover{background-color:rgba(255,255,255,0.075)}@media (max-width: 575.98px){.table-responsive-sm{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch;-ms-overflow-style:-ms-autohiding-scrollbar}.table-responsive-sm>.table-bordered{border:0}}@media (max-width: 767.98px){.table-responsive-md{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch;-ms-overflow-style:-ms-autohiding-scrollbar}.table-responsive-md>.table-bordered{border:0}}@media (max-width: 991.98px){.table-responsive-lg{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch;-ms-overflow-style:-ms-autohiding-scrollbar}.table-responsive-lg>.table-bordered{border:0}}@media (max-width: 1199.98px){.table-responsive-xl{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch;-ms-overflow-style:-ms-autohiding-scrollbar}.table-responsive-xl>.table-bordered{border:0}}.table-responsive{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch;-ms-overflow-style:-ms-autohiding-scrollbar}.table-responsive>.table-bordered{border:0}.form-control{display:block;width:100%;padding:.375rem .75rem;font-size:1rem;line-height:1.5;color:#495057;background-color:#fff;background-clip:padding-box;border:1px solid #ced4da;border-radius:.25rem;-webkit-transition:border-color 0.15s ease-in-out,-webkit-box-shadow 0.15s ease-in-out;transition:border-color 0.15s ease-in-out,-webkit-box-shadow 0.15s ease-in-out;transition:border-color 0.15s ease-in-out,box-shadow 0.15s ease-in-out;transition:border-color 0.15s ease-in-out,box-shadow 0.15s ease-in-out,-webkit-box-shadow 0.15s ease-in-out}.form-control::-ms-expand{background-color:transparent;border:0}.form-control:focus{color:#495057;background-color:#fff;border-color:#80bdff;outline:0;-webkit-box-shadow:0 0 0 .2rem rgba(0,123,255,0.25);box-shadow:0 0 0 .2rem rgba(0,123,255,0.25)}.form-control::-webkit-input-placeholder{color:#6c757d;opacity:1}.form-control:-ms-input-placeholder{color:#6c757d;opacity:1}.form-control::-ms-input-placeholder{color:#6c757d;opacity:1}.form-control::placeholder{color:#6c757d;opacity:1}.form-control:disabled,.form-control[readonly]{background-color:#e9ecef;opacity:1}select.form-control:not([size]):not([multiple]){height:calc(2.25rem + 
2px)}select.form-control:focus::-ms-value{color:#495057;background-color:#fff}.form-control-file,.form-control-range{display:block;width:100%}.col-form-label{padding-top:calc(.375rem + 1px);padding-bottom:calc(.375rem + 1px);margin-bottom:0;font-size:inherit;line-height:1.5}.col-form-label-lg{padding-top:calc(.5rem + 1px);padding-bottom:calc(.5rem + 1px);font-size:1.25rem;line-height:1.5}.col-form-label-sm{padding-top:calc(.25rem + 1px);padding-bottom:calc(.25rem + 1px);font-size:.875rem;line-height:1.5}.form-control-plaintext{display:block;width:100%;padding-top:.375rem;padding-bottom:.375rem;margin-bottom:0;line-height:1.5;background-color:transparent;border:solid transparent;border-width:1px 0}.form-control-plaintext.form-control-sm,.input-group-sm>.form-control-plaintext.form-control,.input-group-sm>.input-group-prepend>.form-control-plaintext.input-group-text,.input-group-sm>.input-group-append>.form-control-plaintext.input-group-text,.input-group-sm>.input-group-prepend>.form-control-plaintext.btn,.input-group-sm>.input-group-append>.form-control-plaintext.btn,.form-control-plaintext.form-control-lg,.input-group-lg>.form-control-plaintext.form-control,.input-group-lg>.input-group-prepend>.form-control-plaintext.input-group-text,.input-group-lg>.input-group-append>.form-control-plaintext.input-group-text,.input-group-lg>.input-group-prepend>.form-control-plaintext.btn,.input-group-lg>.input-group-append>.form-control-plaintext.btn{padding-right:0;padding-left:0}.form-control-sm,.input-group-sm>.form-control,.input-group-sm>.input-group-prepend>.input-group-text,.input-group-sm>.input-group-append>.input-group-text,.input-group-sm>.input-group-prepend>.btn,.input-group-sm>.input-group-append>.btn{padding:.25rem .5rem;font-size:.875rem;line-height:1.5;border-radius:.2rem}select.form-control-sm:not([size]):not([multiple]),.input-group-sm>select.form-control:not([size]):not([multiple]),.input-group-sm>.input-group-prepend>select.input-group-text:not([size]):not([multiple]),.input-group-sm>.input-group-append>select.input-group-text:not([size]):not([multiple]),.input-group-sm>.input-group-prepend>select.btn:not([size]):not([multiple]),.input-group-sm>.input-group-append>select.btn:not([size]):not([multiple]){height:calc(1.8125rem + 2px)}.form-control-lg,.input-group-lg>.form-control,.input-group-lg>.input-group-prepend>.input-group-text,.input-group-lg>.input-group-append>.input-group-text,.input-group-lg>.input-group-prepend>.btn,.input-group-lg>.input-group-append>.btn{padding:.5rem 1rem;font-size:1.25rem;line-height:1.5;border-radius:.3rem}select.form-control-lg:not([size]):not([multiple]),.input-group-lg>select.form-control:not([size]):not([multiple]),.input-group-lg>.input-group-prepend>select.input-group-text:not([size]):not([multiple]),.input-group-lg>.input-group-append>select.input-group-text:not([size]):not([multiple]),.input-group-lg>.input-group-prepend>select.btn:not([size]):not([multiple]),.input-group-lg>.input-group-append>select.btn:not([size]):not([multiple]){height:calc(2.875rem + 2px)}.form-group{margin-bottom:1rem}.form-text{display:block;margin-top:.25rem}.form-row{display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;margin-right:-5px;margin-left:-5px}.form-row>.col,.form-row>[class*="col-"]{padding-right:5px;padding-left:5px}.form-check{position:relative;display:block;padding-left:1.25rem}.form-check-input{position:absolute;margin-top:.3rem;margin-left:-1.25rem}.form-check-input:disabled ~ 
.form-check-label{color:#6c757d}.form-check-label{margin-bottom:0}.form-check-inline{display:-webkit-inline-box;display:-ms-inline-flexbox;display:inline-flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;padding-left:0;margin-right:.75rem}.form-check-inline .form-check-input{position:static;margin-top:0;margin-right:.3125rem;margin-left:0}.valid-feedback{display:none;width:100%;margin-top:.25rem;font-size:80%;color:#28a745}.valid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.5rem;margin-top:.1rem;font-size:.875rem;line-height:1;color:#fff;background-color:rgba(40,167,69,0.8);border-radius:.2rem}.was-validated .form-control:valid,.form-control.is-valid,.was-validated .custom-select:valid,.custom-select.is-valid{border-color:#28a745}.was-validated .form-control:valid:focus,.form-control.is-valid:focus,.was-validated .custom-select:valid:focus,.custom-select.is-valid:focus{border-color:#28a745;-webkit-box-shadow:0 0 0 .2rem rgba(40,167,69,0.25);box-shadow:0 0 0 .2rem rgba(40,167,69,0.25)}.was-validated .form-control:valid ~ .valid-feedback,.was-validated .form-control:valid ~ .valid-tooltip,.form-control.is-valid ~ .valid-feedback,.form-control.is-valid ~ .valid-tooltip,.was-validated .custom-select:valid ~ .valid-feedback,.was-validated .custom-select:valid ~ .valid-tooltip,.custom-select.is-valid ~ .valid-feedback,.custom-select.is-valid ~ .valid-tooltip{display:block}.was-validated .form-check-input:valid ~ .form-check-label,.form-check-input.is-valid ~ .form-check-label{color:#28a745}.was-validated .form-check-input:valid ~ .valid-feedback,.was-validated .form-check-input:valid ~ .valid-tooltip,.form-check-input.is-valid ~ .valid-feedback,.form-check-input.is-valid ~ .valid-tooltip{display:block}.was-validated .custom-control-input:valid ~ .custom-control-label,.custom-control-input.is-valid ~ .custom-control-label{color:#28a745}.was-validated .custom-control-input:valid ~ .custom-control-label::before,.custom-control-input.is-valid ~ .custom-control-label::before{background-color:#71dd8a}.was-validated .custom-control-input:valid ~ .valid-feedback,.was-validated .custom-control-input:valid ~ .valid-tooltip,.custom-control-input.is-valid ~ .valid-feedback,.custom-control-input.is-valid ~ .valid-tooltip{display:block}.was-validated .custom-control-input:valid:checked ~ .custom-control-label::before,.custom-control-input.is-valid:checked ~ .custom-control-label::before{background-color:#34ce57}.was-validated .custom-control-input:valid:focus ~ .custom-control-label::before,.custom-control-input.is-valid:focus ~ .custom-control-label::before{-webkit-box-shadow:0 0 0 1px #fff,0 0 0 .2rem rgba(40,167,69,0.25);box-shadow:0 0 0 1px #fff,0 0 0 .2rem rgba(40,167,69,0.25)}.was-validated .custom-file-input:valid ~ .custom-file-label,.custom-file-input.is-valid ~ .custom-file-label{border-color:#28a745}.was-validated .custom-file-input:valid ~ .custom-file-label::before,.custom-file-input.is-valid ~ .custom-file-label::before{border-color:inherit}.was-validated .custom-file-input:valid ~ .valid-feedback,.was-validated .custom-file-input:valid ~ .valid-tooltip,.custom-file-input.is-valid ~ .valid-feedback,.custom-file-input.is-valid ~ .valid-tooltip{display:block}.was-validated .custom-file-input:valid:focus ~ .custom-file-label,.custom-file-input.is-valid:focus ~ .custom-file-label{-webkit-box-shadow:0 0 0 .2rem rgba(40,167,69,0.25);box-shadow:0 0 0 .2rem 
rgba(40,167,69,0.25)}.invalid-feedback{display:none;width:100%;margin-top:.25rem;font-size:80%;color:#dc3545}.invalid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.5rem;margin-top:.1rem;font-size:.875rem;line-height:1;color:#fff;background-color:rgba(220,53,69,0.8);border-radius:.2rem}.was-validated .form-control:invalid,.form-control.is-invalid,.was-validated .custom-select:invalid,.custom-select.is-invalid{border-color:#dc3545}.was-validated .form-control:invalid:focus,.form-control.is-invalid:focus,.was-validated .custom-select:invalid:focus,.custom-select.is-invalid:focus{border-color:#dc3545;-webkit-box-shadow:0 0 0 .2rem rgba(220,53,69,0.25);box-shadow:0 0 0 .2rem rgba(220,53,69,0.25)}.was-validated .form-control:invalid ~ .invalid-feedback,.was-validated .form-control:invalid ~ .invalid-tooltip,.form-control.is-invalid ~ .invalid-feedback,.form-control.is-invalid ~ .invalid-tooltip,.was-validated .custom-select:invalid ~ .invalid-feedback,.was-validated .custom-select:invalid ~ .invalid-tooltip,.custom-select.is-invalid ~ .invalid-feedback,.custom-select.is-invalid ~ .invalid-tooltip{display:block}.was-validated .form-check-input:invalid ~ .form-check-label,.form-check-input.is-invalid ~ .form-check-label{color:#dc3545}.was-validated .form-check-input:invalid ~ .invalid-feedback,.was-validated .form-check-input:invalid ~ .invalid-tooltip,.form-check-input.is-invalid ~ .invalid-feedback,.form-check-input.is-invalid ~ .invalid-tooltip{display:block}.was-validated .custom-control-input:invalid ~ .custom-control-label,.custom-control-input.is-invalid ~ .custom-control-label{color:#dc3545}.was-validated .custom-control-input:invalid ~ .custom-control-label::before,.custom-control-input.is-invalid ~ .custom-control-label::before{background-color:#efa2a9}.was-validated .custom-control-input:invalid ~ .invalid-feedback,.was-validated .custom-control-input:invalid ~ .invalid-tooltip,.custom-control-input.is-invalid ~ .invalid-feedback,.custom-control-input.is-invalid ~ .invalid-tooltip{display:block}.was-validated .custom-control-input:invalid:checked ~ .custom-control-label::before,.custom-control-input.is-invalid:checked ~ .custom-control-label::before{background-color:#e4606d}.was-validated .custom-control-input:invalid:focus ~ .custom-control-label::before,.custom-control-input.is-invalid:focus ~ .custom-control-label::before{-webkit-box-shadow:0 0 0 1px #fff,0 0 0 .2rem rgba(220,53,69,0.25);box-shadow:0 0 0 1px #fff,0 0 0 .2rem rgba(220,53,69,0.25)}.was-validated .custom-file-input:invalid ~ .custom-file-label,.custom-file-input.is-invalid ~ .custom-file-label{border-color:#dc3545}.was-validated .custom-file-input:invalid ~ .custom-file-label::before,.custom-file-input.is-invalid ~ .custom-file-label::before{border-color:inherit}.was-validated .custom-file-input:invalid ~ .invalid-feedback,.was-validated .custom-file-input:invalid ~ .invalid-tooltip,.custom-file-input.is-invalid ~ .invalid-feedback,.custom-file-input.is-invalid ~ .invalid-tooltip{display:block}.was-validated .custom-file-input:invalid:focus ~ .custom-file-label,.custom-file-input.is-invalid:focus ~ .custom-file-label{-webkit-box-shadow:0 0 0 .2rem rgba(220,53,69,0.25);box-shadow:0 0 0 .2rem rgba(220,53,69,0.25)}.form-inline{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row wrap;flex-flow:row wrap;-webkit-box-align:center;-ms-flex-align:center;align-items:center}.form-inline .form-check{width:100%}@media 
(min-width: 576px){.form-inline label{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;margin-bottom:0}.form-inline .form-group{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-flex:0;-ms-flex:0 0 auto;flex:0 0 auto;-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row wrap;flex-flow:row wrap;-webkit-box-align:center;-ms-flex-align:center;align-items:center;margin-bottom:0}.form-inline .form-control{display:inline-block;width:auto;vertical-align:middle}.form-inline .form-control-plaintext{display:inline-block}.form-inline .input-group{width:auto}.form-inline .form-check{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;width:auto;padding-left:0}.form-inline .form-check-input{position:relative;margin-top:0;margin-right:.25rem;margin-left:0}.form-inline .custom-control{-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center}.form-inline .custom-control-label{margin-bottom:0}}.btn{display:inline-block;font-weight:400;text-align:center;white-space:nowrap;vertical-align:middle;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;border:1px solid transparent;padding:.375rem .75rem;font-size:1rem;line-height:1.5;border-radius:.25rem;-webkit-transition:color 0.15s ease-in-out,background-color 0.15s ease-in-out,border-color 0.15s ease-in-out,-webkit-box-shadow 0.15s ease-in-out;transition:color 0.15s ease-in-out,background-color 0.15s ease-in-out,border-color 0.15s ease-in-out,-webkit-box-shadow 0.15s ease-in-out;transition:color 0.15s ease-in-out,background-color 0.15s ease-in-out,border-color 0.15s ease-in-out,box-shadow 0.15s ease-in-out;transition:color 0.15s ease-in-out,background-color 0.15s ease-in-out,border-color 0.15s ease-in-out,box-shadow 0.15s ease-in-out,-webkit-box-shadow 0.15s ease-in-out}.btn:hover,.btn:focus{text-decoration:none}.btn:focus,.btn.focus{outline:0;-webkit-box-shadow:0 0 0 .2rem rgba(0,123,255,0.25);box-shadow:0 0 0 .2rem rgba(0,123,255,0.25)}.btn.disabled,.btn:disabled{opacity:.65}.btn:not(:disabled):not(.disabled){cursor:pointer}.btn:not(:disabled):not(.disabled):active,.btn:not(:disabled):not(.disabled).active{background-image:none}a.btn.disabled,fieldset:disabled a.btn{pointer-events:none}.btn-primary{color:#fff;background-color:#007bff;border-color:#007bff}.btn-primary:hover{color:#fff;background-color:#0069d9;border-color:#0062cc}.btn-primary:focus,.btn-primary.focus{-webkit-box-shadow:0 0 0 .2rem rgba(0,123,255,0.5);box-shadow:0 0 0 .2rem rgba(0,123,255,0.5)}.btn-primary.disabled,.btn-primary:disabled{color:#fff;background-color:#007bff;border-color:#007bff}.btn-primary:not(:disabled):not(.disabled):active,.btn-primary:not(:disabled):not(.disabled).active,.show>.btn-primary.dropdown-toggle{color:#fff;background-color:#0062cc;border-color:#005cbf}.btn-primary:not(:disabled):not(.disabled):active:focus,.btn-primary:not(:disabled):not(.disabled).active:focus,.show>.btn-primary.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 .2rem rgba(0,123,255,0.5);box-shadow:0 0 0 .2rem 
rgba(0,123,255,0.5)}.btn-secondary{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-secondary:hover{color:#fff;background-color:#5a6268;border-color:#545b62}.btn-secondary:focus,.btn-secondary.focus{-webkit-box-shadow:0 0 0 .2rem rgba(108,117,125,0.5);box-shadow:0 0 0 .2rem rgba(108,117,125,0.5)}.btn-secondary.disabled,.btn-secondary:disabled{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-secondary:not(:disabled):not(.disabled):active,.btn-secondary:not(:disabled):not(.disabled).active,.show>.btn-secondary.dropdown-toggle{color:#fff;background-color:#545b62;border-color:#4e555b}.btn-secondary:not(:disabled):not(.disabled):active:focus,.btn-secondary:not(:disabled):not(.disabled).active:focus,.show>.btn-secondary.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 .2rem rgba(108,117,125,0.5);box-shadow:0 0 0 .2rem rgba(108,117,125,0.5)}.btn-success{color:#fff;background-color:#28a745;border-color:#28a745}.btn-success:hover{color:#fff;background-color:#218838;border-color:#1e7e34}.btn-success:focus,.btn-success.focus{-webkit-box-shadow:0 0 0 .2rem rgba(40,167,69,0.5);box-shadow:0 0 0 .2rem rgba(40,167,69,0.5)}.btn-success.disabled,.btn-success:disabled{color:#fff;background-color:#28a745;border-color:#28a745}.btn-success:not(:disabled):not(.disabled):active,.btn-success:not(:disabled):not(.disabled).active,.show>.btn-success.dropdown-toggle{color:#fff;background-color:#1e7e34;border-color:#1c7430}.btn-success:not(:disabled):not(.disabled):active:focus,.btn-success:not(:disabled):not(.disabled).active:focus,.show>.btn-success.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 .2rem rgba(40,167,69,0.5);box-shadow:0 0 0 .2rem rgba(40,167,69,0.5)}.btn-info{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-info:hover{color:#fff;background-color:#138496;border-color:#117a8b}.btn-info:focus,.btn-info.focus{-webkit-box-shadow:0 0 0 .2rem rgba(23,162,184,0.5);box-shadow:0 0 0 .2rem rgba(23,162,184,0.5)}.btn-info.disabled,.btn-info:disabled{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-info:not(:disabled):not(.disabled):active,.btn-info:not(:disabled):not(.disabled).active,.show>.btn-info.dropdown-toggle{color:#fff;background-color:#117a8b;border-color:#10707f}.btn-info:not(:disabled):not(.disabled):active:focus,.btn-info:not(:disabled):not(.disabled).active:focus,.show>.btn-info.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 .2rem rgba(23,162,184,0.5);box-shadow:0 0 0 .2rem rgba(23,162,184,0.5)}.btn-warning{color:#212529;background-color:#ffc107;border-color:#ffc107}.btn-warning:hover{color:#212529;background-color:#e0a800;border-color:#d39e00}.btn-warning:focus,.btn-warning.focus{-webkit-box-shadow:0 0 0 .2rem rgba(255,193,7,0.5);box-shadow:0 0 0 .2rem rgba(255,193,7,0.5)}.btn-warning.disabled,.btn-warning:disabled{color:#212529;background-color:#ffc107;border-color:#ffc107}.btn-warning:not(:disabled):not(.disabled):active,.btn-warning:not(:disabled):not(.disabled).active,.show>.btn-warning.dropdown-toggle{color:#212529;background-color:#d39e00;border-color:#c69500}.btn-warning:not(:disabled):not(.disabled):active:focus,.btn-warning:not(:disabled):not(.disabled).active:focus,.show>.btn-warning.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 .2rem rgba(255,193,7,0.5);box-shadow:0 0 0 .2rem rgba(255,193,7,0.5)}.btn-danger{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-danger:hover{color:#fff;background-color:#c82333;border-color:#bd2130}.btn-danger:focus,.btn-danger.focus{-webkit-box-shadow:0 0 0 .2rem rgba(220,53,69,0.5);box-shadow:0 0 0 
.2rem rgba(220,53,69,0.5)}.btn-danger.disabled,.btn-danger:disabled{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-danger:not(:disabled):not(.disabled):active,.btn-danger:not(:disabled):not(.disabled).active,.show>.btn-danger.dropdown-toggle{color:#fff;background-color:#bd2130;border-color:#b21f2d}.btn-danger:not(:disabled):not(.disabled):active:focus,.btn-danger:not(:disabled):not(.disabled).active:focus,.show>.btn-danger.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 .2rem rgba(220,53,69,0.5);box-shadow:0 0 0 .2rem rgba(220,53,69,0.5)}.btn-light{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-light:hover{color:#212529;background-color:#e2e6ea;border-color:#dae0e5}.btn-light:focus,.btn-light.focus{-webkit-box-shadow:0 0 0 .2rem rgba(248,249,250,0.5);box-shadow:0 0 0 .2rem rgba(248,249,250,0.5)}.btn-light.disabled,.btn-light:disabled{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-light:not(:disabled):not(.disabled):active,.btn-light:not(:disabled):not(.disabled).active,.show>.btn-light.dropdown-toggle{color:#212529;background-color:#dae0e5;border-color:#d3d9df}.btn-light:not(:disabled):not(.disabled):active:focus,.btn-light:not(:disabled):not(.disabled).active:focus,.show>.btn-light.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 .2rem rgba(248,249,250,0.5);box-shadow:0 0 0 .2rem rgba(248,249,250,0.5)}.btn-dark{color:#fff;background-color:#343a40;border-color:#343a40}.btn-dark:hover{color:#fff;background-color:#23272b;border-color:#1d2124}.btn-dark:focus,.btn-dark.focus{-webkit-box-shadow:0 0 0 .2rem rgba(52,58,64,0.5);box-shadow:0 0 0 .2rem rgba(52,58,64,0.5)}.btn-dark.disabled,.btn-dark:disabled{color:#fff;background-color:#343a40;border-color:#343a40}.btn-dark:not(:disabled):not(.disabled):active,.btn-dark:not(:disabled):not(.disabled).active,.show>.btn-dark.dropdown-toggle{color:#fff;background-color:#1d2124;border-color:#171a1d}.btn-dark:not(:disabled):not(.disabled):active:focus,.btn-dark:not(:disabled):not(.disabled).active:focus,.show>.btn-dark.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 .2rem rgba(52,58,64,0.5);box-shadow:0 0 0 .2rem rgba(52,58,64,0.5)}.btn-outline-primary{color:#007bff;background-color:transparent;background-image:none;border-color:#007bff}.btn-outline-primary:hover{color:#fff;background-color:#007bff;border-color:#007bff}.btn-outline-primary:focus,.btn-outline-primary.focus{-webkit-box-shadow:0 0 0 .2rem rgba(0,123,255,0.5);box-shadow:0 0 0 .2rem rgba(0,123,255,0.5)}.btn-outline-primary.disabled,.btn-outline-primary:disabled{color:#007bff;background-color:transparent}.btn-outline-primary:not(:disabled):not(.disabled):active,.btn-outline-primary:not(:disabled):not(.disabled).active,.show>.btn-outline-primary.dropdown-toggle{color:#fff;background-color:#007bff;border-color:#007bff}.btn-outline-primary:not(:disabled):not(.disabled):active:focus,.btn-outline-primary:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-primary.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 .2rem rgba(0,123,255,0.5);box-shadow:0 0 0 .2rem rgba(0,123,255,0.5)}.btn-outline-secondary{color:#6c757d;background-color:transparent;background-image:none;border-color:#6c757d}.btn-outline-secondary:hover{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-outline-secondary:focus,.btn-outline-secondary.focus{-webkit-box-shadow:0 0 0 .2rem rgba(108,117,125,0.5);box-shadow:0 0 0 .2rem 
rgba(108,117,125,0.5)}.btn-outline-secondary.disabled,.btn-outline-secondary:disabled{color:#6c757d;background-color:transparent}.btn-outline-secondary:not(:disabled):not(.disabled):active,.btn-outline-secondary:not(:disabled):not(.disabled).active,.show>.btn-outline-secondary.dropdown-toggle{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-outline-secondary:not(:disabled):not(.disabled):active:focus,.btn-outline-secondary:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-secondary.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 .2rem rgba(108,117,125,0.5);box-shadow:0 0 0 .2rem rgba(108,117,125,0.5)}.btn-outline-success{color:#28a745;background-color:transparent;background-image:none;border-color:#28a745}.btn-outline-success:hover{color:#fff;background-color:#28a745;border-color:#28a745}.btn-outline-success:focus,.btn-outline-success.focus{-webkit-box-shadow:0 0 0 .2rem rgba(40,167,69,0.5);box-shadow:0 0 0 .2rem rgba(40,167,69,0.5)}.btn-outline-success.disabled,.btn-outline-success:disabled{color:#28a745;background-color:transparent}.btn-outline-success:not(:disabled):not(.disabled):active,.btn-outline-success:not(:disabled):not(.disabled).active,.show>.btn-outline-success.dropdown-toggle{color:#fff;background-color:#28a745;border-color:#28a745}.btn-outline-success:not(:disabled):not(.disabled):active:focus,.btn-outline-success:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-success.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 .2rem rgba(40,167,69,0.5);box-shadow:0 0 0 .2rem rgba(40,167,69,0.5)}.btn-outline-info{color:#17a2b8;background-color:transparent;background-image:none;border-color:#17a2b8}.btn-outline-info:hover{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-outline-info:focus,.btn-outline-info.focus{-webkit-box-shadow:0 0 0 .2rem rgba(23,162,184,0.5);box-shadow:0 0 0 .2rem rgba(23,162,184,0.5)}.btn-outline-info.disabled,.btn-outline-info:disabled{color:#17a2b8;background-color:transparent}.btn-outline-info:not(:disabled):not(.disabled):active,.btn-outline-info:not(:disabled):not(.disabled).active,.show>.btn-outline-info.dropdown-toggle{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-outline-info:not(:disabled):not(.disabled):active:focus,.btn-outline-info:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-info.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 .2rem rgba(23,162,184,0.5);box-shadow:0 0 0 .2rem rgba(23,162,184,0.5)}.btn-outline-warning{color:#ffc107;background-color:transparent;background-image:none;border-color:#ffc107}.btn-outline-warning:hover{color:#212529;background-color:#ffc107;border-color:#ffc107}.btn-outline-warning:focus,.btn-outline-warning.focus{-webkit-box-shadow:0 0 0 .2rem rgba(255,193,7,0.5);box-shadow:0 0 0 .2rem rgba(255,193,7,0.5)}.btn-outline-warning.disabled,.btn-outline-warning:disabled{color:#ffc107;background-color:transparent}.btn-outline-warning:not(:disabled):not(.disabled):active,.btn-outline-warning:not(:disabled):not(.disabled).active,.show>.btn-outline-warning.dropdown-toggle{color:#212529;background-color:#ffc107;border-color:#ffc107}.btn-outline-warning:not(:disabled):not(.disabled):active:focus,.btn-outline-warning:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-warning.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 .2rem rgba(255,193,7,0.5);box-shadow:0 0 0 .2rem 
rgba(255,193,7,0.5)}.btn-outline-danger{color:#dc3545;background-color:transparent;background-image:none;border-color:#dc3545}.btn-outline-danger:hover{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-outline-danger:focus,.btn-outline-danger.focus{-webkit-box-shadow:0 0 0 .2rem rgba(220,53,69,0.5);box-shadow:0 0 0 .2rem rgba(220,53,69,0.5)}.btn-outline-danger.disabled,.btn-outline-danger:disabled{color:#dc3545;background-color:transparent}.btn-outline-danger:not(:disabled):not(.disabled):active,.btn-outline-danger:not(:disabled):not(.disabled).active,.show>.btn-outline-danger.dropdown-toggle{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-outline-danger:not(:disabled):not(.disabled):active:focus,.btn-outline-danger:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-danger.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 .2rem rgba(220,53,69,0.5);box-shadow:0 0 0 .2rem rgba(220,53,69,0.5)}.btn-outline-light{color:#f8f9fa;background-color:transparent;background-image:none;border-color:#f8f9fa}.btn-outline-light:hover{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-outline-light:focus,.btn-outline-light.focus{-webkit-box-shadow:0 0 0 .2rem rgba(248,249,250,0.5);box-shadow:0 0 0 .2rem rgba(248,249,250,0.5)}.btn-outline-light.disabled,.btn-outline-light:disabled{color:#f8f9fa;background-color:transparent}.btn-outline-light:not(:disabled):not(.disabled):active,.btn-outline-light:not(:disabled):not(.disabled).active,.show>.btn-outline-light.dropdown-toggle{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-outline-light:not(:disabled):not(.disabled):active:focus,.btn-outline-light:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-light.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 .2rem rgba(248,249,250,0.5);box-shadow:0 0 0 .2rem rgba(248,249,250,0.5)}.btn-outline-dark{color:#343a40;background-color:transparent;background-image:none;border-color:#343a40}.btn-outline-dark:hover{color:#fff;background-color:#343a40;border-color:#343a40}.btn-outline-dark:focus,.btn-outline-dark.focus{-webkit-box-shadow:0 0 0 .2rem rgba(52,58,64,0.5);box-shadow:0 0 0 .2rem rgba(52,58,64,0.5)}.btn-outline-dark.disabled,.btn-outline-dark:disabled{color:#343a40;background-color:transparent}.btn-outline-dark:not(:disabled):not(.disabled):active,.btn-outline-dark:not(:disabled):not(.disabled).active,.show>.btn-outline-dark.dropdown-toggle{color:#fff;background-color:#343a40;border-color:#343a40}.btn-outline-dark:not(:disabled):not(.disabled):active:focus,.btn-outline-dark:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-dark.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 .2rem rgba(52,58,64,0.5);box-shadow:0 0 0 .2rem rgba(52,58,64,0.5)}.btn-link{font-weight:400;color:#007bff;background-color:transparent}.btn-link:hover{color:#0056b3;text-decoration:underline;background-color:transparent;border-color:transparent}.btn-link:focus,.btn-link.focus{text-decoration:underline;border-color:transparent;-webkit-box-shadow:none;box-shadow:none}.btn-link:disabled,.btn-link.disabled{color:#6c757d}.btn-lg,.btn-group-lg>.btn{padding:.5rem 1rem;font-size:1.25rem;line-height:1.5;border-radius:.3rem}.btn-sm,.btn-group-sm>.btn{padding:.25rem .5rem;font-size:.875rem;line-height:1.5;border-radius:.2rem}.btn-block{display:block;width:100%}.btn-block+.btn-block{margin-top:.5rem}input[type="submit"].btn-block,input[type="reset"].btn-block,input[type="button"].btn-block{width:100%}.fade{opacity:0;-webkit-transition:opacity 0.15s linear;transition:opacity 
0.15s linear}.fade.show{opacity:1}.collapse{display:none}.collapse.show{display:block}tr.collapse.show{display:table-row}tbody.collapse.show{display:table-row-group}.collapsing{position:relative;height:0;overflow:hidden;-webkit-transition:height 0.35s ease;transition:height 0.35s ease}.dropup,.dropdown{position:relative}.dropdown-toggle::after{display:inline-block;width:0;height:0;margin-left:.255em;vertical-align:.255em;content:"";border-top:.3em solid;border-right:.3em solid transparent;border-bottom:0;border-left:.3em solid transparent}.dropdown-toggle:empty::after{margin-left:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:10rem;padding:.5rem 0;margin:.125rem 0 0;font-size:1rem;color:#212529;text-align:left;list-style:none;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,0.15);border-radius:.25rem}.dropup .dropdown-menu{margin-top:0;margin-bottom:.125rem}.dropup .dropdown-toggle::after{display:inline-block;width:0;height:0;margin-left:.255em;vertical-align:.255em;content:"";border-top:0;border-right:.3em solid transparent;border-bottom:.3em solid;border-left:.3em solid transparent}.dropup .dropdown-toggle:empty::after{margin-left:0}.dropright .dropdown-menu{margin-top:0;margin-left:.125rem}.dropright .dropdown-toggle::after{display:inline-block;width:0;height:0;margin-left:.255em;vertical-align:.255em;content:"";border-top:.3em solid transparent;border-bottom:.3em solid transparent;border-left:.3em solid}.dropright .dropdown-toggle:empty::after{margin-left:0}.dropright .dropdown-toggle::after{vertical-align:0}.dropleft .dropdown-menu{margin-top:0;margin-right:.125rem}.dropleft .dropdown-toggle::after{display:inline-block;width:0;height:0;margin-left:.255em;vertical-align:.255em;content:""}.dropleft .dropdown-toggle::after{display:none}.dropleft .dropdown-toggle::before{display:inline-block;width:0;height:0;margin-right:.255em;vertical-align:.255em;content:"";border-top:.3em solid transparent;border-right:.3em solid;border-bottom:.3em solid transparent}.dropleft .dropdown-toggle:empty::after{margin-left:0}.dropleft .dropdown-toggle::before{vertical-align:0}.dropdown-divider{height:0;margin:.5rem 0;overflow:hidden;border-top:1px solid #e9ecef}.dropdown-item{display:block;width:100%;padding:.25rem 1.5rem;clear:both;font-weight:400;color:#212529;text-align:inherit;white-space:nowrap;background-color:transparent;border:0}.dropdown-item:hover,.dropdown-item:focus{color:#16181b;text-decoration:none;background-color:#f8f9fa}.dropdown-item.active,.dropdown-item:active{color:#fff;text-decoration:none;background-color:#007bff}.dropdown-item.disabled,.dropdown-item:disabled{color:#6c757d;background-color:transparent}.dropdown-menu.show{display:block}.dropdown-header{display:block;padding:.5rem 1.5rem;margin-bottom:0;font-size:.875rem;color:#6c757d;white-space:nowrap}.btn-group,.btn-group-vertical{position:relative;display:-webkit-inline-box;display:-ms-inline-flexbox;display:inline-flex;vertical-align:middle}.btn-group>.btn,.btn-group-vertical>.btn{position:relative;-webkit-box-flex:0;-ms-flex:0 1 auto;flex:0 1 auto}.btn-group>.btn:hover,.btn-group-vertical>.btn:hover{z-index:1}.btn-group>.btn:focus,.btn-group>.btn:active,.btn-group>.btn.active,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn.active{z-index:1}.btn-group .btn+.btn,.btn-group .btn+.btn-group,.btn-group .btn-group+.btn,.btn-group .btn-group+.btn-group,.btn-group-vertical .btn+.btn,.btn-group-vertical 
.btn+.btn-group,.btn-group-vertical .btn-group+.btn,.btn-group-vertical .btn-group+.btn-group{margin-left:-1px}.btn-toolbar{display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;-webkit-box-pack:start;-ms-flex-pack:start;justify-content:flex-start}.btn-toolbar .input-group{width:auto}.btn-group>.btn:first-child{margin-left:0}.btn-group>.btn:not(:last-child):not(.dropdown-toggle),.btn-group>.btn-group:not(:last-child)>.btn{border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn:not(:first-child),.btn-group>.btn-group:not(:first-child)>.btn{border-top-left-radius:0;border-bottom-left-radius:0}.dropdown-toggle-split{padding-right:.5625rem;padding-left:.5625rem}.dropdown-toggle-split::after{margin-left:0}.btn-sm+.dropdown-toggle-split,.btn-group-sm>.btn+.dropdown-toggle-split{padding-right:.375rem;padding-left:.375rem}.btn-lg+.dropdown-toggle-split,.btn-group-lg>.btn+.dropdown-toggle-split{padding-right:.75rem;padding-left:.75rem}.btn-group-vertical{-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;-webkit-box-align:start;-ms-flex-align:start;align-items:flex-start;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center}.btn-group-vertical .btn,.btn-group-vertical .btn-group{width:100%}.btn-group-vertical>.btn+.btn,.btn-group-vertical>.btn+.btn-group,.btn-group-vertical>.btn-group+.btn,.btn-group-vertical>.btn-group+.btn-group{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:not(:last-child):not(.dropdown-toggle),.btn-group-vertical>.btn-group:not(:last-child)>.btn{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn:not(:first-child),.btn-group-vertical>.btn-group:not(:first-child)>.btn{border-top-left-radius:0;border-top-right-radius:0}.btn-group-toggle>.btn,.btn-group-toggle>.btn-group>.btn{margin-bottom:0}.btn-group-toggle>.btn input[type="radio"],.btn-group-toggle>.btn input[type="checkbox"],.btn-group-toggle>.btn-group>.btn input[type="radio"],.btn-group-toggle>.btn-group>.btn input[type="checkbox"]{position:absolute;clip:rect(0, 0, 0, 0);pointer-events:none}.input-group{position:relative;display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;-webkit-box-align:stretch;-ms-flex-align:stretch;align-items:stretch;width:100%}.input-group>.form-control,.input-group>.custom-select,.input-group>.custom-file{position:relative;-webkit-box-flex:1;-ms-flex:1 1 auto;flex:1 1 auto;width:1%;margin-bottom:0}.input-group>.form-control:focus,.input-group>.custom-select:focus,.input-group>.custom-file:focus{z-index:3}.input-group>.form-control+.form-control,.input-group>.form-control+.custom-select,.input-group>.form-control+.custom-file,.input-group>.custom-select+.form-control,.input-group>.custom-select+.custom-select,.input-group>.custom-select+.custom-file,.input-group>.custom-file+.form-control,.input-group>.custom-file+.custom-select,.input-group>.custom-file+.custom-file{margin-left:-1px}.input-group>.form-control:not(:last-child),.input-group>.custom-select:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.form-control:not(:first-child),.input-group>.custom-select:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.input-group>.custom-file{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center}.input-group>.custom-file:not(:last-child) .custom-file-label,.input-group>.custom-file:not(:last-child) 
.custom-file-label::before{border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.custom-file:not(:first-child) .custom-file-label,.input-group>.custom-file:not(:first-child) .custom-file-label::before{border-top-left-radius:0;border-bottom-left-radius:0}.input-group-prepend,.input-group-append{display:-webkit-box;display:-ms-flexbox;display:flex}.input-group-prepend .btn,.input-group-append .btn{position:relative;z-index:2}.input-group-prepend .btn+.btn,.input-group-prepend .btn+.input-group-text,.input-group-prepend .input-group-text+.input-group-text,.input-group-prepend .input-group-text+.btn,.input-group-append .btn+.btn,.input-group-append .btn+.input-group-text,.input-group-append .input-group-text+.input-group-text,.input-group-append .input-group-text+.btn{margin-left:-1px}.input-group-prepend{margin-right:-1px}.input-group-append{margin-left:-1px}.input-group-text{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;padding:.375rem .75rem;margin-bottom:0;font-size:1rem;font-weight:400;line-height:1.5;color:#495057;text-align:center;white-space:nowrap;background-color:#e9ecef;border:1px solid #ced4da;border-radius:.25rem}.input-group-text input[type="radio"],.input-group-text input[type="checkbox"]{margin-top:0}.input-group>.input-group-prepend>.btn,.input-group>.input-group-prepend>.input-group-text,.input-group>.input-group-append:not(:last-child)>.btn,.input-group>.input-group-append:not(:last-child)>.input-group-text,.input-group>.input-group-append:last-child>.btn:not(:last-child):not(.dropdown-toggle),.input-group>.input-group-append:last-child>.input-group-text:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.input-group-append>.btn,.input-group>.input-group-append>.input-group-text,.input-group>.input-group-prepend:not(:first-child)>.btn,.input-group>.input-group-prepend:not(:first-child)>.input-group-text,.input-group>.input-group-prepend:first-child>.btn:not(:first-child),.input-group>.input-group-prepend:first-child>.input-group-text:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.custom-control{position:relative;display:block;min-height:1.5rem;padding-left:1.5rem}.custom-control-inline{display:-webkit-inline-box;display:-ms-inline-flexbox;display:inline-flex;margin-right:1rem}.custom-control-input{position:absolute;z-index:-1;opacity:0}.custom-control-input:checked ~ .custom-control-label::before{color:#fff;background-color:#007bff}.custom-control-input:focus ~ .custom-control-label::before{-webkit-box-shadow:0 0 0 1px #fff,0 0 0 .2rem rgba(0,123,255,0.25);box-shadow:0 0 0 1px #fff,0 0 0 .2rem rgba(0,123,255,0.25)}.custom-control-input:active ~ .custom-control-label::before{color:#fff;background-color:#b3d7ff}.custom-control-input:disabled ~ .custom-control-label{color:#6c757d}.custom-control-input:disabled ~ .custom-control-label::before{background-color:#e9ecef}.custom-control-label{margin-bottom:0}.custom-control-label::before{position:absolute;top:.25rem;left:0;display:block;width:1rem;height:1rem;pointer-events:none;content:"";-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;background-color:#dee2e6}.custom-control-label::after{position:absolute;top:.25rem;left:0;display:block;width:1rem;height:1rem;content:"";background-repeat:no-repeat;background-position:center center;background-size:50% 50%}.custom-checkbox .custom-control-label::before{border-radius:.25rem}.custom-checkbox 
.custom-control-input:checked ~ .custom-control-label::before{background-color:#007bff}.custom-checkbox .custom-control-input:checked ~ .custom-control-label::after{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3E%3Cpath fill='%23fff' d='M6.564.75l-3.59 3.612-1.538-1.55L0 4.26 2.974 7.25 8 2.193z'/%3E%3C/svg%3E")}.custom-checkbox .custom-control-input:indeterminate ~ .custom-control-label::before{background-color:#007bff}.custom-checkbox .custom-control-input:indeterminate ~ .custom-control-label::after{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 4'%3E%3Cpath stroke='%23fff' d='M0 2h4'/%3E%3C/svg%3E")}.custom-checkbox .custom-control-input:disabled:checked ~ .custom-control-label::before{background-color:rgba(0,123,255,0.5)}.custom-checkbox .custom-control-input:disabled:indeterminate ~ .custom-control-label::before{background-color:rgba(0,123,255,0.5)}.custom-radio .custom-control-label::before{border-radius:50%}.custom-radio .custom-control-input:checked ~ .custom-control-label::before{background-color:#007bff}.custom-radio .custom-control-input:checked ~ .custom-control-label::after{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3E%3Ccircle r='3' fill='%23fff'/%3E%3C/svg%3E")}.custom-radio .custom-control-input:disabled:checked ~ .custom-control-label::before{background-color:rgba(0,123,255,0.5)}.custom-select{display:inline-block;width:100%;height:calc(2.25rem + 2px);padding:.375rem 1.75rem .375rem .75rem;line-height:1.5;color:#495057;vertical-align:middle;background:#fff url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 5'%3E%3Cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3E%3C/svg%3E") no-repeat right .75rem center;background-size:8px 10px;border:1px solid #ced4da;border-radius:.25rem;-webkit-appearance:none;-moz-appearance:none;appearance:none}.custom-select:focus{border-color:#80bdff;outline:0;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,0.075),0 0 5px rgba(128,189,255,0.5);box-shadow:inset 0 1px 2px rgba(0,0,0,0.075),0 0 5px rgba(128,189,255,0.5)}.custom-select:focus::-ms-value{color:#495057;background-color:#fff}.custom-select[multiple],.custom-select[size]:not([size="1"]){height:auto;padding-right:.75rem;background-image:none}.custom-select:disabled{color:#6c757d;background-color:#e9ecef}.custom-select::-ms-expand{opacity:0}.custom-select-sm{height:calc(1.8125rem + 2px);padding-top:.375rem;padding-bottom:.375rem;font-size:75%}.custom-select-lg{height:calc(2.875rem + 2px);padding-top:.375rem;padding-bottom:.375rem;font-size:125%}.custom-file{position:relative;display:inline-block;width:100%;height:calc(2.25rem + 2px);margin-bottom:0}.custom-file-input{position:relative;z-index:2;width:100%;height:calc(2.25rem + 2px);margin:0;opacity:0}.custom-file-input:focus ~ .custom-file-control{border-color:#80bdff;-webkit-box-shadow:0 0 0 .2rem rgba(0,123,255,0.25);box-shadow:0 0 0 .2rem rgba(0,123,255,0.25)}.custom-file-input:focus ~ .custom-file-control::before{border-color:#80bdff}.custom-file-input:lang(en) ~ .custom-file-label::after{content:"Browse"}.custom-file-label{position:absolute;top:0;right:0;left:0;z-index:1;height:calc(2.25rem + 2px);padding:.375rem .75rem;line-height:1.5;color:#495057;background-color:#fff;border:1px solid 
#ced4da;border-radius:.25rem}.custom-file-label::after{position:absolute;top:0;right:0;bottom:0;z-index:3;display:block;height:calc(calc(2.25rem + 2px) - 1px * 2);padding:.375rem .75rem;line-height:1.5;color:#495057;content:"Browse";background-color:#e9ecef;border-left:1px solid #ced4da;border-radius:0 .25rem .25rem 0}.nav{display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;padding-left:0;margin-bottom:0;list-style:none}.nav-link{display:block;padding:.5rem 1rem}.nav-link:hover,.nav-link:focus{text-decoration:none}.nav-link.disabled{color:#6c757d}.nav-tabs{border-bottom:1px solid #dee2e6}.nav-tabs .nav-item{margin-bottom:-1px}.nav-tabs .nav-link{border:1px solid transparent;border-top-left-radius:.25rem;border-top-right-radius:.25rem}.nav-tabs .nav-link:hover,.nav-tabs .nav-link:focus{border-color:#e9ecef #e9ecef #dee2e6}.nav-tabs .nav-link.disabled{color:#6c757d;background-color:transparent;border-color:transparent}.nav-tabs .nav-link.active,.nav-tabs .nav-item.show .nav-link{color:#495057;background-color:#fff;border-color:#dee2e6 #dee2e6 #fff}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.nav-pills .nav-link{border-radius:.25rem}.nav-pills .nav-link.active,.nav-pills .show>.nav-link{color:#fff;background-color:#007bff}.nav-fill .nav-item{-webkit-box-flex:1;-ms-flex:1 1 auto;flex:1 1 auto;text-align:center}.nav-justified .nav-item{-ms-flex-preferred-size:0;flex-basis:0;-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1;text-align:center}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.navbar{position:relative;display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:justify;-ms-flex-pack:justify;justify-content:space-between;padding:.5rem 1rem}.navbar>.container,.navbar>.container-fluid{display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:justify;-ms-flex-pack:justify;justify-content:space-between}.navbar-brand{display:inline-block;padding-top:.3125rem;padding-bottom:.3125rem;margin-right:1rem;font-size:1.25rem;line-height:inherit;white-space:nowrap}.navbar-brand:hover,.navbar-brand:focus{text-decoration:none}.navbar-nav{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;padding-left:0;margin-bottom:0;list-style:none}.navbar-nav .nav-link{padding-right:0;padding-left:0}.navbar-nav .dropdown-menu{position:static;float:none}.navbar-text{display:inline-block;padding-top:.5rem;padding-bottom:.5rem}.navbar-collapse{-ms-flex-preferred-size:100%;flex-basis:100%;-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1;-webkit-box-align:center;-ms-flex-align:center;align-items:center}.navbar-toggler{padding:.25rem .75rem;font-size:1.25rem;line-height:1;background-color:transparent;border:1px solid transparent;border-radius:.25rem}.navbar-toggler:hover,.navbar-toggler:focus{text-decoration:none}.navbar-toggler:not(:disabled):not(.disabled){cursor:pointer}.navbar-toggler-icon{display:inline-block;width:1.5em;height:1.5em;vertical-align:middle;content:"";background:no-repeat center center;background-size:100% 100%}@media (max-width: 575.98px){.navbar-expand-sm>.container,.navbar-expand-sm>.container-fluid{padding-right:0;padding-left:0}}@media (min-width: 
576px){.navbar-expand-sm{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row nowrap;flex-flow:row nowrap;-webkit-box-pack:start;-ms-flex-pack:start;justify-content:flex-start}.navbar-expand-sm .navbar-nav{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-direction:row;flex-direction:row}.navbar-expand-sm .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-sm .navbar-nav .dropdown-menu-right{right:0;left:auto}.navbar-expand-sm .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-sm>.container,.navbar-expand-sm>.container-fluid{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand-sm .navbar-collapse{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important;-ms-flex-preferred-size:auto;flex-basis:auto}.navbar-expand-sm .navbar-toggler{display:none}.navbar-expand-sm .dropup .dropdown-menu{top:auto;bottom:100%}}@media (max-width: 767.98px){.navbar-expand-md>.container,.navbar-expand-md>.container-fluid{padding-right:0;padding-left:0}}@media (min-width: 768px){.navbar-expand-md{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row nowrap;flex-flow:row nowrap;-webkit-box-pack:start;-ms-flex-pack:start;justify-content:flex-start}.navbar-expand-md .navbar-nav{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-direction:row;flex-direction:row}.navbar-expand-md .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-md .navbar-nav .dropdown-menu-right{right:0;left:auto}.navbar-expand-md .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-md>.container,.navbar-expand-md>.container-fluid{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand-md .navbar-collapse{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important;-ms-flex-preferred-size:auto;flex-basis:auto}.navbar-expand-md .navbar-toggler{display:none}.navbar-expand-md .dropup .dropdown-menu{top:auto;bottom:100%}}@media (max-width: 991.98px){.navbar-expand-lg>.container,.navbar-expand-lg>.container-fluid{padding-right:0;padding-left:0}}@media (min-width: 992px){.navbar-expand-lg{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row nowrap;flex-flow:row nowrap;-webkit-box-pack:start;-ms-flex-pack:start;justify-content:flex-start}.navbar-expand-lg .navbar-nav{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-direction:row;flex-direction:row}.navbar-expand-lg .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-lg .navbar-nav .dropdown-menu-right{right:0;left:auto}.navbar-expand-lg .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-lg>.container,.navbar-expand-lg>.container-fluid{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand-lg .navbar-collapse{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important;-ms-flex-preferred-size:auto;flex-basis:auto}.navbar-expand-lg .navbar-toggler{display:none}.navbar-expand-lg .dropup .dropdown-menu{top:auto;bottom:100%}}@media (max-width: 1199.98px){.navbar-expand-xl>.container,.navbar-expand-xl>.container-fluid{padding-right:0;padding-left:0}}@media (min-width: 1200px){.navbar-expand-xl{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row nowrap;flex-flow:row nowrap;-webkit-box-pack:start;-ms-flex-pack:start;justify-content:flex-start}.navbar-expand-xl .navbar-nav{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-direction:row;flex-direction:row}.navbar-expand-xl .navbar-nav 
.dropdown-menu{position:absolute}.navbar-expand-xl .navbar-nav .dropdown-menu-right{right:0;left:auto}.navbar-expand-xl .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-xl>.container,.navbar-expand-xl>.container-fluid{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand-xl .navbar-collapse{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important;-ms-flex-preferred-size:auto;flex-basis:auto}.navbar-expand-xl .navbar-toggler{display:none}.navbar-expand-xl .dropup .dropdown-menu{top:auto;bottom:100%}}.navbar-expand{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row nowrap;flex-flow:row nowrap;-webkit-box-pack:start;-ms-flex-pack:start;justify-content:flex-start}.navbar-expand>.container,.navbar-expand>.container-fluid{padding-right:0;padding-left:0}.navbar-expand .navbar-nav{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-direction:row;flex-direction:row}.navbar-expand .navbar-nav .dropdown-menu{position:absolute}.navbar-expand .navbar-nav .dropdown-menu-right{right:0;left:auto}.navbar-expand .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand>.container,.navbar-expand>.container-fluid{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand .navbar-collapse{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important;-ms-flex-preferred-size:auto;flex-basis:auto}.navbar-expand .navbar-toggler{display:none}.navbar-expand .dropup .dropdown-menu{top:auto;bottom:100%}.navbar-light .navbar-brand{color:rgba(0,0,0,0.9)}.navbar-light .navbar-brand:hover,.navbar-light .navbar-brand:focus{color:rgba(0,0,0,0.9)}.navbar-light .navbar-nav .nav-link{color:rgba(0,0,0,0.5)}.navbar-light .navbar-nav .nav-link:hover,.navbar-light .navbar-nav .nav-link:focus{color:rgba(0,0,0,0.7)}.navbar-light .navbar-nav .nav-link.disabled{color:rgba(0,0,0,0.3)}.navbar-light .navbar-nav .show>.nav-link,.navbar-light .navbar-nav .active>.nav-link,.navbar-light .navbar-nav .nav-link.show,.navbar-light .navbar-nav .nav-link.active{color:rgba(0,0,0,0.9)}.navbar-light .navbar-toggler{color:rgba(0,0,0,0.5);border-color:rgba(0,0,0,0.1)}.navbar-light .navbar-toggler-icon{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 30 30' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='rgba(0,0,0,0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 7h22M4 15h22M4 23h22'/%3E%3C/svg%3E")}.navbar-light .navbar-text{color:rgba(0,0,0,0.5)}.navbar-light .navbar-text a{color:rgba(0,0,0,0.9)}.navbar-light .navbar-text a:hover,.navbar-light .navbar-text a:focus{color:rgba(0,0,0,0.9)}.navbar-dark .navbar-brand{color:#fff}.navbar-dark .navbar-brand:hover,.navbar-dark .navbar-brand:focus{color:#fff}.navbar-dark .navbar-nav .nav-link{color:rgba(255,255,255,0.5)}.navbar-dark .navbar-nav .nav-link:hover,.navbar-dark .navbar-nav .nav-link:focus{color:rgba(255,255,255,0.75)}.navbar-dark .navbar-nav .nav-link.disabled{color:rgba(255,255,255,0.25)}.navbar-dark .navbar-nav .show>.nav-link,.navbar-dark .navbar-nav .active>.nav-link,.navbar-dark .navbar-nav .nav-link.show,.navbar-dark .navbar-nav .nav-link.active{color:#fff}.navbar-dark .navbar-toggler{color:rgba(255,255,255,0.5);border-color:rgba(255,255,255,0.1)}.navbar-dark .navbar-toggler-icon{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 30 30' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='rgba(255,255,255,0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 
7h22M4 15h22M4 23h22'/%3E%3C/svg%3E")}.navbar-dark .navbar-text{color:rgba(255,255,255,0.5)}.navbar-dark .navbar-text a{color:#fff}.navbar-dark .navbar-text a:hover,.navbar-dark .navbar-text a:focus{color:#fff}.card{position:relative;display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;min-width:0;word-wrap:break-word;background-color:#fff;background-clip:border-box;border:1px solid rgba(0,0,0,0.125);border-radius:.25rem}.card>hr{margin-right:0;margin-left:0}.card>.list-group:first-child .list-group-item:first-child{border-top-left-radius:.25rem;border-top-right-radius:.25rem}.card>.list-group:last-child .list-group-item:last-child{border-bottom-right-radius:.25rem;border-bottom-left-radius:.25rem}.card-body{-webkit-box-flex:1;-ms-flex:1 1 auto;flex:1 1 auto;padding:1.25rem}.card-title{margin-bottom:.75rem}.card-subtitle{margin-top:-0.375rem;margin-bottom:0}.card-text:last-child{margin-bottom:0}.card-link:hover{text-decoration:none}.card-link+.card-link{margin-left:1.25rem}.card-header{padding:.75rem 1.25rem;margin-bottom:0;background-color:rgba(0,0,0,0.03);border-bottom:1px solid rgba(0,0,0,0.125)}.card-header:first-child{border-radius:calc(.25rem - 1px) calc(.25rem - 1px) 0 0}.card-header+.list-group .list-group-item:first-child{border-top:0}.card-footer{padding:.75rem 1.25rem;background-color:rgba(0,0,0,0.03);border-top:1px solid rgba(0,0,0,0.125)}.card-footer:last-child{border-radius:0 0 calc(.25rem - 1px) calc(.25rem - 1px)}.card-header-tabs{margin-right:-0.625rem;margin-bottom:-0.75rem;margin-left:-0.625rem;border-bottom:0}.card-header-pills{margin-right:-0.625rem;margin-left:-0.625rem}.card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:1.25rem}.card-img{width:100%;border-radius:calc(.25rem - 1px)}.card-img-top{width:100%;border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 1px)}.card-img-bottom{width:100%;border-bottom-right-radius:calc(.25rem - 1px);border-bottom-left-radius:calc(.25rem - 1px)}.card-deck{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column}.card-deck .card{margin-bottom:15px}@media (min-width: 576px){.card-deck{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row wrap;flex-flow:row wrap;margin-right:-15px;margin-left:-15px}.card-deck .card{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-flex:1;-ms-flex:1 0 0%;flex:1 0 0%;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;margin-right:15px;margin-bottom:0;margin-left:15px}}.card-group{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column}.card-group>.card{margin-bottom:15px}@media (min-width: 576px){.card-group{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row wrap;flex-flow:row wrap}.card-group>.card{-webkit-box-flex:1;-ms-flex:1 0 0%;flex:1 0 0%;margin-bottom:0}.card-group>.card+.card{margin-left:0;border-left:0}.card-group>.card:first-child{border-top-right-radius:0;border-bottom-right-radius:0}.card-group>.card:first-child .card-img-top,.card-group>.card:first-child .card-header{border-top-right-radius:0}.card-group>.card:first-child .card-img-bottom,.card-group>.card:first-child 
.card-footer{border-bottom-right-radius:0}.card-group>.card:last-child{border-top-left-radius:0;border-bottom-left-radius:0}.card-group>.card:last-child .card-img-top,.card-group>.card:last-child .card-header{border-top-left-radius:0}.card-group>.card:last-child .card-img-bottom,.card-group>.card:last-child .card-footer{border-bottom-left-radius:0}.card-group>.card:only-child{border-radius:.25rem}.card-group>.card:only-child .card-img-top,.card-group>.card:only-child .card-header{border-top-left-radius:.25rem;border-top-right-radius:.25rem}.card-group>.card:only-child .card-img-bottom,.card-group>.card:only-child .card-footer{border-bottom-right-radius:.25rem;border-bottom-left-radius:.25rem}.card-group>.card:not(:first-child):not(:last-child):not(:only-child){border-radius:0}.card-group>.card:not(:first-child):not(:last-child):not(:only-child) .card-img-top,.card-group>.card:not(:first-child):not(:last-child):not(:only-child) .card-img-bottom,.card-group>.card:not(:first-child):not(:last-child):not(:only-child) .card-header,.card-group>.card:not(:first-child):not(:last-child):not(:only-child) .card-footer{border-radius:0}}.card-columns .card{margin-bottom:.75rem}@media (min-width: 576px){.card-columns{-webkit-column-count:3;column-count:3;-webkit-column-gap:1.25rem;column-gap:1.25rem}.card-columns .card{display:inline-block;width:100%}}.breadcrumb{display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;padding:.75rem 1rem;margin-bottom:1rem;list-style:none;background-color:#e9ecef;border-radius:.25rem}.breadcrumb-item+.breadcrumb-item::before{display:inline-block;padding-right:.5rem;padding-left:.5rem;color:#6c757d;content:"/"}.breadcrumb-item+.breadcrumb-item:hover::before{text-decoration:underline}.breadcrumb-item+.breadcrumb-item:hover::before{text-decoration:none}.breadcrumb-item.active{color:#6c757d}.pagination{display:-webkit-box;display:-ms-flexbox;display:flex;padding-left:0;list-style:none;border-radius:.25rem}.page-link{position:relative;display:block;padding:.5rem .75rem;margin-left:-1px;line-height:1.25;color:#007bff;background-color:#fff;border:1px solid #dee2e6}.page-link:hover{color:#0056b3;text-decoration:none;background-color:#e9ecef;border-color:#dee2e6}.page-link:focus{z-index:2;outline:0;-webkit-box-shadow:0 0 0 .2rem rgba(0,123,255,0.25);box-shadow:0 0 0 .2rem rgba(0,123,255,0.25)}.page-link:not(:disabled):not(.disabled){cursor:pointer}.page-item:first-child .page-link{margin-left:0;border-top-left-radius:.25rem;border-bottom-left-radius:.25rem}.page-item:last-child .page-link{border-top-right-radius:.25rem;border-bottom-right-radius:.25rem}.page-item.active .page-link{z-index:1;color:#fff;background-color:#007bff;border-color:#007bff}.page-item.disabled .page-link{color:#6c757d;pointer-events:none;cursor:auto;background-color:#fff;border-color:#dee2e6}.pagination-lg .page-link{padding:.75rem 1.5rem;font-size:1.25rem;line-height:1.5}.pagination-lg .page-item:first-child .page-link{border-top-left-radius:.3rem;border-bottom-left-radius:.3rem}.pagination-lg .page-item:last-child .page-link{border-top-right-radius:.3rem;border-bottom-right-radius:.3rem}.pagination-sm .page-link{padding:.25rem .5rem;font-size:.875rem;line-height:1.5}.pagination-sm .page-item:first-child .page-link{border-top-left-radius:.2rem;border-bottom-left-radius:.2rem}.pagination-sm .page-item:last-child .page-link{border-top-right-radius:.2rem;border-bottom-right-radius:.2rem}.badge{display:inline-block;padding:.25em 
.4em;font-size:75%;font-weight:700;line-height:1;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25rem}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.badge-pill{padding-right:.6em;padding-left:.6em;border-radius:10rem}.badge-primary{color:#fff;background-color:#007bff}.badge-primary[href]:hover,.badge-primary[href]:focus{color:#fff;text-decoration:none;background-color:#0062cc}.badge-secondary{color:#fff;background-color:#6c757d}.badge-secondary[href]:hover,.badge-secondary[href]:focus{color:#fff;text-decoration:none;background-color:#545b62}.badge-success{color:#fff;background-color:#28a745}.badge-success[href]:hover,.badge-success[href]:focus{color:#fff;text-decoration:none;background-color:#1e7e34}.badge-info{color:#fff;background-color:#17a2b8}.badge-info[href]:hover,.badge-info[href]:focus{color:#fff;text-decoration:none;background-color:#117a8b}.badge-warning{color:#212529;background-color:#ffc107}.badge-warning[href]:hover,.badge-warning[href]:focus{color:#212529;text-decoration:none;background-color:#d39e00}.badge-danger{color:#fff;background-color:#dc3545}.badge-danger[href]:hover,.badge-danger[href]:focus{color:#fff;text-decoration:none;background-color:#bd2130}.badge-light{color:#212529;background-color:#f8f9fa}.badge-light[href]:hover,.badge-light[href]:focus{color:#212529;text-decoration:none;background-color:#dae0e5}.badge-dark{color:#fff;background-color:#343a40}.badge-dark[href]:hover,.badge-dark[href]:focus{color:#fff;text-decoration:none;background-color:#1d2124}.jumbotron{padding:2rem 1rem;margin-bottom:2rem;background-color:#e9ecef;border-radius:.3rem}@media (min-width: 576px){.jumbotron{padding:4rem 2rem}}.jumbotron-fluid{padding-right:0;padding-left:0;border-radius:0}.alert{position:relative;padding:.75rem 1.25rem;margin-bottom:1rem;border:1px solid transparent;border-radius:.25rem}.alert-heading{color:inherit}.alert-link{font-weight:700}.alert-dismissible{padding-right:4rem}.alert-dismissible .close{position:absolute;top:0;right:0;padding:.75rem 1.25rem;color:inherit}.alert-primary{color:#004085;background-color:#cce5ff;border-color:#b8daff}.alert-primary hr{border-top-color:#9fcdff}.alert-primary .alert-link{color:#002752}.alert-secondary{color:#383d41;background-color:#e2e3e5;border-color:#d6d8db}.alert-secondary hr{border-top-color:#c8cbcf}.alert-secondary .alert-link{color:#202326}.alert-success{color:#155724;background-color:#d4edda;border-color:#c3e6cb}.alert-success hr{border-top-color:#b1dfbb}.alert-success .alert-link{color:#0b2e13}.alert-info{color:#0c5460;background-color:#d1ecf1;border-color:#bee5eb}.alert-info hr{border-top-color:#abdde5}.alert-info .alert-link{color:#062c33}.alert-warning{color:#856404;background-color:#fff3cd;border-color:#ffeeba}.alert-warning hr{border-top-color:#ffe8a1}.alert-warning .alert-link{color:#533f03}.alert-danger{color:#721c24;background-color:#f8d7da;border-color:#f5c6cb}.alert-danger hr{border-top-color:#f1b0b7}.alert-danger .alert-link{color:#491217}.alert-light{color:#818182;background-color:#fefefe;border-color:#fdfdfe}.alert-light hr{border-top-color:#ececf6}.alert-light .alert-link{color:#686868}.alert-dark{color:#1b1e21;background-color:#d6d8d9;border-color:#c6c8ca}.alert-dark hr{border-top-color:#b9bbbe}.alert-dark .alert-link{color:#040505}@-webkit-keyframes progress-bar-stripes{from{background-position:1rem 0}to{background-position:0 0}}@keyframes progress-bar-stripes{from{background-position:1rem 0}to{background-position:0 
0}}.progress{display:-webkit-box;display:-ms-flexbox;display:flex;height:1rem;overflow:hidden;font-size:.75rem;background-color:#e9ecef;border-radius:.25rem}.progress-bar{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;color:#fff;text-align:center;background-color:#007bff;-webkit-transition:width 0.6s ease;transition:width 0.6s ease}.progress-bar-striped{background-image:linear-gradient(45deg, rgba(255,255,255,0.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,0.15) 50%, rgba(255,255,255,0.15) 75%, transparent 75%, transparent);background-size:1rem 1rem}.progress-bar-animated{-webkit-animation:progress-bar-stripes 1s linear infinite;animation:progress-bar-stripes 1s linear infinite}.media{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:start;-ms-flex-align:start;align-items:flex-start}.media-body{-webkit-box-flex:1;-ms-flex:1;flex:1}.list-group{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;padding-left:0;margin-bottom:0}.list-group-item-action{width:100%;color:#495057;text-align:inherit}.list-group-item-action:hover,.list-group-item-action:focus{color:#495057;text-decoration:none;background-color:#f8f9fa}.list-group-item-action:active{color:#212529;background-color:#e9ecef}.list-group-item{position:relative;display:block;padding:.75rem 1.25rem;margin-bottom:-1px;background-color:#fff;border:1px solid rgba(0,0,0,0.125)}.list-group-item:first-child{border-top-left-radius:.25rem;border-top-right-radius:.25rem}.list-group-item:last-child{margin-bottom:0;border-bottom-right-radius:.25rem;border-bottom-left-radius:.25rem}.list-group-item:hover,.list-group-item:focus{z-index:1;text-decoration:none}.list-group-item.disabled,.list-group-item:disabled{color:#6c757d;background-color:#fff}.list-group-item.active{z-index:2;color:#fff;background-color:#007bff;border-color:#007bff}.list-group-flush .list-group-item{border-right:0;border-left:0;border-radius:0}.list-group-flush:first-child .list-group-item:first-child{border-top:0}.list-group-flush:last-child 
.list-group-item:last-child{border-bottom:0}.list-group-item-primary{color:#004085;background-color:#b8daff}.list-group-item-primary.list-group-item-action:hover,.list-group-item-primary.list-group-item-action:focus{color:#004085;background-color:#9fcdff}.list-group-item-primary.list-group-item-action.active{color:#fff;background-color:#004085;border-color:#004085}.list-group-item-secondary{color:#383d41;background-color:#d6d8db}.list-group-item-secondary.list-group-item-action:hover,.list-group-item-secondary.list-group-item-action:focus{color:#383d41;background-color:#c8cbcf}.list-group-item-secondary.list-group-item-action.active{color:#fff;background-color:#383d41;border-color:#383d41}.list-group-item-success{color:#155724;background-color:#c3e6cb}.list-group-item-success.list-group-item-action:hover,.list-group-item-success.list-group-item-action:focus{color:#155724;background-color:#b1dfbb}.list-group-item-success.list-group-item-action.active{color:#fff;background-color:#155724;border-color:#155724}.list-group-item-info{color:#0c5460;background-color:#bee5eb}.list-group-item-info.list-group-item-action:hover,.list-group-item-info.list-group-item-action:focus{color:#0c5460;background-color:#abdde5}.list-group-item-info.list-group-item-action.active{color:#fff;background-color:#0c5460;border-color:#0c5460}.list-group-item-warning{color:#856404;background-color:#ffeeba}.list-group-item-warning.list-group-item-action:hover,.list-group-item-warning.list-group-item-action:focus{color:#856404;background-color:#ffe8a1}.list-group-item-warning.list-group-item-action.active{color:#fff;background-color:#856404;border-color:#856404}.list-group-item-danger{color:#721c24;background-color:#f5c6cb}.list-group-item-danger.list-group-item-action:hover,.list-group-item-danger.list-group-item-action:focus{color:#721c24;background-color:#f1b0b7}.list-group-item-danger.list-group-item-action.active{color:#fff;background-color:#721c24;border-color:#721c24}.list-group-item-light{color:#818182;background-color:#fdfdfe}.list-group-item-light.list-group-item-action:hover,.list-group-item-light.list-group-item-action:focus{color:#818182;background-color:#ececf6}.list-group-item-light.list-group-item-action.active{color:#fff;background-color:#818182;border-color:#818182}.list-group-item-dark{color:#1b1e21;background-color:#c6c8ca}.list-group-item-dark.list-group-item-action:hover,.list-group-item-dark.list-group-item-action:focus{color:#1b1e21;background-color:#b9bbbe}.list-group-item-dark.list-group-item-action.active{color:#fff;background-color:#1b1e21;border-color:#1b1e21}.close{float:right;font-size:1.5rem;font-weight:700;line-height:1;color:#000;text-shadow:0 1px 0 #fff;opacity:.5}.close:hover,.close:focus{color:#000;text-decoration:none;opacity:.75}.close:not(:disabled):not(.disabled){cursor:pointer}button.close{padding:0;background-color:transparent;border:0;-webkit-appearance:none}.modal-open{overflow:hidden}.modal{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1050;display:none;overflow:hidden;outline:0}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal-dialog{position:relative;width:auto;margin:.5rem;pointer-events:none}.modal.fade .modal-dialog{-webkit-transition:-webkit-transform 0.3s ease-out;transition:-webkit-transform 0.3s ease-out;transition:transform 0.3s ease-out;transition:transform 0.3s ease-out, -webkit-transform 0.3s ease-out;-webkit-transform:translate(0, -25%);transform:translate(0, -25%)}.modal.show .modal-dialog{-webkit-transform:translate(0, 0);transform:translate(0, 
0)}.modal-dialog-centered{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;min-height:calc(100% - (.5rem * 2))}.modal-content{position:relative;display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;width:100%;pointer-events:auto;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,0.2);border-radius:.3rem;outline:0}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:#000}.modal-backdrop.fade{opacity:0}.modal-backdrop.show{opacity:.5}.modal-header{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:start;-ms-flex-align:start;align-items:flex-start;-webkit-box-pack:justify;-ms-flex-pack:justify;justify-content:space-between;padding:1rem;border-bottom:1px solid #e9ecef;border-top-left-radius:.3rem;border-top-right-radius:.3rem}.modal-header .close{padding:1rem;margin:-1rem -1rem -1rem auto}.modal-title{margin-bottom:0;line-height:1.5}.modal-body{position:relative;-webkit-box-flex:1;-ms-flex:1 1 auto;flex:1 1 auto;padding:1rem}.modal-footer{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:end;-ms-flex-pack:end;justify-content:flex-end;padding:1rem;border-top:1px solid #e9ecef}.modal-footer>:not(:first-child){margin-left:.25rem}.modal-footer>:not(:last-child){margin-right:.25rem}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width: 576px){.modal-dialog{max-width:500px;margin:1.75rem auto}.modal-dialog-centered{min-height:calc(100% - (1.75rem * 2))}.modal-sm{max-width:300px}}@media (min-width: 992px){.modal-lg{max-width:800px}}.tooltip{position:absolute;z-index:1070;display:block;margin:0;font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol";font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;white-space:normal;line-break:auto;font-size:.875rem;word-wrap:break-word;opacity:0}.tooltip.show{opacity:.9}.tooltip .arrow{position:absolute;display:block;width:.8rem;height:.4rem}.tooltip .arrow::before{position:absolute;content:"";border-color:transparent;border-style:solid}.bs-tooltip-top,.bs-tooltip-auto[x-placement^="top"]{padding:.4rem 0}.bs-tooltip-top .arrow,.bs-tooltip-auto[x-placement^="top"] .arrow{bottom:0}.bs-tooltip-top .arrow::before,.bs-tooltip-auto[x-placement^="top"] .arrow::before{top:0;border-width:.4rem .4rem 0;border-top-color:#000}.bs-tooltip-right,.bs-tooltip-auto[x-placement^="right"]{padding:0 .4rem}.bs-tooltip-right .arrow,.bs-tooltip-auto[x-placement^="right"] .arrow{left:0;width:.4rem;height:.8rem}.bs-tooltip-right .arrow::before,.bs-tooltip-auto[x-placement^="right"] .arrow::before{right:0;border-width:.4rem .4rem .4rem 0;border-right-color:#000}.bs-tooltip-bottom,.bs-tooltip-auto[x-placement^="bottom"]{padding:.4rem 0}.bs-tooltip-bottom .arrow,.bs-tooltip-auto[x-placement^="bottom"] .arrow{top:0}.bs-tooltip-bottom .arrow::before,.bs-tooltip-auto[x-placement^="bottom"] .arrow::before{bottom:0;border-width:0 .4rem .4rem;border-bottom-color:#000}.bs-tooltip-left,.bs-tooltip-auto[x-placement^="left"]{padding:0 .4rem}.bs-tooltip-left 
.arrow,.bs-tooltip-auto[x-placement^="left"] .arrow{right:0;width:.4rem;height:.8rem}.bs-tooltip-left .arrow::before,.bs-tooltip-auto[x-placement^="left"] .arrow::before{left:0;border-width:.4rem 0 .4rem .4rem;border-left-color:#000}.tooltip-inner{max-width:200px;padding:.25rem .5rem;color:#fff;text-align:center;background-color:#000;border-radius:.25rem}.popover{position:absolute;top:0;left:0;z-index:1060;display:block;max-width:276px;font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol";font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;white-space:normal;line-break:auto;font-size:.875rem;word-wrap:break-word;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,0.2);border-radius:.3rem}.popover .arrow{position:absolute;display:block;width:1rem;height:.5rem;margin:0 .3rem}.popover .arrow::before,.popover .arrow::after{position:absolute;display:block;content:"";border-color:transparent;border-style:solid}.bs-popover-top,.bs-popover-auto[x-placement^="top"]{margin-bottom:.5rem}.bs-popover-top .arrow,.bs-popover-auto[x-placement^="top"] .arrow{bottom:calc((.5rem + 1px) * -1)}.bs-popover-top .arrow::before,.bs-popover-auto[x-placement^="top"] .arrow::before,.bs-popover-top .arrow::after,.bs-popover-auto[x-placement^="top"] .arrow::after{border-width:.5rem .5rem 0}.bs-popover-top .arrow::before,.bs-popover-auto[x-placement^="top"] .arrow::before{bottom:0;border-top-color:rgba(0,0,0,0.25)}.bs-popover-top .arrow::after,.bs-popover-auto[x-placement^="top"] .arrow::after{bottom:1px;border-top-color:#fff}.bs-popover-right,.bs-popover-auto[x-placement^="right"]{margin-left:.5rem}.bs-popover-right .arrow,.bs-popover-auto[x-placement^="right"] .arrow{left:calc((.5rem + 1px) * -1);width:.5rem;height:1rem;margin:.3rem 0}.bs-popover-right .arrow::before,.bs-popover-auto[x-placement^="right"] .arrow::before,.bs-popover-right .arrow::after,.bs-popover-auto[x-placement^="right"] .arrow::after{border-width:.5rem .5rem .5rem 0}.bs-popover-right .arrow::before,.bs-popover-auto[x-placement^="right"] .arrow::before{left:0;border-right-color:rgba(0,0,0,0.25)}.bs-popover-right .arrow::after,.bs-popover-auto[x-placement^="right"] .arrow::after{left:1px;border-right-color:#fff}.bs-popover-bottom,.bs-popover-auto[x-placement^="bottom"]{margin-top:.5rem}.bs-popover-bottom .arrow,.bs-popover-auto[x-placement^="bottom"] .arrow{top:calc((.5rem + 1px) * -1)}.bs-popover-bottom .arrow::before,.bs-popover-auto[x-placement^="bottom"] .arrow::before,.bs-popover-bottom .arrow::after,.bs-popover-auto[x-placement^="bottom"] .arrow::after{border-width:0 .5rem .5rem .5rem}.bs-popover-bottom .arrow::before,.bs-popover-auto[x-placement^="bottom"] .arrow::before{top:0;border-bottom-color:rgba(0,0,0,0.25)}.bs-popover-bottom .arrow::after,.bs-popover-auto[x-placement^="bottom"] .arrow::after{top:1px;border-bottom-color:#fff}.bs-popover-bottom .popover-header::before,.bs-popover-auto[x-placement^="bottom"] .popover-header::before{position:absolute;top:0;left:50%;display:block;width:1rem;margin-left:-.5rem;content:"";border-bottom:1px solid #f7f7f7}.bs-popover-left,.bs-popover-auto[x-placement^="left"]{margin-right:.5rem}.bs-popover-left .arrow,.bs-popover-auto[x-placement^="left"] .arrow{right:calc((.5rem + 1px) * -1);width:.5rem;height:1rem;margin:.3rem 
0}.bs-popover-left .arrow::before,.bs-popover-auto[x-placement^="left"] .arrow::before,.bs-popover-left .arrow::after,.bs-popover-auto[x-placement^="left"] .arrow::after{border-width:.5rem 0 .5rem .5rem}.bs-popover-left .arrow::before,.bs-popover-auto[x-placement^="left"] .arrow::before{right:0;border-left-color:rgba(0,0,0,0.25)}.bs-popover-left .arrow::after,.bs-popover-auto[x-placement^="left"] .arrow::after{right:1px;border-left-color:#fff}.popover-header{padding:.5rem .75rem;margin-bottom:0;font-size:1rem;color:inherit;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-top-left-radius:calc(.3rem - 1px);border-top-right-radius:calc(.3rem - 1px)}.popover-header:empty{display:none}.popover-body{padding:.5rem .75rem;color:#212529}.carousel{position:relative}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-item{position:relative;display:none;-webkit-box-align:center;-ms-flex-align:center;align-items:center;width:100%;-webkit-transition:-webkit-transform 0.6s ease;transition:-webkit-transform 0.6s ease;transition:transform 0.6s ease;transition:transform 0.6s ease, -webkit-transform 0.6s ease;-webkit-backface-visibility:hidden;backface-visibility:hidden;-webkit-perspective:1000px;perspective:1000px}.carousel-item.active,.carousel-item-next,.carousel-item-prev{display:block}.carousel-item-next,.carousel-item-prev{position:absolute;top:0}.carousel-item-next.carousel-item-left,.carousel-item-prev.carousel-item-right{-webkit-transform:translateX(0);transform:translateX(0)}@supports ((-webkit-transform-style: preserve-3d) or (transform-style: preserve-3d)){.carousel-item-next.carousel-item-left,.carousel-item-prev.carousel-item-right{-webkit-transform:translate3d(0, 0, 0);transform:translate3d(0, 0, 0)}}.carousel-item-next,.active.carousel-item-right{-webkit-transform:translateX(100%);transform:translateX(100%)}@supports ((-webkit-transform-style: preserve-3d) or (transform-style: preserve-3d)){.carousel-item-next,.active.carousel-item-right{-webkit-transform:translate3d(100%, 0, 0);transform:translate3d(100%, 0, 0)}}.carousel-item-prev,.active.carousel-item-left{-webkit-transform:translateX(-100%);transform:translateX(-100%)}@supports ((-webkit-transform-style: preserve-3d) or (transform-style: preserve-3d)){.carousel-item-prev,.active.carousel-item-left{-webkit-transform:translate3d(-100%, 0, 0);transform:translate3d(-100%, 0, 0)}}.carousel-control-prev,.carousel-control-next{position:absolute;top:0;bottom:0;display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;width:15%;color:#fff;text-align:center;opacity:.5}.carousel-control-prev:hover,.carousel-control-prev:focus,.carousel-control-next:hover,.carousel-control-next:focus{color:#fff;text-decoration:none;outline:0;opacity:.9}.carousel-control-prev{left:0}.carousel-control-next{right:0}.carousel-control-prev-icon,.carousel-control-next-icon{display:inline-block;width:20px;height:20px;background:transparent no-repeat center center;background-size:100% 100%}.carousel-control-prev-icon{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' viewBox='0 0 8 8'%3E%3Cpath d='M5.25 0l-4 4 4 4 1.5-1.5-2.5-2.5 2.5-2.5-1.5-1.5z'/%3E%3C/svg%3E")}.carousel-control-next-icon{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' viewBox='0 0 8 8'%3E%3Cpath d='M2.75 0l-1.5 1.5 2.5 2.5-2.5 
2.5 1.5 1.5 4-4-4-4z'/%3E%3C/svg%3E")}.carousel-indicators{position:absolute;right:0;bottom:10px;left:0;z-index:15;display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;padding-left:0;margin-right:15%;margin-left:15%;list-style:none}.carousel-indicators li{position:relative;-webkit-box-flex:0;-ms-flex:0 1 auto;flex:0 1 auto;width:30px;height:3px;margin-right:3px;margin-left:3px;text-indent:-999px;background-color:rgba(255,255,255,0.5)}.carousel-indicators li::before{position:absolute;top:-10px;left:0;display:inline-block;width:100%;height:10px;content:""}.carousel-indicators li::after{position:absolute;bottom:-10px;left:0;display:inline-block;width:100%;height:10px;content:""}.carousel-indicators .active{background-color:#fff}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center}.align-baseline{vertical-align:baseline !important}.align-top{vertical-align:top !important}.align-middle{vertical-align:middle !important}.align-bottom{vertical-align:bottom !important}.align-text-bottom{vertical-align:text-bottom !important}.align-text-top{vertical-align:text-top !important}.bg-primary{background-color:#007bff !important}a.bg-primary:hover,a.bg-primary:focus,button.bg-primary:hover,button.bg-primary:focus{background-color:#0062cc !important}.bg-secondary{background-color:#6c757d !important}a.bg-secondary:hover,a.bg-secondary:focus,button.bg-secondary:hover,button.bg-secondary:focus{background-color:#545b62 !important}.bg-success{background-color:#28a745 !important}a.bg-success:hover,a.bg-success:focus,button.bg-success:hover,button.bg-success:focus{background-color:#1e7e34 !important}.bg-info{background-color:#17a2b8 !important}a.bg-info:hover,a.bg-info:focus,button.bg-info:hover,button.bg-info:focus{background-color:#117a8b !important}.bg-warning{background-color:#ffc107 !important}a.bg-warning:hover,a.bg-warning:focus,button.bg-warning:hover,button.bg-warning:focus{background-color:#d39e00 !important}.bg-danger{background-color:#dc3545 !important}a.bg-danger:hover,a.bg-danger:focus,button.bg-danger:hover,button.bg-danger:focus{background-color:#bd2130 !important}.bg-light{background-color:#f8f9fa !important}a.bg-light:hover,a.bg-light:focus,button.bg-light:hover,button.bg-light:focus{background-color:#dae0e5 !important}.bg-dark{background-color:#343a40 !important}a.bg-dark:hover,a.bg-dark:focus,button.bg-dark:hover,button.bg-dark:focus{background-color:#1d2124 !important}.bg-white{background-color:#fff !important}.bg-transparent{background-color:transparent !important}.border{border:1px solid #dee2e6 !important}.border-top{border-top:1px solid #dee2e6 !important}.border-right{border-right:1px solid #dee2e6 !important}.border-bottom{border-bottom:1px solid #dee2e6 !important}.border-left{border-left:1px solid #dee2e6 !important}.border-0{border:0 !important}.border-top-0{border-top:0 !important}.border-right-0{border-right:0 !important}.border-bottom-0{border-bottom:0 !important}.border-left-0{border-left:0 !important}.border-primary{border-color:#007bff !important}.border-secondary{border-color:#6c757d !important}.border-success{border-color:#28a745 !important}.border-info{border-color:#17a2b8 !important}.border-warning{border-color:#ffc107 !important}.border-danger{border-color:#dc3545 !important}.border-light{border-color:#f8f9fa !important}.border-dark{border-color:#343a40 !important}.border-white{border-color:#fff 
!important}.rounded{border-radius:.25rem !important}.rounded-top{border-top-left-radius:.25rem !important;border-top-right-radius:.25rem !important}.rounded-right{border-top-right-radius:.25rem !important;border-bottom-right-radius:.25rem !important}.rounded-bottom{border-bottom-right-radius:.25rem !important;border-bottom-left-radius:.25rem !important}.rounded-left{border-top-left-radius:.25rem !important;border-bottom-left-radius:.25rem !important}.rounded-circle{border-radius:50% !important}.rounded-0{border-radius:0 !important}.clearfix::after{display:block;clear:both;content:""}.d-none{display:none !important}.d-inline{display:inline !important}.d-inline-block{display:inline-block !important}.d-block{display:block !important}.d-table{display:table !important}.d-table-row{display:table-row !important}.d-table-cell{display:table-cell !important}.d-flex{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important}.d-inline-flex{display:-webkit-inline-box !important;display:-ms-inline-flexbox !important;display:inline-flex !important}@media (min-width: 576px){.d-sm-none{display:none !important}.d-sm-inline{display:inline !important}.d-sm-inline-block{display:inline-block !important}.d-sm-block{display:block !important}.d-sm-table{display:table !important}.d-sm-table-row{display:table-row !important}.d-sm-table-cell{display:table-cell !important}.d-sm-flex{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important}.d-sm-inline-flex{display:-webkit-inline-box !important;display:-ms-inline-flexbox !important;display:inline-flex !important}}@media (min-width: 768px){.d-md-none{display:none !important}.d-md-inline{display:inline !important}.d-md-inline-block{display:inline-block !important}.d-md-block{display:block !important}.d-md-table{display:table !important}.d-md-table-row{display:table-row !important}.d-md-table-cell{display:table-cell !important}.d-md-flex{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important}.d-md-inline-flex{display:-webkit-inline-box !important;display:-ms-inline-flexbox !important;display:inline-flex !important}}@media (min-width: 992px){.d-lg-none{display:none !important}.d-lg-inline{display:inline !important}.d-lg-inline-block{display:inline-block !important}.d-lg-block{display:block !important}.d-lg-table{display:table !important}.d-lg-table-row{display:table-row !important}.d-lg-table-cell{display:table-cell !important}.d-lg-flex{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important}.d-lg-inline-flex{display:-webkit-inline-box !important;display:-ms-inline-flexbox !important;display:inline-flex !important}}@media (min-width: 1200px){.d-xl-none{display:none !important}.d-xl-inline{display:inline !important}.d-xl-inline-block{display:inline-block !important}.d-xl-block{display:block !important}.d-xl-table{display:table !important}.d-xl-table-row{display:table-row !important}.d-xl-table-cell{display:table-cell !important}.d-xl-flex{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important}.d-xl-inline-flex{display:-webkit-inline-box !important;display:-ms-inline-flexbox !important;display:inline-flex !important}}@media print{.d-print-none{display:none !important}.d-print-inline{display:inline !important}.d-print-inline-block{display:inline-block !important}.d-print-block{display:block !important}.d-print-table{display:table !important}.d-print-table-row{display:table-row !important}.d-print-table-cell{display:table-cell 
!important}.d-print-flex{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important}.d-print-inline-flex{display:-webkit-inline-box !important;display:-ms-inline-flexbox !important;display:inline-flex !important}}.embed-responsive{position:relative;display:block;width:100%;padding:0;overflow:hidden}.embed-responsive::before{display:block;content:""}.embed-responsive .embed-responsive-item,.embed-responsive iframe,.embed-responsive embed,.embed-responsive object,.embed-responsive video{position:absolute;top:0;bottom:0;left:0;width:100%;height:100%;border:0}.embed-responsive-21by9::before{padding-top:42.8571428571%}.embed-responsive-16by9::before{padding-top:56.25%}.embed-responsive-4by3::before{padding-top:75%}.embed-responsive-1by1::before{padding-top:100%}.flex-row{-webkit-box-orient:horizontal !important;-webkit-box-direction:normal !important;-ms-flex-direction:row !important;flex-direction:row !important}.flex-column{-webkit-box-orient:vertical !important;-webkit-box-direction:normal !important;-ms-flex-direction:column !important;flex-direction:column !important}.flex-row-reverse{-webkit-box-orient:horizontal !important;-webkit-box-direction:reverse !important;-ms-flex-direction:row-reverse !important;flex-direction:row-reverse !important}.flex-column-reverse{-webkit-box-orient:vertical !important;-webkit-box-direction:reverse !important;-ms-flex-direction:column-reverse !important;flex-direction:column-reverse !important}.flex-wrap{-ms-flex-wrap:wrap !important;flex-wrap:wrap !important}.flex-nowrap{-ms-flex-wrap:nowrap !important;flex-wrap:nowrap !important}.flex-wrap-reverse{-ms-flex-wrap:wrap-reverse !important;flex-wrap:wrap-reverse !important}.justify-content-start{-webkit-box-pack:start !important;-ms-flex-pack:start !important;justify-content:flex-start !important}.justify-content-end{-webkit-box-pack:end !important;-ms-flex-pack:end !important;justify-content:flex-end !important}.justify-content-center{-webkit-box-pack:center !important;-ms-flex-pack:center !important;justify-content:center !important}.justify-content-between{-webkit-box-pack:justify !important;-ms-flex-pack:justify !important;justify-content:space-between !important}.justify-content-around{-ms-flex-pack:distribute !important;justify-content:space-around !important}.align-items-start{-webkit-box-align:start !important;-ms-flex-align:start !important;align-items:flex-start !important}.align-items-end{-webkit-box-align:end !important;-ms-flex-align:end !important;align-items:flex-end !important}.align-items-center{-webkit-box-align:center !important;-ms-flex-align:center !important;align-items:center !important}.align-items-baseline{-webkit-box-align:baseline !important;-ms-flex-align:baseline !important;align-items:baseline !important}.align-items-stretch{-webkit-box-align:stretch !important;-ms-flex-align:stretch !important;align-items:stretch !important}.align-content-start{-ms-flex-line-pack:start !important;align-content:flex-start !important}.align-content-end{-ms-flex-line-pack:end !important;align-content:flex-end !important}.align-content-center{-ms-flex-line-pack:center !important;align-content:center !important}.align-content-between{-ms-flex-line-pack:justify !important;align-content:space-between !important}.align-content-around{-ms-flex-line-pack:distribute !important;align-content:space-around !important}.align-content-stretch{-ms-flex-line-pack:stretch !important;align-content:stretch !important}.align-self-auto{-ms-flex-item-align:auto !important;align-self:auto 
!important}.align-self-start{-ms-flex-item-align:start !important;align-self:flex-start !important}.align-self-end{-ms-flex-item-align:end !important;align-self:flex-end !important}.align-self-center{-ms-flex-item-align:center !important;align-self:center !important}.align-self-baseline{-ms-flex-item-align:baseline !important;align-self:baseline !important}.align-self-stretch{-ms-flex-item-align:stretch !important;align-self:stretch !important}@media (min-width: 576px){.flex-sm-row{-webkit-box-orient:horizontal !important;-webkit-box-direction:normal !important;-ms-flex-direction:row !important;flex-direction:row !important}.flex-sm-column{-webkit-box-orient:vertical !important;-webkit-box-direction:normal !important;-ms-flex-direction:column !important;flex-direction:column !important}.flex-sm-row-reverse{-webkit-box-orient:horizontal !important;-webkit-box-direction:reverse !important;-ms-flex-direction:row-reverse !important;flex-direction:row-reverse !important}.flex-sm-column-reverse{-webkit-box-orient:vertical !important;-webkit-box-direction:reverse !important;-ms-flex-direction:column-reverse !important;flex-direction:column-reverse !important}.flex-sm-wrap{-ms-flex-wrap:wrap !important;flex-wrap:wrap !important}.flex-sm-nowrap{-ms-flex-wrap:nowrap !important;flex-wrap:nowrap !important}.flex-sm-wrap-reverse{-ms-flex-wrap:wrap-reverse !important;flex-wrap:wrap-reverse !important}.justify-content-sm-start{-webkit-box-pack:start !important;-ms-flex-pack:start !important;justify-content:flex-start !important}.justify-content-sm-end{-webkit-box-pack:end !important;-ms-flex-pack:end !important;justify-content:flex-end !important}.justify-content-sm-center{-webkit-box-pack:center !important;-ms-flex-pack:center !important;justify-content:center !important}.justify-content-sm-between{-webkit-box-pack:justify !important;-ms-flex-pack:justify !important;justify-content:space-between !important}.justify-content-sm-around{-ms-flex-pack:distribute !important;justify-content:space-around !important}.align-items-sm-start{-webkit-box-align:start !important;-ms-flex-align:start !important;align-items:flex-start !important}.align-items-sm-end{-webkit-box-align:end !important;-ms-flex-align:end !important;align-items:flex-end !important}.align-items-sm-center{-webkit-box-align:center !important;-ms-flex-align:center !important;align-items:center !important}.align-items-sm-baseline{-webkit-box-align:baseline !important;-ms-flex-align:baseline !important;align-items:baseline !important}.align-items-sm-stretch{-webkit-box-align:stretch !important;-ms-flex-align:stretch !important;align-items:stretch !important}.align-content-sm-start{-ms-flex-line-pack:start !important;align-content:flex-start !important}.align-content-sm-end{-ms-flex-line-pack:end !important;align-content:flex-end !important}.align-content-sm-center{-ms-flex-line-pack:center !important;align-content:center !important}.align-content-sm-between{-ms-flex-line-pack:justify !important;align-content:space-between !important}.align-content-sm-around{-ms-flex-line-pack:distribute !important;align-content:space-around !important}.align-content-sm-stretch{-ms-flex-line-pack:stretch !important;align-content:stretch !important}.align-self-sm-auto{-ms-flex-item-align:auto !important;align-self:auto !important}.align-self-sm-start{-ms-flex-item-align:start !important;align-self:flex-start !important}.align-self-sm-end{-ms-flex-item-align:end !important;align-self:flex-end !important}.align-self-sm-center{-ms-flex-item-align:center 
!important;align-self:center !important}.align-self-sm-baseline{-ms-flex-item-align:baseline !important;align-self:baseline !important}.align-self-sm-stretch{-ms-flex-item-align:stretch !important;align-self:stretch !important}}@media (min-width: 768px){.flex-md-row{-webkit-box-orient:horizontal !important;-webkit-box-direction:normal !important;-ms-flex-direction:row !important;flex-direction:row !important}.flex-md-column{-webkit-box-orient:vertical !important;-webkit-box-direction:normal !important;-ms-flex-direction:column !important;flex-direction:column !important}.flex-md-row-reverse{-webkit-box-orient:horizontal !important;-webkit-box-direction:reverse !important;-ms-flex-direction:row-reverse !important;flex-direction:row-reverse !important}.flex-md-column-reverse{-webkit-box-orient:vertical !important;-webkit-box-direction:reverse !important;-ms-flex-direction:column-reverse !important;flex-direction:column-reverse !important}.flex-md-wrap{-ms-flex-wrap:wrap !important;flex-wrap:wrap !important}.flex-md-nowrap{-ms-flex-wrap:nowrap !important;flex-wrap:nowrap !important}.flex-md-wrap-reverse{-ms-flex-wrap:wrap-reverse !important;flex-wrap:wrap-reverse !important}.justify-content-md-start{-webkit-box-pack:start !important;-ms-flex-pack:start !important;justify-content:flex-start !important}.justify-content-md-end{-webkit-box-pack:end !important;-ms-flex-pack:end !important;justify-content:flex-end !important}.justify-content-md-center{-webkit-box-pack:center !important;-ms-flex-pack:center !important;justify-content:center !important}.justify-content-md-between{-webkit-box-pack:justify !important;-ms-flex-pack:justify !important;justify-content:space-between !important}.justify-content-md-around{-ms-flex-pack:distribute !important;justify-content:space-around !important}.align-items-md-start{-webkit-box-align:start !important;-ms-flex-align:start !important;align-items:flex-start !important}.align-items-md-end{-webkit-box-align:end !important;-ms-flex-align:end !important;align-items:flex-end !important}.align-items-md-center{-webkit-box-align:center !important;-ms-flex-align:center !important;align-items:center !important}.align-items-md-baseline{-webkit-box-align:baseline !important;-ms-flex-align:baseline !important;align-items:baseline !important}.align-items-md-stretch{-webkit-box-align:stretch !important;-ms-flex-align:stretch !important;align-items:stretch !important}.align-content-md-start{-ms-flex-line-pack:start !important;align-content:flex-start !important}.align-content-md-end{-ms-flex-line-pack:end !important;align-content:flex-end !important}.align-content-md-center{-ms-flex-line-pack:center !important;align-content:center !important}.align-content-md-between{-ms-flex-line-pack:justify !important;align-content:space-between !important}.align-content-md-around{-ms-flex-line-pack:distribute !important;align-content:space-around !important}.align-content-md-stretch{-ms-flex-line-pack:stretch !important;align-content:stretch !important}.align-self-md-auto{-ms-flex-item-align:auto !important;align-self:auto !important}.align-self-md-start{-ms-flex-item-align:start !important;align-self:flex-start !important}.align-self-md-end{-ms-flex-item-align:end !important;align-self:flex-end !important}.align-self-md-center{-ms-flex-item-align:center !important;align-self:center !important}.align-self-md-baseline{-ms-flex-item-align:baseline !important;align-self:baseline !important}.align-self-md-stretch{-ms-flex-item-align:stretch !important;align-self:stretch !important}}@media 
(min-width: 992px){.flex-lg-row{-webkit-box-orient:horizontal !important;-webkit-box-direction:normal !important;-ms-flex-direction:row !important;flex-direction:row !important}.flex-lg-column{-webkit-box-orient:vertical !important;-webkit-box-direction:normal !important;-ms-flex-direction:column !important;flex-direction:column !important}.flex-lg-row-reverse{-webkit-box-orient:horizontal !important;-webkit-box-direction:reverse !important;-ms-flex-direction:row-reverse !important;flex-direction:row-reverse !important}.flex-lg-column-reverse{-webkit-box-orient:vertical !important;-webkit-box-direction:reverse !important;-ms-flex-direction:column-reverse !important;flex-direction:column-reverse !important}.flex-lg-wrap{-ms-flex-wrap:wrap !important;flex-wrap:wrap !important}.flex-lg-nowrap{-ms-flex-wrap:nowrap !important;flex-wrap:nowrap !important}.flex-lg-wrap-reverse{-ms-flex-wrap:wrap-reverse !important;flex-wrap:wrap-reverse !important}.justify-content-lg-start{-webkit-box-pack:start !important;-ms-flex-pack:start !important;justify-content:flex-start !important}.justify-content-lg-end{-webkit-box-pack:end !important;-ms-flex-pack:end !important;justify-content:flex-end !important}.justify-content-lg-center{-webkit-box-pack:center !important;-ms-flex-pack:center !important;justify-content:center !important}.justify-content-lg-between{-webkit-box-pack:justify !important;-ms-flex-pack:justify !important;justify-content:space-between !important}.justify-content-lg-around{-ms-flex-pack:distribute !important;justify-content:space-around !important}.align-items-lg-start{-webkit-box-align:start !important;-ms-flex-align:start !important;align-items:flex-start !important}.align-items-lg-end{-webkit-box-align:end !important;-ms-flex-align:end !important;align-items:flex-end !important}.align-items-lg-center{-webkit-box-align:center !important;-ms-flex-align:center !important;align-items:center !important}.align-items-lg-baseline{-webkit-box-align:baseline !important;-ms-flex-align:baseline !important;align-items:baseline !important}.align-items-lg-stretch{-webkit-box-align:stretch !important;-ms-flex-align:stretch !important;align-items:stretch !important}.align-content-lg-start{-ms-flex-line-pack:start !important;align-content:flex-start !important}.align-content-lg-end{-ms-flex-line-pack:end !important;align-content:flex-end !important}.align-content-lg-center{-ms-flex-line-pack:center !important;align-content:center !important}.align-content-lg-between{-ms-flex-line-pack:justify !important;align-content:space-between !important}.align-content-lg-around{-ms-flex-line-pack:distribute !important;align-content:space-around !important}.align-content-lg-stretch{-ms-flex-line-pack:stretch !important;align-content:stretch !important}.align-self-lg-auto{-ms-flex-item-align:auto !important;align-self:auto !important}.align-self-lg-start{-ms-flex-item-align:start !important;align-self:flex-start !important}.align-self-lg-end{-ms-flex-item-align:end !important;align-self:flex-end !important}.align-self-lg-center{-ms-flex-item-align:center !important;align-self:center !important}.align-self-lg-baseline{-ms-flex-item-align:baseline !important;align-self:baseline !important}.align-self-lg-stretch{-ms-flex-item-align:stretch !important;align-self:stretch !important}}@media (min-width: 1200px){.flex-xl-row{-webkit-box-orient:horizontal !important;-webkit-box-direction:normal !important;-ms-flex-direction:row !important;flex-direction:row !important}.flex-xl-column{-webkit-box-orient:vertical 
!important;-webkit-box-direction:normal !important;-ms-flex-direction:column !important;flex-direction:column !important}.flex-xl-row-reverse{-webkit-box-orient:horizontal !important;-webkit-box-direction:reverse !important;-ms-flex-direction:row-reverse !important;flex-direction:row-reverse !important}.flex-xl-column-reverse{-webkit-box-orient:vertical !important;-webkit-box-direction:reverse !important;-ms-flex-direction:column-reverse !important;flex-direction:column-reverse !important}.flex-xl-wrap{-ms-flex-wrap:wrap !important;flex-wrap:wrap !important}.flex-xl-nowrap{-ms-flex-wrap:nowrap !important;flex-wrap:nowrap !important}.flex-xl-wrap-reverse{-ms-flex-wrap:wrap-reverse !important;flex-wrap:wrap-reverse !important}.justify-content-xl-start{-webkit-box-pack:start !important;-ms-flex-pack:start !important;justify-content:flex-start !important}.justify-content-xl-end{-webkit-box-pack:end !important;-ms-flex-pack:end !important;justify-content:flex-end !important}.justify-content-xl-center{-webkit-box-pack:center !important;-ms-flex-pack:center !important;justify-content:center !important}.justify-content-xl-between{-webkit-box-pack:justify !important;-ms-flex-pack:justify !important;justify-content:space-between !important}.justify-content-xl-around{-ms-flex-pack:distribute !important;justify-content:space-around !important}.align-items-xl-start{-webkit-box-align:start !important;-ms-flex-align:start !important;align-items:flex-start !important}.align-items-xl-end{-webkit-box-align:end !important;-ms-flex-align:end !important;align-items:flex-end !important}.align-items-xl-center{-webkit-box-align:center !important;-ms-flex-align:center !important;align-items:center !important}.align-items-xl-baseline{-webkit-box-align:baseline !important;-ms-flex-align:baseline !important;align-items:baseline !important}.align-items-xl-stretch{-webkit-box-align:stretch !important;-ms-flex-align:stretch !important;align-items:stretch !important}.align-content-xl-start{-ms-flex-line-pack:start !important;align-content:flex-start !important}.align-content-xl-end{-ms-flex-line-pack:end !important;align-content:flex-end !important}.align-content-xl-center{-ms-flex-line-pack:center !important;align-content:center !important}.align-content-xl-between{-ms-flex-line-pack:justify !important;align-content:space-between !important}.align-content-xl-around{-ms-flex-line-pack:distribute !important;align-content:space-around !important}.align-content-xl-stretch{-ms-flex-line-pack:stretch !important;align-content:stretch !important}.align-self-xl-auto{-ms-flex-item-align:auto !important;align-self:auto !important}.align-self-xl-start{-ms-flex-item-align:start !important;align-self:flex-start !important}.align-self-xl-end{-ms-flex-item-align:end !important;align-self:flex-end !important}.align-self-xl-center{-ms-flex-item-align:center !important;align-self:center !important}.align-self-xl-baseline{-ms-flex-item-align:baseline !important;align-self:baseline !important}.align-self-xl-stretch{-ms-flex-item-align:stretch !important;align-self:stretch !important}}.float-left{float:left !important}.float-right{float:right !important}.float-none{float:none !important}@media (min-width: 576px){.float-sm-left{float:left !important}.float-sm-right{float:right !important}.float-sm-none{float:none !important}}@media (min-width: 768px){.float-md-left{float:left !important}.float-md-right{float:right !important}.float-md-none{float:none !important}}@media (min-width: 992px){.float-lg-left{float:left 
!important}.float-lg-right{float:right !important}.float-lg-none{float:none !important}}@media (min-width: 1200px){.float-xl-left{float:left !important}.float-xl-right{float:right !important}.float-xl-none{float:none !important}}.position-static{position:static !important}.position-relative{position:relative !important}.position-absolute{position:absolute !important}.position-fixed{position:fixed !important}.position-sticky{position:-webkit-sticky !important;position:sticky !important}.fixed-top{position:fixed;top:0;right:0;left:0;z-index:1030}.fixed-bottom{position:fixed;right:0;bottom:0;left:0;z-index:1030}@supports ((position: -webkit-sticky) or (position: sticky)){.sticky-top{position:-webkit-sticky;position:sticky;top:0;z-index:1020}}.sr-only{position:absolute;width:1px;height:1px;padding:0;overflow:hidden;clip:rect(0, 0, 0, 0);white-space:nowrap;-webkit-clip-path:inset(50%);clip-path:inset(50%);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;overflow:visible;clip:auto;white-space:normal;-webkit-clip-path:none;clip-path:none}.w-25{width:25% !important}.w-50{width:50% !important}.w-75{width:75% !important}.w-100{width:100% !important}.h-25{height:25% !important}.h-50{height:50% !important}.h-75{height:75% !important}.h-100{height:100% !important}.mw-100{max-width:100% !important}.mh-100{max-height:100% !important}.m-0{margin:0 !important}.mt-0,.my-0{margin-top:0 !important}.mr-0,.mx-0{margin-right:0 !important}.mb-0,.my-0{margin-bottom:0 !important}.ml-0,.mx-0{margin-left:0 !important}.m-1{margin:.25rem !important}.mt-1,.my-1{margin-top:.25rem !important}.mr-1,.mx-1{margin-right:.25rem !important}.mb-1,.my-1{margin-bottom:.25rem !important}.ml-1,.mx-1{margin-left:.25rem !important}.m-2{margin:.5rem !important}.mt-2,.my-2{margin-top:.5rem !important}.mr-2,.mx-2{margin-right:.5rem !important}.mb-2,.my-2{margin-bottom:.5rem !important}.ml-2,.mx-2{margin-left:.5rem !important}.m-3{margin:1rem !important}.mt-3,.my-3{margin-top:1rem !important}.mr-3,.mx-3{margin-right:1rem !important}.mb-3,.my-3{margin-bottom:1rem !important}.ml-3,.mx-3{margin-left:1rem !important}.m-4{margin:1.5rem !important}.mt-4,.my-4{margin-top:1.5rem !important}.mr-4,.mx-4{margin-right:1.5rem !important}.mb-4,.my-4{margin-bottom:1.5rem !important}.ml-4,.mx-4{margin-left:1.5rem !important}.m-5{margin:3rem !important}.mt-5,.my-5{margin-top:3rem !important}.mr-5,.mx-5{margin-right:3rem !important}.mb-5,.my-5{margin-bottom:3rem !important}.ml-5,.mx-5{margin-left:3rem !important}.p-0{padding:0 !important}.pt-0,.py-0{padding-top:0 !important}.pr-0,.px-0{padding-right:0 !important}.pb-0,.py-0{padding-bottom:0 !important}.pl-0,.px-0{padding-left:0 !important}.p-1{padding:.25rem !important}.pt-1,.py-1{padding-top:.25rem !important}.pr-1,.px-1{padding-right:.25rem !important}.pb-1,.py-1{padding-bottom:.25rem !important}.pl-1,.px-1{padding-left:.25rem !important}.p-2{padding:.5rem !important}.pt-2,.py-2{padding-top:.5rem !important}.pr-2,.px-2{padding-right:.5rem !important}.pb-2,.py-2{padding-bottom:.5rem !important}.pl-2,.px-2{padding-left:.5rem !important}.p-3{padding:1rem !important}.pt-3,.py-3{padding-top:1rem !important}.pr-3,.px-3{padding-right:1rem !important}.pb-3,.py-3{padding-bottom:1rem !important}.pl-3,.px-3{padding-left:1rem !important}.p-4{padding:1.5rem !important}.pt-4,.py-4{padding-top:1.5rem !important}.pr-4,.px-4{padding-right:1.5rem !important}.pb-4,.py-4{padding-bottom:1.5rem !important}.pl-4,.px-4{padding-left:1.5rem !important}.p-5{padding:3rem 
!important}.pt-5,.py-5{padding-top:3rem !important}.pr-5,.px-5{padding-right:3rem !important}.pb-5,.py-5{padding-bottom:3rem !important}.pl-5,.px-5{padding-left:3rem !important}.m-auto{margin:auto !important}.mt-auto,.my-auto{margin-top:auto !important}.mr-auto,.mx-auto{margin-right:auto !important}.mb-auto,.my-auto{margin-bottom:auto !important}.ml-auto,.mx-auto{margin-left:auto !important}@media (min-width: 576px){.m-sm-0{margin:0 !important}.mt-sm-0,.my-sm-0{margin-top:0 !important}.mr-sm-0,.mx-sm-0{margin-right:0 !important}.mb-sm-0,.my-sm-0{margin-bottom:0 !important}.ml-sm-0,.mx-sm-0{margin-left:0 !important}.m-sm-1{margin:.25rem !important}.mt-sm-1,.my-sm-1{margin-top:.25rem !important}.mr-sm-1,.mx-sm-1{margin-right:.25rem !important}.mb-sm-1,.my-sm-1{margin-bottom:.25rem !important}.ml-sm-1,.mx-sm-1{margin-left:.25rem !important}.m-sm-2{margin:.5rem !important}.mt-sm-2,.my-sm-2{margin-top:.5rem !important}.mr-sm-2,.mx-sm-2{margin-right:.5rem !important}.mb-sm-2,.my-sm-2{margin-bottom:.5rem !important}.ml-sm-2,.mx-sm-2{margin-left:.5rem !important}.m-sm-3{margin:1rem !important}.mt-sm-3,.my-sm-3{margin-top:1rem !important}.mr-sm-3,.mx-sm-3{margin-right:1rem !important}.mb-sm-3,.my-sm-3{margin-bottom:1rem !important}.ml-sm-3,.mx-sm-3{margin-left:1rem !important}.m-sm-4{margin:1.5rem !important}.mt-sm-4,.my-sm-4{margin-top:1.5rem !important}.mr-sm-4,.mx-sm-4{margin-right:1.5rem !important}.mb-sm-4,.my-sm-4{margin-bottom:1.5rem !important}.ml-sm-4,.mx-sm-4{margin-left:1.5rem !important}.m-sm-5{margin:3rem !important}.mt-sm-5,.my-sm-5{margin-top:3rem !important}.mr-sm-5,.mx-sm-5{margin-right:3rem !important}.mb-sm-5,.my-sm-5{margin-bottom:3rem !important}.ml-sm-5,.mx-sm-5{margin-left:3rem !important}.p-sm-0{padding:0 !important}.pt-sm-0,.py-sm-0{padding-top:0 !important}.pr-sm-0,.px-sm-0{padding-right:0 !important}.pb-sm-0,.py-sm-0{padding-bottom:0 !important}.pl-sm-0,.px-sm-0{padding-left:0 !important}.p-sm-1{padding:.25rem !important}.pt-sm-1,.py-sm-1{padding-top:.25rem !important}.pr-sm-1,.px-sm-1{padding-right:.25rem !important}.pb-sm-1,.py-sm-1{padding-bottom:.25rem !important}.pl-sm-1,.px-sm-1{padding-left:.25rem !important}.p-sm-2{padding:.5rem !important}.pt-sm-2,.py-sm-2{padding-top:.5rem !important}.pr-sm-2,.px-sm-2{padding-right:.5rem !important}.pb-sm-2,.py-sm-2{padding-bottom:.5rem !important}.pl-sm-2,.px-sm-2{padding-left:.5rem !important}.p-sm-3{padding:1rem !important}.pt-sm-3,.py-sm-3{padding-top:1rem !important}.pr-sm-3,.px-sm-3{padding-right:1rem !important}.pb-sm-3,.py-sm-3{padding-bottom:1rem !important}.pl-sm-3,.px-sm-3{padding-left:1rem !important}.p-sm-4{padding:1.5rem !important}.pt-sm-4,.py-sm-4{padding-top:1.5rem !important}.pr-sm-4,.px-sm-4{padding-right:1.5rem !important}.pb-sm-4,.py-sm-4{padding-bottom:1.5rem !important}.pl-sm-4,.px-sm-4{padding-left:1.5rem !important}.p-sm-5{padding:3rem !important}.pt-sm-5,.py-sm-5{padding-top:3rem !important}.pr-sm-5,.px-sm-5{padding-right:3rem !important}.pb-sm-5,.py-sm-5{padding-bottom:3rem !important}.pl-sm-5,.px-sm-5{padding-left:3rem !important}.m-sm-auto{margin:auto !important}.mt-sm-auto,.my-sm-auto{margin-top:auto !important}.mr-sm-auto,.mx-sm-auto{margin-right:auto !important}.mb-sm-auto,.my-sm-auto{margin-bottom:auto !important}.ml-sm-auto,.mx-sm-auto{margin-left:auto !important}}@media (min-width: 768px){.m-md-0{margin:0 !important}.mt-md-0,.my-md-0{margin-top:0 !important}.mr-md-0,.mx-md-0{margin-right:0 !important}.mb-md-0,.my-md-0{margin-bottom:0 !important}.ml-md-0,.mx-md-0{margin-left:0 
!important}.m-md-1{margin:.25rem !important}.mt-md-1,.my-md-1{margin-top:.25rem !important}.mr-md-1,.mx-md-1{margin-right:.25rem !important}.mb-md-1,.my-md-1{margin-bottom:.25rem !important}.ml-md-1,.mx-md-1{margin-left:.25rem !important}.m-md-2{margin:.5rem !important}.mt-md-2,.my-md-2{margin-top:.5rem !important}.mr-md-2,.mx-md-2{margin-right:.5rem !important}.mb-md-2,.my-md-2{margin-bottom:.5rem !important}.ml-md-2,.mx-md-2{margin-left:.5rem !important}.m-md-3{margin:1rem !important}.mt-md-3,.my-md-3{margin-top:1rem !important}.mr-md-3,.mx-md-3{margin-right:1rem !important}.mb-md-3,.my-md-3{margin-bottom:1rem !important}.ml-md-3,.mx-md-3{margin-left:1rem !important}.m-md-4{margin:1.5rem !important}.mt-md-4,.my-md-4{margin-top:1.5rem !important}.mr-md-4,.mx-md-4{margin-right:1.5rem !important}.mb-md-4,.my-md-4{margin-bottom:1.5rem !important}.ml-md-4,.mx-md-4{margin-left:1.5rem !important}.m-md-5{margin:3rem !important}.mt-md-5,.my-md-5{margin-top:3rem !important}.mr-md-5,.mx-md-5{margin-right:3rem !important}.mb-md-5,.my-md-5{margin-bottom:3rem !important}.ml-md-5,.mx-md-5{margin-left:3rem !important}.p-md-0{padding:0 !important}.pt-md-0,.py-md-0{padding-top:0 !important}.pr-md-0,.px-md-0{padding-right:0 !important}.pb-md-0,.py-md-0{padding-bottom:0 !important}.pl-md-0,.px-md-0{padding-left:0 !important}.p-md-1{padding:.25rem !important}.pt-md-1,.py-md-1{padding-top:.25rem !important}.pr-md-1,.px-md-1{padding-right:.25rem !important}.pb-md-1,.py-md-1{padding-bottom:.25rem !important}.pl-md-1,.px-md-1{padding-left:.25rem !important}.p-md-2{padding:.5rem !important}.pt-md-2,.py-md-2{padding-top:.5rem !important}.pr-md-2,.px-md-2{padding-right:.5rem !important}.pb-md-2,.py-md-2{padding-bottom:.5rem !important}.pl-md-2,.px-md-2{padding-left:.5rem !important}.p-md-3{padding:1rem !important}.pt-md-3,.py-md-3{padding-top:1rem !important}.pr-md-3,.px-md-3{padding-right:1rem !important}.pb-md-3,.py-md-3{padding-bottom:1rem !important}.pl-md-3,.px-md-3{padding-left:1rem !important}.p-md-4{padding:1.5rem !important}.pt-md-4,.py-md-4{padding-top:1.5rem !important}.pr-md-4,.px-md-4{padding-right:1.5rem !important}.pb-md-4,.py-md-4{padding-bottom:1.5rem !important}.pl-md-4,.px-md-4{padding-left:1.5rem !important}.p-md-5{padding:3rem !important}.pt-md-5,.py-md-5{padding-top:3rem !important}.pr-md-5,.px-md-5{padding-right:3rem !important}.pb-md-5,.py-md-5{padding-bottom:3rem !important}.pl-md-5,.px-md-5{padding-left:3rem !important}.m-md-auto{margin:auto !important}.mt-md-auto,.my-md-auto{margin-top:auto !important}.mr-md-auto,.mx-md-auto{margin-right:auto !important}.mb-md-auto,.my-md-auto{margin-bottom:auto !important}.ml-md-auto,.mx-md-auto{margin-left:auto !important}}@media (min-width: 992px){.m-lg-0{margin:0 !important}.mt-lg-0,.my-lg-0{margin-top:0 !important}.mr-lg-0,.mx-lg-0{margin-right:0 !important}.mb-lg-0,.my-lg-0{margin-bottom:0 !important}.ml-lg-0,.mx-lg-0{margin-left:0 !important}.m-lg-1{margin:.25rem !important}.mt-lg-1,.my-lg-1{margin-top:.25rem !important}.mr-lg-1,.mx-lg-1{margin-right:.25rem !important}.mb-lg-1,.my-lg-1{margin-bottom:.25rem !important}.ml-lg-1,.mx-lg-1{margin-left:.25rem !important}.m-lg-2{margin:.5rem !important}.mt-lg-2,.my-lg-2{margin-top:.5rem !important}.mr-lg-2,.mx-lg-2{margin-right:.5rem !important}.mb-lg-2,.my-lg-2{margin-bottom:.5rem !important}.ml-lg-2,.mx-lg-2{margin-left:.5rem !important}.m-lg-3{margin:1rem !important}.mt-lg-3,.my-lg-3{margin-top:1rem !important}.mr-lg-3,.mx-lg-3{margin-right:1rem !important}.mb-lg-3,.my-lg-3{margin-bottom:1rem 
!important}.ml-lg-3,.mx-lg-3{margin-left:1rem !important}.m-lg-4{margin:1.5rem !important}.mt-lg-4,.my-lg-4{margin-top:1.5rem !important}.mr-lg-4,.mx-lg-4{margin-right:1.5rem !important}.mb-lg-4,.my-lg-4{margin-bottom:1.5rem !important}.ml-lg-4,.mx-lg-4{margin-left:1.5rem !important}.m-lg-5{margin:3rem !important}.mt-lg-5,.my-lg-5{margin-top:3rem !important}.mr-lg-5,.mx-lg-5{margin-right:3rem !important}.mb-lg-5,.my-lg-5{margin-bottom:3rem !important}.ml-lg-5,.mx-lg-5{margin-left:3rem !important}.p-lg-0{padding:0 !important}.pt-lg-0,.py-lg-0{padding-top:0 !important}.pr-lg-0,.px-lg-0{padding-right:0 !important}.pb-lg-0,.py-lg-0{padding-bottom:0 !important}.pl-lg-0,.px-lg-0{padding-left:0 !important}.p-lg-1{padding:.25rem !important}.pt-lg-1,.py-lg-1{padding-top:.25rem !important}.pr-lg-1,.px-lg-1{padding-right:.25rem !important}.pb-lg-1,.py-lg-1{padding-bottom:.25rem !important}.pl-lg-1,.px-lg-1{padding-left:.25rem !important}.p-lg-2{padding:.5rem !important}.pt-lg-2,.py-lg-2{padding-top:.5rem !important}.pr-lg-2,.px-lg-2{padding-right:.5rem !important}.pb-lg-2,.py-lg-2{padding-bottom:.5rem !important}.pl-lg-2,.px-lg-2{padding-left:.5rem !important}.p-lg-3{padding:1rem !important}.pt-lg-3,.py-lg-3{padding-top:1rem !important}.pr-lg-3,.px-lg-3{padding-right:1rem !important}.pb-lg-3,.py-lg-3{padding-bottom:1rem !important}.pl-lg-3,.px-lg-3{padding-left:1rem !important}.p-lg-4{padding:1.5rem !important}.pt-lg-4,.py-lg-4{padding-top:1.5rem !important}.pr-lg-4,.px-lg-4{padding-right:1.5rem !important}.pb-lg-4,.py-lg-4{padding-bottom:1.5rem !important}.pl-lg-4,.px-lg-4{padding-left:1.5rem !important}.p-lg-5{padding:3rem !important}.pt-lg-5,.py-lg-5{padding-top:3rem !important}.pr-lg-5,.px-lg-5{padding-right:3rem !important}.pb-lg-5,.py-lg-5{padding-bottom:3rem !important}.pl-lg-5,.px-lg-5{padding-left:3rem !important}.m-lg-auto{margin:auto !important}.mt-lg-auto,.my-lg-auto{margin-top:auto !important}.mr-lg-auto,.mx-lg-auto{margin-right:auto !important}.mb-lg-auto,.my-lg-auto{margin-bottom:auto !important}.ml-lg-auto,.mx-lg-auto{margin-left:auto !important}}@media (min-width: 1200px){.m-xl-0{margin:0 !important}.mt-xl-0,.my-xl-0{margin-top:0 !important}.mr-xl-0,.mx-xl-0{margin-right:0 !important}.mb-xl-0,.my-xl-0{margin-bottom:0 !important}.ml-xl-0,.mx-xl-0{margin-left:0 !important}.m-xl-1{margin:.25rem !important}.mt-xl-1,.my-xl-1{margin-top:.25rem !important}.mr-xl-1,.mx-xl-1{margin-right:.25rem !important}.mb-xl-1,.my-xl-1{margin-bottom:.25rem !important}.ml-xl-1,.mx-xl-1{margin-left:.25rem !important}.m-xl-2{margin:.5rem !important}.mt-xl-2,.my-xl-2{margin-top:.5rem !important}.mr-xl-2,.mx-xl-2{margin-right:.5rem !important}.mb-xl-2,.my-xl-2{margin-bottom:.5rem !important}.ml-xl-2,.mx-xl-2{margin-left:.5rem !important}.m-xl-3{margin:1rem !important}.mt-xl-3,.my-xl-3{margin-top:1rem !important}.mr-xl-3,.mx-xl-3{margin-right:1rem !important}.mb-xl-3,.my-xl-3{margin-bottom:1rem !important}.ml-xl-3,.mx-xl-3{margin-left:1rem !important}.m-xl-4{margin:1.5rem !important}.mt-xl-4,.my-xl-4{margin-top:1.5rem !important}.mr-xl-4,.mx-xl-4{margin-right:1.5rem !important}.mb-xl-4,.my-xl-4{margin-bottom:1.5rem !important}.ml-xl-4,.mx-xl-4{margin-left:1.5rem !important}.m-xl-5{margin:3rem !important}.mt-xl-5,.my-xl-5{margin-top:3rem !important}.mr-xl-5,.mx-xl-5{margin-right:3rem !important}.mb-xl-5,.my-xl-5{margin-bottom:3rem !important}.ml-xl-5,.mx-xl-5{margin-left:3rem !important}.p-xl-0{padding:0 !important}.pt-xl-0,.py-xl-0{padding-top:0 !important}.pr-xl-0,.px-xl-0{padding-right:0 
!important}.pb-xl-0,.py-xl-0{padding-bottom:0 !important}.pl-xl-0,.px-xl-0{padding-left:0 !important}.p-xl-1{padding:.25rem !important}.pt-xl-1,.py-xl-1{padding-top:.25rem !important}.pr-xl-1,.px-xl-1{padding-right:.25rem !important}.pb-xl-1,.py-xl-1{padding-bottom:.25rem !important}.pl-xl-1,.px-xl-1{padding-left:.25rem !important}.p-xl-2{padding:.5rem !important}.pt-xl-2,.py-xl-2{padding-top:.5rem !important}.pr-xl-2,.px-xl-2{padding-right:.5rem !important}.pb-xl-2,.py-xl-2{padding-bottom:.5rem !important}.pl-xl-2,.px-xl-2{padding-left:.5rem !important}.p-xl-3{padding:1rem !important}.pt-xl-3,.py-xl-3{padding-top:1rem !important}.pr-xl-3,.px-xl-3{padding-right:1rem !important}.pb-xl-3,.py-xl-3{padding-bottom:1rem !important}.pl-xl-3,.px-xl-3{padding-left:1rem !important}.p-xl-4{padding:1.5rem !important}.pt-xl-4,.py-xl-4{padding-top:1.5rem !important}.pr-xl-4,.px-xl-4{padding-right:1.5rem !important}.pb-xl-4,.py-xl-4{padding-bottom:1.5rem !important}.pl-xl-4,.px-xl-4{padding-left:1.5rem !important}.p-xl-5{padding:3rem !important}.pt-xl-5,.py-xl-5{padding-top:3rem !important}.pr-xl-5,.px-xl-5{padding-right:3rem !important}.pb-xl-5,.py-xl-5{padding-bottom:3rem !important}.pl-xl-5,.px-xl-5{padding-left:3rem !important}.m-xl-auto{margin:auto !important}.mt-xl-auto,.my-xl-auto{margin-top:auto !important}.mr-xl-auto,.mx-xl-auto{margin-right:auto !important}.mb-xl-auto,.my-xl-auto{margin-bottom:auto !important}.ml-xl-auto,.mx-xl-auto{margin-left:auto !important}}.text-justify{text-align:justify !important}.text-nowrap{white-space:nowrap !important}.text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.text-left{text-align:left !important}.text-right{text-align:right !important}.text-center{text-align:center !important}@media (min-width: 576px){.text-sm-left{text-align:left !important}.text-sm-right{text-align:right !important}.text-sm-center{text-align:center !important}}@media (min-width: 768px){.text-md-left{text-align:left !important}.text-md-right{text-align:right !important}.text-md-center{text-align:center !important}}@media (min-width: 992px){.text-lg-left{text-align:left !important}.text-lg-right{text-align:right !important}.text-lg-center{text-align:center !important}}@media (min-width: 1200px){.text-xl-left{text-align:left !important}.text-xl-right{text-align:right !important}.text-xl-center{text-align:center !important}}.text-lowercase{text-transform:lowercase !important}.text-uppercase{text-transform:uppercase !important}.text-capitalize{text-transform:capitalize !important}.font-weight-light{font-weight:300 !important}.font-weight-normal{font-weight:400 !important}.font-weight-bold{font-weight:700 !important}.font-italic{font-style:italic !important}.text-white{color:#fff !important}.text-primary{color:#007bff !important}a.text-primary:hover,a.text-primary:focus{color:#0062cc !important}.text-secondary{color:#6c757d !important}a.text-secondary:hover,a.text-secondary:focus{color:#545b62 !important}.text-success{color:#28a745 !important}a.text-success:hover,a.text-success:focus{color:#1e7e34 !important}.text-info{color:#17a2b8 !important}a.text-info:hover,a.text-info:focus{color:#117a8b !important}.text-warning{color:#ffc107 !important}a.text-warning:hover,a.text-warning:focus{color:#d39e00 !important}.text-danger{color:#dc3545 !important}a.text-danger:hover,a.text-danger:focus{color:#bd2130 !important}.text-light{color:#f8f9fa !important}a.text-light:hover,a.text-light:focus{color:#dae0e5 !important}.text-dark{color:#343a40 
!important}a.text-dark:hover,a.text-dark:focus{color:#1d2124 !important}.text-muted{color:#6c757d !important}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.visible{visibility:visible !important}.invisible{visibility:hidden !important}@media print{*,*::before,*::after{text-shadow:none !important;-webkit-box-shadow:none !important;box-shadow:none !important}a:not(.btn){text-decoration:underline}abbr[title]::after{content:" (" attr(title) ")"}pre{white-space:pre-wrap !important}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}p,h2,h3{orphans:3;widows:3}h2,h3{page-break-after:avoid}@page{size:a3}body{min-width:992px !important}.container{min-width:992px !important}.navbar{display:none}.badge{border:1px solid #000}.table{border-collapse:collapse !important}.table td,.table th{background-color:#fff !important}.table-bordered th,.table-bordered td{border:1px solid #ddd !important}}.highlight table td{padding:5px}.highlight table pre{margin:0}.highlight .cm{color:#999988;font-style:italic}.highlight .cp{color:#999999;font-weight:bold}.highlight .c1{color:#999988;font-style:italic}.highlight .cs{color:#999999;font-weight:bold;font-style:italic}.highlight .c,.highlight .cd{color:#999988;font-style:italic}.highlight .err{color:#a61717;background-color:#e3d2d2}.highlight .gd{color:#000000;background-color:#ffdddd}.highlight .ge{color:#000000;font-style:italic}.highlight .gr{color:#aa0000}.highlight .gh{color:#999999}.highlight .gi{color:#000000;background-color:#ddffdd}.highlight .go{color:#888888}.highlight .gp{color:#555555}.highlight .gs{font-weight:bold}.highlight .gu{color:#aaaaaa}.highlight .gt{color:#aa0000}.highlight .kc{color:#000000;font-weight:bold}.highlight .kd{color:#000000;font-weight:bold}.highlight .kn{color:#000000;font-weight:bold}.highlight .kp{color:#000000;font-weight:bold}.highlight .kr{color:#000000;font-weight:bold}.highlight .kt{color:#445588;font-weight:bold}.highlight .k,.highlight .kv{color:#000000;font-weight:bold}.highlight .mf{color:#009999}.highlight .mh{color:#009999}.highlight .il{color:#009999}.highlight .mi{color:#009999}.highlight .mo{color:#009999}.highlight .m,.highlight .mb,.highlight .mx{color:#009999}.highlight .sb{color:#d14}.highlight .sc{color:#d14}.highlight .sd{color:#d14}.highlight .s2{color:#d14}.highlight .se{color:#d14}.highlight .sh{color:#d14}.highlight .si{color:#d14}.highlight .sx{color:#d14}.highlight .sr{color:#009926}.highlight .s1{color:#d14}.highlight .ss{color:#990073}.highlight .s{color:#d14}.highlight .na{color:#008080}.highlight .bp{color:#999999}.highlight .nb{color:#0086B3}.highlight .nc{color:#445588;font-weight:bold}.highlight .no{color:#008080}.highlight .nd{color:#3c5d5d;font-weight:bold}.highlight .ni{color:#800080}.highlight .ne{color:#990000;font-weight:bold}.highlight .nf{color:#990000;font-weight:bold}.highlight .nl{color:#990000;font-weight:bold}.highlight .nn{color:#555555}.highlight .nt{color:#000080}.highlight .vc{color:#008080}.highlight .vg{color:#008080}.highlight .vi{color:#008080}.highlight .nv{color:#008080}.highlight .ow{color:#000000;font-weight:bold}.highlight .o{color:#000000;font-weight:bold}.highlight .w{color:#bbbbbb}.highlight{background-color:#f8f8f8}@font-face{font-family:FreightSans;font-weight:700;font-style:normal;src:url("../fonts/FreightSans/freight-sans-bold.woff2") format("woff2"),url("../fonts/FreightSans/freight-sans-bold.woff") 
format("woff")}@font-face{font-family:FreightSans;font-weight:700;font-style:italic;src:url("../fonts/FreightSans/freight-sans-bold-italic.woff2") format("woff2"),url("../fonts/FreightSans/freight-sans-bold-italic.woff") format("woff")}@font-face{font-family:FreightSans;font-weight:500;font-style:normal;src:url("../fonts/FreightSans/freight-sans-medium.woff2") format("woff2"),url("../fonts/FreightSans/freight-sans-medium.woff") format("woff")}@font-face{font-family:FreightSans;font-weight:500;font-style:italic;src:url("../fonts/FreightSans/freight-sans-medium-italic.woff2") format("woff2"),url("../fonts/FreightSans/freight-sans-medium-italic.woff") format("woff")}@font-face{font-family:FreightSans;font-weight:100;font-style:normal;src:url("../fonts/FreightSans/freight-sans-light.woff2") format("woff2"),url("../fonts/FreightSans/freight-sans-light.woff") format("woff")}@font-face{font-family:FreightSans;font-weight:100;font-style:italic;src:url("../fonts/FreightSans/freight-sans-light-italic.woff2") format("woff2"),url("../fonts/FreightSans/freight-sans-light-italic.woff") format("woff")}@font-face{font-family:FreightSans;font-weight:400;font-style:italic;src:url("../fonts/FreightSans/freight-sans-book-italic.woff2") format("woff2"),url("../fonts/FreightSans/freight-sans-book-italic.woff") format("woff")}@font-face{font-family:FreightSans;font-weight:400;font-style:normal;src:url("../fonts/FreightSans/freight-sans-book.woff2") format("woff2"),url("../fonts/FreightSans/freight-sans-book.woff") format("woff")}@font-face{font-family:IBMPlexMono;font-weight:600;font-style:normal;unicode-range:u+0020-007f;src:local("IBMPlexMono-SemiBold"),url("../fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2") format("woff2"),url("../fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff") format("woff")}@font-face{font-family:IBMPlexMono;font-weight:500;font-style:normal;unicode-range:u+0020-007f;src:local("IBMPlexMono-Medium"),url("../fonts/IBMPlexMono/IBMPlexMono-Medium.woff2") format("woff2"),url("../fonts/IBMPlexMono/IBMPlexMono-Medium.woff") format("woff")}@font-face{font-family:IBMPlexMono;font-weight:400;font-style:normal;unicode-range:u+0020-007f;src:local("IBMPlexMono-Regular"),url("../fonts/IBMPlexMono/IBMPlexMono-Regular.woff2") format("woff2"),url("../fonts/IBMPlexMono/IBMPlexMono-Regular.woff") format("woff")}@font-face{font-family:IBMPlexMono;font-weight:300;font-style:normal;unicode-range:u+0020-007f;src:local("IBMPlexMono-Light"),url("../fonts/IBMPlexMono/IBMPlexMono-Light.woff2") format("woff2"),url("../fonts/IBMPlexMono/IBMPlexMono-Light.woff") format("woff")}html{position:relative;min-height:100%;font-size:12px}@media screen and (min-width: 768px){html{font-size:16px}}*{-webkit-box-sizing:border-box;box-sizing:border-box}body{font-family:FreightSans, Helvetica Neue, Helvetica, Arial, sans-serif}a:link,a:visited,a:hover{text-decoration:none;color:#e44c2c}a.with-right-arrow,.btn.with-right-arrow{padding-right:1.375rem;position:relative;background-image:url("../images/chevron-right-orange.svg");background-size:6px 13px;background-position:center right 5px;background-repeat:no-repeat}@media screen and (min-width: 768px){a.with-right-arrow,.btn.with-right-arrow{background-size:8px 14px;background-position:center right 12px;padding-right:2rem}}::-webkit-input-placeholder{color:#e44c2c}::-moz-placeholder{color:#e44c2c}:-ms-input-placeholder{color:#e44c2c}:-moz-placeholder{color:#e44c2c}.email-subscribe-form input.email{color:#e44c2c;border:none;border-bottom:1px solid 
#939393;width:100%;background-color:transparent;outline:none;font-size:1.125rem;letter-spacing:0.25px;line-height:2.25rem}.email-subscribe-form input[type="submit"]{position:absolute;right:0;top:10px;height:15px;width:15px;background-image:url("../images/arrow-right-with-tail.svg");background-color:transparent;background-repeat:no-repeat;background-size:15px 15px;background-position:center center;-webkit-appearance:none;-moz-appearance:none;appearance:none;border:0}.email-subscribe-form-fields-wrapper{position:relative}.anchorjs-link{color:#6c6c6d !important}@media screen and (min-width: 768px){.anchorjs-link:hover{color:inherit;text-decoration:none !important}}.pytorch-article #table-of-contents{display:none}code,kbd,pre,samp{font-family:IBMPlexMono,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace}code span,kbd span,pre span,samp span{font-family:IBMPlexMono,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace}pre{padding:1.125rem;background-color:#f3f4f7}pre code{font-size:.875rem}pre.highlight{background-color:#f3f4f7;line-height:1.3125rem}code.highlighter-rouge{color:#6c6c6d;background-color:#f3f4f7;padding:2px 6px}a:link code.highlighter-rouge,a:visited code.highlighter-rouge,a:hover code.highlighter-rouge{color:#4974D1}a:link.has-code,a:visited.has-code,a:hover.has-code{color:#4974D1}p code,h1 code,h2 code,h3 code,h4 code,h5 code,h6 code{font-size:78.5%}pre{white-space:pre-wrap;white-space:-moz-pre-wrap;white-space:-pre-wrap;white-space:-o-pre-wrap;word-wrap:break-word}.header-holder{height:68px;-webkit-box-align:center;-ms-flex-align:center;align-items:center;display:-webkit-box;display:-ms-flexbox;display:flex;left:0;margin-left:auto;margin-right:auto;position:fixed;right:0;top:0;width:100%;z-index:9999;background-color:#fff;border-bottom:1px solid #e2e2e2}@media screen and (min-width: 1100px){.header-holder{height:90px}}.header-container{position:relative;display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center}.header-container:before,.header-container:after{content:"";display:table}.header-container:after{clear:both}.header-container{*zoom:1}@media screen and (min-width: 1100px){.header-container{display:block}}.header-logo{height:23px;width:93px;background-image:url("../images/logo.svg");background-repeat:no-repeat;background-size:93px 23px;display:block;float:left;z-index:10}@media screen and (min-width: 1100px){.header-logo{background-size:108px 27px;position:absolute;height:27px;width:108px;top:4px;float:none}}.main-menu-open-button{background-image:url("../images/icon-menu-dots.svg");background-position:center center;background-size:25px 7px;background-repeat:no-repeat;width:25px;height:17px;position:absolute;right:0;top:4px}@media screen and (min-width: 1100px){.main-menu-open-button{display:none}}.header-holder .main-menu{display:none}@media screen and (min-width: 1100px){.header-holder .main-menu{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:end;-ms-flex-pack:end;justify-content:flex-end}}.header-holder .main-menu ul{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;margin:0}.header-holder .main-menu ul li{display:inline-block;margin-right:40px;position:relative}.header-holder .main-menu ul 
li.active:after{content:"•";bottom:-24px;color:#e44c2c;font-size:1.375rem;left:0;position:absolute;right:0;text-align:center}.header-holder .main-menu ul li.active a{color:#e44c2c}.header-holder .main-menu ul li:last-of-type{margin-right:0}.header-holder .main-menu ul li a{color:#fff;font-size:1.125rem;letter-spacing:0;line-height:2.125rem;text-align:center;text-decoration:none}@media screen and (min-width: 1100px){.header-holder .main-menu ul li a:hover{color:#e44c2c}}.mobile-main-menu{display:none}.mobile-main-menu.open{background-color:#262626;display:block;height:100%;left:0;margin-left:auto;margin-right:auto;min-height:100%;position:fixed;right:0;top:0;width:100%;z-index:99999}.mobile-main-menu .container-fluid{-webkit-box-align:center;-ms-flex-align:center;align-items:center;display:-webkit-box;display:-ms-flexbox;display:flex;height:68px;position:relative}.mobile-main-menu .container-fluid:before,.mobile-main-menu .container-fluid:after{content:"";display:table}.mobile-main-menu .container-fluid:after{clear:both}.mobile-main-menu .container-fluid{*zoom:1}.mobile-main-menu.open ul{list-style-type:none;padding:0}.mobile-main-menu.open ul li a{font-size:2rem;color:#fff;letter-spacing:0;line-height:4rem;text-decoration:none}.mobile-main-menu.open ul li.active a{color:#e44c2c}.main-menu-close-button{background-image:url("../images/icon-close.svg");background-position:center center;background-repeat:no-repeat;background-size:24px 24px;height:24px;position:absolute;right:0;width:24px;top:-4px}.mobile-main-menu-header-container{position:relative}.mobile-main-menu-links-container{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;padding-left:2.8125rem;height:100%;min-height:100%;margin-top:-68px}.site-footer{padding:2.5rem 0;width:100%;background-image:url("../images/home-footer-background.jpg");background-size:100%;margin-left:0;margin-right:0;position:relative;z-index:201}@media screen and (min-width: 768px){.site-footer{padding:5rem 0}}.site-footer p{color:#fff}.site-footer ul{list-style-type:none;padding-left:0;margin-bottom:0}.site-footer ul li{font-size:1.125rem;line-height:2rem;color:#6c6c6d;padding-bottom:.375rem}.site-footer ul li.list-title{padding-bottom:.75rem;color:#fff}.site-footer a:link,.site-footer a:visited{color:inherit}@media screen and (min-width: 768px){.site-footer a:hover{color:#e44c2c}}.docs-tutorials-resources{background-color:#262626;color:#fff;padding-top:2.5rem;padding-bottom:2.5rem;position:relative;z-index:201}@media screen and (min-width: 768px){.docs-tutorials-resources{padding-top:5rem;padding-bottom:5rem}}.docs-tutorials-resources p{color:#929292;font-size:1.125rem}.docs-tutorials-resources h2{font-size:1.5rem;letter-spacing:-0.25px;text-transform:none;margin-bottom:0.25rem}@media screen and (min-width: 768px){.docs-tutorials-resources h2{margin-bottom:1.25rem}}.docs-tutorials-resources .col-md-4{margin-bottom:2rem;text-align:center}@media screen and (min-width: 768px){.docs-tutorials-resources .col-md-4{margin-bottom:0}}.docs-tutorials-resources .with-right-arrow{margin-left:12px}.docs-tutorials-resources .with-right-arrow:hover{background-image:url("../images/chevron-right-white.svg")}.docs-tutorials-resources p{font-size:1rem;line-height:1.5rem;letter-spacing:0.22px;color:#939393;margin-bottom:0}@media screen and (min-width: 768px){.docs-tutorials-resources p{margin-bottom:1.25rem}}.docs-tutorials-resources a{font-size:1.125rem;color:#e44c2c}.docs-tutorials-resources 
a:hover{color:#fff}.footer-container{position:relative}@media screen and (min-width: 768px){.footer-logo-wrapper{position:absolute;top:0;left:30px}}.footer-logo{background-image:url("../images/logo-icon.svg");background-position:center;background-repeat:no-repeat;background-size:20px 24px;display:block;height:24px;margin-bottom:2.8125rem;width:20px}@media screen and (min-width: 768px){.footer-logo{background-size:29px 36px;height:36px;margin-bottom:0;margin-bottom:0;width:29px}}.footer-links-wrapper{display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap}@media screen and (min-width: 768px){.footer-links-wrapper{-ms-flex-wrap:initial;flex-wrap:initial;-webkit-box-pack:end;-ms-flex-pack:end;justify-content:flex-end}}.footer-links-col{margin-bottom:3.75rem;width:50%}@media screen and (min-width: 768px){.footer-links-col{margin-bottom:0;width:14%;margin-right:23px}.footer-links-col.follow-us-col{width:18%;margin-right:0}}@media (min-width: 768px) and (max-width: 1239px){.footer-links-col{width:18%;margin-right:30px}}.footer-social-icons{margin:8.5625rem 0 2.5rem 0}.footer-social-icons a{height:32px;width:32px;display:inline-block;background-color:#CCCDD1;border-radius:50%;margin-right:5px}.footer-social-icons a.facebook{background-image:url("../images/logo-facebook-dark.svg");background-position:center center;background-size:9px 18px;background-repeat:no-repeat}.footer-social-icons a.twitter{background-image:url("../images/logo-twitter-dark.svg");background-position:center center;background-size:17px 17px;background-repeat:no-repeat}.site-footer .mc-field-group{margin-top:-2px}article.pytorch-article{max-width:920px;margin:0 auto}article.pytorch-article h2,article.pytorch-article h3,article.pytorch-article h4,article.pytorch-article h5,article.pytorch-article h6{margin:1.375rem 0;color:#262626}article.pytorch-article h2{font-size:1.625rem;letter-spacing:1.33px;line-height:2rem;text-transform:none}article.pytorch-article h3{font-size:1.5rem;letter-spacing:-0.25px;line-height:1.875rem;text-transform:none}article.pytorch-article h4,article.pytorch-article h5,article.pytorch-article h6{font-size:1.125rem;letter-spacing:-0.19px;line-height:1.875rem}article.pytorch-article p{margin-bottom:1.125rem}article.pytorch-article p,article.pytorch-article ul li,article.pytorch-article ol li,article.pytorch-article dl dt,article.pytorch-article dl dd,article.pytorch-article blockquote{font-size:1rem;line-height:1.375rem;color:#262626;letter-spacing:0.01px;font-weight:500}article.pytorch-article table{margin-bottom:2.5rem;width:100%}article.pytorch-article table thead{border-bottom:1px solid #cacaca}article.pytorch-article table th{padding:.625rem;color:#262626}article.pytorch-article table td{padding:.3125rem}article.pytorch-article table tr th:first-of-type,article.pytorch-article table tr td:first-of-type{padding-left:0}article.pytorch-article table.docutils.field-list th.field-name{padding:.3125rem;padding-left:0}article.pytorch-article table.docutils.field-list td.field-body{padding:.3125rem}article.pytorch-article table.docutils.field-list td.field-body p:last-of-type{margin-bottom:0}article.pytorch-article ul,article.pytorch-article ol{margin:1.5rem 0 3.125rem 0}@media screen and (min-width: 768px){article.pytorch-article ul,article.pytorch-article ol{padding-left:6.25rem}}article.pytorch-article ul li,article.pytorch-article ol li{margin-bottom:.625rem}article.pytorch-article dl{margin-bottom:1.5rem}article.pytorch-article dl 
dt{margin-bottom:.75rem}article.pytorch-article pre{margin-bottom:2.5rem}article.pytorch-article hr{margin-top:4.6875rem;margin-bottom:4.6875rem}article.pytorch-article blockquote{margin:0 auto;margin-bottom:2.5rem;width:65%}article.pytorch-article img{width:100%}html{height:100%}@media screen and (min-width: 768px){html{font-size:16px}}body{background:#fff;height:100%;margin:0}body.no-scroll{height:100%;overflow:hidden}p{margin-top:0;margin-bottom:1.125rem}p a:link,p a:visited,p a:hover{color:#e44c2c;text-decoration:none}@media screen and (min-width: 768px){p a:hover{text-decoration:underline}}p a:link,p a:visited,p a:hover{color:#ee4c2c}.wy-breadcrumbs li a{color:#ee4c2c}ul.pytorch-breadcrumbs{padding-left:0;list-style-type:none}ul.pytorch-breadcrumbs li{display:inline-block;font-size:.875rem}ul.pytorch-breadcrumbs a{color:#ee4c2c;text-decoration:none}.table-of-contents-link-wrapper{display:block;margin-top:0;padding:1.25rem 1.875rem;background-color:#f3f4f7;position:relative;color:#262626;font-size:1.25rem}.table-of-contents-link-wrapper.is-open .toggle-table-of-contents{-webkit-transform:rotate(180deg);transform:rotate(180deg)}@media screen and (min-width: 1100px){.table-of-contents-link-wrapper{display:none}}.toggle-table-of-contents{background-image:url("../images/chevron-down-grey.svg");background-position:center center;background-repeat:no-repeat;background-size:18px 18px;height:100%;position:absolute;right:21px;width:30px;top:0}.tutorials-header .header-logo{background-image:url("../images/logo-dark.svg")}.tutorials-header .main-menu ul li a{color:#262626}.tutorials-header .main-menu-open-button{background-image:url("../images/icon-menu-dots-dark.svg")}.rst-content footer .helpful-hr.hr-top{margin-bottom:-.0625rem}.rst-content footer .helpful-hr.hr-bottom{margin-top:-.0625rem}.rst-content footer .helpful-container{display:-webkit-inline-box;display:-ms-inline-flexbox;display:inline-flex;font-size:1.125rem}.rst-content footer .helpful-container .helpful-question,.rst-content footer .helpful-container .was-helpful-thank-you{padding:.625rem 1.25rem .625rem 1.25rem}.rst-content footer .helpful-container .was-helpful-thank-you{display:none}.rst-content footer .helpful-container .helpful-question.yes-link,.rst-content footer .helpful-container .helpful-question.no-link{color:#e44c2c;cursor:pointer}.rst-content footer .helpful-container .helpful-question.yes-link:hover,.rst-content footer .helpful-container .helpful-question.no-link:hover{background-color:#e44c2c;color:#fff}.rst-content footer div[role="contentinfo"]{padding-top:2.5rem}.rst-content footer div[role="contentinfo"] p{margin-bottom:0}h1{font-size:2rem;letter-spacing:1.78px;line-height:2.5rem;text-transform:uppercase;margin:1.375rem 0}span.pre{color:#6c6c6d;background-color:#f3f4f7;padding:2px 6px}pre{background-color:#f3f4f7;padding:1.375rem}.highlight .c1{color:#6c6c6d}.headerlink{display:none !important}a:link.has-code,a:hover.has-code,a:visited.has-code{color:#4974D1}a:link.has-code span,a:hover.has-code span,a:visited.has-code span{color:#4974D1}article.pytorch-article ul,article.pytorch-article ol{padding-left:1.875rem;margin:0}article.pytorch-article ul li,article.pytorch-article ol li{margin:0;line-height:1.75rem}article.pytorch-article ul p,article.pytorch-article ol p{line-height:1.75rem;margin-bottom:0}article.pytorch-article ul ul,article.pytorch-article ul ol,article.pytorch-article ol ul,article.pytorch-article ol ol{margin:0}article.pytorch-article h1,article.pytorch-article h2,article.pytorch-article 
h3,article.pytorch-article h4,article.pytorch-article h5,article.pytorch-article h6{font-weight:normal}article.pytorch-article h1 a,article.pytorch-article h2 a,article.pytorch-article h3 a,article.pytorch-article h4 a,article.pytorch-article h5 a,article.pytorch-article h6 a{color:#262626}article.pytorch-article p.caption{margin-top:1.25rem}article.pytorch-article .section:first-of-type h1:first-of-type{margin-top:0}article.pytorch-article .sphx-glr-thumbcontainer{margin:0;border:1px solid #d6d7d8;border-radius:0;width:45%;text-align:center;margin-bottom:5%}@media screen and (max-width: 1100px){article.pytorch-article .sphx-glr-thumbcontainer:nth-child(odd){margin-left:0;margin-right:2.5%}article.pytorch-article .sphx-glr-thumbcontainer:nth-child(even){margin-right:0;margin-left:2.5%}article.pytorch-article .sphx-glr-thumbcontainer .figure{width:40%}}@media screen and (min-width: 1101px){article.pytorch-article .sphx-glr-thumbcontainer{margin-right:3%;margin-bottom:3%;width:30%}}article.pytorch-article .sphx-glr-thumbcontainer .caption-text a{font-size:1rem;color:#262626;letter-spacing:0;line-height:1.5rem;text-decoration:none}article.pytorch-article .sphx-glr-thumbcontainer:hover{-webkit-box-shadow:none;box-shadow:none;border-bottom-color:#fff}article.pytorch-article .sphx-glr-thumbcontainer:hover .figure:before{bottom:100%}article.pytorch-article .sphx-glr-thumbcontainer .figure{width:80%}article.pytorch-article .sphx-glr-thumbcontainer .figure:before{content:"";display:block;position:absolute;top:0;bottom:35%;left:0;right:0;background:#8A94B3;opacity:0.10}article.pytorch-article .sphx-glr-thumbcontainer .figure a.reference.internal{text-align:left}@media screen and (min-width: 768px){article.pytorch-article .sphx-glr-thumbcontainer:after{content:"";display:block;width:0;height:1px;position:absolute;bottom:0;left:0;background-color:#e44c2c;-webkit-transition:width .250s ease-in-out;transition:width .250s ease-in-out}article.pytorch-article .sphx-glr-thumbcontainer:hover:after{width:100%}}@media screen and (min-width: 768px){article.pytorch-article .sphx-glr-thumbcontainer:after{background-color:#ee4c2c}}article.pytorch-article .section :not(dt)>code{color:#262626;border-top:solid 2px #f3f4f7;background-color:#f3f4f7;border-bottom:solid 2px #f3f4f7;padding:0px 3px;-webkit-box-decoration-break:clone;box-decoration-break:clone}article.pytorch-article .section :not(dt)>code .pre{outline:0px;padding:0px}article.pytorch-article .function dt,article.pytorch-article .attribute dt,article.pytorch-article .class .attribute dt,article.pytorch-article .class dt{position:relative;background:#f3f4f7;padding:.5rem;border-left:3px solid #ee4c2c;word-wrap:break-word;padding-right:100px}article.pytorch-article .function dt em.property,article.pytorch-article .attribute dt em.property,article.pytorch-article .class dt em.property{font-family:inherit}article.pytorch-article .function dt em,article.pytorch-article .attribute dt em,article.pytorch-article .class .attribute dt em,article.pytorch-article .class dt em,article.pytorch-article .function dt .sig-paren,article.pytorch-article .attribute dt .sig-paren,article.pytorch-article .class dt .sig-paren{font-family:IBMPlexMono,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;font-size:87.5%}article.pytorch-article .function dt a,article.pytorch-article .attribute dt a,article.pytorch-article .class .attribute dt a,article.pytorch-article .class dt 
a{position:absolute;right:30px;padding-right:0;top:50%;-webkit-transform:perspective(1px) translateY(-50%);transform:perspective(1px) translateY(-50%)}article.pytorch-article .function dt:hover .viewcode-link,article.pytorch-article .attribute dt:hover .viewcode-link,article.pytorch-article .class dt:hover .viewcode-link{color:#ee4c2c}article.pytorch-article .function .anchorjs-link,article.pytorch-article .attribute .anchorjs-link,article.pytorch-article .class .anchorjs-link{display:inline;position:absolute;right:8px;font-size:1.5625rem !important;padding-left:0}article.pytorch-article .function dt>code,article.pytorch-article .attribute dt>code,article.pytorch-article .class .attribute dt>code,article.pytorch-article .class dt>code{color:#262626;border-top:solid 2px #f3f4f7;background-color:#f3f4f7;border-bottom:solid 2px #f3f4f7;-webkit-box-decoration-break:clone;box-decoration-break:clone}article.pytorch-article .function .viewcode-link,article.pytorch-article .attribute .viewcode-link,article.pytorch-article .class .viewcode-link{font-size:.875rem;color:#979797;letter-spacing:0;line-height:1.5rem;text-transform:uppercase}article.pytorch-article .function dd,article.pytorch-article .attribute dd,article.pytorch-article .class .attribute dd,article.pytorch-article .class dd{padding-left:3.75rem}article.pytorch-article .function dd p,article.pytorch-article .attribute dd p,article.pytorch-article .class .attribute dd p,article.pytorch-article .class dd p{color:#262626}article.pytorch-article .function table tbody tr th.field-name,article.pytorch-article .attribute table tbody tr th.field-name,article.pytorch-article .class table tbody tr th.field-name{white-space:nowrap;color:#262626;width:20%}@media screen and (min-width: 768px){article.pytorch-article .function table tbody tr th.field-name,article.pytorch-article .attribute table tbody tr th.field-name,article.pytorch-article .class table tbody tr th.field-name{width:15%}}article.pytorch-article .function table tbody tr td.field-body,article.pytorch-article .attribute table tbody tr td.field-body,article.pytorch-article .class table tbody tr td.field-body{padding:0.625rem;width:80%;color:#262626}@media screen and (min-width: 768px){article.pytorch-article .function table tbody tr td.field-body,article.pytorch-article .attribute table tbody tr td.field-body,article.pytorch-article .class table tbody tr td.field-body{width:85%}}@media screen and (min-width: 1600px){article.pytorch-article .function table tbody tr td.field-body,article.pytorch-article .attribute table tbody tr td.field-body,article.pytorch-article .class table tbody tr td.field-body{padding-left:1.25rem}}article.pytorch-article .function table tbody tr td.field-body p,article.pytorch-article .attribute table tbody tr td.field-body p,article.pytorch-article .class table tbody tr td.field-body p{padding-left:0px}article.pytorch-article .function table tbody tr td.field-body p:last-of-type,article.pytorch-article .attribute table tbody tr td.field-body p:last-of-type,article.pytorch-article .class table tbody tr td.field-body p:last-of-type{margin-bottom:0}article.pytorch-article .function table tbody tr td.field-body ol,article.pytorch-article .attribute table tbody tr td.field-body ol,article.pytorch-article .class table tbody tr td.field-body ol,article.pytorch-article .function table tbody tr td.field-body ul,article.pytorch-article .attribute table tbody tr td.field-body ul,article.pytorch-article .class table tbody tr td.field-body 
ul{padding-left:1rem;padding-bottom:0}article.pytorch-article .function table.docutils.field-list,article.pytorch-article .attribute table.docutils.field-list,article.pytorch-article .class table.docutils.field-list{margin-bottom:.75rem}article.pytorch-article .attribute .has-code{float:none}article.pytorch-article .class dt{border-left:none;border-top:3px solid #ee4c2c;padding-left:4em}article.pytorch-article .class dt em.property{position:absolute;left:0.5rem}article.pytorch-article .class dd .docutils dt{padding-left:0.5rem}article.pytorch-article .class em.property{text-transform:uppercase;font-style:normal;color:#ee4c2c;font-size:1rem;letter-spacing:0;padding-right:.75rem}article.pytorch-article .class dl dt em.property{position:static;left:0;padding-right:0}article.pytorch-article .class .method dt,article.pytorch-article .class .staticmethod dt{border-left:3px solid #ee4c2c;border-top:none}article.pytorch-article .class .method dt,article.pytorch-article .class .staticmethod dt{padding-left:0.5rem}article.pytorch-article .class .attribute dt{border-top:none}article.pytorch-article .class .attribute dt em.property{position:relative;left:0}article.pytorch-article table{table-layout:fixed}article.pytorch-article .note,article.pytorch-article .warning,article.pytorch-article .tip,article.pytorch-article .hint,article.pytorch-article .important,article.pytorch-article .caution,article.pytorch-article .danger,article.pytorch-article .attention,article.pytorch-article .error{background:#f3f4f7;margin-top:1.875rem;margin-bottom:1.125rem}article.pytorch-article .note .admonition-title,article.pytorch-article .warning .admonition-title,article.pytorch-article .tip .admonition-title,article.pytorch-article .hint .admonition-title,article.pytorch-article .important .admonition-title,article.pytorch-article .caution .admonition-title,article.pytorch-article .danger .admonition-title,article.pytorch-article .attention .admonition-title,article.pytorch-article .error .admonition-title{color:#fff;letter-spacing:1px;text-transform:uppercase;margin-bottom:1.125rem;padding:3px 0 3px 1.375rem;position:relative;font-size:.875rem}article.pytorch-article .note .admonition-title:before,article.pytorch-article .warning .admonition-title:before,article.pytorch-article .tip .admonition-title:before,article.pytorch-article .hint .admonition-title:before,article.pytorch-article .important .admonition-title:before,article.pytorch-article .caution .admonition-title:before,article.pytorch-article .danger .admonition-title:before,article.pytorch-article .attention .admonition-title:before,article.pytorch-article .error .admonition-title:before{content:"\2022";position:absolute;left:9px;color:#fff;top:2px}article.pytorch-article .note p:nth-child(n+2),article.pytorch-article .warning p:nth-child(n+2),article.pytorch-article .tip p:nth-child(n+2),article.pytorch-article .hint p:nth-child(n+2),article.pytorch-article .important p:nth-child(n+2),article.pytorch-article .caution p:nth-child(n+2),article.pytorch-article .danger p:nth-child(n+2),article.pytorch-article .attention p:nth-child(n+2),article.pytorch-article .error p:nth-child(n+2){padding:0 1.375rem}article.pytorch-article .note table,article.pytorch-article .warning table,article.pytorch-article .tip table,article.pytorch-article .hint table,article.pytorch-article .important table,article.pytorch-article .caution table,article.pytorch-article .danger table,article.pytorch-article .attention table,article.pytorch-article .error table{margin:0 
2rem;width:auto}article.pytorch-article .note .pre,article.pytorch-article .note pre,article.pytorch-article .warning .pre,article.pytorch-article .warning pre,article.pytorch-article .tip .pre,article.pytorch-article .tip pre,article.pytorch-article .hint .pre,article.pytorch-article .hint pre,article.pytorch-article .important .pre,article.pytorch-article .important pre,article.pytorch-article .caution .pre,article.pytorch-article .caution pre,article.pytorch-article .danger .pre,article.pytorch-article .danger pre,article.pytorch-article .attention .pre,article.pytorch-article .attention pre,article.pytorch-article .error .pre,article.pytorch-article .error pre{background:#fff;outline:1px solid #e9e9e9}article.pytorch-article .note :not(dt)>code,article.pytorch-article .warning :not(dt)>code,article.pytorch-article .tip :not(dt)>code,article.pytorch-article .hint :not(dt)>code,article.pytorch-article .important :not(dt)>code,article.pytorch-article .caution :not(dt)>code,article.pytorch-article .danger :not(dt)>code,article.pytorch-article .attention :not(dt)>code,article.pytorch-article .error :not(dt)>code{border-top:solid 2px #fff;background-color:#fff;border-bottom:solid 2px #fff;padding:0px 3px;-webkit-box-decoration-break:clone;box-decoration-break:clone;outline:1px solid #e9e9e9}article.pytorch-article .note :not(dt)>code .pre,article.pytorch-article .warning :not(dt)>code .pre,article.pytorch-article .tip :not(dt)>code .pre,article.pytorch-article .hint :not(dt)>code .pre,article.pytorch-article .important :not(dt)>code .pre,article.pytorch-article .caution :not(dt)>code .pre,article.pytorch-article .danger :not(dt)>code .pre,article.pytorch-article .attention :not(dt)>code .pre,article.pytorch-article .error :not(dt)>code .pre{outline:0px;padding:0px}article.pytorch-article .note pre,article.pytorch-article .warning pre,article.pytorch-article .tip pre,article.pytorch-article .hint pre,article.pytorch-article .important pre,article.pytorch-article .caution pre,article.pytorch-article .danger pre,article.pytorch-article .attention pre,article.pytorch-article .error pre{margin-bottom:0}article.pytorch-article .note .highlight,article.pytorch-article .warning .highlight,article.pytorch-article .tip .highlight,article.pytorch-article .hint .highlight,article.pytorch-article .important .highlight,article.pytorch-article .caution .highlight,article.pytorch-article .danger .highlight,article.pytorch-article .attention .highlight,article.pytorch-article .error .highlight{margin:0 2rem 1.125rem 2rem}article.pytorch-article .note ul,article.pytorch-article .note ol,article.pytorch-article .warning ul,article.pytorch-article .warning ol,article.pytorch-article .tip ul,article.pytorch-article .tip ol,article.pytorch-article .hint ul,article.pytorch-article .hint ol,article.pytorch-article .important ul,article.pytorch-article .important ol,article.pytorch-article .caution ul,article.pytorch-article .caution ol,article.pytorch-article .danger ul,article.pytorch-article .danger ol,article.pytorch-article .attention ul,article.pytorch-article .attention ol,article.pytorch-article .error ul,article.pytorch-article .error ol{padding-left:3.25rem}article.pytorch-article .note ul li,article.pytorch-article .note ol li,article.pytorch-article .warning ul li,article.pytorch-article .warning ol li,article.pytorch-article .tip ul li,article.pytorch-article .tip ol li,article.pytorch-article .hint ul li,article.pytorch-article .hint ol li,article.pytorch-article .important ul 
li,article.pytorch-article .important ol li,article.pytorch-article .caution ul li,article.pytorch-article .caution ol li,article.pytorch-article .danger ul li,article.pytorch-article .danger ol li,article.pytorch-article .attention ul li,article.pytorch-article .attention ol li,article.pytorch-article .error ul li,article.pytorch-article .error ol li{color:#262626}article.pytorch-article .note p,article.pytorch-article .warning p,article.pytorch-article .tip p,article.pytorch-article .hint p,article.pytorch-article .important p,article.pytorch-article .caution p,article.pytorch-article .danger p,article.pytorch-article .attention p,article.pytorch-article .error p{margin-top:1.125rem}article.pytorch-article .note .admonition-title{background:#54c7ec}article.pytorch-article .warning .admonition-title{background:#e94f3b}article.pytorch-article .tip .admonition-title{background:#6bcebb}article.pytorch-article .hint .admonition-title{background:#a2cdde}article.pytorch-article .important .admonition-title{background:#5890ff}article.pytorch-article .caution .admonition-title{background:#f7923a}article.pytorch-article .danger .admonition-title{background:#db2c49}article.pytorch-article .attention .admonition-title{background:#f5a623}article.pytorch-article .error .admonition-title{background:#cc2f90}article.pytorch-article .sphx-glr-download-link-note.admonition.note,article.pytorch-article .reference.download.internal,article.pytorch-article .sphx-glr-signature{display:none}article.pytorch-article .admonition>p:last-of-type{margin-bottom:0;padding-bottom:1.125rem !important}.pytorch-article div.sphx-glr-download a{background-color:#f3f4f7;background-image:url("../images/arrow-down-orange.svg");background-repeat:no-repeat;background-position:left 10px center;background-size:15px 15px;border-radius:0;border:none;display:block;text-align:left;padding:.9375rem 3.125rem;position:relative;margin:1.25rem auto}@media screen and (min-width: 768px){.pytorch-article div.sphx-glr-download a:after{content:"";display:block;width:0;height:1px;position:absolute;bottom:0;left:0;background-color:#e44c2c;-webkit-transition:width .250s ease-in-out;transition:width .250s ease-in-out}.pytorch-article div.sphx-glr-download a:hover:after{width:100%}}@media screen and (min-width: 768px){.pytorch-article div.sphx-glr-download a:after{background-color:#ee4c2c}}@media screen and (min-width: 768px){.pytorch-article div.sphx-glr-download a{background-position:left 20px center}}.pytorch-article div.sphx-glr-download a:hover{-webkit-box-shadow:none;box-shadow:none;text-decoration:none;background-image:url("../images/arrow-down-orange.svg");background-color:#f3f4f7}.pytorch-article div.sphx-glr-download a span.pre{background-color:transparent;font-size:1.125rem;padding:0;color:#262626}.pytorch-article div.sphx-glr-download a code,.pytorch-article div.sphx-glr-download a kbd,.pytorch-article div.sphx-glr-download a pre,.pytorch-article div.sphx-glr-download a samp,.pytorch-article div.sphx-glr-download a span.pre{font-family:FreightSans, Helvetica Neue, Helvetica, Arial, sans-serif}.pytorch-article p.sphx-glr-script-out{margin-bottom:1.125rem}.pytorch-article div.sphx-glr-script-out{margin-bottom:2.5rem}.pytorch-article div.sphx-glr-script-out .highlight{margin-left:0;margin-top:0}.pytorch-article div.sphx-glr-script-out .highlight pre{background-color:#fdede9;padding:1.5625rem;color:#837b79}.pytorch-article div.sphx-glr-script-out+p{margin-top:unset}article.pytorch-article .wy-table-responsive 
table{border:none;border-color:#fff !important;table-layout:fixed}article.pytorch-article .wy-table-responsive table thead tr{border-bottom:2px solid #6c6c6d}article.pytorch-article .wy-table-responsive table thead th{line-height:1.75rem;padding-left:.9375rem;padding-right:.9375rem}article.pytorch-article .wy-table-responsive table tbody .row-odd{background-color:#f3f4f7}article.pytorch-article .wy-table-responsive table tbody td{color:#6c6c6d;white-space:normal;padding:.9375rem;font-size:1rem;line-height:1.375rem}article.pytorch-article .wy-table-responsive table tbody td .pre{background:#fff;outline:1px solid #e9e9e9;color:#ee4c2c;font-size:87.5%}article.pytorch-article .wy-table-responsive table tbody td code{font-size:87.5%}a[rel~="prev"],a[rel~="next"]{padding:0.375rem 0 0 0}img.next-page,img.previous-page{width:8px;height:10px;position:relative;top:-1px}img.previous-page{-webkit-transform:scaleX(-1);transform:scaleX(-1)}.rst-footer-buttons{margin-top:1.875rem;margin-bottom:1.875rem}.rst-footer-buttons .btn:focus,.rst-footer-buttons .btn.focus{-webkit-box-shadow:none;box-shadow:none}article.pytorch-article blockquote{margin-left:3.75rem;color:#6c6c6d}article.pytorch-article .caption{color:#6c6c6d;letter-spacing:0.25px;line-height:2.125rem}article.pytorch-article .math{color:#262626;width:auto;text-align:center}article.pytorch-article .math img{width:auto}.pytorch-breadcrumbs-wrapper{width:100%}@media screen and (min-width: 1101px){.pytorch-breadcrumbs-wrapper{float:left;margin-left:3%;width:75%}}@media screen and (min-width: 1600px){.pytorch-breadcrumbs-wrapper{width:850px;margin-left:1.875rem}}.pytorch-breadcrumbs-wrapper .pytorch-breadcrumbs-aside{float:right}.pytorch-article .container{padding-left:0;padding-right:0;max-width:none}a:link,a:visited,a:hover{color:#ee4c2c}::-webkit-input-placeholder{color:#ee4c2c}::-moz-placeholder{color:#ee4c2c}:-ms-input-placeholder{color:#ee4c2c}:-moz-placeholder{color:#ee4c2c}@media screen and (min-width: 768px){.site-footer a:hover{color:#ee4c2c}}.docs-tutorials-resources a{color:#ee4c2c}.header-holder{position:relative;z-index:201}.header-holder .main-menu ul li.active:after{color:#ee4c2c}.header-holder .main-menu ul li.active a{color:#ee4c2c}@media screen and (min-width: 1100px){.header-holder .main-menu ul li a:hover{color:#ee4c2c}}.mobile-main-menu.open ul li.active a{color:#ee4c2c}.version{padding-bottom:1rem}.pytorch-call-to-action-links{padding-top:0;display:-webkit-box;display:-ms-flexbox;display:flex}@media screen and (min-width: 768px){.pytorch-call-to-action-links{padding-top:2.5rem}}@media (min-width: 768px) and (max-width: 1239px){.pytorch-call-to-action-links{padding-top:0}}@media (min-width: 1100px) and (max-width: 1239px){.pytorch-call-to-action-links{padding-top:2.5rem}}.pytorch-call-to-action-links #tutorial-type{display:none}.pytorch-call-to-action-links .call-to-action-img,.pytorch-call-to-action-links .call-to-action-notebook-img{height:1.375rem;width:1.375rem;margin-right:10px}.pytorch-call-to-action-links .call-to-action-notebook-img{height:1rem}.pytorch-call-to-action-links a{padding-right:1.25rem;color:#000;cursor:pointer}.pytorch-call-to-action-links a:hover{color:#e44c2c}.pytorch-call-to-action-links a .call-to-action-desktop-view{display:none}@media screen and (min-width: 768px){.pytorch-call-to-action-links a .call-to-action-desktop-view{display:block}}.pytorch-call-to-action-links a .call-to-action-mobile-view{display:block}@media screen and (min-width: 768px){.pytorch-call-to-action-links a 
.call-to-action-mobile-view{display:none}}.pytorch-call-to-action-links a #google-colab-link,.pytorch-call-to-action-links a #download-notebook-link,.pytorch-call-to-action-links a #github-view-link{padding-bottom:.625rem;border-bottom:1px solid #f3f4f7;padding-right:2.5rem;display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center}.pytorch-call-to-action-links a #google-colab-link:hover,.pytorch-call-to-action-links a #download-notebook-link:hover,.pytorch-call-to-action-links a #github-view-link:hover{border-bottom-color:#e44c2c;color:#e44c2c}.pytorch-container{margin:0 auto;padding:0 1.875rem;width:auto;position:relative}@media screen and (min-width: 1100px){.pytorch-container{padding:0}}@media screen and (min-width: 1101px){.pytorch-container{margin-left:25%}}@media screen and (min-width: 1600px){.pytorch-container{margin-left:350px}}.pytorch-container:before,.pytorch-container:after{content:"";display:table}.pytorch-container:after{clear:both}.pytorch-container{*zoom:1}.pytorch-content-wrap{background-color:#ffffff;display:-webkit-box;display:-ms-flexbox;display:flex;position:relative;padding-top:0}.pytorch-content-wrap:before,.pytorch-content-wrap:after{content:"";display:table}.pytorch-content-wrap:after{clear:both}.pytorch-content-wrap{*zoom:1}@media screen and (min-width: 1101px){.pytorch-content-wrap{padding-top:45px;float:left;width:100%;display:block}}@media screen and (min-width: 1600px){.pytorch-content-wrap{width:100%}}.pytorch-content{background:#ffffff;width:100%;max-width:700px;position:relative}.pytorch-content-left{margin-top:2.5rem;width:100%}@media screen and (min-width: 1101px){.pytorch-content-left{margin-top:0;margin-left:3%;width:75%;float:left}}@media screen and (min-width: 1600px){.pytorch-content-left{width:850px;margin-left:30px}}.pytorch-content-left .main-content{padding-top:.9375rem}.pytorch-content-left .main-content ul.simple{padding-bottom:1.25rem}.pytorch-content-left .main-content .note:nth-child(1),.pytorch-content-left .main-content .warning:nth-child(1){margin-top:0}.pytorch-content-right{display:none;position:relative;overflow-x:hidden;overflow-y:hidden}@media screen and (min-width: 1101px){.pytorch-content-right{display:block;margin-left:0;width:19%;float:left;height:100%}}@media screen and (min-width: 1600px){.pytorch-content-right{width:280px}}@media screen and (min-width: 1101px){.pytorch-side-scroll{position:relative;overflow-x:hidden;overflow-y:scroll;height:100%}}.pytorch-menu-vertical{padding:1.25rem 1.875rem 2.5rem 1.875rem}@media screen and (min-width: 1101px){.pytorch-menu-vertical{display:block;padding-top:0;padding-right:13.5%;padding-bottom:5.625rem}}@media screen and (min-width: 1600px){.pytorch-menu-vertical{padding-left:0;padding-right:1.5625rem}}.pytorch-left-menu{display:none;background-color:#f3f4f7;color:#262626}@media screen and (min-width: 1101px){.pytorch-left-menu{display:block;overflow-x:hidden;overflow-y:hidden;padding-bottom:110px;padding:0 1.875rem 0 0;width:25%;z-index:200;float:left}.pytorch-left-menu.make-fixed{position:fixed;top:0;bottom:0;left:0;float:none}}@media screen and (min-width: 1600px){.pytorch-left-menu{padding:0 0 0 1.875rem;width:350px}}.pytorch-left-menu p.caption{color:#262626;display:block;display:inline-block;font-size:1rem;line-height:1.375rem;margin-bottom:1rem;padding:0;text-transform:none;white-space:nowrap}.pytorch-left-menu-search{margin-bottom:2.5rem}@media screen and (min-width: 1101px){.pytorch-left-menu-search{margin:1.25rem 
.625rem 1.875rem 0}}.pytorch-left-menu-search ::-webkit-input-placeholder{color:#262626}.pytorch-left-menu-search :-ms-input-placeholder{color:#262626}.pytorch-left-menu-search ::-ms-input-placeholder{color:#262626}.pytorch-left-menu-search ::placeholder{color:#262626}.pytorch-left-menu-search input[type=text]{border-radius:0;padding:.5rem .75rem;border-color:#fff;color:#262626;border-style:solid;font-size:1rem;width:100%;background-color:#f3f4f7;background-image:url("../images/search-icon.svg");background-repeat:no-repeat;background-size:18px 18px;background-position:12px 10px;padding-left:40px;background-color:#fff}.pytorch-left-menu-search input[type=text]:focus{outline:0}@media screen and (min-width: 1101px){.pytorch-left-menu .pytorch-side-scroll{width:120%}}@media screen and (min-width: 1600px){.pytorch-left-menu .pytorch-side-scroll{width:340px}}.pytorch-right-menu{min-height:100px;overflow-x:hidden;overflow-y:hidden;left:0;z-index:200;padding-top:0;position:relative}@media screen and (min-width: 1101px){.pytorch-right-menu{width:100%}.pytorch-right-menu.scrolling-fixed{position:fixed;top:45px;left:83.5%;width:14%}.pytorch-right-menu.scrolling-absolute{position:absolute;left:0}}@media screen and (min-width: 1600px){.pytorch-right-menu{left:0;width:380px}.pytorch-right-menu.scrolling-fixed{position:fixed;top:45px;left:1230px}.pytorch-right-menu.scrolling-absolute{position:absolute;left:0}}.pytorch-left-menu ul,.pytorch-right-menu ul{list-style-type:none;padding-left:0;margin-bottom:2.5rem}.pytorch-left-menu>ul,.pytorch-right-menu>ul{margin-bottom:2.5rem}.pytorch-left-menu a:link,.pytorch-left-menu a:visited,.pytorch-left-menu a:hover,.pytorch-right-menu a:link,.pytorch-right-menu a:visited,.pytorch-right-menu a:hover{color:#6c6c6d;font-size:.875rem;line-height:1rem;padding:0;text-decoration:none}.pytorch-left-menu a:link.reference.internal,.pytorch-left-menu a:visited.reference.internal,.pytorch-left-menu a:hover.reference.internal,.pytorch-right-menu a:link.reference.internal,.pytorch-right-menu a:visited.reference.internal,.pytorch-right-menu a:hover.reference.internal{margin-bottom:.3125rem;position:relative}.pytorch-left-menu li code,.pytorch-right-menu li code{border:none;background:inherit;color:inherit;padding-left:0;padding-right:0}.pytorch-left-menu li span.toctree-expand,.pytorch-right-menu li span.toctree-expand{display:block;float:left;margin-left:-1.2em;font-size:0.8em;line-height:1.6em}.pytorch-left-menu li.on a,.pytorch-left-menu li.current>a,.pytorch-right-menu li.on a,.pytorch-right-menu li.current>a{position:relative;border:none}.pytorch-left-menu li.on a span.toctree-expand,.pytorch-left-menu li.current>a span.toctree-expand,.pytorch-right-menu li.on a span.toctree-expand,.pytorch-right-menu li.current>a span.toctree-expand{display:block;font-size:0.8em;line-height:1.6em}.pytorch-left-menu li.toctree-l1.current>a,.pytorch-right-menu li.toctree-l1.current>a{color:#ee4c2c}.pytorch-left-menu li.toctree-l1.current>a:before,.pytorch-right-menu li.toctree-l1.current>a:before{content:"\2022";display:inline-block;position:absolute;left:-15px;top:1px;font-size:1.375rem;color:#ee4c2c}@media screen and (min-width: 1101px){.pytorch-left-menu li.toctree-l1.current>a:before,.pytorch-right-menu li.toctree-l1.current>a:before{left:-20px}}.pytorch-left-menu li.toctree-l1.current li.toctree-l2>ul,.pytorch-left-menu li.toctree-l2.current li.toctree-l3>ul,.pytorch-right-menu li.toctree-l1.current li.toctree-l2>ul,.pytorch-right-menu li.toctree-l2.current 
li.toctree-l3>ul{display:none}.pytorch-left-menu li.toctree-l1.current li.toctree-l2.current>ul,.pytorch-left-menu li.toctree-l2.current li.toctree-l3.current>ul,.pytorch-right-menu li.toctree-l1.current li.toctree-l2.current>ul,.pytorch-right-menu li.toctree-l2.current li.toctree-l3.current>ul{display:block}.pytorch-left-menu li.toctree-l2.current li.toctree-l3>a,.pytorch-right-menu li.toctree-l2.current li.toctree-l3>a{display:block}.pytorch-left-menu li.toctree-l3,.pytorch-right-menu li.toctree-l3{font-size:0.9em}.pytorch-left-menu li.toctree-l3.current li.toctree-l4>a,.pytorch-right-menu li.toctree-l3.current li.toctree-l4>a{display:block}.pytorch-left-menu li.toctree-l4,.pytorch-right-menu li.toctree-l4{font-size:0.9em}.pytorch-left-menu li.current ul,.pytorch-right-menu li.current ul{display:block}.pytorch-left-menu li ul,.pytorch-right-menu li ul{margin-bottom:0;display:none}.pytorch-left-menu li ul li a,.pytorch-right-menu li ul li a{margin-bottom:0}.pytorch-left-menu a,.pytorch-right-menu a{display:inline-block;position:relative}.pytorch-left-menu a:hover,.pytorch-right-menu a:hover{cursor:pointer}.pytorch-left-menu a:active,.pytorch-right-menu a:active{cursor:pointer}.pytorch-left-menu ul{padding-left:0}.pytorch-right-menu a:link,.pytorch-right-menu a:visited,.pytorch-right-menu a:hover{color:#6c6c6d}.pytorch-right-menu a:link span.pre,.pytorch-right-menu a:visited span.pre,.pytorch-right-menu a:hover span.pre{color:#6c6c6d}.pytorch-right-menu a.reference.internal.expanded:before{content:"-";font-family:monospace;position:absolute;left:-12px}.pytorch-right-menu a.reference.internal.not-expanded:before{content:"+";font-family:monospace;position:absolute;left:-12px}.pytorch-right-menu li.active>a{color:#ee4c2c}.pytorch-right-menu li.active>a span.pre,.pytorch-right-menu li.active>a:before{color:#ee4c2c}.pytorch-right-menu li.active>a:after{content:"\2022";color:#e44c2c;display:inline-block;font-size:1.375rem;left:-17px;position:absolute;top:1px}.pytorch-right-menu .pytorch-side-scroll>ul>li>ul>li{margin-bottom:0}.pytorch-right-menu ul ul{padding-left:0}.pytorch-right-menu ul ul li{padding-left:0px}.pytorch-right-menu ul ul li a.reference.internal{padding-left:0}.pytorch-right-menu ul ul li ul{display:none;padding-left:10px}.pytorch-right-menu ul ul li li a.reference.internal{padding-left:0}.pytorch-right-menu li ul{display:block}.pytorch-right-menu .pytorch-side-scroll{padding-top:20px}@media screen and (min-width: 1101px){.pytorch-right-menu .pytorch-side-scroll{width:120%}}@media screen and (min-width: 1600px){.pytorch-right-menu .pytorch-side-scroll{width:400px}}.pytorch-right-menu .pytorch-side-scroll>ul{padding-left:10%;padding-right:10%;margin-bottom:0}@media screen and (min-width: 1600px){.pytorch-right-menu .pytorch-side-scroll>ul{padding-left:25px}}.pytorch-right-menu .pytorch-side-scroll>ul>li>a.reference.internal{color:#262626;font-weight:500}.pytorch-right-menu .pytorch-side-scroll ul li{position:relative}.header-container{max-width:none;margin-top:4px}@media screen and (min-width: 1101px){.header-container{margin-top:0}}@media screen and (min-width: 1600px){.header-container{margin-top:0}}.container-fluid.header-holder{padding-right:0;padding-left:0}.header-holder .container{max-width:none;padding-right:1.875rem;padding-left:1.875rem}@media screen and (min-width: 1101px){.header-holder .container{padding-right:1.875rem;padding-left:1.875rem}}.header-holder .main-menu{-webkit-box-pack:unset;-ms-flex-pack:unset;justify-content:unset;position:relative}@media screen and 
(min-width: 1101px){.header-holder .main-menu ul{padding-left:0;margin-left:26%}}@media screen and (min-width: 1600px){.header-holder .main-menu ul{padding-left:38px;margin-left:310px}}.pytorch-page-level-bar{display:none;-webkit-box-align:center;-ms-flex-align:center;align-items:center;background-color:#fff;border-bottom:1px solid #e2e2e2;width:100%;z-index:201}@media screen and (min-width: 1101px){.pytorch-page-level-bar{left:0;display:-webkit-box;display:-ms-flexbox;display:flex;height:45px;padding-left:0;width:100%;position:absolute}.pytorch-page-level-bar.left-menu-is-fixed{position:fixed;top:0;left:25%;padding-left:0;right:0;width:75%}}@media screen and (min-width: 1600px){.pytorch-page-level-bar{left:0;right:0;width:auto}.pytorch-page-level-bar.left-menu-is-fixed{left:350px;right:0;width:auto}}.pytorch-page-level-bar ul,.pytorch-page-level-bar li{margin:0}.pytorch-shortcuts-wrapper{display:none}@media screen and (min-width: 1101px){.pytorch-shortcuts-wrapper{font-size:.875rem;float:left;margin-left:2%}}@media screen and (min-width: 1600px){.pytorch-shortcuts-wrapper{margin-left:1.875rem}}.cookie-banner-wrapper{display:none}.cookie-banner-wrapper .container{padding-left:1.875rem;padding-right:1.875rem;max-width:1240px}.cookie-banner-wrapper.is-visible{display:block;position:fixed;bottom:0;background-color:#f3f4f7;min-height:100px;width:100%;z-index:401;border-top:3px solid #ededee}.cookie-banner-wrapper .gdpr-notice{color:#6c6c6d;margin-top:1.5625rem;text-align:left;max-width:1440px}@media screen and (min-width: 768px){.cookie-banner-wrapper .gdpr-notice{width:77%}}@media (min-width: 768px) and (max-width: 1239px){.cookie-banner-wrapper .gdpr-notice{width:inherit}}.cookie-banner-wrapper .gdpr-notice .cookie-policy-link{color:#343434}.cookie-banner-wrapper .close-button{-webkit-appearance:none;-moz-appearance:none;appearance:none;background:transparent;border:1px solid #f3f4f7;height:1.3125rem;position:absolute;bottom:42px;right:0;top:0;cursor:pointer;outline:none}@media screen and (min-width: 768px){.cookie-banner-wrapper .close-button{right:20%;top:inherit}}@media (min-width: 768px) and (max-width: 1239px){.cookie-banner-wrapper .close-button{right:0;top:0}} + +/*# sourceMappingURL=theme.css.map */ \ No newline at end of file diff --git a/docs/stable/_static/doctools.js b/docs/stable/_static/doctools.js new file mode 100644 index 000000000000..b33f87fcb249 --- /dev/null +++ b/docs/stable/_static/doctools.js @@ -0,0 +1,314 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for all documentation. + * + * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/** + * select a different prefix for underscore + */ +$u = _.noConflict(); + +/** + * make the code below compatible with browsers without + * an installed firebug like debugger +if (!window.console || !console.firebug) { + var names = ["log", "debug", "info", "warn", "error", "assert", "dir", + "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", + "profile", "profileEnd"]; + window.console = {}; + for (var i = 0; i < names.length; ++i) + window.console[names[i]] = function() {}; +} + */ + +/** + * small helper function to urldecode strings + */ +jQuery.urldecode = function(x) { + return decodeURIComponent(x).replace(/\+/g, ' '); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. 
Multiple values per key are supported, + * it will always return arrays of strings for the value parts. + */ +jQuery.getQueryParameters = function(s) { + if (typeof s === 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. + */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node, addItems) { + if (node.nodeType === 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && + !jQuery(node.parentNode).hasClass(className) && + !jQuery(node.parentNode).hasClass("nohighlight")) { + var span; + var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.className = className; + } + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + if (isInSVG) { + var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); + var bbox = node.parentElement.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute('class', className); + addItems.push({ + "parent": node.parentNode, + "target": rect}); + } + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this, addItems); + }); + } + } + var addItems = []; + var result = this.each(function() { + highlight(this, addItems); + }); + for (var i = 0; i < addItems.length; ++i) { + jQuery(addItems[i].parent).before(addItems[i].target); + } + return result; +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. + */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} + +/** + * Small JavaScript module for the documentation. + */ +var Documentation = { + + init : function() { + this.fixFirefoxAnchorBug(); + this.highlightSearchWords(); + this.initIndexTable(); + if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) { + this.initOnKeyListeners(); + } + }, + + /** + * i18n support + */ + TRANSLATIONS : {}, + PLURAL_EXPR : function(n) { return n === 1 ? 
0 : 1; }, + LOCALE : 'unknown', + + // gettext and ngettext don't access this so that the functions + // can safely be bound to a different name (_ = Documentation.gettext) + gettext : function(string) { + var translated = Documentation.TRANSLATIONS[string]; + if (typeof translated === 'undefined') + return string; + return (typeof translated === 'string') ? translated : translated[0]; + }, + + ngettext : function(singular, plural, n) { + var translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated === 'undefined') + return (n == 1) ? singular : plural; + return translated[Documentation.PLURAL_EXPR(n)]; + }, + + addTranslations : function(catalog) { + for (var key in catalog.messages) + this.TRANSLATIONS[key] = catalog.messages[key]; + this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); + this.LOCALE = catalog.locale; + }, + + /** + * add context elements like header anchor links + */ + addContextElements : function() { + $('div[id] > :header:first').each(function() { + $('<a class="headerlink">\u00B6</a>'). + attr('href', '#' + this.id). + attr('title', _('Permalink to this headline')). + appendTo(this); + }); + $('dt[id]').each(function() { + $('<a class="headerlink">\u00B6</a>'). + attr('href', '#' + this.id). + attr('title', _('Permalink to this definition')). + appendTo(this); + }); + }, + + /** + * work around a firefox stupidity + * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 + */ + fixFirefoxAnchorBug : function() { + if (document.location.hash && $.browser.mozilla) + window.setTimeout(function() { + document.location.href += ''; + }, 10); + }, + + /** + * highlight the search words provided in the url in the text + */ + highlightSearchWords : function() { + var params = $.getQueryParameters(); + var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; + if (terms.length) { + var body = $('div.body'); + if (!body.length) { + body = $('body'); + } + window.setTimeout(function() { + $.each(terms, function() { + body.highlightText(this.toLowerCase(), 'highlighted'); + }); + }, 10); + $('<p class="highlight-link"><a href="javascript:Documentation.hideSearchWords()">' + _('Hide Search Matches') + '</a></p>') + .appendTo($('#searchbox')); + } + }, + + /** + * init the domain index toggle buttons + */ + initIndexTable : function() { + var togglers = $('img.toggler').click(function() { + var src = $(this).attr('src'); + var idnum = $(this).attr('id').substr(7); + $('tr.cg-' + idnum).toggle(); + if (src.substr(-9) === 'minus.png') + $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); + else + $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); + }).css('display', ''); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { + togglers.click(); + } + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords : function() { + $('#searchbox .highlight-link').fadeOut(300); + $('span.highlighted').removeClass('highlighted'); + }, + + /** + * make the url absolute + */ + makeURL : function(relativeURL) { + return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; + }, + + /** + * get the current relative url + */ + getCurrentURL : function() { + var path = document.location.pathname; + var parts = path.split(/\//); + $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { + if (this === '..') + parts.pop(); + }); + var url = parts.join('/'); + return path.substring(url.lastIndexOf('/') + 1, path.length - 1); + }, + + initOnKeyListeners: function() { + $(document).keyup(function(event) { + var activeElementType = document.activeElement.tagName; + // don't navigate when in search box or textarea + if (activeElementType !== 'TEXTAREA' && 
activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { + switch (event.keyCode) { + case 37: // left + var prevHref = $('link[rel="prev"]').prop('href'); + if (prevHref) { + window.location.href = prevHref; + return false; + } + break; + case 39: // right + var nextHref = $('link[rel="next"]').prop('href'); + if (nextHref) { + window.location.href = nextHref; + return false; + } + break; + } + } + }); + } +}; + +// quick alias for translations +_ = Documentation.gettext; + +$(document).ready(function() { + Documentation.init(); +}); diff --git a/docs/stable/_static/documentation_options.js b/docs/stable/_static/documentation_options.js new file mode 100644 index 000000000000..f8c020d77901 --- /dev/null +++ b/docs/stable/_static/documentation_options.js @@ -0,0 +1,10 @@ +var DOCUMENTATION_OPTIONS = { + URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), + VERSION: 'master', + LANGUAGE: 'None', + COLLAPSE_INDEX: false, + FILE_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: false +}; \ No newline at end of file diff --git a/docs/stable/_static/file.png b/docs/stable/_static/file.png new file mode 100644 index 0000000000000000000000000000000000000000..a858a410e4faa62ce324d814e4b816fff83a6fb3 GIT binary patch literal 286 diff --git a/docs/stable/_static/fonts/FreightSans/freight-sans-bold-italic.woff b/docs/stable/_static/fonts/FreightSans/freight-sans-bold-italic.woff new file mode 100644 index 0000000000000000000000000000000000000000..e317248423c75c8291b49bf7ef10e792167a1623 GIT binary patch literal 39560
zW;_-y45dmPn>bdvlaY3+w}4U}aCOb*XqrhHxmO$Vd>+C=6bY3~W_vm=o4b1XDr5}K z+C$nPN>tNzHT1ZhMMH-gBw!<~mh<65(o|hn^2_Wrr-b~)y>(wR=1>72C8}u4T9cDB zU-(m1KIvd%7b^}xuH=tA!J|2w*hqDj&koVCcNM%(Zu1*=#k_Uz^>5x$x-(G_7mCoH zm@kOfy`BonWq_oj$0_E}Q zotn(f2?!8f%Or&-Q^^$qWE(^l5oxd;%+9tF7{W>b>SZ&}=GzECN$0VeIbvizfb@I!JHvsaNYFR*H(7PPU9SnRX+ac7^|k? zf}rRYpke)KP!1{p`&rhq^JLl%Jb^@7&1NKwqdG*W3S@-dM2SPC-(ahYfwuj=T{5_n z)0M?RGN>xjfoGI~RzW*o1QfU=Di{p$u_Pnv*hw*&U~;nYE;$Tv@)PsRC3O%b1nIoi zQxh$_k?=*30L=o;anpLh2wvYDk|gX>e=kRxf8zvO@nVYjvBvq@9eVb(rriV~111?K z%b)p`D_|fW)p4s^kD$jv*xJO7hm>|g5-nh7hsP&y&f6!!H}9B_NuGPy-)0lxDn}3@#TP z5`dy@$3C?aC@>&C?nlJfEJUHLsH=`%M{x{Hkpoa=5wPf#e>WolD1+g$h~eCDrslCC z#{U1hb75scD6gwW_4nxuCkqAx8f=T-*Ld1Ra#N3t%}7RNMVj8UZFYbg0oB5+CIBmP z&U7e3TL2i%gT}Qqw-bS7sH2=frW#LqH~n#yBQAzelbr9Yt|%=?s7elNI?wYL^e_m( z;!|vjRBk;R@EDDxF$A`a)`}ZK;i1$kDR+NlEhFNNvlOmI;W1oD)Jh(|8DPt=$SjzS ziFkP!#W}#LTZauSyG91khyYGNCdo6+ATYwTFHGc&Xpy5CFDs)_Pg88kP;`nNoG{}A zJ1QdW$?s-AMnzH!xbPqfhPU8Npy5*kQYr&W^Yz3OmkfgQb-_{k8W=P+5#TVe1srA) zV|s-A%7=x|Gy&#;#(`E`u_llb)HJFo<+yX-QuP9Ej$3kOmojekQGELl(wqz5Cu#O* z;Rt9#CGSs~GS~1iijRefF0h9*?HM-js?T8h1EZUz6kxw&saoXFbn3-sG>k znEqt|>Tp2ID`B?2NP|Y~gZmg9CHAOun|B zqoX(BkHW0?R{av(wlDz()hMXDH70zifd(&&HXK_sp4r<};AG=pyUhTBb_ECWx;$$( z`+;6&ItnHG^&q2-6!pkM))75T8efQ`oUlbU(vzpG2m)?y3ZkeHF|#D_CTw=@sn)yT zJ7fU0H`hFs(bR7N<#hVS=EnL9sP$NaR}n!`bbykQi#79GU6uipWC;UgVv}PEl`@wN zpheM*NLE+8zL<&9uLku*Ch$f@ko9_bY8WJ1(HET<2%%;f#4)#=HnAq&ESR2?4LI;W z0^4MobPGTCoi0b|LH>n`Egs(u3g2xy`r%O=@FNJAZP7pbTd&P^vkPB3=Lds5E%FFd7ZHV5 z_okNUin!PzC@9oI?J`+IMvn*0DrQq|#{F8ACmy{ftTaRIc+)J59{&N2gr5uE<@$KG z=?{APC10TEF4@3J!&b{e)Fbq(O*t@e%Z;ZA+wvWzce@b_y%^JRLi7^6N~qN#EkR}? z`%Y*aTo-{wBBzRW)Db0;vG07iH?^3o?OLna87M)!lVcBAXF4)&Lq#D|+b5ZQJ`^-c z#W|W3Rf;Nx2G)09Za}Ox{}p_cAP!9nNuypuK+6K31=y?vV?{uPcE*)O3?Sn|M#%Ad za}2yBo11^H6Am`Hu~NDW2%Bp7o)`#~W}o8CJ{yRt#_x)`$3~SCj@X?(M2&D)Pj0$e zZ2>#1ZYL2O$3?e6;DDj=Un&A&rgujShJ^F{70r6$Y4#DXZ8VW!a&PvM$qpRpqJG${ zE#hR@Y*RPWn$Cj;CP*cK4R-7?8>Z-QwDet_aWWF@-23#o;~gu^y77>8ra?pgIGh?nImgz*3FpvYZkMkQ*9j=^?s5~j55aNbgOqP-y1Gu>#&)_WZWPN*qsGRry&AKN^o!z_kqk8Cd^3el_c2#LL0pRcJ^7 zVH^Mp%mtBXT(2tDm=rF1>^1?6LuQ%niuBk^j~K6+_WR8~8hp%ZaZkGQki`+GMeb=E z62*9inq!ifM3JF}J4H^caxz!FhdH-)a9G^b76ej=pMb;=*Pg+@73OKvk_LU+To)m? 
z)>R>aE%57fvWuPrPAwJFGP9(=0U2ZXy z9c{S2nb*Lo7tUQGs?4Uu@ghFfFq~X1{7425PCC{89q3I!& z_S9-0lTR@)@e#h8nNz*lg1JAMKrfG0#!c(HLYSnSBG)bChNfV+RF!(|_D9=Obt;p%IcqXQ%Df-kr zyf=EG-^;oyT%vvvK9OP%e|LI7#iFTq1h)MF$dx|#`S7Qqm!tJYiR`5d>MS~VwQCQx zp`^LRJ>1qGf7?BDkBKLprlb;aQQDNoFgWtmb+e|#3>bIdxY-njB8syV!Gzi8hLli2#&~0V*Gn zD9DhJBIEN4WoQa(_6!|ph_2y;J6a69)+d#8d~GrNoZ)iAW!u{ zVpG|&HasnJ`eO66WYOh>P3eGIjiQBnqYusHy0G6*EhMmR$C79xCGF4zTYis=&fdaw zHnTD4+yq4&e{6F~G8enoM0yZOU(6F4Gzty+j8-%tbX^b93<~-t3Vk#~4RDsK(o+ag z4C@WXb6`Zh7l_s4sbj}nG)*DYcrh~vVIxD5#cVv2EStrJpgLe`_M;N$WB?-wBTaG^#S@QrL)2Q0%Qn{gTM49?;~sDc;k#&6BlwO1z7(khx3+dWT+h|d8J?gMTUJYS zB^Y)*ACrR67n-ogc588~o;WT;irA9oSs}U3_YYp0v^gWp>8#yb76Q0SFRL;|Ktasy zf4cI%t321DT;^gBcGwF7Gm*_E79nJtuGLWLk!3N*N$XTk7r|l7t;_u4FHqct_aT?H zx~o|RIw`uPYJvpTIjn5S*F~02c|j?zW0w_I%m(B7&f zcQ>Kc(csg4x5Vq6qM?aS!b;K1IX)T{MXZy|_0jz3#yU>6OvPHkJ?SR+wW_4^k&%0m z?mxrb$n|jk`f{G%1P5cerV9^-;?qKOl>E4q(LZK+%%WUD7x^yNDX##yeT0+n8g56= z2Ve9XM1qR>{?NAz{na8xSg-Tq<;14t?caP`DL$)~6PZikdU%?&;TUj)Ihg~IaxF)) zoXFT!jFZ`m8?CP&?m(Y?nN=aK_P6vox<6Wgi#B?k5Nu>Xkz>| zR)WG<61R$BD6PPen&3()b91H|t`I{Dz*?fRmAgiM8m4-KAIG{d5@b056W|hOMNq6c zfo|9~r`S*hjmGyze6OevbnC@>x@#)Y%{-K{VIYvg4KCdyZ&#(o=9JODb|ToA#VgIe zpCQh1!qhq$R_UDMhL@2_z#_GyT$)Lm-ca5a<0GSz`H;7-cT4emE=svr}q z8xD~zq%x{baWBWJs5`NAWQ`rzyDAK~f#d@c(DqY@8=iE=6C*YD@Fu!D;ul9Ai6u9X zI$o2`3L&ssGIbppXIx6Ut)#(Og`gml?@5%100HSC-7fJ%yW@WjvASI8kP6T#8lWZO z%5i(pW_vBduLPR(aY&&lWH-VyuD|o%2=re4@DH?*U>>*V2#@{kUn+9>NPzEROY?RI z`IucJQ7Q$HOYD5tE%koC!)I?vvC|zaYgiFoS3-UxPCe_ezrh(Up(Bi9fy#t9Q+ns1 zizf~M4k=|wxCc*tz5^JEa(J)<E_|_h; zl``ZaHCydL{+mMDuu-H|BaPny^rz|gUc zhKI6}SYE0IRIr(rE{dBY8xI-Rm7+q`J2dmHrJ;ZP!n8xNc=~~Xcg?k7N>Lmf=?Cwl#eB2Ge-0lg& zxhT8H`4A2wvu)sNMr7Pw{E#27hR@ls8bG|WYI9i<@ ze;!rGh)zh{ErZkf*I|BC(=~h%FX%!=mfqpmjxKg3Fz!nl$V*edz; z6t3v`BxkwtI9xG<?QC5N0@R;>L zX;?rU5L&eQU?{2c+Yw1d7MAFi3qQBTNw#R$?R;kOrCEY8U)@8l_;V|*Q~h)@Uv^*E zz;l_mV$>DzVHT!eq{IjJ8eH(9^hvqooHT{o&|T*t0x$ycN6Cn4v|Y7yA|JNax^X|95vhLPD-J=x3<6ZCWpA&}QOD zmN77y4Wm-CUW&8PWZ5`kw(4T_5Wt?`*UQ5AuuY@b4=vhwsWR5f7>Y^pi~8F2hXrEs z0b4D5U3caQp9vh4C_x*%wp8O^jesM;n7jlbai<7axhY`k1PKOd>SR(pSat!J5(suo z+3etR6JI?PNTk?;lOUw-RDfyl1kTu^#OV!tW#Jojl@$?O@M-8V66l6$9mYz^m{}cW z1QX;n&vy5~pO$*6(q+{Az;QyW@g#Tl_};wqaD>`V8nb~?t8%!c*MvkfH?FAop;%Oz z+7`(&j*Z82C1wJ*kt2UHS2A&_wR`Jvk9ZV9FxL{%Iu6)Qw8U+t)wy9dgXLKOL6A+# z!0@Kofrbf(N?M-%^zmj;$w}U9_b9opJv%cZjSTB3FiH19=E&7lDVi#2bJeK*0v;YpgbXu$W|uby$yPye$_cG5+rH; z(@rw9(X&Z`y2{uYok{JC9!cW|wol=dzEN(2@$Z3KV!^kCVTUhqRDHk6tloXGioB** z;QcbZ*J*mJm-U4TNrs!n&UdyFJ9i?U z6q!J;EcAwTgjnk_Hues=cJvt{OL{ZF$fA+G3A3lMwL7{~gpoc$yHJnp*1(dYJ0j$d zU-|7-9oa-JX1CK>+-%kp=R~B z@`MY{DN%zK@AVe(P7d?2POD>~dwd$Vyb)iz!w*y1U|Ew{ zr*{yD1hSX)D9chhuJ1kI&LSR0|40d9D!3*KeO{q}R0Y=sj?#4FSagtc1^Ffo$=_Jm zYp;T{5dvh#WnszuJU1h|;~Kq(836Em>I~H7X^^w8C=5qO<6C<;au;vO8!R!yL|Id* zpg`V+$i!i|v~zMs1*x%rkhfYpNzzF>wPODaRX`Am5jwZmS=iU~59#?~JPfw1O%~Q- zp9SQ~jM2)tjwZL#)WdKYJggdO4H2zPAJmqT3Z$MW<-x0qZli#lcsdpQn*~W2Frbn8 z@MCpFJ1kh)Xt>k1AYQ=UZntYbSx$da6XIh}a zjDhrE0b(ZT)db2yv4Vx`v@Cr1(mHxYe$mDx%?Yj3C&Z8&)TdvFLnbh)-!Wx#A_({K zMHj@?+}Q@7S?ah>4ey8}24+4h$PJzf0t`k|JrsBib=tm9PY}*B-wO3S=aNR}+P@w~ zqxC4Qs|9f+gHlCfKJwlaTiAX^jP-fm`?ijj;~rb0rZCH<$i(>9?l>9z($6F%LVfGMam)3NCOe;vE4SYHo(jxvV+?J;?X(hzTJA%`1zhU|>&Roibf;UWMU$^5NwGa| zva#ERb>AkVtZz;#j9MqEb8xq(a9-S&TeM8|;WU%LGRXA|p_KkOAwe;Z@FpdgmB}}C zhy3{zE<}7N!>gz^!5CzmH%c0(=18eFZz^in{=gGLqU}-g_1MGrf4#lPZ*02j!FxRd0r@vf zd8;}Dxgwuk>WZycV54nu0()(DW#-SX>Tprld-Upp#l80Knxve}e%EUiY08t?s8;2v zN3RKU%_7H5@&~a5FPZSx7utJ6=`)xa##NKB>`g5K8GBwDMX zq)^31h+qqIG|SBVb%XR1B=sZc-K8pg%5>UGJu#kaAr5k&F85v5F8bNlY6+X;*JvT$ zp;7pPhUf9=)_a#+bElK&{^%5rfi92tbbC6 
zsQbCTVdCFtI}r|h4^@Ls~b^im3QwsV5IS#DG9omyqbk}IfHti`~giz%|7F7 zcBxcziW%O9ugT>!WFHE$)@g73=e2^_)rJzqVeaWnO(9{l7+pWwhU!aOY^`hW48%=f zhKstCcDBabwEK1qnnv4$RDJL+3C!wH>e5=i3^lwoEPaO{g@oDiyK3!fkZo0+tBRTZ zZ0#bZ=bF?F@>84I6z@49S1GI|Qo9K=8d^~#?Yd{mSEJhEm3G$Tf#o1h6m2*=)-j?Z z+=kBx%tM?QXuk5HG$~(=@nT`Tg&I#wB_|}fAnR&DIp_!-rQnvxL!2n2t|YEuRjuE0 zavhP=v$RS}BeNzzEv3OWb?)cfdq9pG8xJUY4PX$|fK1QP!Gc#mo|I~JVEHaHzr)pm zCAGDZQ(9`dZ!1#Yb79>v=KH|hEMWCOeKi2{In7`rtxDmti&SbkAIlFLA=}ok0(r%x zxqVndF@2$LzktO3+G5c=CSEU?M>`SCUZO{EU?CEq3QJx?3U;rAyFVZb9u1 z1^&azLXVBv$Jx?W3Dgd1_!@%;&J=|ec+PFbIt5<%@O_vB+!3Lpx?pGMH}=ap+f$F_*3yTN0$E_T#S!VX%W{T8eR-)AmexH2>6NLG4(ke9tWIN$jJtPFR0 z!PIq!?nmK2-WQ9_?0Pzv+Y;(S$JdMpx5^cstt~Kx7tdn~?j$-5yD`w|Atw@(@y@yv?o8TpBcfAIgBEdU(GfHYEJjDHi*!S|nvqWD~(Hmf`V)#9| zG~umCb;G(^ESAJ~{sflFBuhSJ{cGVMu=!a5f!Hb4$(bGY1VQ6)+<6(a6>;+sf)WS` z={zN#2%D>yEd5>Me;Mb=n6ejRGJedaL^TspfHi;dX}E~ZU;P^LC_0(6jAtWzO3~TJ zvl>$>|Ko;FS`lQb96;l+jhIPFfgOu|$PT=y-K2#EXFZD*HpzF2iPK`HB5wBWoS7 z++>bIuNDnEI^Aa%Qhc8qE!iQC2Wfn`KY`Xk?9cO&+EqfCE9~ZuuZHEK1P+Z!Z#9<7 zoB7+-m?++M^)cpy#JZ?@rMvKOfD2=3Nfyz2I2g)>7sg4$%QzGq?CW31@?lf zh0^t|FE`0W`gBq>f2rPdlSO}+6i1)}ux(Ki!ZDx9mtIDv)0w%E!;%;aWG0A zG~#w#c!Z>lbm6{C=a@vA z4hXMIG_-@h;k*4+>28q!eXz7%ZyIt)RqLb=AiGHoN^lNq0e1n7h1oA5mP&J--K%oQ z&o*(}PT1bis>~m^nNoQ(YUU1vmu*^^=qb%gB7Dlce|p#Tw~}~G549BvbKa2EwLnNp zda+L#myHnFjf?6QmaO^^%7MLQrw}T%RLsbX4mA7bCF%wi%COKaWC~WboM&l(C(vdB z!h=y+1}tJ}8gHIg)FAxTLa~CWD$1kns#yf00&P2h(X3=uGly&d68)j~=G5S(VH*F- z!$W%7?W)n@=CsTchya6>>dMxnU=p;y@i0|QfGbZbOoB;z9Zt?5yD|lprI4`u@^PJk zrBNPLxT`6b7oJG$6V{A7$PFiblz^8Ks{J+YHjznQVT?Hw?`{~LkN5|(gku)NZ5lSo zC+c1X%nU3=RWsqq{+@=tw+=!sXs-E)TQZ?&JG==vesip>MkRIzEPtF~hmNU95PHfh z9HXao@+E}*?`x1myKC1+ zqcOAem~c+Gy?B|aTe(lqAshNL6z*iB(zQ36%GhSaWN8{ev%kTZVs*`OpcMERK_v9pT5- z4=nWyxHRPJ3LeIi5-21G=OR$}=HM>cI1b?=<=_`;d|!;OQwq7aV>rxNd67 zB`cli$Ol@tsj`MS8F4S4IA4&Iqyp=g3o0WuUB*!3Zf8_v-2~p5h+$3>GMEuJT-w~? ztks2-hDdJYw(v#`F}|LS7D-W-7fTh<;&-vTRT+wrCZ6( z8xmr3NkbpZ3x;*bB>!Nhw`!bI&VV{5N*ZoE?mafRiPKCI11Ic3sGlJdLOw6Z2nwWX zCyLXcuyqF927S(XGeXCC_1lUZ0lFXUxk*AUg2^y>OJW;=9=l8pNH<>fVDJd|P>sId z@cOv=v=~|4#sbBIYrUS5W$fGqZ#wzPp%Eg=KLY^yO@#*4Spu>od@~^p;+Y5cQEv#m zPmNyw;5ckFYq&irF|4|D;;lk?9DY39Qf4}vgPX>499CLxp_{%y@kFYL0Sc1Okse?~ z64T36JthJ)1&oRs-DwZe89Rm8g0S-I;mh(3Rzna97bec>qCd(?u_;g~DY6?1_lbL7 z6k_pj@a9!;!rGcF?ktjSs567uSY==`+YF;L&B=2olcVrmEE9XC=EcbJ2ug<{PB52N znPS`pmOmXh;H~X4HN=JHu(+VjW(uLCD5NlQG2coyj-7x|)IfF@ z=-k}1a92pAFq}8aiO~UXdpKVX?>0^-_(dtnQ(1wZEHx$0X+|XYQcw36iH{R7Hqxq!c6Q7n zvpj6K0GNx8ED&A(NlYW$OL0bO$FCS-;^v)ZZ%*Z-p<}|ydzC9ftQmDPPBX1!al>tJ ze?aVHl%oO^J6^+X$=KJ{15i_qhM;jph3aeIYrOrL3W=8a8=iI6|5% zYY7l78uNo9f=bj=1;f}%Wu-+=FIi!F6ZXd3XOb^W&ZT0PhS5?S?4oXXur)a>~G1Rk`=w_DaQ+ zn<_lBq-oH^IE~ERhlwplbUT?N;>i2B>WC1iB<5H-;7jjfcG^XwSGdy$Gq!sVO5B=j zEZLn6Fb=bdTZ$^Pq}5gm!U9ZECD?1I1+xa>u)5i!nskY#T3c2(MBQr2RN+A+UUB=C zhSZnPC<;dTGy@aciBhhYCrI1RJk+-TKuG$;mFnQk&!-wS0i z*l7L@8mLx|jU^-Tg+A@F^d*CjyDJ9OZn3B^ZSf3t<5+ZaojPf*A5awaX62+>j5;-U z?J85-H%u9ugiJ}K!cJeS?%-hq3eHp!6JzA8UQl0%gok|C! 
zT6|jxFl#M^51v$mBBL-g)FV;{D^3h#4#5;{8bCz#QJ3HSMtl??BPsc^WT0bipWH6y z&IG<~%er0?h@rlGn_wjIj$0ndV~unI?qCvj@WhKwf>@!*G{bsmdWhs6l zZfGiksf07jY@%s4>;!P6aw(Zxwm;OPK(l=LO-__;XEoepR%SNBif*f4v3FVAxw`S< zA5>e16E&?lgziCro<)!F0sDKrmrn7U8L^HuC7n30EJIZ07 ztzlzB)xxKbYRGJ*$kAjD1CwS;5D>{0IL}>6L=`N0L3v*koW`A>YPi7%ImW?rbbMcl z-+M?kP?5S-Zc|I9+c>EctU^-7wJVtO+8U_J$wMf2QoYq8ZxWip<7JnSm-|7$G938}(2@RYDyA{(B3zhVG+isheagqBk^t@3Q zQvqE=bYt!@tts*A)|^mwb+LhJ-%9oVwy-sv=VTK{BdX2-o;yMraJxK;3K=ttIS&g} zSoNmVOX)V;HEy@AU~iSjn2qT7s5G2>8&;9wrd1Bv_gvDM{a^ga)ke=tiO{2mR;izy zuFLaE#pRgHyjZV3yiI|coro)f8=7q3Dmcs=!QLDfdO0|9n|>!J{Ts@`M%9uJ8JM5c zww_8MeMT?|S!e(POU^6PYjP~!v4$|*a`_EDmQGx3-GuC_kIYIW>rOw|0*}IVhdG1~ z=6jS0W16tlYPrk_Y9Qht6lgt!;G!p1D}7In=jm}SQ-kPp+!92NiTKiB+1yKPO7cxn zg4vz+ONPP!2TK3Sv8RI)5QLPU>Tm<}$$vDE{K@>;|F=R@^La1aTSuD=yuBb`8FqV+ zC(Czr9vVK8Px@9UhkG(%y3cGLfjf=Wpb-Rj3hPz)bppQSPeqj?QYGBiC+N>3bj)y*%%DE zS8yvjP+x#@jMR3MtAfYgi2&P{8_HWr9H}~r}!DlS0dSXnef(AMfVHE_aVWVYp4m-^Qx ztJU6VxsCx*E^H!#j>3SYA%PH4Fmy0+Q7ua|7a-(@D)>K1+kc5wh(|v6A8iIvu8Bwp zQm5UyQUDXquMw82CA#ilE@Am-f)vwS>kPFbUp2yanzuG$Si3MSEk=_+Ag#ghch~JJ zD9}74cZbkPTS|bT!H%|;4&BpUq(;WI0!~7E9uH6&c`e5FI(Kqw9zcvPAX;Z_t697*>mkCkz=SPG5-#p*yLxLD6mXL!meqOT}h~az4 zdG)ok5;b;^K@LqsX;0aSA|Kig40404joAsSyOxY`!&!6LeW}CkIFLw`E|0+lxXJjg zA}femt&{*0Y62_+Uu-I)sUc3O*K)gHXB~_e>kt|`){t0OGW~v@(=1R?+{Y#F zNKORE*WIF!acjJ;7Y?k$dp>!`@04#p(+L)eoxtk)plvDTlJz#{*;XlaL*nQ{x!Z^3 zYdt7QByN9>5(k}^M~M~47~&lc*d@B!j+$|5buP3?eym1%_E5m@1}IR9^7m6?if>Ag zp<%-_FtIJ+yo8GqE@8`8m0CWX=&d=JtY0ucBa4O@=KnAMB&mJ1A8<-%Y6f%QG;H=Y&#;R63JZkb$!iGEMv;2B~ z9~DS`Xr%pWn6@g~8Hx>s4N>mj^oL?>@ovFw+Q3wvOLZO{xr}Ie)E9{u4d)O=d6%ge z)E@T?Z$7-wR!d#U0|!pc?5}tS9Vz>t+!qq{)ORk$774N37`!k|$C5VW+&!3RM`tC% z1~=F$+_d?Vco1lT)x3t>ZunSX)-?;YXMJv=z>mxKG5PI%@yFd9J}+89ldl1rH0BCB zyE!m|Yn_urOz&s2jxiu_2kc!--DGHTT5W$pSO<=%*R(Y^pIDZR!H!@UFNmYlccR#y z!R>#(>u{~;;TK!sdDCK<5~DS@>4yg|cW~F)K@8>Q1yW|54XMMzqeKme_c=;G{Uy?+ zgUMzu5_qMlJVd1kKhFmPVI!me48EEx6iv2vgoOnAvoJJ=f4ttuz8cU$W7rST7aQ=8 zrq)ESkC3=k{Fg|`5kB_=e`WFK^fi_c0#qf-5|d*vO82&!75bRvk#JKrY9F3u$vTZZ2s1}uPc)Df z!(?Dfk+bunf>yVd)km;^tud4R6_{9Lw86~Bg1J8)&s>^02TdC>;Pc?wj#U(ch!Wm7 zjpAS4(VEr@4}BDgGcHf57g!eKe3>9CfA#Q-G2Ggz#%9tCsy;#qPd=FSk%Q=b4F5nC zabM)TMr{kv>l(@qJwZ&*tm#8$Bb*2}?zaF`mxDnWev{>5LHZKoW^8k{MS;!5FKzd} z*+qk47G zlZ(dk!%S!4zL9L@$>*&MN-@pGbhgDx9DTJ-WL6uXGGve|SZhv`q>hG3r;DmHI1^QV zCF<=OvBcY-P=|*-?!LDkZRn}RZW+pBtdZK^QSIrITV|?qr!b@b70bAag-p8bCl0h5 z>(r4=EAtM5X3-O^dt^_&Jl2`eZM56xJvQ+cY~0f@pMuG)I&pRLpiJitRX~dh+R=UFUgJ%WlCu_2Hv`#}UxD!}aWt_RQt% zHm^Wdj?72jD!yBhX?8V!8qfGCZG!bv)DpaQ$Job^RTJP}rVT?TVT7gLkD8|^?1o$Yt7mQn$ z)+_=IVDKDR2BUva2;J3WNH(1$NiJlRvu!|ADA%JH0GH`-H-^`GBC)#c2pj`s#WC1w z7C3(~>X!YZ3y9WWrs>eQJi<2i$?msDl~7a$f~zKGe9Y@PEpNV*_*a)5;36$eA!yE{ zrQzv56J<#^V)?F(u!lF0r{ul)+6Q(OK<{J1XOXn$yjF&^+ORRXKAvE^-TF}>N`a-c z`IEck!w?AZYRnegp7nC_p>|3<)3(4h{mT+}&0=uq4R#)SErA)0G7x{OrIVD>xY{qD z77(ISc*b&YYZqee(4;FpIovGaw@u#|h||QDtabP-uxVM@ zB+)1bsk5Ug!4mtO#WQd6A$_ojx0V43O5Fr$E~A|aZ?ma%hVTzo_ba|ug}cT7IGz9Dn7}I@ynzIdhTKrVT1ec-JI2P!57CW~&*~GHwH)@%|>T&uE14P9mV+ z$|o7Kh3!*{rTt4Aw@o{(REEVx{IM_+T5DO~B`-z+ zHW?`|Gd}z}+~$oou2kyX;Z^COH{<9vtJ4{Q@@{2?bUA7~%vLI-EDwb;h8epnuWGZ8 z>jn5?R!qlqiU@_plXo@E6#I~J7R}Yo*JMfDLrWuc7z^DG_Fk>YtiwZpUp?O2J| zAvRcl$GtVyQxVaE4qt&HD>P_*ZF^s$Sid-J99jq{gPy#Iu z|IWdR$?VGPgimxJUX(FSq%oEg>!59v+@HzfvrgJkRTe$oF4<7ga^&dfx!ZS}67zCP zB|EF5s0+xpP`mxuHNLO^C*H~#!yF}yNbcc)v*t4?qoPWkNVapG=$Hnc^Z3wC9>&Q_ z>A@H&{bp*rn^{*IZ3-Sa-aAPdymR9Uq++hpqvH*RbIk5b_D@c8znti8M!2JgDv%lYk} zgp2Ti@d(569t$`-#2S~j`a|lyie0YSnM3l>DTU;9uu&1o42_WqkAE_GbQ9WW=xjP3`h2OVj@w>8qo z|2kg-NkYR_Kvuy-WmUnR_S3eVHnQ zO*kl4E6)j!f{Ud#`i8IgPA~c6b|=ddGT9}WZm>#L)}F_hsH 
z&dZ29XH6U#kYBFnOuKuOPS;VFCQNr3^~pdX{AqffPL~Ww>XU)Vpk#0|BpI3vQ*b3Z zW&?DQ7r_5-G%7;}O&NLZ>0{z(a10ZD&*TYNYU@#v7h~js9FzKF;0G4eK-$)?GW;qV z*_W6!w>f|9?f(DUjz$#h2rAR3N4{X&5%abgMp0iMI~&{h^1I*t=k}cE^i<7LRo!RK z>FKH-cSQ*a5Kxfs<`)fu{@sD{d;EX-Ki&Ur5)!KN--Oa{E&hM-?|8S66cd;D)(w8k zBHvI2rw4D7{H3S@0s;mD0s=n@0>W+xFtF~GR8bQJ0YRMo<_dm4lUpR{8&gzaVg&&~ zM)|h;=7ShgH#=dP*cmx~>rB6Sf!~-vY!ihrar;dS0)le#%@O+#Xv)|&=8hJ2ARwsp z-<-N{e`YF;OeGdZF5k9jJKvn|*#83*3tLa~Z{4@8@-qlXjVheP7onw@(YJrI|9G7L z0Y#ur*792k0%DHyEfar(99bK3)za>_$G49A+rJVB2zY6a60?M@gUL6~GWt92*f*Bh zC17yuj6A;MT5JBVEio80RKLBEo!PhU+aA>)1Vn;9I!poI(ZS_62#ELdx83Ep&kb%$ zo(D%~vu|79|K^tQjb(waISh9*SzPQ`Z*`VEk!%cO_@`nk9DZ(krF zD|1mF9zcRrN7e%943dutCFfA60jP&fI ztupWih_ZdBuXvuCS{QuWgq!{De5dc;r+H>x{%*hgU3pXGN2N)ja8C6=7P{}c8bwKG z%wt;G;8iO2_mt;YDQc-cY`1=pskTwC+0*ViLaHVm4xhU4eZvC)efET;;~`*z(UAP` zy;M7Ql*vhj;%2(yxZoKdQ@rRpOP|yn5?hAeP#a4h{LVN$avWR6-jH{Xw~B?G44n;~ zh<~i2t^T}}F9Zz??R^Ve2%I*gcGRZG3@QlL2>RD|*2&Q4sn1|#p(Of4huTIKiIYYJ z##G>iqU~1UZ8XWVsn0X%&+qiBjgb$Z4tfc1e@owVM7)~r<5K2~}VKY$r(qs^cOI}&VMo%25D2L%$rmHCZGlC(R zwoKUG47E#ryl8NcV$y|%#GkZXO;ZJH7bV4#bX!TuX%SvxqL8V*wU9ASesEv8nB^x&!QzLgt(RR~ z$^W*Arzc3B{u0Wqhjv-jr0}GxnG9!c0|(5u(9s`vP^b_b-6O|F-T#9r!fKX zAV0>1(e`96<0G3?q79mv=Rizx9GlWav4&$O+2xPM1h>O75HKn=@1;!QV%73ekMpHX zJ7JnnpNc7^F>aksYjZ%DPVzJs%qmF;$~C6G_A^%7vE&@Q;!vDN3h<>E-Xw5SBM2dzByN zY}nPc70mfTpPK?-HbZ)c<|TGO^US^hl@qwWE4UZiTW5)zr=MIO;7GDdstM^3(lWre zBhw1^@9&=qqog!#U0UB|8g_cs@|}VKxyT zy2Wwsv<^8E+K9q3b(rj(i&;kvVafCD#^?RPQ_+ zkp7_V0~Wh3*PipAXrFMOKQ05Ct|9jx`?q^}n;b9+RboeyCUT6x-{JWokO#Qt%22z- zwidAw8Z;;%z@!nCGL7m z#~(9;bHqlmM=@uM&kFJ`SUMOI!8q@|{h`f+(XHy@V2BtBCPdwhYIgjs?c&btwk%YZ zT`QGtdX0*`lpd4#jVr49d|^n&bbW3QXmyi|s_RVgi^Dw=j5@Az2+i9EmKf5_fmWO& zm{iwUGs0l&6qCqPbkRdWx;4rOkeP0@Tk7~+-ty5~hVcmNlShj#^s<+;!4~UWEu)3q zjjpN3cWc<_(pNL5;D3WPMbXt?{_+?iIex)iIAeLc?X4tEmJ}<^6>lNwBCR(Y9jnDO znv-!Q2bV8!ve$+8o3o8oa^X!)RqBzi9k&fnb{uWWWpc;tjddH8&e3onQ8E@<+}HSH z&CzMG2>INfePT=@ZCagniq@kD+%4hkA$YC62Whgl!oz8i8%0*TU0$ zr-|9wj~;hbNs7N8SNYy73WmXer8F*+%StWj*;~0EWkYMUc^Q(G$*MnA7Mrn~nax{J znlXPD`?id*2<)!29Ig!oZ#kT@jS%oXmT{0qCuPdV7=!<~bbb~rP><1403w?6Y$*@u zA?}*2PERXvASpFWcNR#6UZqi#9lL5dhi#1%5}F^i_P9;NH6!2h7gcEEm8UkB6+r;X zRqz*67L_u!3Q{<(%;`PiY$g}ut79EoGT)H7ZMh^jv;5dOI{XD5eAs2P*`6vS+Cyv4 zke5zcA3qgoT*C4dIBuMJ&)ETI?ccY+-PTNe^@3ykY-Wd6zjy?lS?ZT}i4^{b>3f+; z{N|m{+GYtntXb57DnKP92Vla;2L1L9ef<)E9z3_cp}SUi#;{YiIiGsZdqNZei6f-K1IQ4q?lykjQ^1#0DT39T%@F0LLyW;k7jS35KTrJpa1r z_55zlPCFoBbn&)(|2b4;0_T*U%ngkf(eR6V9dK?{SRDD~&;sNEmUe9FiM)qz4zYwF z?w|K_23w}O)Oc;PzRXD9j((*R6dx83jLU^l@crpBw3sfN5HE>eUy$60aPnQ@aCIoc zHWlD}{t+_@%XUbE`!=Y8)n#e=(N?CTSmREpm$h{Jac{qgSI;MR5%9W%e7i>Hda1UG zDFxnbzuQvKV^LwTUCR4XV(oS5fGhvEN^23R)%L>X2a>h{-e|f_ApJ)67e3V;Hi_4l zY@=APCyyfXiH;S)KN|y=!|}xi#mDWM298r{Di=yVxkKK44^EYd?q$X=kI5z-+2abA zTleIw#9fu*P4-|m$g46v>zrm8{V0S<08&xSW}ycaK!VB43gh+x$I|=36Y1Yl!s(jR zJb=d5VB$>Dg6SlUoy1e{ueX9-4|#3R;y>-H=oheUdliCV46md{i=K3&V*Zz?Kr0r! zNu-7-wfVKEW3id$qUK{roCbL9nk~uEM;Ck&zWcLu^E>@)H68!q0@N-~r4(BSzs)(? 
zSc#jcNr^g8VCmCMX~BvXc_Eb`l{}qE?+xfo{rH& zPJ)XjA>PF~?}9UzC`m}O(&v)a)AB^4fxo(gUyTuyS9zG;&s4Rlv%Ix!o|qHujv%W8 zezFUaBMyQb#aC~dRCfqE&hv@$S%sCW?No$6x97(mw$rP$GmPFGP%1M|V5>D}2((iKPGNEUk z%V4KWh*c*Z(3TNr(W{u2%QbxM&Yo?b3*t?Fx{JmCyk#nNmCA7lEeip#mU#czFCF5B zmj36E{^#rbOI`&lzWE&glvSrX#l|$Y1C9YbZoTeIiry9Xy4C^qDg%TzPET4o5mTkt zPScLoYzidxWPSKLg{NMw8gk;07bOX=x5ybijohX0S2}d)s1LIoR)Db<%Op1oaB_(qy}w`JYPXeZM9w<;5Xzu5#l zSTHi^)bV7*^i&5#Cn(?~o28}9{wU&6RQxuF z; zVKK%sV}d3~wXix#mj~#S!!3}yjizBO6om_9=$~=+jPkppDpwf*E_a%iFvbKJROkaJ0{s@kuboRMats^{zlK_tcW_o7)AUFT930Bv0~{t+%pfx@ z5M65o?f>yBP1#$ z4I~dF1tbTgFC<-vM+irxOu4^b!}}NVVqR};01Hfcf{2K)h^Pou`8-;6!p+y$7vPKN z)!=&j`0A^th7jN1@ucq)`i=11uXevL6ncV)U5L5I|72Nqx#X7yDkRPt&dxY)dH@7^ zJ6NuUmgd?j4|iLur^m}PBt*y`aj~I63i6^tGt4)&%-*WVBKhzK#! z@^Vv?4D?l%cD5Ji1o(fm^mI2j1o*o;eSJRO!NG!qeu@tClaUtWn;7r!p`+mffqWfQ zGifaza|vd$sWrM?)>FWTGfvx$PS=TMg7v5KtqwpQH3i3={ZN0f$2r$Lwy!iv4D{p9$4K9~lCGh~MWiEARG*AT%ZWXsh}Y z7X7VQ!6c2<;?g#>U<58OqdWiyBvLlq@4rMD1iTdz1QY=j6XYD^9^@J19TW@{3KRuo z6O`}!k^tET*#(ILxdEjBg#lRxIRc3Q*#|iRsQ|eI`2wW^nFkSrMD+mbDRn65h@fmD zB7+AGt`hSAF&fh2hQ0G3VF#CDj7|AM4q>dgoa3XMK#7C{#SnotxVd?SQkhUB*it^C zf+T2cHsSf~e0&Wq)9GxoPdj>+TtH=(9DKNO^z8bF>3&(30^8-t;>ZO!k; zUEe<8p)%)^_^d$=1srIQapm~}wHr%5=icE$-`bq2GczKpR8tJv5}dm<1bAyZ<^w~&w1BceVUR}8|S zCgQ{K9Bzz#77D*ep$&IR%kb@oHbD)?uk_|}DmHxaXUf|-5cEH5;rF@s;L7qokA?zq zP>uPFX#!xyL?zT-LVwJ3(L)_5zs(3I1svW0{QtrCzQ=N%O@cOJQ1OvF!gP!ik5+@;?`B-a`t{R zE|-7D{*Z@XyuCeqdc>43>vnQq%vOg2ttG5%E?U0ODaer#?FNC(`|)I~ltq*0R>!$h!ShC>ok4VM|mmSJflw$SI3%6o`gf5x>p zt;G6C2>>g6qO-H|r;M2#YvwQ2`uU90FyfVgp-WDnGygL;hO4VG7BhP)19RhChpOwoGY9 zUA~dm&fJ0~5x2()Wqv&T=gkYG9)j*2^BPv~hg~{9s@`sJ z|KyoZxJ#|1Hbti+L6LU_Z|S4VB9FgB7e^lFZ8{S9n9#B^X9_P$64-mQoRk>@F(eK_ zBinz3Y-OH}qMfq3;^VkfU;CfkEpI%D=nL}92)>s(oUi7`5qQ33ONW}*YtbLYW^9cu zf}R@QsntuDy$0Ni)rDO9SEw1#^l(`ymq*XRdzs{-*3#gcGRkJXMKaWR%jvwzn2uTX zE{=Wazif7b5U@+L_`)%z^#7@!8G!`7N?sZY_#Dg&ISC+n z$kVvDOdQ%fJaj90`s0Oe+sq6sJc4vbUXb_<2vzkuSU4=}4_&gSZNgqDt-KK;dSnER zByZ}vxoA3%XBU!^Czltpa;FC&-{70;>nB7v-vmwk#imBqK(rsl<>CcS!z=JGp_G^Y z&|PsHk5gV!WAs+_mS>e-Y-dfICofwZy;$6X-%D!t#&AFj-OR+kUcQMzl#lSpt81mvd-bApqC4iw(x{}K)a~g)Z)l_Y0 z{fz}i=*^}VZQ2~~$_REORKo(#kvtY=__5rMj4V_mSZGe>k1HmsUIRVJKjfS%LLAle za@1Ov`W)5EWd})!Q9l@S3lkM7F{RZfrJM3mrlTfyeQGOL1G_mmTYqV0$d$hkawkZk zNu7yj9Y_u1{c>a&2?sFbVH4soe2e=FryL`Xnm4QG$TlPbqK*7Xx*z~)ot%+3AL;$CYd zfmBfxi&V5&rJ78T63>n)m7&=#BSh<3!9pChAR==gQIsOntLCE|qc)cPxOXdqcblwY zpV|e9=_s{$YfLPW3vz;y`h+r;uAv3Kf@_v>W@+*hz62enwVKB?&v;RK;4m4rP}+qe zi}6)$#;j@ANuqmc3l#GzV@p z%>3ZptDGY%UQk*Q&_{cFOsHF_wtWFyVEZ&w+8*iv&F$`Lh4lTF0u5#~ou8h5&}lf{ zjy4QnHY665Obg?RD>*oUE$En)S|KEhi=+}iIWqFA3{@V@H>Kp&TwkrY5*)5XNlFvm zpp`@5XkEl2apZhNwDc|Xb3kY1rnCl>#U3xqciw&yF>d%?lC7Ihu0D>obzpy9f6`IS z7qqB+IplPBj>a1yB$&SZX85}5w&qg(z z+0pTEC&0F2%nMn(+Rw@wYdeUyW3#aLHXR#C{a|(79i;1dI}yEz*xeRb6#uIi*3I`m z8lK~ZF2Cjy_Sp9w%_>|08u{Z76K2IO>>!?1G`n|Z7pNdgJQ-z81mKnsSlH$EB;vj^ z$mYhY`qt)pb<8+Jvehu{{eq`mB1Sc}9Loc2Ote$D7|{A%pKIkfV~w^0xK>2ROP5yW z&nnO|1Y=-y3y_cdUcKSt)(KTA!!Q}Mo4KVx(iFo>=+)m>gRIxB`+m~Z1=Qs)H1UuJ zdt$(h8MSHWH}L(lm3|YU(tASHD-?K-e`MB!m_fF-iVY^CnuKJIoW}}GbF|fC2@#6U zz+csM1vgpuG_-~P!O#09u0w62%F=op)S#|iZLibybNyptY~mEE1u47Xb_;&nKXQ&< zdc}#!IThq8iR`L$aU-y2l%F=RY@p-X6Y9wW@8>7EJG|b?T7-WG%z2CZ34g9UnY0$d z*%XvA<8fL%Wb3ba2A+?pTwxc9)m{kg-$v{2OEyPMdLG*wm~=!HR4b6!Cv*8$J6`K89P9Gs;pZr^kGimn zY0UQgQgE@VkdvprN9z-p45jsB6W& z5#KS5n=&NqCw7z`VxXfm54=w&YDc;Vs?05tR$Va((Ay1+P|I7-q@_4$o0n4+D z8azm$@qE0h;r3{BJUFna5lsE;dER}+cPi<2{`h+Ae0bUZlC22_{dy2Qc}uw4I`h9O zP(f1UnKkh0%Dy#^=XKyfl$GzPtX9(JwR>|j`dH=Z@hE)>S~|AZ<&*WU@O(OOB{K&^ zH8Qp*+zdPJY+_tr65nzpM3Nrg_O^wLwh5|80?h$fyaAc<5f#RfzqtvP)49*pgK`Dc 
za8=9jRf2oP4CMl3v6mQ_GqduQxzokK>CG77cjFb+!9_fn?x|mTmkLUasME&fV;DNC zKjoB6{4z7*ri2xdYW!Z0)VS=s>`yr=dYrTdvUn1SezfhRyFY#Hc09cwl;Lf!KN#=l zSWIRw1Bbo0u0z72E3FB+DvwTz!>&GnA8PYYJ$1VgF0h#es5`q)A&G`IJfrNJ4yu*8 zN@%NGA%GHFxpN?Q)TI;VJ^o?KwvDfx*zERQTqeQLfi`g?IlF(1J=E#MXnZpk5!0d8FAX+URWte zd3BLuU0AjNA%+}u#9av$&`Ci|2S&-vu4~Wk8Y234R-L$!EKZIUujS7vM#u}!Lr@0u zzd;OP%~&S;H7@tHPzT-5o$>OvjdeOK zLS8o>swFy=Zv>yNbpg-9%S@y+pt#^!azp4Uce_>O#di{ExFCLpP%ilV{7o=F&i+Vf z^m@k={jj?upEFm6M8z{mKu!&3i}1T_rQ{!P&V)aquSAU4TGHf2{!Z{w7BS{_zjdUKGD4Y>%GV|zN0CSZBaRGLp#X!1Y@f1Y zlapr`Hq#>t29!vMJzCZ__dzo;Nw6&(DP;-QYS%J9q0CXHB9CjQB*)|WHf;=%tw`J0^9vJx} zTA>m64Pz`-#9dU!)28L8B(AK#KDQlmG(QU;ONanpJjn$|4h}sZAc&{@b&Ynf#(oDA zi-ldkox_$tPOJM1FqzKdmcLde;2Cl7@zq{&h8FkY3vHP_O6C>#JBsI-(6j zE~&VeGODGut(G{FgQUP15XljLiVQYdyK4vVMZ4+NiU&~GE2WN#XC@CX8 z^z*x@1jsoPP)$kOa;V2N+XIInG5qNTfC@f1=)zF6D>Vw?MN#(^6i#N22$lQ-;mi@Z z(<}m$7%l}msf1;81M&78U>52E*X+1M%TjA>K+@MJ-C(?o#Qikcb88TNTbc3#k!QN> zLRJw4ojWq}(0jUDY7t*^E;W#{TmJUz^2N z@KgGQ#|cwCc@dIVMIm7|na)!mcN&)nl+{EHH+?hN+4X{kapu^0A*3S%T4xP^*7EunzVi>fFYF({Zop4L=VEJ5)8wUP)%VhnR1 z0ZSkKBZ0J3>luQraUmi77jr)OA}cjfdJ0R!&RyIqL}?ld(OwscelF9T!(5KFBGVjc z6k#D7A=Vf+#QWubR$~72noM{ta`rBTc&_U-6%ErVe$Hb2UR)7fVeD_Ey3}Av%)Ij$ z=Cw2MH645nA6o>qu!^5FKg~>?T}zW4`;umWB8#L%_}mFHbK`V zj_Tj9bOp_)JWUF66f(SyC)lW36qa3YDlZHA3z#MEgy~9rR<>v$k5el+wMz) zP6x5ZrHANC!Vk>ZrVGMHrR>onp*;?udZuxDrOZsRx`JZ)U+3$+{hvRuk>9y}zn;td z+f}`#J7U(QGEat%twyjvtY^?2D(AP#WM+rBLxm0M;7)9ywAfTzP}QP_a;t3vu{<@S z=!L}`_wY{!MfN*axz|X~Vn7p;QE{~gEu_T&^SJkL7hV_fRog)CYYXU>2(h>LN1Y4A z3lduXyL)&=PzFa^o82|ew3cXZ)u^W6IsmOE-+cbGC@E)uH5b@0W&z|Mad=xpR%a&x zTSJfW)@ZcbvmTunqTjxmewAYzv>uORGmX=UKxs~>=M6BA>X7K=92@?`E2&KJM>#g+ znMQNItvk6b<3Sq7K;k>$M+Z88ixrc5c?LM-H&wp84|%$&Xm_e;capZq!rM9k;}(uX z)NK6Y>#e6pHujrlA#~D?r&|jxc;1~b`0$Y$?PH^v4=#|S!e_y zE^nhZyC2^htj>tG3_e8@WZ0Ot1|@lOuAV!yOMC0Cl%6EBWC`cFNNl(hKHO2svt zFoi^nnbs8SKyNvo!C~lws6zBX`Yjtsl6ko0?~~DslzX>|thf`awqO>@4d{1*M%?*) zLPob!r6TYKK%Fa2NQDdSyWf)er*7TlHrMT(NzYQSym_nr8dG`DlS}gc)HOlE(G*sN zyur=tKwybg)=3n;+r136_HS6G1gJ&G*)RhoS%V^V>jI(gkAXDs4LAVIG2C*C-OG6k zx^{4-e8J``h?dyQiCnVH1}u@O!m$khlrk-82_ECfW@Ur1Xc`aJRN+CNrjP8Va^RE* zC%vKm>@<7e%dLUxhYWa8&|ya(O-#R-X0=g-Zn!c-|*(4IMjw z6}B3FPkOXul%LGU*7CGXx2U!JFfUY7w1|cNS;Ebgz@Mae@C(u zu(Ar-)uAqyV!5clugA&Re)4*=2b?zcfu!ek)iX$<@4PCHlHX0q+1M93D5Enh`&Ici zj&&Yh`EQdZ^x(g4j$}0lcl_Bm@FKJ-E*i23H21q!JVtBgF2&MB$Veuh60Jn_6mH<> zN(`22aQ0xD2KPKk2q8!JD&LG)q>Ibrcq~*VeJaY6J}682De2C{D%cpzxVhmvdp2G#T;B9NPJux{6@)e;U=#Rk!SZ>ZRtS=_f z;Ey3F?+_@Ly}*(f*rQ<(mmt5A81QUHG;T-y+L1R1*E99?fdOH1^B<+5;-~ve0?2#e~#66^CUX(vYxS-?Cf-FJU znKyqyS1y-EwVG`6)q4LxILcsYEbOi*F+-(El_tC z?{HhNaA(N%wOqrjE~bzdFg%K=XAa<&MIX_FKDoq?4e`Pj{uCjp=QXG~tH|9@@C|W2 zv4Y^25doZ9m+lw1Dx3Ti&!_KQ39Q+DN|pj`hC)ynx=z`CZ@=h&k1R8hTvpI$w)b&G zE%Gn!){tQGJ>Iy|xdrFQi=P`^RZEqNwIX`=?Rb3QKkal@tV)IU++KqoJ{}LB)e2o~sc@3+9)O zm;}Y_>y?ovC6sA}Fy-C|_54Mrn^!fT4R(yeu2GUHRDHH7&m2n}$>Zp8lWD6DI1lw7 zB4L-A7^UR(8)(wdR~WJSD*DhnTlA9#++IvSAlZ=Y4=IDVxLzt30} zKc(M%$iK4bqjS4@?o4XHz8cV;FfdJ9UhMpig6Ud8d6mJDO;zKo?oyl@HBo17s_Y#> zd$o%Oz>aL_A>`*f=jUdX+DpL@vrJk_{Yon#Wt)o_#Oe%4PLIK^pVwlSu=%HJmjHDs zeeC!-#Dv2%Vpy)*XIn>yptEEwOTIdw+(}IF+ne6Rr70~IV91G$+>F^`(3ByFjk*5m z3tdU%-&d`3s+zfkP(NN4>Gv$RI+KV&rLEL`4jC*vsjVwf{#~IKyAeWMv!s^5{&)u5 z-wJ)DP0tc8`T69lcq0JYvwL9u`<8)Ao;1xoaSxER-sk#^CQm?<$yp7aMmNY!U=y=S z>Id>HRb$(|F^&toUcpqpq77dYpb5EcYQq(%{3^T>TkXnTB|mWd{F?y2`xIp}`wSNx zUWZi}emXKGD?$lx)c#c54Fl;O-@5ouv(mIjN~)esb9k#_yvv*U=izP ze<)KJX;@437Y3i>zqC^&k(7avpbVOL(}P^nA@2hs7wZ-mMN;`j@nT?XBJvmP9VC)f zMXQCP)?1O+tjzJS%Ca9a`D^8evhPLemBR)0b1rJ=5mN@`mj9mFNtlYxAi?DQ0EPjp6M7q?nevy?eo(0OYHp47;;B$>|1Ih%?dHi-%0C& 
z32KYHHboG?c{cmi3^qJ;4UjQ1+Cq%sKFv}3uAPti);dud>gsEh1slMi7;qGh3B5$* ztaRqG31J0~U_X2Mf~rU}rOO9L2g_>R3vH8#W!(zfEF-2s!Hoy;pc{JIDXK$zTant- zkR(c3V=JPpIhdVbj-%Qj5Ofrv7*%%Ydtl6@#bMd=QKDgZ#NP>PebgcbYT8%l6O2U; zuVQiNjO8PWg|oMhk9|Es(Q&%x92E!^3KC-vewa7u`5R8+NeT)2(`Y17$E>{jq_G<> z^v4M0s+r);NN+i*K@*%kD%k10eir>+x(!Y!{BV*A^jQ2Yd)C;|qmzo?r9p`J(Sz~( z8sSEHFMl5vb`-TOqRP-PnhlucswvlZX}DDM`?*-};!2_CQ)v1PC^J+0GQp(xooyBv-m>&1CKePO3EYT=vtxY-m#p?%6Z$e=px?Xw!U^t2+rr+8JiHuHcO` z?euNF$8W6WFZLxksW#lJ8Algac4RaBL zETS_$W4<)KK0SZ`dF&tlp1>R`Hj^el=bD?~2L-EW_!g!>o6VZ*qPPZOkLUT99>9t? ztjz!CX(Pg1D#a_IpH{+PFXMbFrKDy>nw(dvKlXEpx~29R&wFDeW= z@PG?KIqoJ56Ps~Aa;fSFR|HizWCq-UVuwXZ$C}|k9$LJJTY<1zDMYmL5 zWf(zxl<<9;n7!dZ>A66%0sQ^U3&*1eb?Jcj+NxUU%Z~Tg}4(Zc>ehz z4wrg@TeOUNO(zSui(*tRTQA4@f%O^SbA%WU;Q#zcq%m`o5C@rUemae)!gg|TsqooD z7qepwC4tGYtK8+uZh6r5fCx>@fMXXB6CuNgqpD9a1=z#P<_HHe>iDvj;+>SQGZRTT zwPxUe`0JMgjG=?rP5U8WrhSm91lpdCDr9u4@y=sWVXcz|7xWLZ1vL5Il5>VnQsxTi zPM#wc6n@L^3lIEuw-&>JG;xM(%RVRnl2&HKiN0re^;$HmyNCKXX;1sX>P{U}caL}u zrsh`nj=&7tENlV8XrWhD69(M;hlUld(%ia~DZC4Pe+Js(%;IKsnS$Z%PVGXNMZKvlo z*7;&1=xw+1ck$Av|LAk)2DmoWkb=M+@iuy#1-mS3T2FKq>CF}Wn)f*7-hFerQg(%h zUG=qWrgK!C-R$^!une0>6_Z*0H_fYTpeNUCBN)0}80NpoZ!G+P1<*W0!5 zAC#z0PU3ECQA>NXWY$=84Nl>-yfM+yf2w8g-g|B3LdJ82+>71$bGw-B@8`Alk}Ox? zkv|UX5fEqW|1LJcnvd#Q4vM6aaqjhZ65W$3E69yC4m$d7N1Y!`o8le)TMNBjzDx*j z(+1oPYtGX!inhk7(DUnP&ghLih;|%h3;Fleo53ID5ETxDJXy3fO$pk3zB4Uf3x&Ha zqgTJ>9u2#NbL{;}Yx*vQ=Xf5ji1)&sAPGVwM$EJsxlH_lA?W_tEuWNcdJ00X(Np+x z|8g`q>V7KFd0k@bN}8e}^l_MCi=+Bl(tZ7GTueQF^MyqAHzeu-#GY-hTqRPj8syd* zOmWvn^2?*`f{;kaCVl#!hVjKTc^7wjm`4oSb)f$o_yx@J^zcyxu5rO2y4L~C7C#H{JwDsk@} zpoO0sR`-3jH$f%Om(dIndy$iUSZvxDj`20^n}%+}9tP;J!)N^7ZsgQEoJkL$H2?^~ z4CE0Yq4l~-R+cK8U4KH5TM|lG+-mU!=tnM0*&?E|=Ywo@DIiR)&2CI2vPdo!)}$Me zg=Z(p^s2L+W=d?$JH4$pUZ)6czUo{c|6U!p(5Uo!1baRD@b6ZBJD7R$bHr>ST5mNO zDXwtX9D47SZQ?Y7YMhX#GW?y5C`!rrOZW{6%B`^{EHKbT!*mZ!JCmEP-KfFs-Tee9 z#rm%G?Tg^Yb7UUq{fH5M&^un+r-wd>4bkh}K&(taUe_JxEKRQUqi6`XR>Z8r>nRl6= zn%h()5b-=OLen-e9q9=OCZi06d-fn!G2jRj`i)j#PQX z5QUhT4Chw}@4zTJ?JrkaN`1JNej@Uuu{kwZ5@CAcLL@WO{TrJI(zP#=?2@F%|~g~oNycevsjX`>o%fU zYjH@xoB8pmgQ`(d+{w}jmf@M6-+SDBgS89gwGqm+bxa4Ob7$ z8WX0`HYL%dlelWn z&9#fS#1*5Zz(xP}aT*U?#ZUgZDp*v@s6Q|2uU^y4!ST_2lu<#TsB_ORDmK|24k$X(&Fbh+YS;JwaJU+vE}>I>8S zCw9TZ`_nUNdR{-^xbU&~pZC4%1=&8J1QNrw_{ZB(1)LQkNnn{?aLhJlQz<&nWmNos;v_oIfLy6sI}8@h)0m)l4va$Lc#1iG8QHo0 z3mJu0mBZ?peQ$$Ai6jU1`Kp7=$3OP9ej3v@xf$Mc4}@DRjU)jF(qJ_w_hQQSw6j8o z6w>}4Iu)%>!HF;SUGEyIOGRSkQf2?pjWodHhv=5rx5j_&c4MxN#>f?;tqBN~NEk=a zVjx67Pxe|ACh6=jT{;MJhc1RRiif8|v9z^B&G=A&Q-(!%eW(8wLOO0h6|^XNW0uma z&%(%R_itM7Sm6HQXj7-<(*E=Hr>w)K(^lhX>w}KvG1f%dSd#(0wV?NNt`A?&_7-dV z`{odx1Ir6NpGNkb`QdZjatV(PG4K2Nk$BFsLWbqS@qm{d8Mdlwl{zRYQz~0oG6w zs-5+x)ctT=lLAv1dWPpU@vy$+1||`uiH#U*Npov7X);8eekWvH8`q<=XGHuC$J0d% zbi?{}Y6;DiMDTyMoX+NFDosW-x;>}I|6=9*OG%E}W+2)$-kw>%gC@c3DgDboTyIuk zw^s;Mj6a_Xh%sSzosaPe_m1_7_LAq6%m_0$I~X`jQ(#Ij=i(KxnX&nNjqK0Q{YSSJ zUvt2*@*$>w_ghn9fB=vABt>QC8QOw2_^I9kHW;Ip@nw(vnwI_A!=(k|P zg-oQ*X1vQ$)hFdzXwD6c%xpKv9~mN!BAQ+}%c{)=F3{O)R*oRp1?Un zISfyi@6aQd^cA!!#E@gn{c&H&&|+JuOy{wQ@qAz3|08S8z~g=tO(EO8I%Ng50z!H{@A z;KSd>lnDB!u?xaL#W5@KT`fVbZ@V9LjV>Mkv(r$Ta^I%L((M^+%_ZfW!ibjoO6EwD zv@?s8lP7(Waf!tiS;Z^~o+}V`FW-nMsv5dep*E$_bs9sHq}bCU>=KUDlBcs~^Xk}2~Kds)Xdo#J;71&lF` z73tgkIH{SGC}J?s(a`J9%3^$q90!tQlX9Gttk++8rB1zSr!*t6jJiFcza=%w7=wY@ zG7(C1n6q^4GWTS;eiR5wszmvDt(Svm^Ce>H;|VfF7JvaARUC5)X(g>>EiKDdDEtpf zWs6Grn`9+jj8qo)l?-S}B&LSw&_%X3B1B2_WZa5H&3%MFawNLt^F`FN%tU-L7XmvQ1W9X2~YGC1F2u{~%sEp!_XQLDJr(S5t-a4qf~EzUFZ>UI=>gctS* zz(^E1ID=^PZ5KiJFFK|EVZlFIMXtWYENesmN(MXo)Sj$0rE zRRQJqCQlsNb#4F_vNDrBa%b6}(D{`qtHb{ahtC%HlQ@}T+|^&t)0=AyMoBKw`^Pm* 
zNhL{5j}zKi-SV`jtIc(7-U_d$jR&p!pU(P@$M|puXDERua*jZq>vYy~ga!QfxofN# z*H)9~=i(h@uLfye%=QMi(Ze-OyyOUjKEM5YzIb#^`!)n09VuFZWJ+8T8&wI_Matj7 zKywFR)m>){bw^ZN1G78n592*pb^a zP&v#b7yVAaY=&@AC6!)~buVsx;w-1b*!|Bi0UH_)$?ttQB{3n{Sp$|D=SKY*| zFQDP3G@-IE#Z!~g$ECJ*pSSDFV^0Ay*EAig zjha}dXI-n!o-2Q$JGE>uBtO|;;ZtFC%Q zdtZB5d;jIDTA!dJ>6NtQ$tOWR+^jvn@sBTGee=}0=g(cXc)@wg8mqrK{%61AHr-1L zU)iwX7447r-lM&L=&Gv@!Hwo=H)_v4^%NaNucD)$ep(ZrdAsSg`0BPL7Y=IKz^%7Vyx<`0KePpZYO`$<+~ei&(FEFc}EzJOR6 z4YSo+y&AnyivH^NEy>F_x3;{n z$Zy_$a?RStk9X!aw zijm_?`{OrqW=o=<*;1{~mYTRyts{4Z^$X!%)29KrpgMQ@#AL*v3xgpb7UfkA(r1(s z1&$b;ha8a)V_z$$ETj^dCGJUi3c`k&dD?j-hjd5=ZzyW=8?`QHG>aai>%RNLsS{Q0 z^Q{ZHxtli6I-@Dnxr@`buAg{$iFV|K^aJHj@sR$p)YaZyNqeAQWwCZfb7OMM-%b4P3fs`pjA1!nV`??(XIacSh_&xxY8IV3NwxkgF;{&Co_&?MqpO-$B_9@ zs1>upFD?L~hDam6Vj1h%iRu9s#N@nuIc0VGaAFsDQsz8dN>EB=RYa&m&d6%kM~%S{ zNk-ysV?!9BiVUPaQ~0X;w%m2-YuD_%=Z4F|Uz@vaTk+y6&kGN!Ij>weK5^NyZEc0bcFVje|GKpL$t8?&92{HFnP`eoUe1zqWHMkU4Lfn zo6&uV5E2Q+8l;AbP^wS~zQeslY^07D`D-@eOhEIzy+QLB*2{&Q{p=Mh z&YrV!B^$H1x}N6lBC|lE4r2GF24reMS zP)MZEpmeHLN>khZ-(Q!}l?&t;M)Mr_q zM29&&1-le0+Iv({b4PpXXh%wYw{z~4$+PB6natm`?>BEf_2h4UL(5*i`pOGdY(zcO z4EEY{8GEf6(Icr$Ybn0_X)`U=jwU!q*I&614QL%r9MQ--oq~2yd!n0YPaXL{ICelg z^qF>$pp7&|drREMXk&qS9RNwlit=%l%at&)l3~`SyndYEUK!D!y?E%iqgu<)Atn~E zz{FNO@k|`j4N8t-CpbVIvOEV)M}AcDKoHveN!`vx3F-Ihpx7okMK~Q+Odl1QCM+C- zI`fAcL(K`<7^aOMeX*7P0r0FSZ_G59Hpwbno^%uleEs&(!75gkRK7 z6sfK+qsNeP;%{_1c1>HgW6fDNejPMt?ZhTYGBl?^!UoTjlS)z#9%lzhRUlTdy@GRA zqtm3XHYqiUblfnu@?DAfrmjU~)N01yLdAgF+6Yuo`467*xlUvcu_!jyL6x zl$onlf*I^FL`X~dS+$$r4V0=uc(gJ!o~;~18;;0(9UfDDkiowZH9yD}=9NJ(OC5!V zThiRZH#gMLXj1}(vcAX!jy#WuNnQic9)SQqsxGPbi?t(7kGp3rdbMrr+9fO2+E-k2 zNb5ejO1W^&g0@w3#p_d?HlNpSuWK@ugudQ-+t7lFNd3@=b|UgN-#&ZMqSD)sv&dz~Al3v%gcJ8I0 z^?s(czh|2Jd1habKSC@ z1(NNnS1;E)wZB6@rKO+#_YcKK_?$kAZF(tR?|wf_Otg>!gJti@*7!JMjgRYD<2w~n zs>O5Bc{R%?P4)ll7yay`=8yj0u;zC-m)vU~yD3r~sU30Ejz)N|9^UyN(p@m-k;4Mho0=qCl2Cjyu4NT?+67LO z*$h9Gb9l}ON5Y_bvJuH%!#jh8C%*|jYBeI1*`Bqt7-i>Uz|KE0!j)Nm+CbdIOM=;6 z*%LTzWHSp>N`@*ZVfKZwcqlW7&qCeAFybo++T!OiaT7}~3uRUEA`FQ~wJ{iyVp;a= z;Vr=b0k*o5pjq@vQwIKw%VNX}^qFbj*}UccY4pjSsrkq`Q>HIoJbkKgbo7dq+izOE zeDoK=>?D2unzPQjW;)@zKOsbVMRhPZdEH-5_CRg1cKWy? 
z1NW@!X@+}NNml3Jp0SM~dce=3S*$3V8>yQv)CoNZ0qJcBxM${_d4b|!7%}Lv&N^{|2p7JJJV0EgI)NO*F^cU;VWZJv)o7 zQwy>h*Ux8u8M2xC%aF~`0kP=I=%*l@p%TeKIqTet3Z!QMP<`}i&&2}K2 z(q};?w>$SQymP^Vp2eA*PPO-1rU4nT0rAs-2XM`@ilhXVdCKX~qKa^s;j!UbkV#-k z^PG;P%A=XW(A0plrqNbpsgpm@fz>#~WaD1%xn~Y4n>mZhW-&4oQ-WwpX1j+RvyHZD z?`KVq93JWZ0lYlMYz`Y(mMp6yWnuRPci*g6Fe4D54J@2?j*1d$4`{40;26kh`;?aG z&;YbGGr~s0`R;GS{r555hRC;za-M)3CnXU2R$nt?(O&(D?V`;Lwkw1Ah~es4yMtZu zxY*_lsu{sPcqy4=IQFW`$2yzUU|1@e9m7=%P;1>qTj#;Z&6@`!r;pdk2N)^ZGsA}a zBZ0nJ%kV%7$V!s3vKrnDHkKcxIRy#nU|0jx)S=`-lM7>tjg`Hk2KIv+Y-4V4K6pPi zCf;or=ym8AgE;O*Sui@2%*^V>44`8bnEV+?M?dgm>7!X($M0(iI$_h3N8f&REfus* z&U%t|W)L1Zt9Ek#lg~YS;-o(35r++II14R6Y^)598lPjsZNhMyVZ&XW1F*IN&A0Q> zd^^3(bZY8&f7V;SZfsxstzWZ=vwAK*PdDC5P5+$j)=x9C@4cMn{>pxG0q3%|n{qQV z-I#sD@aLp=hzy(^!pIRrpex8A(3K*ZT6uqHYGF*aA$o#Qv7B~z1;3A3Wo)r<04zG3 zarAU}bRQu`MmHH4ln!Lp)~-+nnJ%~e!JPK@M$jwo4?!<%An2tXLP==|%>xUj44pZa z9)_JygQwTQMAg8kdfcF_&!qR^2=t+VM9A4jAA*@agspnsJtnESSdvm|6o2ih%vm0BBu@5DYWva5|RE z7%#ajNwP|?FEiMgGgp!!0R5g}*Yrb)|6(e#E0URf>vffh`w;nzeTaNSUk>7X^7V1e z*FpmpYO+w6YrAQT*6-F1(Zp`;m)h&QX+0l!M0=Gc9?^cK{c<;rYyY{M*6RD)&F)?= z?h3iT6-k7M<^;YHfh7) zGwuebdi-ohfOb16jN)g4N0lr+G-U~%9-XdH+_(({SAa5Pb)B-&*+|TLH!Zqz)AhF^ zJ@377$(@@wZKtiv>DJci5A1z*+&SOfvsx!){k?C@)&B5Pq-E0sZ_T3xFC19QJzV;B z*DoUPJpS`bbi0FL>PfG2HQbPaQ1wvY^g-79CJOw2!|VMUUhmA|^~66Le(D+U96Ant z(HzrEH~*XK{>^n~&UL~ihP{^IyCcAN4FmCA26CPg7+(m9*YLB^uP;(mvc|-u(Hd ztG9?G_1Bk=e#AXT_i@roOV4MxbKUQVf_MiRM!ro_N!+j4o(wv{1CTnD#+YhFgpJ`b zCCLJBLmeHd z_N2;68ud`cIbG0YS`YvHu1^9MXfBz#YL^s1*C`vx2(eSMyIjAGj1+-Dne>J&&1$S1p^!v z9UHzMfYhJy2#~7%u~o-MHPN{*zcBPvjMUeDspF$Y9&Md=w>VVW+WC6}B$f20NktA# zO8e5Jye~}ZkH$J&x^7$m{Nl#<^L#6vVgHTlLw|r z1~XP*(|Z7_7^tz_@7RRT&}1*rBur|^?FLPveQ7ed7fot2G^qfZRJhU8&d{U|Xi@<* zNt+9bS)f9xlG`O)s9YyY+MicaTE;|G)F8~@f0Qs8%h3Df3^ODXd@DIE)9}w2DG)u3 zlcCWTFhr|poRHJ9Z%+NQ9#^M&T>T81*yOqByrI{vDE!|LZg0MBW&Xc5+TL;car)I` zHw_<#(YWEms)u*JYK)d(jMZ9$pPR@}&Wxim_pCgT9-@cmO5&_;Hh5SKxpcr!#gbsG z9?E5P#pPBKs5VpsHZi^EUpZlasH_7$gQ=B{$GDLWYneIbhJW zp)XyVbLm4xu6 z>3;5kE!S5pnlk;IbEZ#?pO%epg)A%*B&l=S-{fo7|_0>^H;*XFjk{~0H zlgn9DS?tg^POi;>30Wi<13VaJ??^WkM7=n8JS>NbP zZGrc}YP{JrLZM z*NlbVE*KV7jxh2zv~am4Y{ovjxsb$_J)5gG<8F+#)QD7%9C7`7-&wd2NZj_vYu;Xc z{f`d3?(2Hca{aoyzf*Gek=x!H`{2(|-;Ha!V0J!#=VmTWCvsCJPg~Y8aov(;UY;>w z>HBqKmaq7&ngudmx#T6(eQjq=w~zZZC%71-G2(W2JSrZb zp@BG!TRU0x$)ps@dw7Q@FOZKNlsp4KPZuzdoRZT(Pcy6TH1N}7F~?!~KoHcz&$X>x zodyhbuf7&TU#~a=bnVap=-TB5x;FMQSi+VDBrIiimofw8A4S+^cG_uBwsi+=K&);0 zSSBAM)7QPMuNCClQ~O#DeGTe;4H|uoWcwOqeJwHiT4{`{>PP745_vBh*@7Z{XtC4z z{d79WI$eI6PG^m<(~R$ImTn5xmu;Xi=BFDApV@I1jJ~fm`d&xw`O@B_U0RgD{AX0- zfjHZ8w}%>ehM$LIL2^{XVYOS4@N$WR?Jka#!cBM!^JFYk2S=ve;o;c?uV(luEM16E zMrM@p3{07o;)Kf&B-%zd`X^BL|&6I>CJR_W*3r7!*T9(V?g5v zz(2injqoW0<{CNv>0F~oPOi~LHl+8^;j=O&{NG^1^~GyCIlht25hJbLM}@iK4iW-B zf0v|6kF1Q8&{~qefNTb!9nSG-lYz>icNHwVyrmw)Q7F^0LP+ zEG}H!eD+23;5py9XnMswZtaec?A{$cuW(#_ZB7<)+!$8mVXV7yCF}7EnMe+5x*&IYchIo-4qHjPSkkf+8ChKTBh5i9> zbdAtbe^|Qf(f;^WKX|&ri(q;@oCVd*ZEql_y}i+Xi~B?S4H{^_X~CASD_xVU6rLIG zFS955!~Pwm8xs8?0MmD}IZ=9>_d=Wfq!e?aSepwnZ7#t!>rl_&3}9YbkHdb{+}z39wJSRqf-x3r4SuR3U+;&bFd@g6NQZR|>y8^+ZZQsX zp}pmG<+7S$GEkH-;1Y_dl`efFE-#kO=9lO@px6*vtI`8LX!3N-mlGy22EQlqsTUhu z!PN7GM&}DNOLx7f@GU*r=~1KmTus*A;z@P)Yy1&GfFqO!4FpV=v`O!qyFlqY=XJKnNE?TpmlcJ}|}hY^8^R z39_rOG$&Omgad*5(kR84DpgQ?4 z8{6L3J_>FB+1S@_TRDgB58kJp@4s*B;`zGHW_Z0Ihdt2iIRhZ}1_nUv%{S~-)r~bn zb~)w4(A<}z_oDtd6~Q-*ilZ-#uNcbIT;#TM{y*4G^v0l-z|TpBEyu!Kr*A77-+j=A z&pSltfVSwaYmxV=~-3TCs=~Gc_c7+t;$TSAgfzU+WRjqacwk(Wj|Dp7EElH$D_Vm_pdATZ+xC zgz%GM7Z#qNgksKPzE2;SVp-os;Itjlz5wj#cERc~YxaP<=UmAoeQiojTJGn@Q99<$%+LzX_M8-I~c|`tIff^zo4hGUI2#ST`8WjF^CJBogpy6fyGj- 
zm*tqOXkqAOIjkNhTZ36fs4s6gl|i@7rkBwL!kDi6=?TruE&F1xrfNHA>(3AF_|D@m zbL(j>U6ifLJe$_N@Dz9A86&p8i9Z4wdk~3XY#+gNmEwfzF^nk1oKT~pVNliDJJMCo zfZ(V$R#P(RPMvdOwN+xr)YQ;cc3=tUHJ?uijrRy*p z_Y}3`3XC8lgi z*)nHuLnOe!JakCTYPxXy@>MfeTQ)9Oy`6vHPmfRX-%Y&^Q@igY@Q*-a?eoo3+;&Q-TSDC`x`j z1Ds&bY){Hisfv2qYfwq?$SIdUi0v)%DEa-2N@QRn(t9pgi6;wJZ$EX!HcmJ{#!l|O z(4a4jpAco^_dnd;z(3aBL#Jyl@LMyP#ZTjxgKnuq6yLPSnrghUbVqtPZDru2#v4m? zMEG9Z*NN@0%Iu%YXl3)fmRGB54zp&S6b%GX?dll_E1^r+_OtU zsK=IpEijT!Ay)7a%g9^{`=q9tRMA5nl zz*#Z-43{_wF2);hSICJy5M+^T=waDKGiR(>bKhF7;i6e^Fk|LguJNM#;L1gJ&04;E z*31CU2+PJY=BGi z59i(sadq|E_1uf%y)y7;ljv;iu|}@B>)<<`LgztkUpLX7#CCx%I7O@`Rb(;I(0|F7l##yn!)Gw8DMVG;S9W%C}s9w!N_d!=hMX7({aG<|igdAlRE5EXkCW?N_buRMc*>%YHpiwh`^; zFawB4U_iwi=JDSYu0#9NYuqRoU9`Mt6P+!+WX^y1-JA0yVMZ#|wU?U$znxvjId_); zWBq%stm`G6|IiS8f`0;LQ8bgSOtTvy8YscN!Gi0-T*E)FiR<*?~xN=8(jtbK)(C{ z+wvO87tm`G2Z8OG7b!%rG&WV4nU1?B!u(JQ<&@;Y4HMNt5ujhNxSY|?n}^1zjtUG` zIGCcra!fC?Cv*13cJht-#@GqAF*fZ%-(m}rxKH*IdoEhX+wxsgnnxr$r9WveavRTo zZ29?Z^Up7)BaAJxqjJ;b(`adC%WN)q)`A6R&2DS^VvD}5Fi;EW^$^;hht z$ZP~2jJ4Z?k`q_}X;XS8dw^rTCI*2^V6v6n`xPl-6_0lMrNx_oj**Apua=N2r`=dhNNOB zO9qcjBvPZ0LwvQdQI2Y5P=fWpGOi4Zr5kiUkudiPDS^wc<;XDwNR=@$CB~>yr&A?X zhop!hDeBxPZ$J>S1g%IsHMlwpQ9anLjs)_IMiv@b4=mKIv(Q+~W2gfj(W!;tcHqC& zdr}4ExM7;wJQBF2VHncUSIbzb1-~|mu~Ne@Ox-FfO(qATV{?D}71DVu(TB-;(fONr zzJ@W~#GxbNou&@$1#P|dqBsV*4tJU^qQh|vT6)G{!o=o&bE%Z~8 zs>6|~8j?sTH8CZs4@d-oy%i~u8HP}H7#d4Y2BTZXKNAR`8jT{zK$c0j7xI;`jfV9^{;SMC#ql#0_3H zztKn$v>4HlK;VOgkW#fG&Xf%d@m*VcD&*x7ZEcJgIe_UL65y>k!Wqm{MGj^}+6Mq# z&}E(g0RR910{}w5Dw1no4?Oh%9|!;d0002d``dH?0002j(Tn~6bN##r#|Ga30RRX9 z0ssI20001Z+GAj3VBnDb+sDAb{{4UP{|xry3_uYS@CpE&RtG|O+HI3fXj4HHg->Sg zdkHQ=3B`h>1Wcoq5JCwhgc5=@XqrW7S}8?>)*@NBP-5|?LKh+rkuJKkD!9s`BBi)9 zh=>ajL?|LikRTXYD4=Swj%{2}jzPvaV7cG6Tn2;Cu~^n#EoH1|bVvABJOZp<+Gb{ufNi z>#lz1-W{I*0SRj!%T@@7{52%4VGPJu^h*snUBZf*b>Hh51a*?@bM6nFV9#OR^B$^t z8#!4eL&&OKD9N~gc$caQn~eC~FGF zdv7i2`MT+4QbRw9vaZ2-O6r(YNt9XB1KxWzmC|#Y^@K(_iEJ0oW7B(uulz4{{^)v?cZKUlrESO%^W?dTv^S@GIUy~WtIc`DmyH);m z(t!F4pfkDA0001Z+GAi~(1F4dhFOetOp}-`m@hDYU{PWTVX0%;$MTMqht-C)jP)4n zGqx-29_&5rPdI!y3OGJ+MsUvHe8$DZrNZUFwTHWcdliomPZZA#o-4duyk)%Wc+c=L z@R{+&@SWgi;t%7W!~ajs0borBr9Beo>QAcTlg=kkOc>u}|ZLCY$B~%?Da~S`FG_+I`yVwEyX>(z&6_ zrK_e}q&q{8M{kpUh5i=Z23w8zZJ zY>_#Od7g!x#UYD#mT6WCtnb*$*k#x)v3qCV;UMHN$5GFT#c6|cmWzx_gUdJ99M@NF zO>S%4Zn$%~CwRzt-0?K=?DPEKwawegyUzQEkB`q1Unbuw-vfRven0#p{C@?c1#|`K z;ea-QUV#yTMS(qmdjj7C$pn=Ioe5S6jtkxcgpWcvfUqLuPpC}jmN1>LU*W3&RE3rq z000000RR91>IFFf1poj5000620RRF3761SN00D&n0001Z+Ra?ca$`psZcheCh9VUx zDn(L2onkTAlt!6blF4kqwls;!_)?ZVQ&}EMN1nsTk~l{i$9WW1tf*p>6~!}fc>*>p zc>)$J>Hhxz@7rm~vNKgsF3acA-T(dCEi0woU3*)-q1JA`rPSJwl;Z0(mFYXWzM$#oC|v`djM#wSV&UxApV?@b!1pmp6XK*KexMjoevmtWre17E+b9(I1p*YBvTbI8}7xixE_k8_V zb+_|pzW$qfyYnx;{=0go^KZKT#~bSX?oG9=CTglKRHaIFs$Qs>%G4kA?|pSw|2$N8 zaCJ|8s9O5zfgVw+k@~r+_1zOacdTdBs;%#K^mQMjZtLG)>vy#tJy*HD>*&u)4OF4W zWNK5dFw%b=%o=xv{#)q3uk_~tYaZzNCwgp&wan8@-(Tp@PqAXI=ND>d@yOJ%>gn|} z^#EAkQIFKe>J$C<6HSNt$<#T1+InVcg|?<)qRupJ1|Pw%r^k);+o$^ZfqpYlTl#mT z@$u>Osk|NV%5kd2d}^!5;NWs7nT7M%lga9{o`IjW9ysD1WJ+O*AhT>q*P%N&8YgS*Ag^SA?;6jg;Q6>p2@q-50I99OmoP{v$xE z1D^Nwnzf!|bTqSiYqYx5e_!aU0rWZ7<3@O6tY?hhQ_L_>=M0bD_JLW=_BDeBJ<%(e z=O_AZ0?8T;82}fvY7NUYnl;eBIjAYv6Kv{tr;KN&`F9um5ndXKXPWnq)d%`#HEU$P zXYWOP{~+)KPb@dOjO#J;G}D^x(`j%#1rLnw7Cg(qt&+6gifDQmP;?9L`+DS9k3ZKu zHJm*4);3%*8nUY?*;j{JvYTp8e`fk~U;o|F`q+hyAE?jt-BV3z518y@To06P>Y1DR zZ`XUCWk1%m(fmDquUF500$67LbF&uS@9B9)qxQiIH@=IpM**}9>pam{&jGV*XBw=( zfPXQ5)~wpqe|zBg9_Tp2dqZP}_Z|KDO#gQDj7<$W#<#1wz}G;2%5(Ps?=!&N#(f7X z?|APvfoHT5FxD@9q4x9}WW(AOi@?RCbc2XDIrE+1O|Lh=iFY2a_; 
z=`Lvh3{Q+ycaaUy*nRkJ@_#;lDe1QsdZy1|anHf!4rJ2{{2AGRxiilVjhifcJ6o`=J=VS+^QGhY&|Qpi+9)gZtYvKht1W$f%%eyrWPV#if0E=)(<8a{ zL%Z!=iSo20cHu>iU_XZ}*&~iV+u%7t5nmH>JzTG2S1}Q8o9%oVOrw{(n7IR~cNubr zt?$TM1^Efe7sPjmxGb~g9roy?JA^5%5y#*6td}#KIA-py>!%x<8soDT`#Ixd4_Ifq zprPmK>(%{Jm!(O(KU~SP_87~nLo?R(43u>f)H&TV65e4>?}I}wmLG7|vRFq_IA6tj z<*kXu&uqr#BE%QK>5x6fHb;@3%?4h#ugI*9#kmaUcoxEz%>l_{Qq~L(eCU38t3B>Xc54&Y^t^?q+&2Nk*x0Xh&9qX<3@Pz0wlx@MzS@Ga#SxFDh#UKGt_?tYD7~rO?9<0dp++;3;av9CfZ4Q((*l zBL?8i8Pa&QKrcz&(U0wAGrXLO(CD zuGD*Hm`j<=3|uOhlT}bx=UlhTx;4)zK#8faf2;A6=XFC;GgOl!e(%GkC$y&F-k6_| zluRW$wWH1$JA*?VROJKkux1_@_(bW9dEh8nT~B0np?!c==eDjjt4nXmP&xA82@eZU z-qsd2Vhly%LxxHnhZ49LuFL>;#&YvH<-+*ncQp1nD8JyYX9caw zJzO@SnSM9HblNUp6`9qtobjPKx!0?6u%;fYX%bd$VY#kIjxN}oM-|YqjNh5hQ&3r| zuRKY$JPa%7vGloUZQ%=9NoD0e13oiPSFZ}$;q7~^<=lF~{OYqb2}{2#=DcIGf7C`K zruB#5s`Dbb9qrTh|0Ep);T7;3gVL}&Nz~8Tc10%6cA9bNoT)T^sKCk<;GMwR_>%BB zGri-kiIJWUKcf*-t&~q#kz% zTz(0xr;WV|kyY4t6w4iZT1K9`3UrqsOc7%pmGo4w9b+u{QR0+~l%uHIIRw92jd&f# z{emL^%_aC{LINR8g{Osn_D~^4&0%4!1+!_^spV}QSXvrZZfU=x@4sYf3?E-Hjq-NJ z>((4;9Dn?-e^a*?QQ&(LC%u~SCg~J_&K=@2<3vt9!U!Hf(3s zc>Cv|Wt`;ogm>{OteDz)k{z`Zys4n8{_eElR0W$pg$69z*Yfu;Th@}b-tb7tqDb#6 zTS1UFs(`^9JgU4rU{RJaN@Tt+$5^g;&Zm;bg&E81mE*ep+yG2JGptdo5C$c#OucL* zdLR0fvxH;q?dsSuvs9Ber}(@ot>oVp`MKAtzp<@3s-3?KJxv{Cq~bZp@aipOd=unA zE;k$C$(Z9Y)$^1sRf(KJYSUB>d6t+TYle9JBFfmBkX(czzkjONB&^55nUHkOuq8DhN>ZHn_Qsc~s1xM*k*b!$lXPFVMPc4xn`RVjI$qZM-LP#=QM*ox*A%%?tQ4FqR{{i~zUX7+LYb~aVEaw%U!kJ*r3{oqjewFyN)&gBS zvnO0m*a}XfE^$}`QQvlEdu+?B>9zrBafrTh3#(SB|A|)TC&B%d0J7dUI<#b;F}J z>v1RF@I+kCcHp04??g`F`I(>ZiOD;b+^ZxmrF=)+pRGMBAbBo7rBz>gPOHhom_yB- zS;w&At4_Sh!yq z`xWur`oy^Iy-JT+YLBKQ%ZOIrN0K)pUpBjzX6Dd7+Mij6)M-wOb-co&Y`uzAjeJA)^Y{l@usEH62! z7+4@zYovpu!DsAEYYS(}wN`C)!TGDM_I6KUr?F>V)W>r>HEFx38{W)6ZAm8?N1Pox zU8g+f71m6x6JuNPnc&i9o*BALl4~1C4|pH<9I>V1`e$lS&Moe=`@TSOV(G`tAlEr} z9&&~pcL}L}8YQy(3HJs|n+?bPo8|pUzp6>~E>ovO>tLr%s15VttDS%quQc{TwJU4Rzn4>RVQ>rloLbXmA~&0)};zgmYh><+Gqfvd{~URSvf`bwM2 zrtBcqcU;_Q6dL1L?bp-Bf9KXt!^jx+cL&H&No!o+$CsoJ+qY2hu=xtzdbjU%!Wube zySsH|yF2yvMN*K)4Lx$5qMRi(^Y**LGx?%*t{*{tJmN_6?U=hF@f-PmLT%46_3R{uRoP*T zkP*Hf(k^4+|wkyxLjMH%rt-0da4uX|n2-21-Q1o^`hK5oo$XKs5SaSpy*Uqefpu)nMW zdvYtdXzFXNqv$$5Cl5|4roKVw+ehq=U8ZL$h=-7#_-%7qw6HZ#Kj6PdDKS!tPfxW0`aLtb-^h86r?5pYssW~?)5exz)Ss5*hq zBCpk6vR^h$oXC?r+;tAaEc7aQc`Jyv1Ml3SS^2Tu{gbsWJPWLOchu$6BYq#omXdKi z_qyDdaW4K&r62uiPspck?wu=5WjrZODjVIi_>5=4Cu~FRIw)zmh36|?z-JFY5uLY` z6TOl%${84md&4MKCHyPayrOZI>56);$6ddE^>YWY_&n&pSKi6txrOd>(uiM-^$;Fm z#JPv7?@m}J<9f`GW3eTs;*-gzKI9B<@d?+}d|dO}zE1O(_ceSSPVYy?r>!ch`?c4p zb7z3*zVc34%KI_*A#2F1@@%NKkyoBI`&_;>YrDIi`i`?`sXG^u1?M7idq(vl*bk0t zj_~Z9?CMokPUFUz(r@DDO^NEFjO_;UC(7?wLK?r%6$-l z(q}wRZU4^4!_Plye`VlRk0+d&wU*{Tt%cc0wIGK{`)xU!xTu<6lutreUrAk^RBKWD7+Un~YXGdDeQhy6YL2aD&nf-ex& z?yHY@_xG{>e560=e|^w5^dCk46QO3kpJGR%4)$wh9|zxN_%Bw~^)CQ;+HKHBOp|dK z$MNq2rG>Kh2KOHC`?h5$juxnU?=1?nRRkvz-t?g_8af6JoVX~Wm=a3qM}Nu~ zz(58u7&jiush|=sLm0|1hBJbZjAArn7|S@uGl7XrVlq>h$~2}kgPF`?HglLul~_1R z3mdt?K0dOEU2Nw7$2lfZY~wu}*eh1Cv6KC5;Wi&6nuDC+7eD#U5l-=ddz@w-^Vv-` zkEr234|&F8p74}U)bf(&yx#|mUj{(c8QfZiI)UPlq5-(6iJmdNtX=H zbAiiT;SLwM#9cOXQZl*8Eyc1VuoN}l9Pfz?;n-00U$3|pu^plNPRSz+o?xyPX_ zZPm~;O{pslr9<{e=Tr9SC^?|Ogg6+~poD%d8 z+#S`$QT>${C-LX%L!a9{mkakL`zy+u=+JCjkn-+BtHDiiK-j-`Pew8T0RR910Axr26951J0X%pB0Atqx0{}t*00000000000000000000 z0000#Mn+Uk92!&`=wKX#Oa@>8gHi}Y34~=42nvGURD*;m0X7081BWOJgH`|pAO(Sp zEC+>L41rcZcV%K`23svmae(c52DgL#7QWsP1)P&>xm!9n5f!LtL=77UfMA!S+5i6u z333d{0N)F$w%u&cyti z3PH8Tzdf^a_lbh2QV|a5UVtS4I06-yO8Kw`LPw|o-FE?;0h(L4jgcZC6(WpDM6Fmd zSim+m=xuC-L^&q3L`j!XAXVC=@roFQVkhD=rK_@~CQ^%N1Oq3{@X`ZP5*d)A*yZ^D 
zZD#>+kl4Ggs=a3~6DB)(PgYu)u9l?F5D7^*lLQYSt?0(>SDycVO;2cdgX$IdN5+V% z)&j{0*|P9Da~m9>zW+b{RaL!Bv6WgaCZ|@pW~CC(xn#xuiamyKoMkip-l+U$T?tP0)N~5I9jg zXLDc(addG;a6=^7E_`VD!{N3yAOKc%_`Y@|Gymw#oEcL5X=!gy41y9l9^P(z3+OGP z?^ap+Srp*$LCZBe41xfg|20dS9JLORg#>baeZIaGGXMX9oteO9XR{YP$R1I4OE4d1 zlFUw)`-Y`fB_@{15zeHN=s4pE=g&%BqjSq%DYtZNf&h?#-b8P?>cXc zmr}Pyf2plf?C#3${IBKrij{FlR8WMa0GV26kHW26jTF147OSR4X_Z&u-rN0Vd*;2r z3MnCF+jQ$=BLieajXp(dBlD1d(;ndge%IRCt^fo2*y13@uqDM&m}n5wh8&HL{U``+ ztQx1*hF`taLIRM1h=}_+5fKrQCP@8}zt6mXN}|;ce|{?pYl0 z|NjU;S6wnXXM^(*#{dHTc&|%B!-E4R_#+4<>R$WsL}~btldd}I7_{Cz?adHqL@uwkcR6GnQBAG!GbQ6sgK zr15pk#zPsYMWm<0RpAJ{5HR1xs3R9L1~~4H`1kq`*PwA~9-P={n%(*nl)9~=9pvQP`g`%4& zuOAs`ZkANoUC}t76SzYAi4}H{HY(T@5sENdc}eG?3E8m&gJ)uCjfLj-ovz9fOf<{q zbyjmQp#sz&fRP1>kwYapVn!|5}h`(pfZ=WA>Wvtf?8=9zCnHbljLQw0o_Q($^SI5*SveW4Mo3*R0$9YQ=YvO`7JwX(HlQf{uGB-HtM|L2Pm@sJj18L}s4pQlXK=#{CX zR1)6hnr<&ihbw6Nv?pIbi8L{;(ZEeyz5O|#ic;L!QCRSZfP#sMNR)-PXxJJJ$|-Ao zX`H*Mg<@Cd)k4$=A2}*FKA>jO-?L(xmgd#afmL28pFm=MzJg^Sf22E^JQZrHswEc$ z>%y1mrk8|x(!-Ih=lEf1=1!1&2}P8+&_a8ls0J%CcR(qRcM%Qj;rx`neor5e_DrDx zq7p!Hs`F^edMxBDT^sc;XUcuTB*cXEw^RX&p;) znI0<)S@7s&60k|CY&PI7#Ijop+?@MB(zcQ>c*5yC*I%j@R_^*QUi?$QTpSB_z3?+pyKu~M zkWIY;J2qPUkW%UI+%D9oBhZNB`UPSolQBx{IR==ykcXpbaj+vYD>efQu~J+>7)3cH zM>&y9@Yo~bE-#$19WmeTaft2VY0!co>@|wolN~dHSeV<6$`LkD(Dujq_keWb_K3)# zgJrjB$rq~$6}5g*tnT^LK4o&bP&>n>m>Vpz5~xbPPV1^{H)%UIW*cB3(T`_tROw*@ zd%0o(7+gGu(BS5GJsF*i?uB3$Z$Cz_b%wP{{Faw}sWi_s#qFVjCdyaw;l7~;Kau7S z0qlK1RmUE6ke&bLr))<;W^ifw=k(>x@nCDs& zvjRaE=`_#K(2WtnjbLEdnoQlU@FNx6PsGvTRrNxSSt%RWG=$!aB=%6sOT&P=vcP7} zgFDm?Z1&0+X~bsvu3p&D#u@8W3ErnlJB}mo)wiaJ-=bbjX2{YtuYByk*woMGGd3$D za~2B;KP>n~B5KR?uRhg(Gp{&a=iIoqE%Xk@`65*8!xgj&Wv9Eq^$*@HzJ}ruJmhS&ky34|}(FGBOT=;sBj8k5(ukVoxpYed=mjz` zGBL9VA_*2EFzkJpa8^JBn@G{wexK!vL3=D% zhRfyi2yB6AIz=UW!G4q;Fpj|g}6mbmQD8 zCf6^MVzrIbYqK3X+wGWQ{-U0_+8|mvV_W+jKCgO5PbR-E2O@w@tNp*WNX=DL=7G1| z%mC;aPp{|}s}CM<(VF{v`czn5j`8DCzxWaafmSL))6>TK&*&m^Y=!LFdnrS62u1a^9tFF z*t&|w03T0tHZtT1Dq+0GfS|vYiNqRREFfX%`V|wQxHpAv&FC;gO=Wv#q2#ErQ|Gx-?iPufr^}{&LnwH!T>l>R}+dI2^`v-?d$0w&}=NAtiK6?D*>9glA zMweIDH@B*_cW`uac5!uc_we-cMj%mWj1LxvClE=#WIuljl|~N;WH4DlY))`UXjnLK za6w5~ZBs{YPv7DGBgal0KY424^x&D{(UGx<@tNt_IUp&DD|nRAoe=XpO=wgsyayNq z18!J91%}>=gD>Ys4hMi?x4qYU_PoXmw_knjt+!u)<6<}8#mC=$_}S-}^vye%dslu% zY1N_X%K8Ri?S1X7z}@#p0hk;-4B^EIw4Q=};}?JUCwvmg(~YzYFzsruuA<}jyK|NMjID9Bx_sP9n6rj6pd z(JjMEK~?9G*QM4~$@Orxw8V+!G2*?eW&Iu9eKHY;#(JCl5-tj`pTc_t;ytPXTpdb6z4?w}zMs^0{ zgiEDSPcYDL`qt5a!J-_@*{6;%<&C{W)Uo|Q;YFmBrLFI|1i|6mh=6988w6srbP17W zE-De;3r<8(!_!=lrJ07An-8KvFoV0Zp4u3*Q)E-D4v?HU@$B-p62LL15!|+{alxW| zQUc}9rN|P8w$OM9CR7W*OFV5#-m(tzMs``mi5PP@vXsHB0tr(JH-H!-c?1@%^pkW< zsT&5s+df@47Fgg=f=$YA7nYMxOG?Nik&Hu^BoONVj5hb_D*)(ophqD0M8IB4EB>tu{~n+YxYW`E;7R1=f;-Ix{j82D;8&Tz7J8Tq<4+V5PF&z#>A}V zj1d$nLlI?N)!(s_`&1PWC6nqTMC31cs_!I2kiqgQkllwsFfN?u1Gs@tcVLW5A2HA( zv=;Y4A);eEWSPGYR#8c)oKY>%4%rC=N18oaBVKbUnPho3@NgX zeD@4GdJBVu5y-HXUbZ-;NqZ$@EJ9mgc@ZO~PZcxa_(D+51iP2erq6!A6R8AaGYe2D zQH9P5oscdFtkZsP+JneyJT(N<|Mu(yCV;@4HBdC4y98C+LC!Vi*tysbJirRVP)* zRTQ2aD#pHZ!z|=ZB%HHhaL<7j6b#8o2O*cU5dPE{DiRuwmD6KdkZJy;NHybi03^=1Q z-{i!=9((nLj43q&4c~qkOnb1m%-pTGbEjwZz9Yl3PdQ3b8(WkU3)wuT+bsW(oPuc} zd7w+%sj!nYk8hsdIIaQVj={bRdpj3G5B5nar^#lU|Jg7cyolc-uE*i=SV@ZPcd2uyiBxSPe4uBnytp#AkC#UELWSc)%EZeCgOH^m-ZLp=a+w8p^;+z zoefu0-LV_7OBK{02YLZXOkL$xc8i2YP=P+s>TCB_fSEJ*1P8C)X^{Qxm%CT@aQ*yJ z&gnBdv)fZMTjJD5k+^bnAh8FyS7%tUqfCu0rU3{;lXW9 zG~uvh>+&MBZD1#UrT%1R+`k_!lxUy6q?p&_6)l$}K6H05)V22QD5tHAP6OgoP-$@P z9;ceXv|pzBh3u5-#P!^J$ zmhqz+<_|%#;J0j+(DKypPd>o`ZU1D zDG%&Q=Z&rSrX=>^+vNn4j$(JV-kn`2ES+L9`aGgw^fKH^b~$67^sWf>G(7pjlXI9m 
z=WtcjV|j#NgN&VvLI5FGMUiHzK-ykXh8c1K;MwO(!ATOad)J4UJMQWmFw6kKa|oZ7K}sd{DyKSzw?tMM5Lt5PS3E1vw4U)qGMB8y*RS z%a9R`ePNk1%{y4=57einj3FhM_WV{EGujd;fds}IuFcZ#PRhtqiXM3}i_@e-5w-`L zHE)@P=V?hx3wTb>LV(F)hYO_9to||=bL9%L4CsRy0jBif5SvH}iNG0RlPZf8R|_g& ztbgvxdGDPoPu>Y61Pr?uS&;aWpyut{cWzhI9s3dvZCWOSt1l4yL#a#;f%eottqpo_a!h0E^!DblO>NU!GI4N4P2FQOx&ByQ4z`$;%f(r? zfak8ghPD^UpwUl!9vTHp+<@APDSP;8fV?$b%*nCj!)SR%60CB1TNwQ`or5Q72 z37*A=T0;h}q6LsY5Q2OAS1M5pa|94165ITTQrYeqd5I?;csrlXHZK>DtYDULOEyEwLV>?Ixz)q;)ymjLRzW>ltG>xq=D4dNbN52)SiuCUDZpqu+B#V;7o z4b}m^`48fgZ)K2X*YJ zb?jP)$q|PxyPOWue6L1y;<9(rGf6l--Sy zEL>Osd|$--pWv=P`k8bA=e>fSLD&NL!U%PQM1~RP7(+3TiGC$+-F8#~c1|nKA=8u; zjh7&0RmQP1?h>wCI1n7Xs9)xl+rnd)to*eIDn5Sv7(bxJk9nU&9I}C@r3bmAx9ohv ze30m&0!3%$K&TBF`xksl&<12~kjtU|{1PxnJkWQOfb<*_pB{qSdo*&mrqm%&;FS^R z*3M%;k6I$(_8lGU%^pzPZ9rQ){g<%rDDoUZt+rI}0!*$Yvf|0~lX*h3Tz>%}zdV@( zOq!K-;O-&e1s;H}BH})Hgsf+vyA*zRB&M2gzPPWYzUPI%O3pehL$k(SStQn7hi2V; zs;F2^avkGMkhPar<5377C4(iHBL(~vjp|N6?dha0a4YpiB-9hlK8>m_DHy~zwYEfcwhV`_k1d!PUy9wY~0+FSmuJx6NK-!(Unvz%8V3!={{^U=N_9ED2$ z75jeylKhdyN!Gj{GN`7jO_nK@Q~_%+nnk5W2Mr2>fWE(zxncGP7LcFv?vD{~J@B-e zWyaeM=;(dfxP#(?~uUqC}Ir1|8LFki{5O7{7#xisRETbDkkWL49ydIB;;pZpIxD|E$-QK(7d1?2_uq@jwgiFVZ z;N-{Y$zw0h8{8vD2m2MpH#PShwD&e5a%qYlbh^JmvKz(4+n?a=PVzmNcg@#NTCed@ zNqy9Q?q~_is(&rSrLXhglJC||cd6@eormdPV_6+7erqiCk&lY9JT}<1uj4UJ-r7kX z7j(2Hd)E@(Zp@w8?pHNhdzSG#&K)amwlc_YmqU`Wb#l|{0_2Ap_7~-f(u@;B(wo&j zs{Gr}fKy%q0j?zIdn#v(Q(I2v*;b`w>0&~@Jyts`*}=3c}vVI)|9bx(pLGF z0lSqhW<*WU&7O@9kRYKZoQ$4zp_4hYFj@zv3J5cSo6gG ziLpVt^himB7=TLVU)1-|h18|f5l^Y9i|f~WoHV1{qj4bzMEhey{6KSawl-z27Fb8K zBnJ$C;BRaJ0Z}^I6&?_}q%JAo0{oTK{nNoA#} zK%eW)v~Ov25ryPaz{mm8v-T^#(NXN65E_Y)7oV^PX^0W10^S6nV7CY- zB8P+$sjQ#`@GXy}pW@v|zj8M#B7un~>Xj!nQS0-DiWlMKsf}r6HRo^XEV9^9$+*Q$ zR`zT0L(7@IoCHp#DMTEOLT2C!9v1L^mRzb_1Br`kahgSBi;Kzsmd+&=^+iH}X{fR!O zyNjg9$wk+$IB6dFBuj~uw~KWOvCdEf_a;)KSxgpKQ_?v63lYP9@4Qlp5W&ajqok-CX-m z=A1^w{TG&#R2r%xw@+W*j9ZyiXsSUvWOL{)``Y=)hy^lC1M2l{DF|%=E-VQ#3eWiZ?LmAr zmRR@f#?rZ{Xfoa>49B?<=_hhbiSu}RB`jQ_g?;SErT<1J&aa*vY3HN#3C6E%6J6pt z9_MZ4hm|RG1G59c>;g#R*uu=rrOQ)`Mb2BBIw(f}{3P*dbD)U*e2TBtQ>E|8-Fxco zCvr&IHhzB|b*sD`_f3yK>**YMJU%_K-rY6)5PkC9^-Z<*=jC;y zkIG_DvQ&H?=qGso;;%V`i|qSkw?R2Qv@u7@(K2z@*_Jgk{-URA_;Fg;fo^~5P3_`H z+hnsh+t^IoV;?a084-EG(m-og*cwCs6IEbL`?u`f*}TG;Y-{YFegB_YE7t?9>5*95 zIdJq#3E&#wA^#xtK2zxFqM6}N&_yVpTgIs~jgTfNcTL7mc_!hxje@+8G!++|tX z3rJZ^zUcy>oUYflkbn+xv=mH}pZ?*0Gri}(VNTnEQk=eKIlg7Re(b(P|NT9R8J{z( zkkKLflIW7L`)j0~f-=z$!^}G$>iu#x;ps;pf*%^WC@D$d)<uTl;E@hH#7%?#@ z^GK6%hb6+yxaZc6nvz-lmC$bt!_h>}MIKh`E6WXu7*HQg^t<2BkeGX2PkXezIm0yx@FcoY0 zX=K8f|5*j%;d!Hwo%w$zhDCq1hZnfi6$Xjg_Wbf^_pfeF6$&2IbvtQCN~J^!J}5ZbLB; zsj}3B*c}*^P^Kp;J~coi&{)tC+4bYb^{QHwoSOXhxb38Zw|33j%i|Ul`B-CpKkblw zCzEUn`vb=#g#MwJu78VGhP$VLkbHDhKDfPL#XdVxxxzUg9h=^Oc6UnLn@^915)D|d zeMv!hA9OIy7stY(NTJsf0Vu&0?q1P7Qqg(3xys#+io|$_`C;>sQ1mNm1cSi}XHY5O zfhbR5UJM1D!=GVx6V87MW<#Yc_fr;L4a z^$O_h)+r#CR9r>9|OmGcZ} z_;p0dBt+{>?9+6a)D>~Q^E9uQ{_J%IH{5AR-2l}+C@R>XNf>-F^cyc>7CdLM0dRJ| zPGU2AlnP7A``J|teM-%-@zRcgmI^l;8p6kmj`45`JeE#HV!Z>VP(v*MOTK4u;`k}|#axNb-dF6B5@&h7%)m&61I^hTa z^>>Bu7w1$AKUY{6=RTWe6wY~?HChWwcbEO_yoAL@pwK4gd%})ZTfM7z#upn&kDEWuJB4X?=C8{tZM)ZP z&LH%}#mC_kzs-1!CXFR|rONs|_q<6SP&%gM?{*FQnmlSfYz;RR2Y+G=8Q5iOqcrjA zs3GcN;zXjqYJQoO7`r^%-r8hlG&B3_qy%AR`p*p5QtB@>uRuD*8*hQ!J!a@ii<29D2y`Tb=Dk}3t#{S|z+<6Ss98__K@6AG%5EmsSJ6J23+Cl2zP z1uo4ePqsO&8IW>RT%4@BdOx~kauH~_(_J>ducA;|rBJf3j34au%)mR>qf72w7`Q^N zLOpaZ$<^@a!-d$iz^8Z@NJD2s3-V;sd~g!7<5~%D)5_Z{v?A7<0svmXF7%Y?--VIkl z8@s;6n2=N$GT=#I5zhQW7GV)=`m53$Jg$FjGj)xD6h98wim z`i1p%aWODJ3*2_#>AlBLbPm>s%0Z#2Y#%J0odZqJxBiVUPHjCOhM 
[base85-encoded binary patch data omitted]

literal 0
HcmV?d00001

diff --git a/docs/stable/_static/fonts/FreightSans/freight-sans-book-italic.woff b/docs/stable/_static/fonts/FreightSans/freight-sans-book-italic.woff
new file mode 100644
index 0000000000000000000000000000000000000000..a50e5038a40569ac50f2cee39a0751cb26037eca
GIT binary patch
literal 33944

[base85-encoded binary patch data omitted]
zppWmJn*c7UbtZ7s(I3(fSBokY#f04OQ{Bp#Y4~T?5 z>T<34Tfk%SV^YY~$)i=X{u~5Phjx*Z*?4^ZpgPq?L@l0;@9y>dp5iFyT9sPeb?ZRa zYjolQ6W%hKkH^kL{E;xWDa}ds*30mxvSAFH>2!ftg$cq4iO;Nn0q>v}fmF_0klMRE z1TzQHlp-eWQ~*Od7%vnR_=uxWQ*j?KLOUq-pdO7Y(?r9NGce4U47R5dOmX8)G+7>cjE}Oi0nNVNKYZCJl)sO153YV=f6s+eJNF{V= zaHIzI9?tZbSn4fUCn!pa@H~(Ss z+SViw)*}?jRnTUJi0-y@{k6_S!G;yMm95Xbc{&^>T({BU-r)QIKM`M4pUzeHTJL** z-Z>a~2W_UJB1#fLey4o>9P?1Usf3r#3Cq!@6lt7jM1P2wShmF!77{c*OZnjF-McQi z?>pxuIvFG~`$GdB8hkH0UxhZ%xF|{wwLwFt0@wSl_Fqv`sn7n?=p+XpA(RIXex`%H z5V7kADwU7Rn)_Rtmv)=xQW9M%xAu^#$l>49i1dpYW7SKzLCYlSmWESfn@^IpTe7N+ zYR}P937UBBhE6Zog6uzaS_P#TgXVqQ3v;?mVrMG6)Ef$HpJ$t;HyrUaIoif)HQOIS?u@6_Wi<>LxkGsufD zPEbLp6{9n3PQ0)ev`?S~i_~6S?y>hbAI1{Kh*Ledg?e52X`F0X5Dhz2_dHB-j2MLT zh$*gU6=?1QMV(Om)k73Q9lez^285fF5+zC&quPcDo&qOK$g(BS%K}7HBAG^_`yj0u zr>B&H62vf;ielETU%}ObW@a2u!_(+&speqJP}i0xw|#Z)`sYLr&HTr-(5RTC*2{;` z{Nrfg8k*ZtmD!g4S%c-#3SH-kfX#^7qf35cA1CNV7|# zXmWhY=((3FHrnDY&$+=46gpMMDdJ(IOT6DnUSVaKRxF*XSYX2ZrAJ?j|61MTP<1Lu z#SNcy9HqA#FP7H19gR~0tD1d!b04WDE2=t5)i}mGK7o$+{);bLnq3!HKj*MXmAD*= zFUMz^e}gn5{jlycWtwB+w6kdT$ZWH+c=+oqg-sUu*l_~D3YMraVeLXCc0|(zxy(f< zmBQ=0=aD}t)ZWE-ZBTDX-LxruYdu~M8nusdXpfrQ6im`7<<67O?1)@>yc-#Nx3M8X z3Z-k|E+~7ngXU9Y$b2e-XI85gL}duqRHH)i!36USnqXrP z-=PRBBS$_;RI$gb38*HNHJ0bBz@i$r2(B4ORhWiS8AyCF4wH`f5-12~nrl={F7x;D zI+0JVq&bV(wz_xAt>;d9YTbbcx$gO`t!?Fdk*O=7U6gzNy;+Sr3PZ$0qH&08M5 zsQqW{@IU|QjXkmM4YAmUi@UlmZtLn&<;NlrxcBi}pf6d;tQtc!fUab+2&7~P#WZY@ zjmB&U!~{*Ua-8{JU`B!QVlp!`N>tO22qrQJ#)=PVPraz6i&xRbTIC0;R<5M4ucnig zXICq4Q$Nr%$_2RvLNidaiAY63xd839pe!OqiV=xIwwrrnb`}M-Bd(B_1+)v$FE%BC zR#-r>Nt-}~Nn#6eI;~C{3P)U?XvK!cd8L7@))~tdmR8<0{n}gni&rf6+36e0muvdo z=eGNVtjU|ua^Bq1&<#sNtt%GIDHJJf#W8Q@3RxR{WC@AoV~orgjvE9r9c(5!wqG=J~CcYNeZem5P zU)233Y~cl4 z`lvOjp3gIy%DHBa;|m$Cn-kais&H;#LP8hKy3`s^T;`labu+6AJQ39`rbnThz;zocb&Jv~ zJGITf-go~izgAv+b=hs#uG@)!{Epb~-tLiq_nX684nDGJ>%oJpJuP5!EKAxP5n4+l zU~Kpj#}>LvS!tnPDH&Wo?NYie^gYFM51p)`Q!Z8ZY*BvDPQw}+UZ_0Yr96)P(~xfE zZQ%f;tAW_Tc7IAm3&jkLU5nOx%m5{;l;5jwq(NFTFIEZc%-SZsi?eb%HJdq{>Kr z#Q<~}P|A_Okf|KD=W+o^^8zdWsGtI*dDUOr_anND>|AdGaYtmip|T3p>e?u-zl~+M z5P3(+wMyBzq*&&;Ayheoic{HM)3Wm95pHgISK|f#imZ!fEM4rc+%i<%4W^o}CKSn(ot(X2x5n`^v!|i;y3aSS|8aRamY8iUmWlnp&htO$ZpOwgyjY18`b_yyXXa z4^fzL-dmfG@nP>UoRElPfb)v5*Q6k$*p zs9>$Cg7t%fg&B~m^4M7^*VB3MjOw;w7~VXi3O@DL=?u$}+5PWxZGsyt_X=_&SQ@BM zvq7OEtgW1r(Hrw(1iUOE;sZ=-Im|4W39?SgA6sa4L-M+sM!gI1Nps1gdH>R4DaqNYJuVg#IA2P4n`{L}25(s4DV z7f$V%(A>IY@%(vr-#B`~lyeTf@nJ({prO99s-6Zz=hX#sEv0qASql@mPu_EyFTT-w zjqMc;9~eJjE_(mGiZi6`1-Y0>%%>v_83&MtvVv-112rbn$tk2ET(dU_ZD{QNFWY{S zhB&0{Rc=4EoZB<#c^Sh!Pe07_hCw#+gLyOr-KZXnrDN$3N2d~YQ}UT#FxhWC{h5}5 zXC{-c6k!sqY0pe7Ow6Q%`nmq2+df)!oO`W2t*=67a>*j=@xb#t;rUz1Em$!>`1#wC z&zD-jI{_VeUQo`&^%gsV@~x~j+n_bKMCGg&RE@LdGG0khW2F4jU;wB#mTg-8w`7|x z`rEQiGt-H*vt#Xy84c%ltgVVvMO#kI14?NUbm{;)*}!TjCRYumls!c$u#k$va;BzN z$_X(rRQ-v#S&fmI8U9#2UyYHKU?NJ7R)n3w`NTNHvy4jlR?veC!ofSfGx%F_C$c~Lj$A}wi^tNQ%tnPN#OE?0_cb<}QT5j6yzh--}0p3U6)Fl7npkkiFrFxZ^Qq-j=D zA$?{1Ih%jGk()7*z6AYwVQZXz`saHtm^69r+{u$J(7fQjV!}l?bhb{o!hLdLazatA z{=AvdhB>oQZgPZZtHAch8Fg`@nuCM~flEo&P~2=W;*gmc)RZv=HDwD*#uJhlN+CZP zPe+@wtKKagK%0U;o_a)%dzYWtx1MwNeZ*}%^{q1cRVvc0h(8s%P6>BlVt~HONeOJI zzE2r8m>;>xk;De`v7})FAWX^&%K3@BO#(s4sKQCQ!6wCwO%aGZ#HGq|IP>yLe2nA{ z&~O+u&k`Aym^FnZCXR?Drp=;*tapE;ba-g7-7mGPM*X^5HyC#-%Im{`jI|lo3^Mk| z?y}R%Nea8>5Xe}};m-Ol)Gy0-p?;xpZotsJF**eO3uR&&0$5*Q{~y~@FkmS2hqPtt z-=i%<+W?v%tqqFnk4z0D8UezXDS>1=zJPYvzMCfY?`nr*WSZF273}GBnVnD*>CH+l zYgcjjpluy22jE2vvop003}(p_ma{ZioI$0Mk(+8XU~1l~N<{-(6&WvJQW0ZFJ_OT2 zB&RG#^t4WlkWxtR)XdY<3j_cOAn7^1u*NM9!E6G6-F$0x=cBPq=Z)Pn8Z 
z0|T6BERJ*kOK^WH8)rnonv%d2K}=`N`?%G-53UQyJ3Z%v;Tr^9#N8#Fs%pzuVf@q8 z6*MAUUo|Ah=!0$_3{o9jMpI++trJ_8x&(T9KsUX{@J`&gav?Ld<`%io0bEE)CT|Kz z*Y6kQG%Z+JbAz%;wX%l6IlDE1bJoM@GFq6tr^(e8vUL`^)N?Mx0wL2}&Z-DO0CSpK(r$s;yJVX{s&9oo>q|A=Ls{^0R2_%XN=)^o zllA33#9%wEFVfSov$nxKlS>xEJkDG4z36vw*hsVT%-ueK z4%6RZu1@`*&($+iJbTFGWpPcF?j4NoeiA0DhNHYMD2)!wg`lkhp(GT2g7A5upNdr@ zqu7GGImD~gtLlM4K~Va#VjL8koBwO3`UqypnaANcXuUCGn4way>cILl)8&V!8!wc! zk|=+G?KeXr1`7YX3{+>Jt+Z7+(1M2+Wk0?gRN(2YbbgC+fG&U+_Nk(x7A@&tC(IPK zk{psGbop+sKw>tc!QSDvO2T%g(Yg)7>nwb zW+8A8=plU1BlcvHf=absP!{wl!m>@btSWK+I&Ty2Pzwer`YE_FgJ4na5+vx|w=7$+ z@w%HIQ2rzSgIkua(C5~dc5K~vc`KKH<@ZrC8CGui4Ygl3UnS<0`1|vf52d5Z@h7Il z&B4ni0ugebzV*X%_ybYp^}f6)koT64rZfM9;R0%VxvAVaBOwE#R8K<(ycb0HKM?}| zi4gerKnN7RWnsay<3rTY=_r-kQ62x26aUGHe-BR7+>!`Ur+D!T;Kf-Z@nX{2O5?|P z!nFD)NB)x||4tm4vcmZUPj&!Lp3f~DQB#j#l%;X$bEk)j{>iZaWZ1upGWVTktaB=! zEco$yVodIiIDjZ?LPS|yC#zLKdKsv!%o(G@!pdT(Z~$6X^W{SrTpqxeT`Th8plF-jiz3y(ZWE?9a${Hjpf_8-%A$r6pdWO}^`hKQhK&IrV}IrdIaseqfXL<{K_r9+*7$ z{K)}dO^t6w+tRfR{Z5}#Tg)xmHMjhUn?|=zh}OFUQ)|QZlb4R$vUhHK>-;&RXGKQW zj$Ke6oK+v45x8W{jn|xfok!SU(%k8huP+$ks!#|iFo}u*c z4W&ncx(9H{PkpofnHaj(WCJ<5mDqiTR2qZmC(MYcSsGMyfyaoJEBe^iP`7=IM|k|go4 z&9%$_=Ojs$GMojQG$iax=XbFR|3s?lA)R58^zi|G_K`#6@d5hmSLw45<+3L%FVpv? z(duB5R`JryvZD|(abkkD zbZRo!0M)D#k4_H=i6Mlx9ltxwK zN*<3p#-#LKSnM(RwTy+`)>u}_C=~rsF=oy4;F-f3a}^Y_g6KR%r6|N-#7RL(#i%6V zdkk3)G{@4IJW@A6^W@gI>7d4?{&WplO>51xiVl#R<5#Yme*Yxze?V)T;p^9o8?`z#`S9Gu zX^6-x+Adskd*`${7fdhP91_=jz}X#$j!enCzE2s>xDR(YO2UgvIf=sH($vOgnd-)x zF0;DHYe6Pf<>+~{&7PB6>}81reJ++$YF&v+oDtB+Pp`u{4UPQjP3@_=dgC3>zB3q^ ze4=q^@r{idnZ9GV)nFglNj6xF?L(2P3p0A-ZWX!Oty7oVB!KJO47oZlk$?6gkRC?- z9Z@L{cM`HDBKunFh!EC=?lVGIgVv`b!&tZO|8k-7^@v#3njb2+E0z?Vb$UP1GG6sv z2;IdaAar{Z2;JDjfZjh`SNzt9{JZr5F|an9urHl<1>BZ7hq#BWx_)dFo(dSF?xhsyF{kT}>HrBai6( zk9RWLEjZ$!^1-ChmNvAYbJTu&S%o6X!8?Zb+z&nXs6F@Sux3#ThvjSy3P>u|rO4n3 z^*Az6-{h*5EE%ft4>ai+r7eN?D-sGn7$2o-^azSY+`Zdm<^4#ZQ6e~yL^dFl2qTN% zb4CtG*WO9eq$}^!$+RLFoGM7t>EdLm%rF%DMM5?AJ)jbd%NmF~^dnG$o2~C+^+=i| zN?clfQ>wU$Ei_1#H$}wF`~TRM-Gy6@oP3Ahmvd?gVxYiH!-@NqC1MI3P1QFsG=sDf zCrksXXrvgm*wfXCz6`rpY7H6J|2Vv4uzgtuYMe6Y+15Bk#JLHN;MT5zy;+hi#u}$M z+@E9X+0$#B5LkYs8YjnJTjLZNRO8f=G`fD@Rg$!}QtRALUa9u~h4eZ!#^3JCd=Xc;nlcO!e_hz=!a0%L1~~SsI4&6{@JU9%s$k@MWO;1BL+mz;MCw_wg&*$;Ae%h8ojUj6fvTjU-0Jm#x>VE*>U z>A@>|){a|w-m!%f+QOHNE1o(h+7@_3x%a^8qdU(d{lEUDPU89u#_ z*vMVvrUBI03((C2E2$0Kyfg&ac-}=>7l0JWL-~342w<}Jz|n@bl6^7aZi#WNEAJvK z=D=-O3+F`#AZF#fi*VV=g2h;xbwQh`A`T7*JIjycy$!;no&S$_(@uxP)kIIP+fa4Y zM<6Q@BiD@)$YD@&bB067%}t=>9!8X0XZG3Q$Wl}J6wAUKic^*%kz!UwBkVLzkqU!SAS}B< z`}sl)A!~rv2v3%$nn($>s4`V8mJN&p!e!+-u}TW;2B}yC!m~iTtYh#5-#ueTNYf2R zdH*o^y5VTSjUSqP;mM_B9>0%Y24_Z~MudPijqBLzxdb|gWDmwIiRE-owxNtE6gQ|# z=fn`(NlHa=Ajw@kICb^3xA;;-Ht73yY-?j<>)3|YrlzV&dT8ZJ<+Hg>O%;Lh-1>Rr z#;fb9dD7ayj{j6s3o^z7HtaPjW|dr$G2n+$ushQ*;?PP*<0pVS=VAobii$NV`Ni=W zTHY)=oGyKSaazGu38?Ov#lc+7LNT71P4dbI7hI{x&|$XOJxSDCd8=u}&dRN{wuC}% zrMh3NY;C>r+S#qjH$T5>LF-le?3!XayLJ6lbWPKRcN}?oM#~*XuA*=JJGBSyeQo0L z8(|cx?llE2Zk+JTO+Q}FZS?$G-$~EEJ@VXDs(r(7V?oB+h>Oe~0Xa5%1mswE(wMYk zETIRxt(HHZ2}Bmv!C)ZSFe-x6!DIy?S?}<-wh_^7dqlefTPGPs5KrIHMy%u+0(R?T zj^OF7K+9}~;%3CnoW7&ENo_E=$t}rxk6{~~ zzNSQNx0mf39Y8%WOv|PGpi~}~T_BUZp=6c@y3u3j(QC_5h0vfr^^_V-@KKTak-F3s z?R3oJ6y+eUgf{w}+%keF8q(F%%QlDUwPw`m`Ck=Z7kJA`oWmjPx`HYW#@Qh3BG-(7 zvP;J!`<#xot2HKb!n+ka+cPn_KY)1V{Q+*=arEP%`vcf@vvd32xKjDkZ`cL_eSO@m z|J)$pfB6Oh%nw$1&)r_2>iz2v_lo>rc1|Dg$jlk4bMO4)vq1%#CkYPCd;h~d`BTER;B0i)AH10(S zZENQd4^k~|cEPz2it94W7&X%IDKlRbF~g(o7$PDxV=R*aXJi@-0d=*IE(3!H8eO;WZub1mS<%Owv`$k$yh1cIyl()EbbQHZPxk9v+ z`u~senZ~SqleF`-U2MOWN>WRHNMhM2D+|I{^e&Zzn8kqjmK9*eg+uYGtZeK<723>d 
zu*-wNF4csvSTPEQmGGEaK`nqWEsR>pJZ@@d5oImn4vn(L3^rz97{!>`;bakATefOw zXNfXGCrDM+81eY=OBJGQ)TWFI=g9H=K}Mh88`#(6<;}mW8}sspd!Kdm-NeUkzh_;} zO^@F6%SiO~jXxi-{CF=0!LGjUerE68c+HycKY!o3(-PsY?zL;X2aLbz$%rAJsJH1L zdKNLfXmG`iWDO!Ijlk*I>XtW#6Y(t7+Rd^c-V9daU<3$Q03(Ht#1iK-Z1&8Lz|;ek zIv)(TI&q+GM`InX#{d7qp2eg^Q^kD<{@C|%57h}ON#q&&UTJbr)*}aPWOtsZ-gy(= zSw9(b7}L(hAjX6l$#CaL0gIM@{F1QKBNFL&dSnn<-rw@sIQld{hs$S}=ggbX_2OUJ z=FjLijMMR2Eq(g(%agy++0WuP+FuO(Cd`ke%Y<>n3|>F_FT4f;=8~IyU`XGRMBap% zham6Ft_km09f{XZ<{L)U&*$G;d}Mlf;~IZBx4dxy)?@J#X^YUT-qRe6>j&=1sZrO& zJ#oe+u~D=-SMr8CR`cx}*Z9J@fyM<>;NGmpOL>*HkmG;-J)=_+_f(XPYki@Cdve%! z?o^`h4cL1rH=H1VWDOdCUZ@)PYFOcURTWTy*~ zZ5uiS;lbm<;dpTGCC20bX4_K32W^9MwQYlkGCX)(DQPCN`T6`3kkHjcvIOIT1_e&y zzzP!SiFgK+LWu(_MMUf}3@qbUi5QIu3LHLl*LzuuLSMtiC;?YGj(#O7%ji8K-J6C?inye;+@itv zf}136H*hbC_e#Q%&8K^mr55_NqEE*jQ`;8of1f`l6q2pzh2Dg8>}I8$(uOc%VM;fI zr0)f#bzylqT(pO92j8o)x^o-LJY5frwjG{%J!YO(Y-P2s-SGI0QF-lhIIdM{RPf-poxWt)z=>lbc^Xlf` zf0V$X(`fez9E^jD+(}eG1%oLZ2CD+x^-vNkRtv_aJ}VSrK?f}`AS-=|30j7}$r}PO zEkiET;&S!`Te{Rj8J9(JVtJB^&LSCtacA-a7QI7=C=tJ4R?BHHdLd#YFTGA zKrXPW=qlNWN2e1IJw^7H0AW0A3#nXB0{3Zzt*+^iq~NeyUvvJd^0Mmcvhpg|hGDnC zrcajE)cVqQ80HuamNOdU;!Z`30@>l@hTR$JhTS>Ao{}8F98h;W$UD8W*@;wP{@^YS zvzxb+BTDWwbacedHErO>&K%UIwWp5r;Zw(xn^Cgy6h4Q(1V{vQ(k}=DiIaU}!eItH z!4O3Ri{wBMxO^DEpd%j!uvXI>t3u`(jW6{BLtl?WkM){mKnCIsg)l}p(R@7Kf4sm zdd*!`{_?7-a(|U`JrcQ#5jl!P-tvQVCgN0z^I`fpmbnettD4dOEk^$!dX_5nE7PbS z7t1Pe?^5CoGv<;!A;w?Tz+cr4b7IT4QD;G2j1&ZuHN?Zwbbyj3iqIArIV>D0h*H(r zk<^nU=`b8Q0vR7aBlXrMi8sKq{~t%1+JoNw{2mk;8007bZ z+JpcA007s~j12#K{p<%62Jip?00;mA00000004N}V_;-pVE^>@2#^f?U;V$5U4#KB zf&yLw0HC}F3jlc9ZIe%E6G0fo-_AF?DIy{gx)J}>dT0xwl(2XZ2{{-l+Ne<^gdiov z;30Srq(u=cLBvB2L4pArM8qu>3F1MdhtPxIq0&>R2N4tx9>fZs3uXHze;||we!T3= zH#6V7x2rah1b^x+V6~}X`07@$rOO!CK-xYJPj4e`T@1F=Ix1>C_^CeP20#?4)PX6Ko_nFfFP}jE+4o%>iUSQub9M)mP zB!V^e+@i)sqa@Yfk3{iQ9wDiRQIiHT`Y1B;2^m#@E2}uD_n~Gr@sR6&$zA?mLe23| zGvhkJ`r}ltQQx>ef~0!IvxZQSA{=`LBQk~|DF&-@5>d5`aRsbVM|pnAigdlF7mFk+ z^XwnNrm0~MF`2|2djpr$3h(rcnE+Yiy&2E>@&)jAmaY}IE?f|p##!GnGV_;y=fx-%gMU2y!<}e#DZ(+W|!p367 zQo^!^QuRt44w)@7_u*v#0Lu{~iAVc*04h$DgH1Sbn;5N8Hw7w0@K8LlVXV%$~S z$9On+TzIN@UhpdL+VSS`F5*4GC&uT;w}kHnzYqTs0WkqL!6w0FLK;F*LR~^TgdPb? 
z2)hX933mvu644P=6U`D!5j!I;CjLRfNTNtOPkNd3AsIE9ESYVxo8;EWJ(8aw|4czk z!B1h4!X-sMMH|I0N_zQVbo)E z%h=Dj%y^#hITJaPCQ}E~d8Yr&Qp^?1*O;HP2(p}Jb;m}{w#jyn9gE!xdjtC&4jzss zjxU@RI7c}ja4~UN=Bnhn&-Iy`fSZHcBzFh*KOO-d%RCu8cX>H@wRkgmhj_2>Vex75 zIp%xB&&F?-zm@+y{}TZ&IABmfLO@2t5&&1OT*elzIRG0002$1-k$R0000000IC300ICO000310fGPk004N} z&0NcFTgMq5+3qDOYNSOGG(iiTRVo2A<69EPn?e$)SdAqMqGTbvPtaHBs>?1r^Zoxnw=<+JG0?&gc`h^a->);YQtG{xyXv-Dx$}-v zD?e9?uUAy2@96rLy1Vj(uWzf_$~Ir$QTJE=%GcjhAFTX~ufMPE-@3!sKTu!Ydd$~9 zRGnLY;_G+R2e)O4a&qThATq8MSKbyB&Sq$EXkW-*5E0 zT92NqT;Fx{XQc+J&|@;Sp;s8`ZwIr+U7^1V{ryIN4zT9Fo`0gpmRQR?&Gh|+{`?#( z=6ZgiPAwjpI#NBoex@D)>wD^n`c!?UzdzG-n17i%$G^6onOdQ(X_%-pO`E|-@aySu zWBvBIe!j2YOw@sXJJi^Xu!>KuPwR&P#~i;}%&oS13O+7}m8nhrIZ>}MS|oG%tPsbS z;BBpEOdy3v8v1>3>9UpXMJt8lGe{?coM+&uq5TXaU+ZTR&E3VC)3Tb=zEpFT>C}f; zgt2&ylqIa`IqOMl5Iwvc=G(M~LqMwop7-^dwVq?NHnW;+^t{yHuk_Ub8lFRb=8dtZ zG5&^_Q9`x^kKXoySH^lPC1a!^ySZ`jc9hKy&X z`F9`u5ndXKXPWm<)kpf@)$Eh`p1mLO{iDD$Y+G)08P_A`X{Pntr_hXK}Gt-|h^!JX|$1d!B zUp>=z&o!kzVDbXvdZ2Ve&)m@8UGI68{X)}5^Y`?33~;w_-@(c|-n$Lp8Lb2ixA0`2X_9pg_2+@+e~+o?B-ifQr$$0V zUxz87wTQ1V2aQw?fWa@Z}v{sC}31UI|jbr*9T&ME2$^VA5#^L7A}VC*4A zZh8Fc=ww^B2uIy*5g+8BR9+L(h**n5xna=j~ z=uPd5yUb@=;eV+S##3xU%lGvRBNegC4XkHm*a2Qd*@1_fuh}x|tx7@1zb<6G1VqdX#wqRR(tbIM^OULt}yBOoNQC8?#%i081 zoBH~QN0Cm*{I-U^o#ai^Bf0fMyX{?z^0Xv&;YAK%KL;$?LykUM;5k7NUlVdYT(4tS zF%fQy?R*(bqnEpwxdW+p8FGiM@6cKW`3cGw#CM0dEVJev_UNQLgek2N$KUs@mouC= zV(zZ#r|X&;F_zbdW~}QaDC;JubGm0F zyu+M+0S>uXzRy|9VjW50d>!kRwBU=5MKhP1NInO97TFI8+hBkBC|Rc=Q5n* zSqPgp2PBV4QF9M^e}pxBpCY!u3tYRFo*ihh!?DepymjSFAeM)C*iD;r?E{}Jeq%Jb zwKQt&SZ}q5Cq$2-Y!lYjx+Gg8jJwITw%LEmTBP0gfQPBCXS&A7QN3iSFsOphQ2!OE z5lziB)z!+3tDh+<`ync&Gag~;k{oqm!FAo3*Pv>1iYn+e*H$L7p5t8~Ri>%pxV9W#~2H?yY)_%p%srogYgdF{!0%K}DB1aE`o|)c)p=e}h zx$4)fqMv1Xsb0%g??UQU8(oZFqbl!8Mst1C##^Y&be-vb!(ncfD%(-|4!p4 z&+CSyW~e4d{N9I6PiRfUy)i!_DVa)iYDb+hb_Rz!sLBW6Va+@+@QKnH^T1JZbv=>Q zh4ukfo!h$BtS%iUL*>YWCp;`bd0Si9h%pq2pE6YHIF!J}aAgL#GnSjrDIeC5yDHMz z*vD#Tpi*$4`rqeMFVX?mGw?eEvZjG!q*~%isIIL>^{|%UWu8;#^b>r3iq9t=^v{ef zXKU(#*WWQT+|$_Sp!|aSpcS+#_i)*OX8PR((`mbaRb*Dna>j?|;Il5qT9#ufcGJa=1hoG`lUwe{jc^FpEW9f6z+QJvIlFG_`27G3ou3i_i!`t^- z%enQ6`PFA>5|(~f%z4LV|EP^fOzWS5tImt$cC=61|C4kKgg3x%3`)c9BvC(S+ZCBO z+iAw7bEeYxp#m#cfOi6K<4eNl%mgDy-sb(-+XE1&sm>i)W69Xk(~^D9h2?Y^Sj;(l zC%1CJ@aEh#BwKw5$$su>C-t~H;PPu=J#Fk>h^)fCqgd|P(=zhhRiL{BVTu^*sHCTY z?HFUpj}oU`q#Q-v&QtKK)ri+&+%Gr+&|HFFCL|EjRCrqGXAc!()EpMpS}>bt4J~i$ zz|zvNa!dO?eg73xWBB-nX_U7!Ubp5*~DF9O~I7!hK_{{{`;H!J8Rh zpK_;L`u1bM7G6v|4@O>VoY4Do?1+pS(K16uH-J@=UQ@&!HR5cz*RhOOl68`gD6xiQ zng#P~3<{=}v!|>XCWHML9i1&?`?aUHSFz=`jk!&oB+TcYk6xrlejauP=taQY#n`r% zKn^+Y;?+b`Az40I&Mxl@mD)bs?#NAiS!nen?gxv3M-~|o@7Vu1aB(ns=r@tI90)> zhtPmU`&#}UX3JW#)*BuvSrqAgWh)5sMinragGZH@2Q11mMv2VVXNEOu6~dszm8qAFL?1$*a+Ywcy>Thgo zj%w#GLr+r&8L4>AF}!*U8Q%mskju>mcrxaAO!YivOI0GLklHkrL!Kq($C@Etzlbun zCL|YO$nOvJnuPT@I3<$K8J1;?x#r#}dxluAM4RHgOKM!&2`(C1MBN(Fy%W~Gp5586 zY*k8L=kN--bSS&!(XX=c+eO2iv*R(>`J$mTS}1VO*YwynFlU= zX!6mKtwN-Ga14mA8Ee04b~RC-vGrllX#>i;LO8-9@n#~cyrBiu4H&~p+syAO$a{@; zPLW?ZnGZm*^cBWIbT6#MO&Ob1oLfwDsy@vV+E2b`4E=Q-G{N8$Jtc}{f}N3Qj&91x zTcJi)G{ySFk5e00Mw~;>qB_WAz9%N{SaPqDxRml8 zaeubWQW>XkOfn>@C?6r~bbh*Ijq$q?VvTRO4|cr zc~KwF?bM|0qHcII|Fk8YWE^pJ=yaX(oHtlAwN8v}#b<&`n|WsFHc75+AU)!J+;ha1 zitC@LJvq0y)9(8M$%&;OH-lW~+C;h4>)w@ic z60L)sHla4mi?4SAR=m>8(>Boz$>&G5Q#lVPow8TCh;{*5oIlK%Z>~cbOVDNE;x$i$ z?)>#Sq+xgPsu;MsY~Xd3`=GD1xopY~Qhmq8okpQCj@5oWZTxp`?KF&xVSjgk43)IT z^?iIv`mlWq6%U(VplEcIp_#Ye9iGV-t#kbd z>f;edns3M46^Y--?-Oc!j=|sKr`=@omxb-1PHZKcdcSU?RAnWWd2m{nXjDw2zxyBa zOLq90IY_+9DNCczQY^grqef4uw0r4tGNQT5bW>}++@f4>d=^xJC)S?g-)E$j5Gz}OhnFh6AMDuWUKQf_ 
z|65d?W~~8c#5Kv>_L6QEAzYOm#t0eV>mltj7G54y2e@;+rK`8YB|C-j&iG9uqU#gP zu=hogtnxf(`domlTXSc!Z|nO(A|F_$!}9$Ge47XT;YA+H!KaMnl=pwB|C!qeojUar z{I=@Ka&Dt`Ywqa__RfR*@hP?Yk=hN+6aCioX@TqSj7|bekUm_^(Ou4x)Lg~X+?_n- zGeoZvXWG)IHdcApr}Fwk?qmw>2e~()rC?%b9z>_nIJo zn8L@6IquAD4poq>}%86dd8RZO&#J$rfS0(%_*1V!|mg$;$uE*WHe)V$) zvG_dbzgOPL;kkwGa?*%jjP)rz!iaMZSKpnmPR8|^AID-#OvNXYPkqc8-r^IkSMzbr zU;8@EU*6a7bvV5r8K1VQtnSxdr_P-Lru)h}Whw8++=r|ougbHb+D2Y^*6efn(yZ<7 zdg?dMqNVO!L>8Qj$n6=`i(o%Et~tW9bF!;fSvidxXG*_|pEo_a#g;2FIfL}gF4BcP zasQh0unM|SF;*lgKVG)j4qkCBS$Ng-7_opncZ<(HxbO0;SJ*4sHL=$HUJ*X@6zlRgWi}nYEVYKdpt?NVOn` zN&9U%o4BZ&UzATs{VqOOq+Ie2`DMT7xIBuEt-Tn&zP1)3ztyj!U+Yi#|3Rp~^Z%uB z+2~{R$Q||%@c&pG+X}uwRC}mC;oaY-`tynYe1s?T3;ie2|A{bRI~@l*wX%bQ-!c3b zEq?Z_004N}ZO}(dlW`cw@$Unrg|hbs_a5*2wq+=e7N~pgEef<%1T0p?ZNxpIG0~I8 zby4G<(WnPC?iEMWgJ*XVZ}x? zG1!SEj(8GCB#C5FNF|MQGRP#0Y;wpYk9-O!q#f<)Ku0>!nJ#n<@99ntdeV#D^r0^r zItC7$xG18S5=!Yuf65rZKn5`wHy+BVpb{@b7|Jk)GlG$fVl-nI%Q(g}fr(6FGECZiIq5s zmjp?aBuSPONtHB7mkiExfy-Rs4i~w^T{d%4GP%hu$&ze#NRH%6p5#k`)mPWt=+{jQ zTc|#uX>LtfVd_!2$Du53)zCCesVfboL+Mnylts#7Wr_0NJh!Q?=IXk=HsA{dYpSc7 z{AOf$Op1@WcORYV|#7fe5v^dIU?dEymV_vko0=!SxX3ARai|HfyZ|OPVCKb>jSTUFg)nQHH z_y}*VFhKYU6aw`<#2U1QXjpkd4w2zjGs=sW1m}+E5`RlXC;yV*Bz|b7$mFP*A=7bB V37Pl+`?OL900001TdUiQ003jn30(jH literal 0 HcmV?d00001 diff --git a/docs/stable/_static/fonts/FreightSans/freight-sans-book.woff2 b/docs/stable/_static/fonts/FreightSans/freight-sans-book.woff2 new file mode 100644 index 0000000000000000000000000000000000000000..2688739f1f0bbaeb667259838a5ef06ecf9a5c1e GIT binary patch literal 25120 zcmV(@K-Rx^Pew8T0RR910Ae5j6951J0XhT#0AaBJ0{}t*00000000000000000000 z0000#Mn+Uk92!&`=rA0FOa@>8gHi}Q34~=42nvGPK!WsT0X7081BWIHgH`|pAO(Sp zI0uDX41y#-cV%K`23v_laRN8TF*IB14W8C4*1D1?P;NWnc1&O7H}@$}Zo2@^gQA0v zs!sO*|0gAtF*FS`X%SS$_wQ6TNmQYS3Wm_bnMjO@D@#PTed|dqu?wsD9Tt%flYBR5 zVh8=Se289f^Lw$eHgfhbc#mrYcg)iv!zhPpF{}DT?J!4ej8M)$k`qb#>)+__>l-TC zp`sNY(;ftcLLrIq<%86DK^1I~QnN)*u}V2u7$^s^53{TmL;UPrY0Ti}-P|h-T*2Ho zHBu+Yuzw! zEpD#w-@m(mV-A1K-l~67QRZLtsBaT43Jzd0dw#ZGvLxQZ>}~na@7V&vY?0!De6vZF zqj#w=Xc3ho_^7?^{n9S|W@+ju>U0!zC0a2C6@-b9%vT=ez}UCkh~wOWhwa;;dOw}0InoMA>nTu9Dt$zkgaM}T_&wEt>ZhZdZ^l)M?eHo z%B4gx9t%dd9eflO4{y7-A7ZSQ(_^HU&HKx7s20m@T5XHTe`S~G2tWL=Hw__A>| z`j>Nefd`;6FCQc+5XuUL+ElAeN|!1`A$w)2-={shAUVr-Rgcs;pBWp3ls{{O#0C+j zXLo@$0J#R#+g(x;0O}qLNf7|4djOvSKuQNvrtmyPjs~0%8yKDR&{T6 z&-|5f4jE5Z7CD0j-~lT1DOw51fBx|2ALx^Q=Q#oY#x<*V?zy-1vM8gnMRQXaCJ;pu zNJNQPSRknf|6Pgm{;;_u^c%4%Dpswi=%JTe@5KM_@1(o`b7>t_byP)+7%^gu5u+ly z`+lD&LfMkB4Swvyc!7YWj)TM4bUIn2YZr}y(1L=>V-Vf?H;k#^4M05bD74a*?sQ}} z%f9_T@BY94f7AFmO~_!><`&k&yT3WWw#p`UP)s3K{5*R?%%GO$wQe~>sFo;!=h z(>gC3y#0B(q~QNY0@CKGb51RXpFpDkg8*aoOe97s8tCDtFZv%;oDvl7h)t8~P`W5X z6pVx@A627vX_(zJ`p48boL%MPWXH|Mm-$#7n+3CKcK7o&-g7x8r{?xNnpgAD9z0E! z&+qD&Rv2nVP*g29-rPysV5;77R#mX&pHVoShfs-7@U|fAIxAAjt@#dS)ruvOo<}&F zjj}TFua!YiABA)6tes?X{)t%-$!YXIC`4p}k^}P5oM?iu^L4^_`97Yrd zCrljfDs=MI9i-X*5Z#bD(H88LYJV`(5qFkjmSy{gd2ZQ!2Y9a#D1HYDz`;bEWe=jT zID8FGWZA?DoIN;?nOEiA_$1$*3UHO+6yTKLs=%qhZ6BNE`iKz^a9&Wn!TCV(4d6gT zkPWWqZ=;0534;<2B_7UpC_AC-g0dURER-W~j=@=m@&w9LD9@lg*Llh+R0x$&K#3}- z-W9_Nf;AkZcs6is1yF7~@z8KEZu{T>#}Q6&ei$pB3mjMR+~BweJm3j0c*6(2df8PI zLL>D&90h?6AqD~i4l@u3mcQ{(*x)K4R6sq5&c`YltP^a1Rn2B$Fw17HdYgMVjYCOUKD(VLBOQH^}a zbvA{)Rfhu_qGa#P0mCFk>zHX>S#ztoAqGX5g3)xFTdZY(F(ZKoWQC&~0tpJ2fGbL- z#+(qf5 zRn=0469%0wmH{}>VMnLuStbIQI#cUVcS4CHl)#GU@1Wfl&P)|yp=#rf1>F;)7^H+3 z+zEKmaFX@83uFwal1O(kGRTLc^m?j}@RF#WBtp4d>y)hIXhUfHn5msfkJ@>PoY8B^jy2UImzX zYRt+5w*jYXDg+B5GQp%LkFP<{$%9`#;aEw!1&ga2AK^0|^#&lS2&?A0erD{cx?sY! 
z&tZ93wUjS-3k6{2)9HtuAKxtSb8-wZg>r&&Dbm{_tNZ1@&lKyF4v~*6b2XT*^>QVXHeolbRWvx;JKl2*mGv4ga9&xJnb_z;fd(Wf-2KDVrGgG)?B z-FC-vl<=|($%~?x3bUz|^fp=p69wFqD&asR817KbN|63 zoHli{b6-#PE^hmKcHwfekfTf=*$|O%v2n4+&*w%zKL;+|^zo7z(^eTaEik?Y>=(f0 z{1=V1}PV$WhaH zUjlo0l_NGrjA)9(;ECqp@(P6gDRbd+2&}C4#6GMnsPY`a%Ex$CSP1U|h*M~gnb-(U z0#wvtQUpHEa$${lol z>SmX&Kk$)bYRBOF^Op(uwWCmF+>vMYH5{TXITTyUev8yR@gw|#Id7KO=(UWj3VS`&PMV?KKIlFf=yW@a_Dwz zZcWO5zKDwrgP?Uqyy=8ME|N*so5UxUSr+tEg@MM>n+7)Qze0NDWCV~L7l8x`79upC zSU7jFLr02A5{v-EFk$-E5&xTQ3g!S(va}gM8rG0(*upiM)$D2Dk{2}?27fhZK*hA- z(dz+#NY9uNGN|)l3iq`bYPvon{c{+~m}FFksAI7`2acT9)c_fR1=(ck(yk01(!So}_f6&F^<$jP^-2*99^uDTHn zubV~Wk_JLFA*A{r911cN-*CtOzHMA24Xw#PS&-7%2`ZSnLuFnhZ_bxy|2c@9{t1}OAGY`7J=tof5v z@&-s?{>dGX*Plkn2*1m{``Z8o_RcPA34oK z%A_0$Pyq$0f~L*Gl!5%8U;WpQz_c(bf4+0fT}r2{u5yr%lmFJ?)zQA)-+!+ko_Kip z;Q{;YvC~E?43Y1_;oGk)j~VU$p2ge53hZMg3Q#`_3Jky$tFp3n(>fnws#B-i>ko#b z&8_X7-M#&T!=vMq)3fu7%d6{~+q?URriJV3=I-I?Gwz21x3JuTM~B-2zja4(ds@eSRBzmYx8UO4EIg&h!E8`Ea;%K$?x!ybZ*w z>4b+q(Jg0vV9wS;8@(;x>e{L_tIDr>ZI#g=o}k3KeHnI!DnmGCVk`?Nhx+h+%!TIu zCngj?5oyMu@v$qRJ?F#(Di8V)OnW70SK(<9P=321;DQd&C$5=mL`i9x(%LF_7aG9Q zOi5WXUd3xp>3Fz%w@+ky1~l8lH}S}S7O$<)(vIpreF&R zW5ydk_()j1Vd)KvrM&tjU28=zP6RB&SUUj>(`9IwF_#U!l-$ygbPg&FXvHxzvE8_V z+*_MI3oVcF==<v;fyz4YOqiGVM5LKal~i!8bGc>7Q*ul#oih0G?DaCdkw{3JOfLW+ZO26mVz2s zo{(oS;X9Fy3yDCy7VNCdWANbnmZouZqsBlZv7}>|&&{N_B-+d8Go9tokh^V=B5|64 zvce6rWMAW9Q9)w+?dsWWtaZ1z!VAHb#2?~DNSx=9b}`3F(P5!bl^IrAl^ZTy$uZ`G zy4D=ILgeM$(6ZLp7;2=a6_2zP`}}`e87<&6KAkW~_x}?cadMMcWIqZzl=#0NNQ1jb zYHu42H5s{xpx?DnZt>=KIxEz!(kaG|K!GE_8=q^8Eo`~)S(;;TI zaILj3IxV%7=1<_b)3svG&_w<`2b^j3;H`c)vHH3D>8ria^ndA(U^qk!LNKgx7gSdA zWw>jjC@PEE3Y|e4B;eS@azG6Rq@9IQMqG8r@ygUsFq|)uMV!UL`70W)qtAAAcerilFrAs7VsJJtKDOoblWu*m- z!xm{w+1yG&+RdyX-H;Rf$#>)h@!Aa&<9Xw)P{1 zftSTZG9wq5!d>~}N|lb@ae6_-U@$Dge*N+TP*jsmWHXVnz$-uPje1~qz6$~(&LKy8~GI}E`RTXMq|#l$OrJoA+ibO8jDXj89H=RD|~&UJ=e zQ>l+8ErH<-^`|XBT_iSD*tw`rTk@IA5imujEox_`Nz4?ol=#_vh9dpJ>Q1F|*upA) zsKA}LZ9D7$pwBAOLT}p(m0frvw3@Tfhdo8qMj6#*8AuQ-PvNd?l(AG+PNey11lSja zEoNH`{KXAh4Qp5?D-EI_v{4(@l`5+svxRk#3`HzMn?TdK^ekvqtngfa79TU+VJ_Eb z=sj~~c;shR1Ilpi8eF{=S81uUN{1ij3}lh&JT3Z3fw;OrEYBtt^uV>XMGIbDD|Dd* z4itGB49Sl-Ix_PuSN!9!RQ3Md7$W-$0s(_Ivi!xgFnJGA>)Ik)>Rm@1Bz4)F27}$i#YAzof>mVi+)p+}Q>#2m@9`EEPVj$THf$ z8eS$Y1y{VBZ1G_KJ`s)oagrnh{u@`d-4wNjEC0Mm!U{;;yY;+)jadk0j&+{M1R2FFR*ZrT7^)c1@$bT>lvdn)T-{voS&g|(v zanCmkx~Dw4%b(Amj*dV4mmYhe7BbIsM_ZNfQ8h6up=FiDOnol>FpDjWnw08~TD!fx z4MRKHQY%-W`zL4`N{)j&kg}3SXX%$ix-(A;WLtr#e`b)DB_%P2s1t?}D%f2Raq=F& zTf)VnyBEEYQl~HVX4F7KhqDvjJ51T6*MbPcs}f_*S~yr9=@49E;A&rri2c?m@FVLV zM;Wx)w&okGRaqW3n=s~?@eBkAEFX;*#V!$XTNKIfDgt4cuO)X8?i#xiW$3~wj3z(X zEP2aoR3r=Rev|YTms!%LmayHj)G9xUv@BkjsP>nm=;g0pugt$5MSVk=d5#$R4l4cb zD{(BoS#Y8|?(RQuL{^6))5qo3pZUa9EwS&H{(`-XvfrG*P=1S2yJUT=YoZX}h{ky5 zXl?4_M9`gH_Lov00wzezpHg05R+Y000?zSX*=J=`QDwn4vYZ_(SuDC|OHrF-V`6o- z4|^9EN-S0C10B)Q=Cbj+dW`2qS>zRk!o|uLlab`&)j#obLR|D7_9L)U_k9(jf8MUgKrNe31L2bjkHaxBmX`Vh=tPY^8gpn z=yw5y%bpOqe>DJ=bf_<8)PdRh9wcbYG?%3J(o*_A`o9v3ICN#$n88anfuujKO-Qpa zu-riXWMo7Hxe*z1_8%kXUuDMh1BqwM{KLRoL3;7ZF*O)5!OljomCipL7B8NHpv1fj z4o3IFah8kN+(;1l>zH+CGMVLgp$k?4N5?`5e}McYrp9dCXtt;_2vKmE#T&K;uqx!2 z*&7&GBlwEYs_!SoUb+*6kXQM3Pl;=})q$LB!5<=k=K=sg09jQ_hB(HtoO7*6Fyhfc zoDytoFS?BDTvwKly&$sIV50_aIo)V2)zG7J2 zEZwyEf^OABZ5pREr_%0ks^RiqbN>poccrOcZTPwTmWA(ouqear zv2Tjoif=a#>YK357cbwnuK}|vI2akTb%Mih-#aTys~sqGAZ^~RB+^zHo!DNBRH@Y) zCszIA!Sb2&=gT@$nj6K-^N@r0mjc&|Q&%T$vv08{Vx%GtPh6BPwRh3Hc8eKlqu^5O zTuUL@+J}kXtjw((<{reola=D$j_{hG%*Ai^iQ8YarLc8Ya9PK^<@GkIaw~?D5Bk0> zmBm2vA=9fqi_Erjf=i^0W~2>*%dNA>PFe>v@1?bM?S{#rS+{TJc{<2!>vdvm?UXvH 
z=xSXl($gvO_Nqm~I84CJ5@aRh|0*fDf?Qv{HS=~PEFS;WGvX z6FQ^~bk(8)u&nRHcV#hH(uo~ou`#7G93?o}oUBhn6*X5&A}1pcVR8bt9FNZ2apUZm zyFqfm*U#Fqsycoej}sI5Y5*oe;Y6OM`E8NI%01P^nWKuP@I=kb+n%?!W^t8@u4$A<=AmNuB1 z^MCnzIH1oull+%LwPsRYZ4wxCHy4!b)s( zxSL&j%Pp@fE!ne z)V~c_MtnUfDtj{5>6ofi;;t)mmjKd_>bYOEvQyH(YU&-o7`0eQWbV2W_tN>NCl^o~ z*(M@thfQ8Nqrh3R34RYhT{~`RWf)Py#_I;!FtfMNc|Q_dcUJbqx<9ldy??cz(Tk5=m=VANOt^D%zMCi6E;g$* zYU#@#eQ@}>NukYfk0ej9kTEYz%e4kG#M#kL83$sOxqND5tS-$;7>o!{yLo!m>{+0 ziggrE6UV0#zimA>fHj|9oHZRCbyS@BV(5DNAGp(pFZ$TqtGW(^obT?xFTejec}er% z58xf36F!R&5zM;gQ0x4D6mQ_)i{u@Y;p<+4bq4+#`ltf5e_^Qk@$=ac36 z>oYkH`4s2fo-ws8QVz-tuNjMpK?K4daI8}O0;zV{_<{EqFaDnd{-l&>NRBHjmEP^w z(K6%xOXqh_&7VFBioMkB8fxks*H9NXSsLM?dkzn!G2X)?3fV{qq+u+SPBZ`^0(eO7 zB_M=R@Jbwm^gn#pdim(b8bF9ZmEosi!$-wR({^CC1&j!&v=>Nl9;pYuOw#&9s?xMmV2VJRA#FMQTUCOcoD&@tsQX3A zLybfg;R9E{Y2u>K--79#6Z5hE%8US*TpnAJ-+u6^ zX2t60#s(vr4wwM-lnM>nOe09izT=z!a6A&k{ey#QJdV@K6f49=WqWo zKGL)QV1J|zz67#XEtoso(p4C0P`#gqQ)ho@R{N=gq7XTv%B+(9p)8QlM?JjW9Ml4h z)T1+OLC?Q}Xh@&%kc_-u8<@btt<;&g=~+asJWfw$pXL>l5NJrWi)`a`TsSi*$D+#IT8RkPk-`$|nlbjjX>|%!D%ix87ORuQP<;zsrSrU~nv#f5U6|nGRMiFH4j?jpjo{6qX zIj1C9nA)#UoC;L`^#464qD~-fPDY&siS|yF8m*AbMpInFI=6vhKXvP?8V$6HXiwE8 z3*W)ik6?Ne?#R}!Sb75zog*e4vOzu8dL0HHh^odjy^;9&i*@ufGG5}{rj;17N~7r| zHRJolaH^S725hCt!q}?gF<6UWgUXX;5+CJ{FlLJ)=59%Z@;aPbxu?bc&?-A_0`C5J zO4!J*fn$eGyc`<23cKe1BHXsiO@JtbOOiw>)pD_i zul!09Q_`&e)6z}rCX;6(gB$udoZ(f zwY_cO+W5@k@I6|*MsB$cWb0%`rNwz2hn{Ts=N8oPtVfygjZbp__&;B@&EA|Cow(4T z8gTFT+o;KL&F}$WLvpw1Kdi1aR0;Wmo_axb_D!Jaep#P!2eW3GUcN zY#+J&PVt?!oZahNsiZAw`V>K9d!5F5h4aIb=k3zzvehNHC3tpy{N1&;i^?h3er%H) zTNK&2?)B*oz~(O4os_=0c;~wIV2WRw9yza}yQWMvPBBB&XoVPmT_dcg5t;+R zFTngzTaG=^EyUi?T&xp3sF^5hIXZPibzV!@iQbMu`}o`0PGjtAL5jWL)+NgB|9)!M z)mwkZCyn*@(G8FDbm&+!L8uE$E9L!0l{{2eAPXNbGP_4Ne?4G%5G*^~C|k)k17QoD z3PEe9&0hK2+Yzg55=sivV0njOoTIR!q|4|2U+gRkHW!ozNfo67wJz#=TGTiyh>94s z8tAht%r`uTYamY0+1NHRR(!fw6gu zJQ$0=UcNrP;R!$YJ{%R(Z0h);KARjI2p;Am~%zN#^Gh zBh8Gve%BM>&`oLfnHG(h$McZ_j4Ph&$Te?3A3w?Xt1Mg(qgZz6Oq5+F+KyF?Le&}G zoSYrH&Mj2BXU^4ieyQebtJ#H&uQk;%FgyPQ?OLISw5C5hF4}m{We8qLr3 zo~s%P_@N;gfrd+)i$>QCszaqt8H52asdy4IkRzE~!=u7jztmY4kZZ5X#^Y@*;%}2Q8-Wecv$m}?Yau#}GyTvc zfEi3p=R{c37M0FHg>{ltERvbX=O-jahB*P(_!$6%iSWZm>M*$2yeL5s-ZzcPCPSV^ z)SER$;#_IolnhRgE$zJm9avb$Wz?>E6qRK~WbTwJa57__=Cbws>=BP_^V-`$Rwa*- zG?!#r{`vUXl&nKzpPvt^uV(GCXg+p3vqJ^*`yhlJIrc~cj1OCQ4Y$Jc$ zc^5hq-!OOOzk%&%R_ABO0e)xyNn21gjTaU*jwx!MjxkOM?r0`Jn~9r}KBuTYH#ZR> zp|?J{iF22Rk_~`AzcXMrIbr2Z4x#;x@zs*QMPDQkntUaBrHV(hQ0;Nk4G{kz%O#Ay zp;=YRvKQ!KIXw?e0%L=zsfm$D+G168*c^MFJjgsMR%do3+yQGC5&gzHA;y~I<`^#K z`0L{JsDyi4hGjIBkSud%X{lX*Ed)o;aCogvrR^Z)H*gm_41)b67lr#7_j{f;GN>iGlpVA?d=bU8t>9suIBBU-7LNEE6~DT z))3`Yeo{yp-hhKG&A&q(c-iYWz&_^r$h&r&aEFvxyX0MY%|l*a4h;kKygylJBpg=; zh3b0fF%0@F(H_S$sF4F(w4~EoYy%}q)I@_Ci=GZj4_OLwc%$2ki@u71`wuL_dOVVO zaQ;l7Dtu8?NX6Cp3ptaTpPHs)cEO9N^;Tz_U{CT0wKte=ZpHyxtn~KQJj~C{Ybb&2 z*83te@@l=;lkonFb-0U$B6+NferBkvKD zYH@8W)3YL?|L=gt!yHf9s*cl#`s61JO0M?bU$gB;>+tsnfC?2DEYxF=KS!`$xXxQo zW&C*pw7N>itOb&J0OPjlKvMDh>|yt8H=ETd{~iAFZ}qf6=}jIdE`mgxSde|nPp>uB zJNySM{ZaeqCVI;Y}%=f3AYzYsFNqxN0_hzrRkQ~nzQI&qA1IM^B*kdjR{vk>Y^ z5+x3XZtnGGAzYZsT^z7UUp;k2i7dg(&Z(Skk5hV`b0$p66s-H zKJ-XpU{v@V{(-Bft~R(hC$A`|-!IV7Qb2ItLty>%Sn#CHC1zP@!7OwO`X9BDV|*{R zcg)GWw9@Xa6(yA9NE&63?n!y7tH3_jJd@}7Kpo`K%_%hB+Sk1nea|pEpmLpLLhYM- zGaYPg-miUY9nDtEhmG{nw9V>oO~wvffq|KGw4k?P{{)d(m5#j;YK5NT5ZXo+EmWI7 z-QhHwa_(!6+bFR}wP{4R_Ptbs6xIVW!~HhhyFh)x`gyS}j9^`-T#ZM!7ebd0qm#dp z*XFPgOgLPPehNv79?v4r73H`4MGVVxz~&L{r&*jg{s!#*SZ;iTU!4V*MT(5P>8Z~i zv;%J&+szxx^E{H%xFaIx`AYcTtd6&@o$U+{h9mK6Vj`M+(I~m(xgd+$Cn9A 
zn|>kgkRAF+oUb%yXWajEoWDKBgL(C?n$GE5rX908tKedI0P}p=9X0sLY!-&uoh>bK zi8BU;`!ml;E01Jlz^|;#2 zQB-DgS(sUaYU}+}Zeh=Kt82yOXY|GT7s`w8$f_$Uke{u!VOKUz3}#bocS?Cq?k4oe zJ6SHnFQcJwaxnxSL+8(%p-34%S#_?=J>N6`a#`ic8as@23c@xtM?6m-jr(M6%&lFY z=VN|V%N?gH#CnJXC%-N_rv~w8ysI!0nbC!ldpQm5A=P>BZV1dITcOXxoY_D6} z+_Xg**uRPtAR$gY^fP7o7A~bOn;*I$l3r@78PD{1a~U|&pz~XW>J6w98mSv|fjsT; z3k`13YXuXSf?VV-?}8`}N}91=wm}(%Mp3#9h_`o2YP51XFg5Dk1BraIiA_BbD?xg7 z`heG?0Rpo7&Z$FVU(bVl1I_6q>Q!30P+1k9xN`*@rX5(X4q z5{XyeqY{mWTgQVISa2NN-Z>sL!c-9K?p-3}rnGAOhpp0a#Mga~dnej;ri%kgBJ}hm=_GVE`*8cz%k9>?S}L^Ih^wdo^6sOzD9hY; z``4E*-^ggPk+!k~i2IMUFJaTJw`s+h&9AQt+6k1wlPqGo@CHe*?3W?#G2Mz&ypiY& z5#AP4c88d;;NoQvot01zmd2AXJLI(dV`b}df3$h~nyYSM%H)!r>g>e7ZhXGmv9E_& zk$jH47Y0p%gOm`vNd&mQZP`ni^sH$ry&%aU3quQFl!&3slLO$Lg?#X}lg(&jn9JO} z#r@kaHH_fH=nwhq%JT9IgRuwgZ)88*zw&bK%3uD4PyN|n?w$plEWuu#Qa#;OxzMxI zFnYho``0mr)}5Pq4(=4rH4g>i!bnzPQ^>vqUR~yGQDXDZW3?aLHkof93|26qmF6Tk#8{?y1bX+O)1!Q45QlDU zeyYdRXaZk9v@C5|09HiUfo$YdNZ+BjP-*;1wGJ=KVT2{jn1UzDQt(XF7L~drXNWQs zy;5ijm~w~27Xu^$wY;eFCT9+_=8E=hRB%}0Jmn*Icbpkn5GGP5APt7TV~LPkq8t3- zVBs2)?VvZxoF!F8|JaF@`Uo#3pVEYdqf4s={If^&c z1akXQ#4Kfp#p)Uz43%O?f)h`w%Ax)K@falxeM_ARaH+37wvQPJ5^@OM#){SPXcY)m zy2XlCZBXgLV2;%pa(Di(sSMj)e4@_TEYVW5pAm=>6vB1R1S>quYR0yMIOl!IkhNWS z2nD1$l~-y>e4uBxqXy}2Vb@bS4N)d{Jf~DJ#Jg!&ZLdmU)n_DDkR(!D$~AAieD8x& zvsqKUz468$U;fsj@%Fa*rA?~cjg+Wv>zdG%AG22>iHaySn-V}-nYZiioY#(^OwAjz z6-ibGG!poRnS+ZqlR7!IL-sNGDAuUQiWJ?ld^*Vy05uCEco#09LeGSOhF4vtkPa*3 z1Rp7NRxPj^RRTMkV(XqEv~oIGa(7avC!xdE?&*~EqKB)gV^FyD=~el}3}Z`8Z#!si zyjV3J#Jq`fn!98Uv=gQ*D~mwS$y>rkwOCA=>w@X97%L1djjC(!m^=zqkBva_X~1FD zTZMTV3dfXE1d#FEf@CRB#Fraxj$61C5fqg;u1n5wAm^PC67)V2cVQ_;V6iE^BqU7O zh;v2yqnZH7&P8bpQjrXJmIj=mmFX#Xh+4K4*jiDvo~{)>Y2O)bO0|s|FBJ`NSpA97 zt~=04UAWCUmoK{XoZ={#?`)*$6**glyMkU%l#1oeHqG351F$1$^qT3efk`6dOP9Ck zdvN!&|9D%c8T0}4Xv?geHCvnJmdE=xw@dZR13KL?{h7F7>#}+Mw&JME1v-hm{%-xa zJt;O%Zc|<*s;1iqLYl8!pL=O3HdIDwMQDR1Wxq_9#!U%IX;W@Ch88lT(RA$fUB{c^Kko25Ka{6py;zd zkB}FotyAcJMlk?UDghV?B2XJ2hhKFi*2x3dT_Oa5^y}>7m|Y0hLhF?xkp;!dcW3^om)w~tPVHzinL`>6F zGmOrJDB3<4OiF{4iB>{OkWls!GZjH`BL3PTmR<`UjpJlvrXo}m)+~Tnsc^%X9YK^D zP!=c(aEjbQ5h{ruBvd}+y!!)C;o(FckYF~jE%ZoWMRl4q^3kP<3t7k-h(3VJn}F0} zNMv5IB=B)0;wMBBM$j|_!HcI92({Rf$cexzV?a7uw5ft@>hzWoN114((5Nq8BSAwF zVM>zowBtPHt!}SbNe*9@8={3LyL8<*T6>)!NOWZFvB3pVg)VfxZ)EU^F+r z)BLhcN!@>ya}lsWDEI&pQ2kD2XPj58wnMnt;|KqH za2g6gdi#PPXZzhg|KESVDO)Mp024d9$QxPCm-=(F$2b4|=5iGql(s4(iKH6NwB~Ow zPo3$6bE_0Ywig5O?Q?!QdDD_{cV4wLH=c3bw2n7c>uaauXlff?2af0xSl5lJe8>wD z8)$7J9a`kZg$5|Hg@DPhl#P?J#oENXNf##2>;R`-yT6J6K}x~|ELk`N5Gx9(I8Re6 zR>%#VH%m7hF;N-c^T#0s*Nd=NF9AwI1P&V@q1{jkvJ2$}NJz#LYPpCb3vBhIJxbRG z1->E7YOCr9@r>(t0We+R-K1SbMPOc0%xPaCswlpaUB%!OwQp3y+cBZjlx9M{fojYHXhVd_k#|2Q} zwo-PLhf^J#e?FanUokKzLlDi9F^h;etHg#pCYLaxo}f;}V4ZN#n_5}UOXc5vpZMt> zeoea+@}g(Tzn53Z-YHN_l@uOt6>QE>u_mI_D_8I&kpC5L4$^Tf5`m{R% zlO%&v$z-Wm!3hk3hd@){JZS=y&z%K^%efFa(X|waBcaT&E1$HJANgW(v3DPXB#dEP z35+HZy@g%Yb{Le%?<7zyD-TdZjI3gNP9gNO=UFis0vNKBHn<#)e%#`&twN?u%{F~@ z3+2Z}JGMZ`Tx)1kDc^|`hVUKAviJtkvndYEMYN+`Y+<>{%lZCSt ze||cvch0$&!M4NBxq>D;cQe>_7jqX8n^;_9f!)@%^Ww~_XeS)amUJjU>hFSvrCiG> zxIU^JaV+H?*sqttnNp%%n8MNBe#F&LU-&YitqdB^%^W$>Ur+gN)(fS@;pmKCH68xZEGgrEcqs+Y=x!LWv{{DlLDRkPX*k}T(=|=^y z$n1CXo_~D0@g`i>3QD=yL{+w8-~t;LbNJ-+teOr_^hD#s zZ8gGuObN=74$86op@3zzCV--rqDepIECSf9MhAu&9F^>JkClXB1pzK-Z_YwqV6jT4 z%(eoN(?hPJT7(|utcER4g@}PC9eKr>4sW2Wu-{5_gkOFKmAj9a4dDsO9?Up+#25Xw z1@suE#~c45D+R~sh>_h_p(GQHXfIWA;{tFMVtoy#9|A?8qqIYlAJV8dIblzQq4hi* z0rw#Y-Yy=u^qMqF?nq%2;*z4-@)bh?I6==6%;kw|K6E+Y7n)GL%tcakdrw9R?q_vT zy{wxv^iDKFBm;d`YjXW}Tvqkw)G4#E&}zP|)}WNnqS|OdR3<7)lU=K#a()i0BP)uTyx)WV(8l}LBQA#rf%aMGDD!c6p 
zaH^uN;Z&tnO@q2A>;_wyuA4b3bxV|YR(S@E%tniaZa)q~MHr7$u`eJgZRKjTqD1@f za7{cU0-h3WgNV8hHVMrtz?jF(*+Ia}2zK~-wvAhnqq9?JaUbRe>!AAxH%ImZR+=po zxc*+3t@FsyBY@Pvdk2pmNiv7I_Erbz5_umcy}>_jeiN?Yl)j-Jc+>V=^M%JhKQnz7 z=p9>PH>$W>rCEtmKP_oY@%h?^TPmS}Dcqv>J>J3od_~65D@UEVBKK)6}N7Y>$ z^T0q0T}GWpokjN_&mo5bO6Hw`mFzL3xA`^?M_b#k-@O0oA6dr!eP#1+UJpR!h7%*{ zkvOewl!u^;oOR^K++5#%|BP}inWL-yrJ{HDPrOVvI41SQ&8-;kP}@t8;sDCXgpy*BQIO(nAjDxB!>NxH3e?uwMM0<0hobA)(Jqep>2S@ zfw3C`z}YI~XLWkE-OT>c$wNzz(Op7JjsUV5j#^FGGXZ6#J@GeH&mdqcSmB zI3P@eR-FRa6zwbahJ2%E`>$Us))~>M^sMV#Yq#dER*r00I|TJ7OFUVfKypEMJ3jHU zh(@v44DCVId(~e)=ai#X#YE0v>78LDdW)SXGgCmtAL<%VpM^_G5U!#~rz<8Q4Z6Ya zpt&M0-r#~T;F_V7FJ>wRZzy)Jh+xP0gl1u@lT7UA_{X+GU3QGE`=-CmsU@2B!YBNA zKtGKSi7Y#!8)>_Lcm@eWt?J~!=zL_hj@EKX=@259TMZ3~nIs@GrD6e^XsxPEDs9(X zT=dx!c3_z%nrHr_F;)$4kmIsCS;UUesN*IGkW|e~I1)&}E0Jbgvy~~-6n>ZI!(yb+ z*=@xvmLABG;**S&!%4t<%Uvnn1OmWmP!tIPE4-p1UCTbA`<1`^YlAvjYikz_dy6gP zhE2*UZ%w;FU^}lc%1dM&i`jWQQte0JQCGx}e2?3Asaxj!t;7-?l-hhG5$nhtITFh; zB^-VQ<`RNEld#uKkaWpqofV3dbS$y4O+I8+F+o<6EMu`|Y46*371LOD<3B-Hmnyxy zhhjG6iRrDaBL$d&T!pker9u@eMwjDVxHF|!78yZM5@+dDwA~AwDB5Q~xjLm+8sAT^ zyh~TF3fIEmi{xjWru6ay?Y-VvMRVHV3rB5tHm);{!!FL^a#3-#Qzu`ay@s#dA!-qg z;cA`mpM$XJg*;j8@Tl9$@t_u_+2VE}1r(Y+EE2J79L8qk#FnK3VdKK&f#yp$TCHNb zrI;Trwks5EZNdzjCUDGZ*B^DNMRQNK%oz%^SIR-RGG@(OpT>2Gv)=+65p#B0yok3t zpbGQ>mCutP>@5JDpDFYX!Y#=BEP*OnUZNkqd>8pl?#1{+_sPb0<P*>I}Oaggf=X=jD2u zjwl|hJn1ANde1QxQM1#E{>#+5`XIMtq-oor8qtfn-~VcyjA^_qQ6A-@inPnGAM2_* zH_8M5Wg$eDqXKZ$;2ir%1lcs zs1j}`rV`9e0XX3t!9!Rt9EbpxN&f0;OuIO`phygZ3%>*|3PV#RQH2$=&{Zu+5amYD z#5fW{po_8|&%IPkF=Bp~Yc^*lwQ5eSq>US$AH=IK6s_@47J!-(mY*4_AKW#lNYLw? zqGro?W!`Bmt_&F-VmOhj=`caf3|oOJ1?;kVHxU%{0m8H)Gbv0vUWVc{?<#l#imgxZ z47@2l`vq?$nCD`8j2r)g%<3jyPX)W)wxfgM?CnBSyZs~T$9l)U)5h+#^;u#p=u5;b z)A#x(AN&4!9e;R?uL93f_1ZT@-w!*H4g<4{Hf-CZte|qEZ$haaSc)X_S;H<`a@xJz zeG;S5bYcXzbH1fZHCUt!m99eNm0JpXk>ywTSd}LB{o%slvBZJ1S#<8K;v)?HUeaMv z;-GK&g(43rRhNPZ2wryBXM)p=b6&}ZJd*1ctBe>ARK4wwty(5I=20Wv=kGH=N&kgL zWK~Kbp zkSm2QdV6r4kNT2$j2P41a?Ub<5igJKzjNyd=Hk>#EVPJE8Ve}{#UW;MBK6|=CtKo{ z^m1v3=@^qe(t{fZA!l(+&2GKLu)r_MqOm$C9Z}4vV^kwwXSdD+(m=D$vAa0ERw5MipiVTza?fk=iSS!&fBsUq}%d@%sFQiuLMKZI4R93PBzE(!dxSJB(5nR zbj5m`C)c7U7*|J*^wP=xP3e5~ZlEhftNX&?#q4AGz2H2jTcXQ3HBacxdLTI?-d_n_ zC9cSFox@14u8^^FdD8&)b9^b~_IjPvKr3{xpj8^$^A&_;p&Je-94rH+VL5)rmEOaO z@q^tMzyY! 
zW}bHmusXiHkNH>;F6RLNqErn85(+`Ke{Oun7SIflS|e4vsj1#VYr?v_xrN^mP(I(` zKy6aGGujW3Stu9su3pSLDl$+ha*CrWJE2&Q8Lk@jh6Lx&rn_OZupxcNfMH#Kcke*L zV(cevwuTFJq9Q}6-clDCh(N46+Qvl!uixU6JKCONnClhEJMC+ zm7YbP>$t?aMwlf_YRYanN=eRvI&(7CtGw0}2S=N8l!JM$7(aa(x8)8{eKdsW5V%=xval>+blM_&!xOKt=#CI(B|iwc2*}C*>6tx0T2e&WWifg5QZM6~5563& z0cNtUSZ~o$3gau;7|#F5`m)|;?b(Ko^bh4Oq zD1#+5wy3op*|R}Wla3X??G3(qw!aD3kZNuX8wnhG%KUuDywby~CanT#Mgue7EclB} zl!`?Z$WR+va^{V{9BO6BDLZoZTxw^UKdOzguBnQfl4@fnP{$pn_cqHwk}cPBr5jQ> zL>{W0Llbmk3YlzC@U$fgd@`~av!-DnGeyABZBmqyV!;&B=?DkY!FYpKwf&$zx;|Kl zvG+7(SGovV*RtG1V!8tencV~)+cF00NYQuGwT5GV&uwk>y;?7mTA`EC+x9#bcIDBZ zSg43o5o@MFtE`cg=sbVmU5t>wM5EiTzj4D!N0D-o*ahsZ0qe6DvPQHLj(r6LBh{Ii zS>BC^Vcm|P!L_M3XtI$Bi1p;JGg$%eF-uwVQP`Edt)w>+JR5{3+_{r z&{^9K8O!7JP2&(zIrgm_kuRK}HXzhRR59{1m1a__gF{=$WfSYy2^C!uj#rtZvaK44 zu+*Ox0{m90atiuY04n+g>#L+62flV<|B$VDOTJaHJWo-QpOF`YpGjZ;vkr$ zP*M4$`wPQf;)Fo=iRRrH6vtI@K7Vu#nC%VLW-b#^RmY5bObO+e6O=;Q`r^%*+OhF zU6^bBzt}DPeRi*G@c9wg1EEmB_W9q@JzD7vb+7vW@bM4jEA@&OpjJaJLxowIZP#yI zE`3AAQJSa!aUA8&+tqnv>J-+4ToTjJF#3zg>Pqz1wPVGaj-f=uwIFp#HeYETox*lWVXy0$mRj}5exQ(Z_l)J~(R4mAOx*tE2pijaK9V2x+p zoTEWa2s5zyV42;uwd~xAJ*rQ2!Gp%B*VdsXAf#}lDN_L}sp-5k$G#_7S+%a5uX>`D zrWV<*Q57M3AsND@+y`j_8bgN&RH+GyZ7StOf1W@@UWM>5qV(K(ygfqgydkwfG2%5T zAJC9}lf+x=Vf>r%lrmd;zFC0RmIEvR|8)8dZU{2i{E|Cd7>IU&MB>#Ym`ezx!X^2o zLDwiDH92Cpv=AMyrIj41rH!7MTiWsFT18)UeuO5c{HZbwG8wshs7zk$Q}2|nJ}%cf zNlj4RxRx))8tsv2k*D>YlV{x8)8;JVTje==soi&SOD;xB({9NB58+QyY%nUopG*h# zT#hXFe{J8f2Dmq3!B+;Xh@>n}w!rt|5O2E;{6O08A5{q4Ldv7qgDb&3loLDLP7F~S z#M8a)y{Z$GXj0@5bY|ci*$xflp2Kb6#gi-Gkn!zHXHK14I(M)<0jAI28nO>tj)+!V z$Zh`wrMCac%Kf$zkITg%2?VZE_i4Q?mieU3r_#ERa7LLuxF(i0lB&$i2>8jXti>>c z5gcksUb_7kwG6NV$_QvIMa%>eQ9s@_AbQh0+Prjavz=~Yo4zr>+63Q!!P#j9T5x8$ zJJSmCTxe}_@14r>Ldam^w*?bcHh~hQhuWp+S&;OU0i6nJEroZH<(p~}8JX(;1vRC1qP*!2Rfc`o^@%4ogo3XBclj;FrJ z`Pty78b@$tsK1aH(IVt=t$XW3J?o=S@*(xM&RQZ!5V0R<|2-`Ll_)Vx#wDf_Q6`BB zt$y3|AbO^Pg&0Pn%Ae01yF+ZEOg;Bd1>;$kNie>Np1M$eFiD260^B+a2~muj^e&f? zb;#6?6xY2f6vVqA6p^W7obvvwM!bk-VV&y}fXS$yCtB+21CV-@M%qjKbF-`>eJ-q? zbk;+Ps|nmV3zJo8b3}&3wP~vYG>|kD?>2Vl00wBPZO~YIrWWTZDWpJVL7w4TtqiQI z$-%f|upHXJo3>{Z&u`5IRI* z`Vvy9RFBh6M9W8ORSLw!I9V8MPFVX!4WWXFB%mHVCz#yXMkw$>gFndI3(qTv%D0O7w(U zemKR7UQ?4HbhB4TqnmdUVC=vBiUjpH(JLlM5fIVvK7wLAV_dq((lYq$GbKz4#pDMj z1!8IBOFQy0XqF7e8H`mUSeW9)8A}dm&baCXl}9}3v7r<=;-<%gB=TwSn#@Fj!UA>N zhCI{>D1BiqIem zBoS^VZSfm(&9rG^xl|M}MJS$6h;(V)GpLxoCq;U|tAzlJqX^vzPCuKaBRLq91=*Jx za=%F;LBb>ZyAy3a@o(w^R}OGrf~2VM4-(EMCV6^8OoM^aR8GD;c-o%y!dD4VV|qFFrqtrVx2&1Uhx0 z5ZjB#X_bNCXR<$7>KkG&3!BSKe*8Dp$e zN7A}5c1wRyV;~t99j*Tv%-qKj+8`hi#UzMp)h+ojx~X`QP6S6IXsakx>C|Pzf;)=H zli)f%`c(fDuv*$diONXv0D8p<6`wn`=wwW0s}d9_s_OK?gZ`T^MRwfBTg0ptB5N+l zQ&2flwS5v8Ona@e#rE&a&`b~yh*Z+rV~TJjRY6eoMU6w8YLLh2t<6?rzx$bZV0 zU;kXEhgyDVUvjgumjuf*^kQCV|9mJ({&K=?hI7ghAWw;shTCojj}3Z`<})#H!XAXL zGh{-@4~kS&II8xdxD6~E+W~HZK4ES`b(~wjt;i9m`)bck5^@ntBJi3H+lchk1~fRj z@v2MF1$?MRf9`}nlP#N(#ceE5Jh;~UIjxMH9|)2v>R_M|63V*>AirVZiPRlISqywL zAr0ag12NSb0&kPkmp9{;>X}s^K7KOD6h;bKK{xA^4>ogE)hzreOaYDV`6hcW+NMYn+ zz7<>CCIO+Sf$Sdc@>8q-29-!*IQL4A(E)EWoG+_)8z&U}qMT&1iW7@OZyt>ctVOrg zToHOcAQu<_p*z-U$dxMLQPT{VlQ%G3BX)_|#sPDzLW%QJ$Po*ed4Ovs*} zyhE`Mmy4<1JOq3Sf(yUY>KiVD1P2j&`|e3Fy9h$2;xSyLU+;1H9&vF$H0dtP*IUH$ zx`|AZR;s}XGo41jp2WFrOg8I}HC(CmpOTalxIDW+CJt;Y)TtbxA3QcC!ZZ^Sd@h^) zg~>hv#=%oJ++n4P&K} zycVpNnL8>hZZS<~k1!(G*v%JFPAKAT=5isdr-%6?LyMz4^d&a;tFj;wsj!^GKP!vH z5R$1Qg13eLHlW~Q^MZyBEBGT=$~zIK2srarr`<4mhg@-_=+$?g)bJVN)o7&IAT;N! 
zY%(xbRqniuS1P95RNST_HBb$BI9U)Rt=2$u4OHVO76;YQC zcbFt&+l^Qp%{7+n&ITBVS;b8SD`rWntrUcB0R|gIcL;P{n>)EA*q6pZre04COzq~zx)1zZCl&D(*MlbLcdWX}X}8fM?S(W2#O zPz){tZZcW#3uSTCX#Ncvs8)`RB_rvDKJBvfvjf@ot{7Ci#Uq8Oi)XMK$D)IE>ZG}T zlBlRRos)KA)T+6c?FUoaH-Ky>37L{ek)6JD-N786f-_abU?_4{pU_l@gb#T);SFZO z;QIYZk-;i6m8ZZHs) zI!6(ehUr?AI#W%i7GIYHm^&?ncQdL%kx>{L>LF4G9Va$2hX6#M7I28_ZIJK%6Eqbd zTUhetl7UW{eRAEHI}`ZYwRJrv5OezRHURkyOz1;^h6J;56?1 zwvGs-$+!=mqxEeie$S9blM25*o3^X^?@sM zv~1jdWZvoBn=nm;VIB`EP~arcstePu=M;LFbq-n8F1Iur!!_yqonIuVI-#iLt?7vL z%TO*2r>N=+gr*MjhV==yy0VXxjqw=4O=%be(8VJ8x^Y?)t>F%DxX}D@Pd}?~Fr_+X zlm@xbSCsMfQ^7s!XyJWI=ltk{9I?JmN~>X~Dxz&J&cb=;K+s0qVGTe7XTh{$7*U~; z9v?9n^D-`S{zA{&>SHRftBP)$W=u;;{M9qplwDnHpxW11y?=xn4d)7M;%IcLbHrm0 zF#~SOqo|N=%3{vLLKRlMDfLo%9PS#&tqa&&<)*I@{ftV($+w}442M=(W#3y#XXd~7 zlify7Po;n>I%<{r$>zHJw^DIACbKo`)%(p9sF_5(FmRyB1$Mz<-Uw!MZ1l2nN98w~wI)%-U!G@s2fw;g;<;_?SGD*}A}V)kmgE zB=e*nY=OtmBysGI)k`(+GGR;&x29Sy6M#C5mjmM5U~thB-AeD1ux!o}o05D}lwfwJ{gPqu|4Es<95Wr1h#;iARtN9tZ`s?|X30LK^J~1wpQAC$ zcCkAMxUl;zbBzz+O z!zlc?Jl_2^Kmwu%_c8OKec(ZTU^baSFn=%~X7=tVcF-`7tK%3nn1a$F z+dNaHQJp@MMj~q6QmHlC(x5}N6Yj?KoATKwUmlspGwqC1c1p(8jOUybr=4}lc^6#t zT&im>yJD9#?`(C`bvLAY>9rbs88T(dl4C%wJjAD@|0dd<{*s)3q1)?+`9`&M_2$f5vd6Loi&m&impbf7SyS~D&0Dl=H4PuK z<~;82Hy_N`9NxRXa+)R*GOw}1CE!(baKE=F_QioX6i4D%oQP9#76~fxs%?J{T)Y3F z&lswbM;o^p2dnwVD<}$7Mry_JpmlW(gLe>(Xg}@M|2ql)lkfvNVegZWgg4d-m3T}N zv*FoJgblo%{~MbP>P<)(ikjmpuL+u(XW5U^@f1#wCuLBX`R`F=s=HBU7I&b^EDxYF zt9{g&dKZnT`=pwP;Zn@6hCmO$e-7K>9y?8dWxo>U$Nwz@SOcn^1V8X2{3^gG+rJTQ T)@X9ci+<3aS@reXfBhT)8I@;R literal 0 HcmV?d00001 diff --git a/docs/stable/_static/fonts/FreightSans/freight-sans-light-italic.woff b/docs/stable/_static/fonts/FreightSans/freight-sans-light-italic.woff new file mode 100644 index 0000000000000000000000000000000000000000..beda58d4e2189693806b79b00cc3c29575a308fc GIT binary patch literal 29304 zcmY&s;Rb$W{vRm* z;{7!#A}gZ+002V(08rxq0Nn$hj%kF5f|39L0JHF;`=bXexuDehETcfr1OUKc{Mf1g zKv&FKy4=7@&*q17_|Xgf!JepKQJ#U50|5Yl@S_4?`414fFayRmCRRV(j|^1%GoFz` z1AVcHp8bz4;>nNBkMI8hf{CT8@eg+c04Tcv0Og9RoWp~rMtX(-fT`(^4%>f#_pXFA z{ULw2oFAF+2PAO+K*mk29DZy}e_{hx0swy1$hq+iT3Q?Y=vn;44>a+EHCV;H8Y?}Q zAHSCW`6c)dz>vUm|MaYk0066>c>tFJ0N@$@Q*dlH*7iSfSvUOHz5MXk1@r@@Hg-lo zwzd^NcD+BmL~o`Vw6l@^&z$XkcmUXcSQFPM|7n4K09d?Ac>J%OeP5yZuU(!E8+BR3 zjgWxqAwU?+n-E@)H}ih_dPnOJ{0r&ZgB=7?(PmZ@N1;wyKXXC1+qE7HLY~Y+(&BoN zeCqkZ_X3y$2Z#9?$7O9ZP0dDO8FQFVqRujWkfzTPEENET*aRT7^b1%7Vc1!z&=-R; zTPv7InM;)%xj{;jB_O$Q89<{DBV;ubfY1{i!zFEicPy4+6%z|Tg^6c#JM$;EKD)-* z6bgq0w%ix~e%++rw7=%ue(k!>vadQ4j|zD)Y@>ge>Qb)bXt*Z7rE0+VvW)~W{UdQ zfzS|6U*e=%Ua70P-&j!Q4CAP)j3ODw?x%XItWr#BnL2b&C`MWb8SQJ%JfykhqMq)D zzG*lJBh(%V2EO43KY_^1VMY-^zxfMK;g>Z7?e-V{1bT1={;eP20t41ZfB_@Z&&+^p z+#{I+YC)(MOUUQzSc9h>kliP7!|Z~o?W@xRwgS-|Aaz640hYe&xB_h5WA;M)0r>^@ z9k6~w-;)1c;xR|8O)hjhK1Yq0FXDCD-(}sQ9k%)$&Mxl#H0m|Jtb!^xhwR30Ey_#G z-oIQ-YKnI$HJRFoI+WjV5zlXCwHX>bdH%t~R!AcATUH^Dgv5Wos=n%$-IXQ9m>;}q ziFBca24lm}`vXaoQ=LG2%CX`meYr?|j`6TU;BBA0Nh;GdZQFT|x4ltT-5dFlB_&CC z{%xl)5N^flkzq4_Ovmx4k+oqnMh>gqLYjV3#*DpFaW&I{N1U(!PBDMeEARUevyz2u zG4?dm@@l1??~DsgX0AHHLu&kJK0%{ZU+7@a_!Fl^qpMhGHr-B39Wu6t#=NYE?0Txf z9$(2}x+(p{I4a$Gz(S1_MQ+c?DkfUfWw2No)}`k!j`+<40S(PS9&DXzS#2H^w05G= z%1s+h@gpr#(I=J~DIzCncVATaij!E?2wrR0(`*yrJ875^c!Bmm+bgSz-~LL@HG5pI z+FS7%8{cQB;=U;DL9JsO!?(7z@9I}NcGmLlD&{9(n}h&TSt@En2$(^>hb$zvo_ph!X>m4!==;j@+7xoYEu3&CTqpTIE zbzQCtA9g)*#aMKpJ0d~tfYERSS41LufdiqG4v1KELd?O`{8c_616`Vk{136G{Z+m} zFJQ?4F$T;cpE<=8F#8fuJQ|MY2 z{yjtzJiR?c0z5(wyn$o>|9J@qg+|oHW7Ze{32F7B+COKB%GCG9O=}l{)z?FSUJT=> 
zf%&iFuZl6yw}ZzT68OIf0yMB%hW|~en*EQel}9wzz!ZTKSS@o%CjSM@m7Mj&vfxokMW`Li*eu0#3Frh}pgHT1M{AOr8IRflR7cK{uf9t6^Jqvb_cq%QHb9I) zySRI?M#Q`@h9cLJ#{3@MllgfqOx3uR%Ip`{*<_Rc7#MVW!z`8U)SR|b*r`>%(w07c z_omyzlaMgi82j%e*s0f1$A*II29*lS*o3strtG;j`>B7C6l7`*s|_R8CMdUz64(wy z%P0N3%xtK5RT_^Q+-)pso)?G2s%gbPlTkJfM!AMU4bZ`9E9hE zGeH#OWp=yJ7;_x0sF zHac&rLbs{n?UGr|QLSTwn=heA{ciX-dCyCc;bk*pms_x_4m2>ZU62_tQGG>W<^+h*j`LMNCWvlO~A{^(X&{>#pBK2Pm&FYPg>&D?9R z`F7(=FR4G%z3I1;+8;R`h|1rC=OtFsvzV9p1J5;4>8RZY){9~VqRZ&^ZdVl_mlsc5 z)X_ypUdJB=t>3%ZzIEx#o^y13OZNQs7JE%DLE}?HOKxbRR8CzNN4aPkkpY+ZyY18L zZW}kvEmB_*lJG_r?&^4r@>|SgVAs#uQaX_hv*yjX+ZI+f6ik2cx5(9g5WcZ{QVaF30<5U1TDX0p}xgKFO^sfjfdD?wKD%Hj$c5`w0E4}KA)=jNsrzHUrpnd@A!`c%^1^~=8^c)>3Zp&Vd9=~rlF69 z@AV@STE~=?Z1;Q%O<5A_AoZ$`1L41OAYz<*TyG2>vKe@qZ z8{6TPL&5%d{QfUKBlbh+bM5(UyvNQYVy(k!CP`ryt~*}3eg&`N5Q&XF#%%+{(RdF^ zg7n{_BA;mbxemEg|K>6?E(nF@m=Dji=QQ4GuBiv-rn#54vxFp8;9`ai3EugcRK<-# z+|Hk}vJsy*9Uq6Ph7%rb&v{jmPK?V&8aCy0+-8>*@2CiL$JJ3wrV%28?ZfL%#vw2% z^9>tXmF)8*j`rrOUgXu{{jJ*-;?0c+c&U4f3r!KMU}95UEGevDMOqga!Z;%N>mPnM zOAZk>M~J*K0X@csj*!R8`brAp8bO`qYFI~7cSIi}>14$f_XZf3y3si^Wn21^uULm;Y0tFinSstUK zoPSjZ_84QHK&)Xz{<#^*ZHJIJN?4k!gLsf+U>DbwA&_|xk0d3yjpps>#4UB9<|#~F6$UGEiNF3 zFywu#$s8MlK;0@T8Xkj2S0E%9PUL7{u5Gz59Y$26p;@o|1j54)kH8}-Gh4=&zSVOS zRZKB5=y8ASaX&fH3ovuz)(87Mk3k`gbMI7-o%5POtyqczNx%Hno3n{>0TxN5!H~*8 zz6lmBh|{{!#_fvr7_Ek%k^M+X{Vym#Jmhlxgv8L9qYX=5rOQSMTxg%gK$1dzl`4E1 z3tMp|cP!VvG`x9_c-is#!=Fk$%f%@Xz0$=aEu&P@qKK-CCJ8CAL7L)@Far#CYq8k{ zIT5bfk9MXZJ7qP z-uL0nxn~lSor-du-$vkhB>O}w#Gwm~_~3Ui;`C&=(MEbP@=%oC;WQ1Q^ES3O5K=a4 zgwwn8R+Tm80xC^^wjYB_ztE6 zL2YcJpkQENx_H;u6n;t==<0S)7J#E3Rt&yTMUFHiFkFi>!iu+VT5F;V>{W256GWTZ^Z&985FKC1wgNT$)K z^*B6hv{-Zg{ z&mBZ{V;FKAo zXfmBIdG{q$Kc@(b01VJK8*cgE0uu163={wm1F!@5073w1fEFMJpbSU@$O7a5@&E;Z z;*X>PQ2kk`VYMAN|t(M3H3cnOi1Ekg2GxVY-=r&5_E#$wByPc5bu zj!9zrHo+#@KBYw)>jBRN=jV|T4gTQjmc|cbj_+U4VCl07T;_m79&1W?Oerp3^@ifF z*$=3o_YeGgE{q12`Ge9sbOSucIt~6fCSj(!Lf57w>IRBx1&kYS~g!>b^*jdjm%uloT2#UegrE) z{i!B>!B_f~U19Lx;K@+tnn}iJU=g46%LgD3<8h!k4>v}?3jVx`A`NwjiF54-HG&Pr zF8AcH$<%*yr%PE`D$#vqLCpDFuJXJDo)ZZ{Jm8749&=;_a%McKPfr#izOh7;+3nD0}sEC4yn5gct zaK;($gQioPsNT@jh*qI)QTe!)?zL1NsLpS9SJ^n7Itl`oyS#P1^^HH7%4`zHX;Izy zl1`2uvw7#Cw|0sjwYROmL!HX`nH_5&fNEe^Aqj*$&n%fbaOxgv3LQ3K zD`s$!cu^oZ$KVuf8pf2U6nnO2Gjb_D|8gH*7ZRMV5SOl^o|ddpXSls#Aa@-@xU~^x zH#gO3%)t!Y)tv=xHz^P^cWEF{ot*uvaG%;^6{XTv0iZkA45NRsD zRK+U2^`=zUKf+AfgI}!(-s%U#>V~+l>Pxke z1<0l#{15jVV!K*J0j0*m4Al&bd^XB7FcOGW?8=g@FyzW;(gHX&?VJ=?(c!)^ist92 z;R5=lQf1^udBh}#YHmU%qF9jE=FITy9iOS0wuzW7jIw%&uXx&2NYZKpyF3Cadb2hr zsTehmsNgOpn8OaYLcY7GH_;BafrAf{I9Zg7$!oa%j~Tj+CdRMS7Es)BjMH>&c`yk2 z&fLQY`Q*&>s?p1pPNRoWewt|^>-#W@H>``1Bsh11eOSrs`Ls|r+EL~Lf6k*(IOjVm zhSd(HmpL|fWr<5mt8{-^GMKzh9j}UxKgjlu9VZC}M`E#h{k511xaMKjcXK~Q*RLsa zR9v6B^c~$l&oZUhe)*V)>uY+idTNd+M_ZO`%?qJ5VEz>;#oPytpJn*A7;HHkGDmYx9nrUO%a z)v@n13Lqb3D+R7PY51U;dNk^cuA; zK$9#RlCeLvRgwn&k(rqW)wWaLK$a;_J}QT;=lTNmlT8f#FKqE+m|j_G+~5o~Yrlo( z$`ywp%3WBHB8Qv_%~J|B4|N!~DUDw=MkXmwc^(?C->KH-h%nV3#)MpT)k^(wuidL=BV}8 zw+Ha#U^L0DqNa~3l{9Nr-m`kwcb^{-AG2w-p7)UY$~kW}CP)&S4ctiBCVEF$ufn@^ zX+HaWe7(Pm5522|b{mR*liR@S-0`pslLpgz!B@prSM2~J(jV8<_S|LcvRk(mm7bLZO4;H-uP$Z?*Yx1trKX@wC^t(@ zo5muWB_InEcg1T$kwX#thLG<)*x>PNlz_g11q6}=f7-Z28(Wl@5R$UJ5^s*2nqj?4 z{Cl$NuMMhJ+oOv$(z=Rpiu|C)E!}<2tt6KON8Y_UYVz|o&)NEmofOiff+D~egfZ3@ z#0=)C7Qx^m&cPF702LOrPlO%!pv(RmW2j6K_HwZU&coZjy_HKizCNNK>0yG+xgpLr zf~|4*Xvo~D^ZnzxD1CJ*B~<{dLZDfB8p9?bDQ5&P8Kz2m6iVD$w^z?>7S~)a4Qz&D zZ(*^DcdoQ_lG76JsAYmYuc79vM$oH?Ow!IPv4yr1VFm;*GSiIPkHq}1g{IIpQAYtOEI*g9V zcG_#U?je0=K?56D?G*ojL!!U=s1Nw3crL%_B1DZibrrOJvoPuLWiTa`=xYbqKxoAU 
zJ|C+<#In$Xg9!mdNM7z~1&VoO**#~cN|U$QTE~*`+8)}5tIf5Ua(TBaEtb)j=(8CP zm&S)tOwK?pjD`9mMiMmWtSP4!Vv|L-+;yijp|OIe*BUCk=AzxQx;)Lj4CfN4$&68C zTA$+Dk_w+MH|kqh+)oN(^UFGD$DJ-W+5-p;d2SX-OnacUCI?&^Dm6KVfbSs!$WJsm zQgYM-m5R2zenYB}*7Z=%5kqcJ#-4I+xI7H}a#7O<$sAc1%mwd2M=bek0zo~{3N%L+ z`?GB@;)c6Q)$Bb*pTTkXVD<&MfD1cw+Ytj+t0H>qDPPsZek4elCKvX~p+y3ZVb3S5 zf27%-AHH&dXI|YOD_p7X*9as_P7gE%rJk*9VEX--#KT+-b9mYAbQVushv#K>w8i;q zmCV!h$!Ai>)0Ud=&G79$e=L)YtKH@0A~haKeBEL`5Vly~Oz+w12$ebinQc!GEcG|# zRI&oaY>&UbE|v^_dEJUB**JZi1o`)#UCQlnAwcrbM@pO-_%TNhRkSn|TpTkGHfNr3 zebBGu#CCv*srP-(XHa5~P7#|QwI}{h-}R2HjkyCz+Oy2o`#rfALq#8p$LJb$ae|eG z>#~>iGW^wt?R;aibnE%NK9!Wk>1j^rhbJCeW0(9VQ0u|QJR22si;l{|b_V!5Hb&;#1rHs|z(q2Igk^2=V z_iyg4@StEvU#4V9n9)j;6!_^)&FiJpEd3lACA^#qt1f?%J1J0jL<02%9}zR1Mh#W+ zhXZJUrSSvS%b0p;Xv?PEbhn?Q`P|RLQaR~srNB~G7g?-&>&Fl0Md&ZDOtrqw46yOV z=?#b#4tA5owsoHFyO(Bhd{Q^RF`sL74VY5&i{e9DOO1aeVim|kE3QFpV&%*QsVfS= z8j`J)N~R~kJB>lUX@?PN7HuNdw@R^SQVMReGfFcf(3=sKAUiIUP|&5#yX_g{SwaJ? z_mP1Wrt(~;@@2P<*1W~*@Tm<-b8uhIW@oYP#=^37I88%;mPc=J92L3tZ>=>RekD3`u=iZv!tHc|ebP*+j9lsp(zHQY zA^0;5zeml5-vOnrB7C(FUc9L^ouqzC)For8ZXF=jH z?L5Q&-Gwc-y#3>HT$@W97o_SwNXSMBE=}l#fXh}2Cq3Cyf%sC2_U}9_jPvOjMnJuY zR+bBfV^Fl;ClnmBauu-zGvqF8k3UN|Jh($pQX{u;Hd7@Y;<|{PW`$$7f9n$-*LC-+ z`$Ng==<_uCa%IhE;wHglsWsS4U3nt%s-_r}Ik9#Awslk&8djLa-#%F+oMX7u`|nU_GR@TP11 zg^p+I4RVBO!#~=9w|L(Ty#{k(We8=_?07xU&)NJh6mTKrlW50MF?O$=yXJJ#d&u3>7y`W@ea4OST2yJ7DHUEGN_9 z_XsNP-DAC6llwcL!8WShpQgeIC4ZyzP{jnfRkSp>b|@*BgJuH7+8HuVpJL7(ouD3z zTu7I)<2*SzgJka0n=>1nhBl`APoUq)TDm=j8AuIlCXhUSkUWn?kmTk-T2H1QLhj+H zL+HiT+MRB%JSu-Wr&&f`8RyjV>W_ug_ zW?6|%RJjs4&1NKhIXN>@d$@ZCKr+G(J*rm_5{Pc}VZ*9w4$$2Ovk;+PSAxZFD3tiieVT3Ga{Z zAvz7)EB7X9;gwqCMAQl>QuQ@pfbSj={))eHLc7k zN3$SA@DngS+tJ(lwR{I1wjy8-VmfL#71>SnCE|?KkIl|9wa($T7I}5#nE%hs4=e!0F{2Kd$_^%9PjlYJbYxm{HQ8znbTmfXn6!Z zIxf%Iy6tYXON;%>qyI!ASO)iTObeT@%T>HHqup5I6UpqhvJ+m)y7TjuugXpAYc+cO zOkBA`8pl3)yE_pl^A>hu@ww+)wddQEFifpVI06SBFGQc^H&`Bq9DWiD3p{b2T&M|u zza&(>yL9$PXP;$=IxIr34MuMWm`1Yh4x#xhwyCE$3t(N1-#SEuCQ-=?$?1)gX3!Ss zX9)(dDplpT287G*+gh&;M~{ZjR4P4bq@Q7kIg znabCJ`yDyS%{_4~x%7(kRCuNPO;0xH(W=JsaNPzxEl%^-nd$i__R%)0*GbV-U~@jK z+Uz%`H^$yEjd96Y>s*8Rfq?*5jMQ-e?uHXw5VRTQAEE&RGWX=D+4g;2EP3K+w9e?j zKv7A#F>Yg_Pgro?&ezHQrX?MnuKoK>h0vQ|k?Y27ch#$S@BX67w|yCB>GU;ajIOVr zr2oPC?uF7IL_WUoZUyp=L12LKZ(F3dunHsA4!#^o8nD#YL*{`d>nrNJ_UZ2DVqEg9bb9Z% zXCB~sIWMLq@u+X2?gN`b2BM zx&rPF%MCxj29^_O;0>Fv@c73XGU`IqcnB70SUUd67%LMcCeF>zoHFa0N#!)fN#jf9 zR>G)DL3na9G*T)#jaO0L?K|B)bl7&MCW+g>r=rz)3v7GClqx(V-)Yszb-D;fwm1GZ zP;GQ)RG#T^Wj6^~=amj0EOa>?gy0>9SOf3|2P!C2CYkRlTJ*<76)--KIh?M9ZWD3I zmb#3g=U0X^mzm*KBhUQVL#)QW`Mo@NJ1WdnQKhw$CUm}xOWm?@{`+gT_HH6u8B%gv z!4f>>_NU;dD~YV9>kitGBvnRd3&~Zb%s>hka)4AoT$?bWlkEazwm;HCOP+Pb5vic?We?MruvWfG@>4{KiA!+K_NI&#>JC+2#CYF6?i*a?@RfeYE= z#;F2v8T~+p7{j7{)?OE>U;B+DlC#&SxfFAZeb)}QWD_lsE)l)9Il`oN2naU91vt?x z`UGT+WAt@S6);p5>R^M0wt7fQ$b7f?9m@3=sf#9C1`Vm>&i*IuK*3vk+X@ZtS^S{o>?RYWDy-icy_4iPHD3C+89uhGjqjs9m^F5dzQueCpBz|`=l42{ zS>9ecl3acZF2sJwbO^`|4r0pY^Uf{gzCDby@;Taza679;T&* z^{b!VB8Q;{A!SjSr({bCB8XG#L1DqJERG6=8z_v}qrQh;fk=}&W0(!8QnLv$b@CEA zL&uAg*{;{Q>aO=!-ReAz2R9kf>1e-=2d6VAzamRwUmgjT*`)P;U21y|{Q|!p#jkYP z9e=!z-8hHy24eB|++|$=Gh|3tf;Ld*Ob43kH7z`vLrRL!rIR8|yX(c6U;wL!U4|kd zj6CufWfO7KF-}jJoi}1K>A`8(LEK0ljYCv{N>-DDqU?wIi>5j2TO(0^%m2HdlP9-^ zD{nvxSthAV-Z_*V`>=iJz8`62_Bg%e)bFG1;Adhon_G~eqSw}{u4*q>Y7@x>h&G>3BXXtBA&jSTyW#6(ZVA}7Nu{bYZM zbH3;KWELpYBDz?poUUqV0s;;6HdCU+`0!z?q%u>Lc%GX@a^9J@r60E&i}>&wu2ve0 z@Y$Zyx<93?nYpIU`mL2`xbx;` ?{)rYg%aN&w}ky;bH8!>#J4RhUvTAPlljI0=S zD_Kb{y2JjHJ5&W_v-3nEjxmq+eFAH#Xh~bACqge|y(zlaAf;{ zpRu{!p?0y_Q0FA(U4y%A3w$3TT7GtVxT{@SvQ~dhvHONafbW8Pc4ll(NkMkC1mGD& 
zG7V`|=&=<4D-%@5j`ZTtrH3y@pz)uNspzqd77$Yy+}>8?_Km}vLyhW>)bD$+4AKeio-A5X{I)hNvK!8rdzD)>%$7Z_v*UhzGxZ-b( z&O6uD{g6&h(OHP3qziv7|dp$sNTwqT9E7(%|3# za?~z8>BRT>UU=)83k1(^!xAwf;Lu}^M9m`opgHjEHjI72lj#}Ts{6hETFjV61oAI8 zQ~L7;ip*Hvz^lV3;wJ6J2bruX;P|qX&r{^1sOUus{+Z^}t$5G%5y1l6jf5XsJ6Ys; z3<`rk&YuPzvYC>TE0;GBX_!AG&n$bAjjk)OapHoD;V`PJxCoG|@zVGZyo>){gsk74 z4eT8?quMIk`1ae#3fh4&i;WR!oa7Df!!5#_0c0*-?*ivlmYkAMhyKhxQq!~W!+d-A zLfc7tTaIncS!5f^$#~lsYK2Nn2`qY(LoJ@81b(_<+d0UbwbJB z#!Xx=5?i>s-vh$TegAxD>f+?JyRq~zPRL+Uz0AX{r->@=Pzyc)7MI1TM3%M~ZI2;K z379DYn?YQcq*DFmkdsy7QKW{Unv~ZnH}t(@TL16}V~T^L+!3SOZLh|S}0c11#lw~y>hPXUn`R>6h1|E%bNERSYNSZ-vdEWuLD;JV9Ftg5MZ5T}g6 z@BJy$V2Mw9(GRNMHvziFwclXjk6L&uG#Ww=n0>{C&Ne z5F&qLIszE3;e!{X8IrjKFOT`*Wcq*gI_@I0=dbIsCWF|H(rwZ$36d5MU4+(o^eHp5L1^v3Qo!YHjztod|6wdQ_L%_I~1h z`{P;b?eREpx^Fyz(N$9+4#!1<*%U!mZLQcJu({ksXJtjQD&Bz%>V=QS#(+kKy^m`y zm0EK!(<3NgY+{8On^#Xq^VE}{&17L!^PI=|ikb*W|3`~hBr)1*!KuEtlH=C2zv4I7>J zyZ(FpErvtiA(q{siZ0@DUavRA*M-h;GUe!w!~95h?W~oOhru2Wb|39R1K1hdYR@@B ztQj^VS=hQiFG-3j;DE1w%9sqME{xzIbhp2r3aj;-D{(>$w}8-@9bKmPf<DMp5Z%B$_-ga0TQr@W)}EBl!|MbvqLr`?sA6Ts4;B&aBaUV z0)PH!`;3K6z6hnOcCncY%Mvo26k{@`)kt!5+kY7Jb}^bBrpC7C$u*9iAeH~ z;EtJ}R8Y~ivZZvbbu)H=L~tvHFK{*{mx{E(luRxva8`>sb$}Kwrp7fXnWDq;aY%<2 zbru%N*V4yfVD3B(Jv2w5niAxXCs8Ptu_Di4bh5IXdaA1HGWw{0R+zQn6%ip;bn^3hFqaT)gJ$&m*R&(;FRo z6vx>PFJ*c0rZp$HgwCfftm=)Q@Z%4kA^vTZR|r1U4&g6Ej~5NBd0F?HHbrrV5iFY& zGA)`(b)bP_Z;-j1Sgb=DhEo-8GD)4K$B=kt;I|@A^WGW``)v={WK!KOzBbnu-)@{c zm4s-T@Yg+1l@xmIcLvDW@Y2_=$^JBVpuy3b^C-+S=ZFmia$v^?eT06Uh`C!k&Qk2} znxQuBS{4c=y9QSW45~NN-#2!m%wtv<1fq^b+O<<;ZnfTuzuu}L#kH}uR?h>}T?I*>lgeDeU;5#6K9G#!^oA7c_CG&)cDYUjl<+@Hr% z-mWd4nvXTq%F?bjIuC2d%G}>#!_e(d-JZw4CX>fZc-Puo_Ukpik={L)w1C!A=Gvr( z58>%Np29-zq4pyzyll8*mM+S%vi^;?dpQ|?J*>U|%c?irOx<Pn$nqNl)5B)aW#K zZ_X?3|6`-kOV*mgzMx(;d(f8#pXmaZVI>~52Ta*w*}!8psL@E|rDps2_|hEeNewzUI;gOMHY$owen?pP7VuIZg|GwDH!b)t6Dx zb#=L}oD_Y{HP946nNhow)rDhTp6<&D4syZIGQ4bRs)E(qCM7$!o7W*!3bk&kgdwpE zw*LqA`4c&t{FYy9c^@P8PmI)xub)bQz0ckZY0byc)#ieV@md%TOITbXuSAr3cZD*MyNr2F9pm zSunef9I-H!R$(Nd-!y^}dkH9>bzY87v!~X_mn(v96heh|3MXIfE(%BNB2Ugr=3DX< zTDy6L@CunyWYl4CFm9J*4>W^K1sj?K+hqL~H(a5O+MV;3Gw`_2q z4!433FQ;glDcNLcX&o@m_k1!lua*-N*y+Y7umVa~|Kp|DdeIo#%5&qW&!$QH-7XK@ zye=pL?~!hL*;|RA2@sME7{58px9t%$nVAZlEa%<3ngJM}Y{`^A9?-7{+ZcA|-L z=``wNMJW&ThMlQa$|Kys!NYSrF5!9XP&{u1jc z-fO56LYWqtHM>f9m2+f4j?m1eup&g`&moQYrdl~olmuOlI@6t~^lxPv%*wW{k_IeT z5_7k%SXJO?w7HxhQ~af?q6?xJloti$yuEu|2UoY zyYibq;oP%%lRh`}VvvW1=}Q_0;}KDi1BOzL)cbA$tNOWuZPQRO$d9l}0%(Kal%3E+ zbW?svjPv=4c=fj!{?uY~wZelJEfEu;8=!`U(o`X!o!uy0wV z1B=U^Yy(a9#Laly)lygg5=e>D9(=8sD#|i}2Dz^pP-Sq`yt3YIX_m>hgd#QA!)d~^ zNVDMz=tQ(=Mh+Q$33U{tWES+sL99bTy?`mdQ@`0P>3L8g{s2=VJs*!=*MD6z()WQv zcP7bnk;TyP!pWq?&_B0lOf06x&=xo+pX9YxtJbU~IEv&@2SXSeS*L(Uow8EP&j#1Y zwSQ~SPtAeO_+gmxZvdr&s0GV5IZXtnkh~x| zjD(9`uCBm~L+=1J`PUH{H! 
[base85 payload omitted — remainder of the preceding binary file's GIT binary patch]

literal 0
HcmV?d00001

diff --git a/docs/stable/_static/fonts/FreightSans/freight-sans-light.woff b/docs/stable/_static/fonts/FreightSans/freight-sans-light.woff
new file mode 100644
index 0000000000000000000000000000000000000000..226a0bf83583627ed9d9575063e22d874b84e790
GIT binary patch
literal 26908
[base85 payload omitted — 26908-byte freight-sans-light.woff binary]

literal 0
HcmV?d00001

diff --git a/docs/stable/_static/fonts/FreightSans/freight-sans-medium-italic.woff b/docs/stable/_static/fonts/FreightSans/freight-sans-medium-italic.woff
new file mode 100644
index 0000000000000000000000000000000000000000..a42115d63b39e9dc66c33d6294a81e0115569003
GIT binary patch
literal 19420
[base85 payload omitted — 19420-byte freight-sans-medium-italic.woff binary]
z$dS{Z#2|p**Q>>)(KE@ia)VczYcBQQxr4-f95Xm3Wy)qf4A%~Jk$`jK0UCg0KGXxZV3Re|qY`QTema5?(?Lsim%F`Nm#>h?nwDfFZV4QQO; zi$%)$WFk|djG+)@VOVpI4cgLq(?z#l&I?(aetFzia=Q&A8H6XhD3|gcsxdDp40Joj# z&WShf=;rF>PQWpC=vMO3wBTgIC2On>aL!;o3zePmn#Y4zZs0L|XhDV7g$ybGufE2Z zF!DmTve9}{a0Y;vdo*#Se_SBQDIW%07YoR``p3?MSjYHb2eIe(i=IblZkv7Fo={;r zSa9UA%x%pYkBR)oKR88nfz-1<2?K(lPzmFdjd*NRTl+jpsRD5*_ozwUR@xKa4sI>9 zjz38?JvgyX1-$4;R(DP8yi0UL+tx7ctE9@bRH#ig;*kx0oFuju1&DxUO-Nr=;#Rsp0(dAJ+dTM?w_UFA5@1nn_R}IodEzSS>78M9;u*B_pF9tG)VJU0za9 z2sc?f`W5+gXk4_ec_2<=R#8Y%G5Hu6yZH}`RW^z95G6=@iLP(z-AWYHxdp_S6wL{5 z1TC~{nTEFYji;T7+2aUbkzx>k$J3bErQkrP?)Tm>aELoZ3M%(E!|lH%=~7ucm+Par z(mT-KPClkp#X&R3%?IP z4&JTZoF0As?uVDJpH=3uHTB-u16qqY+J*GCfR29pF`5sD+h&e#&0QppWByJ`?!B*H zjCrAi*}c}#C1)t%9fWDab7%$xiDVJO+{|(73~8L%&MWk<&Cbh62MC(2F8>lSU_z9G zqgAlSMglNh)lQ9+vra_&xmBN$(pDlp82?6$UJB-~t`i8JDI6lO`QmIx4w1^J6gBO?EZP3ub*dh#p%^1ND{wWU}WbvaiZXO^MT=VwS5d5 z)y)$|k=t2~-s59XaIm{@)C;OcPxkD@q|)*|1|D&hULI3T9j|(rgBMaM0?0vb)TYNLS$yeo&iFYTq;~fu zO6(4o_Mvk?F6s<&-5yGA!tEb?O3mo)6n(46cIU($!hhFbSO)my)KUi+EZzEU>w z2w8#@V)@`i%7>v0QcP=0&HNk+o5YI=Gh_zIA8>0*6~So`fzPbhJ_UNa+^yp`PMn<_J;;s5rd-j>+-`4V=jyq)|rAfXSzLSGfMpJxi0VvbLo*-Dy zvhgFf%DL*7*Wft6vS_=ZJUeYVh-}H6_+fj;I9vadSv%3J{sGkLB|`+8Gk29Q@zKGN zM!O`65!2UKpX>kasEmRy8USY^EYt}09#77s>e`KQip+jeb^Qa+bn(+2e~7!s}CT+!aa^>Q@ue>{nL6 z+$}xSQp-TH|WMh{JAL zE>pKVXq&y_&;kcenB>qiXybaX@xEr~P9aWIv{0%cx~8(+;S*+&roAWEOq!u`pjlv+ z_dz~LFLZ?O(BHzso6V)nWEPjAgyDCB`v#4z6DdE1(-$RErLTNxBbxJ1vyF6}n=%Wv z=SgZ&6j;XRg1gE%bhg z)ShbwP)sMX?8{i1SUBUa zVUb`N<>VUaAWYBVjz33_t3ONVaqQHGu1GJNeVb(&7S&J`=sJ`j#hZA{0Nmrz%5;;> zy3STR#A4?I1Aj>4yCRG8dDCEpF#yhHKHeCkgUc`W(lKE9TbupDU96k1f?7Cn z=yq-wM4N#EGoC#ZdUppD+Bp*$)FAsmFZO)`Zfe5V{e}a#dw3~mppZw3(!R>N&+0ZN zo7jdb5KTHo>tUR$inoG%kFxnQPe;zCTZwQ@mUCHN7AILkr_QY(5DEa|Q6xD}6%&pg zMIg#YkIuFiJC4e1!OgShK0&VF<6aEzW`cQrV(lQ&fxZ>TQfb6Xb{@+ zJ!?|JsFsKxUgqgmE>$tZJtjoTLp^$GjE#!v`?Lg!R!6#2wB3H^f*mdPe5u`SubZfv zD0Xv!@f!4YWb~V1N}g*3zcIq$8jS_K?sGo_WP~il-*9-)i_r>Ix)XG>-PvY0&v@K( zCQ%>jjSJs^qjjLZ^SOap6YhQuhV|%i>q!bFvbNCV>vxt}x&`utxd8R-n}37MWl#_D z=4yqAZVd!Hrq4t~ZQZvm0-TMwGu`Z6bG)2|9~{oTj7?MzqCe2Iwa2kxQp3p|D{T+z zBq~~4h2)PD{f-g$GVZBpu56y|_n?mdoLOM3M^>wcpEq7)I+Yv-RF}YIs8Ir=UojS@ zSgP+q^PC5L)51b#^O+;TnwSINy_#wHF>wW<^X%%TeXpu*wQf_6^I3np+ZZsycC1)sVLH zr(n3=IsTYcmKR0LSU>@g*dfc~#9-n{HLi}EAJ;ODkLOU!vejLF>}D(Do^0j>0JFbK zIb*+>t>koWo4{9VM|?{V{h5A}neE}6ehc<^)@4_Gm)&F5OzfUO)2z@tVY{qbuJQJ7 z?U42%-WBW>%4CiDiM-zJ2(x~FyM*w7NX=vbVguUUvRO5o|pdgR^k+MJ}}|@7GmN9Za3Y?p z4hHNV@Wyqy&?*r^Cq*}!IT4Zkci&F!slDMR2bQ0*k3lf-o>akFj>gYjo?CqDk)ydR zk#S4@Pw8eLfkRAn#Jd>Grh_)c;Pq_er2B2GXe6!Q>f`{Ex2hRyk6E>+3T1^|<%TT| zekEH=*=kiyHi4J1K?%f(PYlvU7GCDFnpd>07c`GxZ6Lpp=ID$TSf>yf=bh8F2|DPk zS@v$eZv)X|?HQxsnb!`skT!wb&-G}1o{3le!1{gMwv}F4LSG~dvx2vQJ#tlVC|X|? 
z*PkGJhlH_!&+6g(OV?Je?7f!DKnZubkfeF5z0 z{C#%#=;zUsGdzS4y8||twk$3T`h)YD-%gU6KZx?pPOf_8K_X5Cn%O;YZ+U|>V|BK2m3ZcUj8EOjwnwk;9=t6>8$ICad z_VYP_DOZTe6B_EYX8yAN37W3`dpM2P4tj&N5$=*8yzS+c2Gy9Njt$*S6WF;(RadpK z6TifO=3UIHe~^I}>29PR|7sdx@ea`Vv|{L*aI0(8+eU#yG2=p`a>MocXd@1KG$`g6 z0@xQ8vJBOEmW$gX0j?d5Qs^=&yj#O*S?mE`?s`@ITPRI8@5@`5yw@3?AJ9vP5N20|T zSvbJGD{>|tbMfk*t$RbFkbirk{o+`w?x^!vjMZrypC0vr0_GuFLT5>stw-C2;k5sv z)oh0B+O1KgEpgm*FjdTPR|%vYb*%zF8@<_Z%1Bd)#dvS$MTKYzxh@W3%^LwuaE z^iYvT^$VereTRYfv;20007PH;-y<=0L12NYM*05zx^nM=AIm4)#Bz+ghHTi zxz}jQeWe9ap29!9Du2QWj^>*QOE=@!yfeK&e$Icq?{77kPP+h>&pY>CMC&|!X)fQq z;1K_v{l-HLM}(kwpI_*_>PwUGMAcP`6V={bmn2G8H4=UV+<9Iiv=1r=6-WDkT=}CZ zD5VCk9adK%m_7!`+V>}4JGp08TNFr-aW`)0lHJKC;5F{A$7-FylRm$f6vCNPrQ({%rR{x)Jrg{b=_uR5)1mX)Ya_tLuh zcJ*cYO?YJ}#kS4yU(Vaa$jB(%NP+@N0tzJ&^Z{Ggm)OGeH;4rDC`hc_G<}Wj6;7_+ z7JrW~5G+_^h_u+`04-T%k+sSB0WMl*lD5kF0ykS{gSX5311el%l(Nj?1T$S@g|p55 z1&Kppma)$MhA`h?N3hTT8#G9Gkfg|XA2msNfu%99?Tnm!k~{YB?UjJ?0ehK5s8NrVX~>QQV(Bp%#S5w($7P0G#AH^ftrlhHIn$u zE|N#;DU4|D+Qq95a^9$o-QyW4E;b=vUO`TNZf1IR6B9Ed{hF@Y=8`djZ4uj3fSa>y zOYM`HCa(vqinszT5J(y=i|+PoD^%%gTQN@lE6g0VlLS_$-ukGWtIc5-7Cs?5KBixD zSH;rK&gS;N8~S~Viq~Q!UTH8Z1gu2hi;pC(dj(* zhzU~0dxAKvVpL6`(GVXaB2?ln()6I^4*i4-cV?$xlBu;A1k5nLm<)xCqh~;buh(9j zPCv=jo?@alw(82HfSb4{?5#R?qo|O2l2YfjXS=%);Xr!9>xSaZ&Q!9g-&a|XgeZN^ z_HVb0yTi|E4i(Rwc^w#%-uT`+rs4ajhyR^}VW2L=eXP-9B}IV}Ept+E5^Zt?c{qTT zMr2Nv@G(~mx3VOk5|=q8B1|LOYEXvCu4QSEtm~{Khw$V4_!lGs+W@g>&pzyNEZY!u zN-T>oi|RDX3_|l-re%=0K~rd+xrUZ$nzf8mV>~z&-alfO0yH?9LylXoF-jp6JuZV_?wVM{j4qb-EH`SxL=A|do|X^OKgg(KnElc@!PIFD zvp@89HyVH-4ehD$x|Y(Fq)v~Qm_i=8P>pi@3rmHjZ0W&^!<0ZZOQCrKFd#3ui7L-t zGVY?wwRJRM&(PrNUWlX7K}~2ag#)P5tNf0MiRO$riwC5B=llUIO7l!KD~JMSy`jVp5dpxfMC~dFo(l!p zV=Wd)o1@N6Br=7rgkLcDCTb6zJ5ky*Y>s$4+kMGyA54KQg>YRB;sA7e_>dvK{q~9| zbCXUjlCA@NCkWL$Y=HcTT+Reb1Cydb8;TvN9(2}Qe@*jK}McPyYX`HOI_9=O1 zIe+?uam8cU_hwR>DwS${jqFr|<3Fr*JsQo+zR$(p+Pz8Q&EZWF9KzMBC#^qjrJqlJ zUDY1v9#7C8@LwU{s)n4-v4;|W0UUL>=%cxfMmOwqGvF4rUjJNY=uQ|O^SyTQ*(~{+ z#_k{lIP$6T9d;yc*=_^+&rG#*ZQ;r1Q#rhI?9@=HNKw}QphQdiyIJ8W8JEP=FH(k5Oi;Ih? zD*|H*gDa@Viwg^h|JRzj>CnU=AkBm!AxJHL|5j7OP#d6N1nB)8sgUkriPE-r&h{Do z{1TwteFW~ZEq!UME-w33DJ*-*nJ#zfp|zN8Nd1WuLc7(Dh%5HXsz>Uf!2#xBH|>1NK3^u|H_fQ zk>&V^^6}Rj36E?-2R}|G z)6Ea@YBwn^(Jz>{3~U6kvpswtiJi-rv^k-8c81#%n;BVIOiqbM0TrmzS$dxWtBiv$-fXEW_7>j_TxD_~LZwcn~x{!X`R zujs*DDJ{4_e|FKYo{_Jf8-C#uBn)GOP*@bC@KEIlj3SF>NI-%rv+TgOQG&t%X2ncx zvLjFZBUxcD0{Q@sJA7gox)EFx5{=#rrvV69{GcP~@O&aOC%f~9!h2wZ|E~G%MkHCm zF=U|li#_(4IuN6l)x-P;F8f>_lsopSbk<044xPJuntuY--A!9|D8957?c_|SxWbU2W_I?z^0O!Zhr77`Z>}M|5(U6X} z;$(2^1N8wARU<%Q;XwtYv#5Q+4c-`(r~xHM0h)%`a9eBZU& z7d4S4q8T1%uWDaVoO~p8%nvWF|JeH_Lv|bU3nD|+9>EtP$rpN>JW(8;2)##`;eI6u zBU#_y%p>jXL7^h=Q0~X-5uEn>yzzY@yk2~7;B2=L(n$#qcS@Jb(*9TH!xGBy;lQoL z5XjrnYr!DGhr&hM%Tr8@9_969AEXBJ8!tiy^X_-3kpj#|5g#QSQ`-ZF57KF>lq4Rb znjS{IK9Z~Jle-6Y`Z{(OgkFxIqnft0JH7sNe}q3 zUhTrRe6L`czji#lh~sT)$_=k-k#U#6z6J-jULWK_oGgjk3&}_h&#q?#&i*(FlD#DV zBuah5p&UL>VC@ZZJY+57If90njMUl&r(ms%F=6qnK#hVlMlB}2AJEukl#-j>NvAK9 zC(;xh-5fpQQm?v|4q5hDHl81qf6q7+U3FLDv>4?_{;uTf);h0M5KyUgL4PCoA~6u= z?Lh?9?sHif5@D1J^|e4`>IBzij8PYe3P7ft<2?D*DwT6FF`D>a!i#ivY;FK|YY??h%oc0mxkr7W^ji{_(8n?}6(-I%R}!PE{%$LwqlL zve*6VoAs&T^(Xa#52tIOJY=BqOWH}jI@dFylm%%zRS3k(ad57GED9}NiSMyU2@E$A z4F32&$169S2uTtTR{qRG=Iz=q7H_Ttqv4u@-~TN>@wDYvH0)K`NCJ$-5)=&Syh4mO z40HjtTN==uTa-Dbj2ksmO!E+;iN*K0jT!a*UVwPu@6SNL(ZL9v)dJTMeYi3p%9Q2d z7TT@Py}F@F^~5N&Y3+(94B1Co@NrlsnCT3%g8XITsx!NS}SMrAV*`^m!EYj4LawdGjo18b!Y=3`JHn(IGwb%jA z1KkMu8?Jx&B}5!`Mh%;)V97|8ef!c6b=-SwNU#AkMM3-WX$sa;yMaieGJowH-k&2> zbp=4}0q%i_mz<*`&fYfgSn`iB(A3Q!5SYFSEv? 
zY-aYjhqKgQX_SN}fCbZHrV^SI*uRT|ACwQV&SW^NkYmlBVb9r#3mEn1HluVLX8d8u zJ6R|SI7sgnN);G-^H>$jMXebX=9#5)FPcpgj?so^&6<^!jZew6csCaWY+BHecd(d= z1vI>a4D*A(SThf3r$rddD&>v9-1lL7-clcqt;?+}!G2ePt zV+^JGHD3Fs)M}fF#vWV>FZ%g4W`YVmyl|2HRb*_}4j`AjD9-Wx7C&d$6tV{EO+v6W zOKro4ff|%~VIrTdN4x7n0uA{S=$wz;S*d@`W>wD(2cX$neEA4I%vrVVP`7O6piP^Y zHVM;a26|S*3L*XZVzk+izATvSIi7+leU(hK#41qKQ@yFxvvX07E*a!}u3(3a&JtD) zc6H~C{FG#7C{?3HUN1wN7L$whtW?cMW_eL#Z-ssut#zNvOy0+8y9rCo;b>y?RZ)nG z8=HG)ibadMuyF)y5A?|t^?KHwo30hEAH?fqGkm7rXrLa`Hv6`Ax+b11KJK6QMG7_l zVMnZw>EI<0IjA!<_1+C!!CnV)ChIqS{w|t7+&nnD?51SbvX1!px`G1AS;yzm3Q@EX z%Ic-&XQd}Gx~z3%8JB(s0C`7^o3s7DZ}0=0_nXmYGUx`TeJ_pAG(m}Ep>HsRX#B1d z1&OWXCXd)Pm$2(Y#V#uny&B85NU;&C#Aw#`EJEDnLlMS9mmy$B00_k0XD;R& z{$=i(=50|f;^eH;%85%QK;OR9nS|nJm8|4X0(3Z_Pcknbq&M#6f z{Evf^w(gMj7Fgz3Nf#UF5sIU1_Na&9)x+>Hsg=OKrHgsE+bscYRQxgaZ`iP)-ahnV zzjcldp_n2Z>>T}|&C>TXC;e_M{Z58X3C3FzalB&ZlE1(E&jzI42fkjv&}PG&&R`^G z7H<-3@A2BkqLr23-5~$_aO35SXNHO`+}uyXotXX;!M)^p>2#0d|LXp>hm@Su#l|6? zx!8K)K6BsuSw(KoerO@$Nr^0(S_t!^LL|nG{ppSskTAsm_9_kJQdW(UVa5?(fHGu? z2uL}{u-w0Fy$h+*-Eyqp^pRe`<{Z@1vaB!# z9~6)~V6vX4_7ESbjAy$>4*d+WymtQcm7T%EaC##I)A5gjor?gfg#SDlD#FeMj)<+( zJSd`pn0~}RClxiBKqTLxTJa$pi#wo+`9v84VUyBHYqz9o?`PTpxT@xE_ZS z_gB!v+-a=ZjR0|rj712dR2<@tm%1FTEmrv&u4TB}gyaf@GdSgc~`V-z?^#*-ZC~+&#?GZ*( z)%uNhUrr+{4P3Ev$}&lpiQ$am-T)KF50tQLCKQ{ZOC+mpArvI;M#3NGC+6fhPGcW` z<(NP%(iI)4)op5On~dm^*_4^h)DWum&ZYThTPw$xT5(B;qKT>B;$2V~1D!DHnE*i) zZFPg0-}(hQ+R0;Rb|R%mi4Z;pgz4Ip)PMD;S!;CGHS4nZmwkU;w%(o6l>Db=MjNkr z(GVlIo`1vHdHqjXp7v`~UAso_Ge7qL1t%+`62g5oK##-{%ZzAEHM4rEa!y%0muY)R zVm{}jbD{Q*rhp6yPk?CV@W?KSFT(+z_1M2NbCNE@ zCjv#Wsvl?%vu|B5%!14YsA3z$VV{Wzq|=WmNE&NF6J*mns?wJ$7e|iLscgtA&Tlwp0%f^)RQg$XMJ3;q&*boXL`MBI! z|0k?nN>f-{J5Niwk)>6S@r%}SH^1jf?{VA5{dQuKlOaY|8=rh5;$wq+vXbpCvj$9MDyh5JZw=T&Cgop?DOSCX;*hCC16$<*(o<>_wQa)M>K?&eV5H1 z{ou30bR36@rry}bACbfW>$l|2<1R-UU#KX6Z^KRa+gC~3E#4whb=mUsL0!uB&=H3J zNc5UIjf#lz(9WChlDhb?jUs&9X#ze&GLjvC&*41fQdg=b3nKtsC@&QO53F-z!N z9a(PeLER0Gznn*Dje9|8^^8WNu8abOp*t+qFmv*sAh8CLRd!F@>lBG3bHH$h{%^iS z1aTIl|Nc|hIJqur4HdCn)gH+*?Xx#9W0Zx+uPg^-SBAZ(@wW_wE)uaoUZr3TskIPi z;mV2P4G{$WkqIDNnYa4*CJ*Crpz5&4kj*(C%vY+jfXaEW=dQ0Sh_N9F)vN(XKrbL` z$d~Ah#6nws^(mMel7J&CKK(v@UB|TMG!U_L_=V3msLaq?N~n;3W>yhIHTVlny_?WK z{0oTRBiU8iwL44BomKpt30onamJcaZKQ%W!o6+et2_bb{P1>hOorXlQeql0#-F=6M zcZ7&HbY&8Hc8ET?A{%ASG+Z^3o#6(JJQ;y>ZlD2g&D1I2OWqvTDu5#g?MLLxl&ja8 zUP;PnCaCs=Q$zUS|2X_Jqd#*aTpJ%@%G~wDrw3n>;%kgA14S}|>r2kTwhP~IB`F8_ zDaMo;JB(m14Fx&`aop|FW3TT9INQjKj3l$c#ifPErp&x%&CSIw2cC~(wD;O%&b?5V z&Ax`*@i=_N+~yP)Gd0DOf+I^=Sj$*cb$Gho$Eh-eVMQO$F2+ami7ah&jl)^JW2z)8 z5u%fYhB~~+B=Mu`o7O^T6?Qn7;;T_psN&eF%3NXhuxOFX7~Kw_A_%Iu@~bW7!b40^ zm($-oA6u%tcAz*2EpWXbtTsUt^pv|)T68*fVO$#SHgbHCA5}-B;TDTqUW9QZN zbZH!5XdY)ls!BZhHw6c!;US*{pm9Z``8NbQ-6_!Q4-#15%CS=AN{?m@az9&xiNilf zhXCaEJqoDsOjm59&`OHo@q7jzMoVzLXj|7qbDLY4PgTq!Zq~j@(*#AN>I>LZDRED< zZuy4seV|iApeK#i1`jddCLFWQ#?BQV?pU}{wB$?XgQm^J6JRjujX%89h%A{w7m#R) zWBOHdb&}j|4P(51!9WOd^OtycVRw)+2#PUkNg*w-5>=q<(j5Z1Ciu`{dF3AS&sJNW zoL%h1XOwwzPX3;j9+hVgfuXB=B%^CmNk%R-nlHOa;tkSEo-_@$gWz1n3o&J3`3qP^2BDSXr*nEY$+(h?3G^{y)aY!pfhB3o*Yaky3>y6nK zLm1lUhQx~F69k=kt^2SNU4uPffzQXeH%XmYxeZ5B9hbKvgHnt4>WcV z4sEnluvOD!wO#3cxD&$>iw{r#@Pgx>#)Upu8%PX(fN?|V{6SL7_EJ}=%8I6_Q0~@^ zWW4KSkV{Q_zr|Ux^2P_k*eaTL=*xegd|A_<8sk2F;z&pviP$~wH#fn^tRM0X>g|ov z5>C>7@467i4kQpPz{pD&uAuMK0=}O zl&n_<;}MpU$0O4I-`|(ue+$Q>Rk7*`LXtWk1RM`_S3c7Dq6}P8DA|}e%eSxBYiF5m z4bm)DcOyMkl#y=*a=n~a&vSU`J-kiJ zJ;L6bRh4y5Go$H6H4(tQ#YEL6A*y((i_2?^e}g`20xm=>c@jmFj6XFaxkur|@sISo zjsO+5aYV^{Pb+N6O1k~;p)hR@`)t^xhbV86W6N4{i&O3DMCbK+US;?ypg?OSYO7ts&a z&KReS8HQZ?HtF}AnRl|67dQQoTyYl@SGxdCVaB_LXF~oz{&d)5naE 
z$}f`oT!msC`>{&&>4!;KY;UohDu5-6xx%f>_<0hYO(o)It~Neqt*O`wu&%hWY`( z=_!~qOcK`3FG9w~f97aF>fYWxxTQz;|6$QZ!5@z{T_eyB3d9%jX%!#4hKp{z_t?{+75x z{P%Vjz}e*TU)WVO%{{MvLfIgIfV2;0fc0q(06b+!MrMzPhga$3L1f6gF0>XEH722= zs$WDKsNU_YFhJ0Kw>B{rtY2s*tyelZ4IXrs8LaD{i0{X6B1y>Wc zS#-l5+z);tvd*(>?47CfLu$w>$KG@GgLkvL(tR1$Lfqq_&Ysusg(bX}A z@zhR%SUPnHVL>6=@4F-gP)nj8&#%F+y6U8)5tWs)?HrXb2jsgfqbvB)$Nm6M$AMXQ z_V{(B)_unV;m7^+w$&%?j&MR$K=5qFOvji`DH#hsTHI7O*x!DW-EO3Pp#@Yhf zVha@9LlkyzBWV6LU=?j-?eqw3ItMU|4FGK_EC4pUr`00LL4X26i@RtEhF~75x;y{q z@oj`h69cz{MT(To2@Y$0GYeKSdg5o!@i2;%+`-7tAeW2k`0b=Pwcb5u-A=xO&9Bp- zaNlN1t3kKJNlQB_(-#)K_s3JweEV*;K)CT>7+Uf#M1&-PHQ>9dtFw-iL~W>Og+KUv z1pg%%Nj<;nfWIpGaa3d*xb707r-2_Ym`+>yswt>(6Kk|P^d1L%f`r!^{091cy2#7I z#lSa*!mqc-=uV>FB1)O^;l%$8!_lLe+w+GkYhGo-$!prGDX|aeR=}jx%b9~-_32;^ z1ZqyHMQTi`@fTwgXTYRYh(J_TesJ(Fnx7cue8K=of-wXXPy@sxzJTzC*@E*=L$x&L{hRY7okku zsJKJP*R2RVQQ}n>XI2&@8q~wqq1m3bWe@ptH#8gAc{el~=zswa4~`UsrMgZIwPs^e zUYxv&XE|7mQ*)k{yAHvrp-$vS7||OpWUJ{tkPY2_diHXJN&h#jnKJkj!c}rkm&=Ky z3$MQQ1!6mP%!g8=o7lr4^R-~ck%*z@n~L_eovfa#x_Cve#Fir$vfVDL34%ud1Gwj1pfmW60#!EU&=w;>d-Ey~;EQdsL0bG8o887C3 zoqb<^d$@n^o!(HTBM}vuukIzYnRZqN_wYOKIB!SzH@3WI^Q*0G6aV?y*lFK|ynbqg z8}IT$wLwf*Z>c3k8$2dcv0YQA^TrwBGIa2U+C4oPZ7FfaTS>bZ+)rpUeoVQh!Jc5= z%*;(_uJ83-Z3T=MzG&&mdwS|UT|`D_c6CKt=YA)B({ zjNj(beL=G=E9JoVKim?Ol*Qn5eP3gLjlt-W$L(H>8m%!AL$O}VyrB%e(I8014h3<8 zMD*pE+#N~ZAaHspnr)z&!(-gduijGu0fe7pzKN7KN1|&glveNW>`s!_XI;nD_S`Y{ zHUg*Qllm?6LrqNAM(h>oX>{4Cj+4Lyns@nJZ-cOJgU@6})X&DHl9urJQ0d zmlB{~Q?F@Ky3+*Tr^R<8uH@Em2s^ckH%9a?AoBmlA4olsC!7`B(X7k!c<pGF#iAb|PB zkkK6{m)Z)ov!@WQj30XwN;jW1V-CgEqSSf>tz1jHp*T&C6X39rkuOQU8@+enT~t(8 z)7i>n?iQi;9f_~s(Bas}J?NE`clkbE$K7e?=W-xptgSTE^v9oK3{&b}fxPTXWYXh1{9l4796Z)!cM5aLoGPaU@zRLmWv(S-PZw2t>E5R}N)}RApGZJ! z)7P^C4iF&0Hz>|t?df5qpzXe%AHri^mMGl3yd~yc-}Zb!aDR1Ny8Qg0*)FYLC|Ow^ z`QwZq1ILFe#z@LlPV7KmVLVkIG)LU9#;w^`UAA9pTpXsvX5+CB$3Ue7(2;zRO~)E&73M%On{sb%7$3NfA;L`9nthphCgjB^m(HGj{D8|Jf zY!N{LB~^~rGXZjkJ~9DdAv@K~M58U^dx)8vY}f8k8he;y^N92d*1SuU%u*#w&tF{3 z47UlNQX*z%CJkpNF8A-7c>Q1ARmP&WY146^W4A@eKa|RCT9q?_ z`YTGhl+WX9Tqe_5Njr%-7Rl(fUe(Ue&%KazY=Mim1WSYpe0_r66xDEKRi0Fg+|Of6Z9;<`&uY#`kex@UmG_1! 
zMskkP*R|H-^^1?XeXLJ(|7%jaz#mBpw&MxUQj+e1l zJzm#;^(R*8x#_(}eCQhLx+?43MJxW|cxDeT&YtS9q)gn{!Y#qYjLpGIC$?NXaI5_q zDSA3|VxiN1HfW?=F=59mC^bB%(^~|;8n!8@Y%jxc2F$%VYGs-+CyBPTdk+k*+05yjkSS^ z*2=){lwK@jj&Lo8aXR=g_z|To3o4hi7U@)dptU%G!YB8L zUpf2;?tM@J;qkwbI;`)>>BD}inx)~h>A~uRfKcFO-RwK&_w-kmg+Ui~zt7wnx%jV; zjst)TsTU0mrI{5K$(bJ?$rmNb8u~w$RWtIIJz`|L9I%%|Ej?W}T_wwqVAfvUsZn3dv{k&a-bRg90+ zaScz{qgZWPPS|nNe299ZPH~m5Eg8h!B+M{0+lR55Y?2Xgr%X`bN1^f2?Y_GBP9X)7 z+SF?Q>p|BFsvRXL&*5emHJnabpWn-@V2)Lkz_Nd3N(=Kbslvi{fc8%em?x-RvZB-v~vBbiAu_hWGR2{4BN%rBgTE$%I7p)#OS@|n69rPh84tV zLb6<=`#HoMMj2nz{yesG=x;V?&%S#zdiVr(aXSt=<2c2;CBqRM<8paNBj`m62Q=5v zWgMqPnME4ilth5nB24$;X+|jpkJ{nWYnkQM2qUAsl3O&FubmLWw$GJzUUGJa#`OLr z`A+?)*IqNtpqa0ujc&*@nvn?_m$%f5^>~9G&7i(trE`zdU3m0MKJlRZ!bL^2Fo+NW zZyz?pkMF#5E_ZPcDMVyYA%#ilJk=c|om9e4%GHT-?sOM=go*&OEJC8A6g01%yrSre ztwfZ|S%XmfKr|2!%mqt=-NBSSZ!g$O_DaYXY7b2~wsXPx>ilNASPMJBI@v|m z!|t#_mSSl(!^7O=jl7L_@GgFpC-?wQ@(KQy&+$dR#8-q*a8W9%MZIVhCq+zL5^-@` z42f}(5g!XG3+l*2mi$8j2!fZ2$e=Q)k{_r>{-mhM_{^4bTY9 zIB^JCP#;FNB9FjP)NODadgVOz;nrKwL_vHxWKa>+(K>9e7}i4xY=BbO2%De`@TbkN z1p$YcEek!B`$b-<1`aCiQah&ae8<2pT z=nWu;;VvZM9*n?!NWlXbg@-T(k6;`g!xMN46YvZs;W?z?1x&$9cm=N^Lk_)xY19hd zdyxd`crm~HMyCAoe^X?TjSS(19VC-~i6e_~rWDFiS0F1Pg1Sl$Q8ltgPEr(HY;Y&; zJcb?oYsY?mSVkm4NY&GK&i!%k zx#ymH?!E6#%LpBEN4}3U;WTY$@HWok3g(PBj|DBSk&B3$N{yCj+}g*mbE1M-LX5$5IJCEvq>Uhk71;3|HMYjPiwi{wwp>$riNxP>L$#xm}p z7)>7|i=>+I>u6wGyQZ=h+R`1eYxY^B?Q8RhJRqmSwE32B+FXm?KaYX(6IwhAB@8eT z&hfH3ztiD+p@gzoCf*%LzZs^bN{FRey_4vXpNcgza>d%+w020+$c7TitUGW252R0p zZ~L9*?Gnn;XVsk7`mS^j1LdqtG-H6Nw2u-*ZRDuE^rPC!}=jpliG}hixHY*aJ>-kZ5%laqtFSC7C&7AZ+u3-_$$#;{yC1RJbj5~VX)%PBa zE6Libe~2d9{M{kDWKWwNZFc4R#v77*+W%hECVt<{eXI2X$=>lDB!5@C9{AnNC(R)1 zIU^l=i>C|ZMO?-l=COclSj6>YF68Z0<0^aBP(f8$jjW@g-L`z357|NoEHDr8COstg zwLQSu)S9PZMyf=X$qTp$r$yXh`Mj(HmgVoHmb8^e?b;9YACljVT4&Oz%hqHeUzcQfl{# zvQ_CdR8Tdqb!?y}ze(1~crV#j@&!~r=vN9i!a_e_oKjh|79jkpHhB@ETdkM?P zqHcwbRq?)t3aVJg25Q(u9b0JNA)08RjSjkq*0Nzh-;mr(=52o+s2e*)()Ap_o!(E+ z)^3;`BQ=i=k%ZQd6an;w+8Y;N2yh^U~ z<_2ogn`E8bA{%sX)ANvQ>f1sa9dy~j`s@@MpN0B;I^u~$UW&e-Qe!r8CZaN0|9$Dq z`e-?><5ueHd`}#*ABt}PsUQ0o7VfFV^yEoK{)8VNr#tj<8rA%m-L52GvZ(NmVm?>( zO(Q(NVd+|N>aMzh*HA$f57d2(b?FV%u!%ah(7?9wACk?l{~lU=(?-X*U9v}S+|}$G zlJPrK`GNMcqjSdDnffoc^s0Q($~#U1(f^;`EBR{54^g~V9{gW@3IBooUwsXq3|G?K z*xljyB9j>VxVp+)*ki_nZ7R?C-$eYMSF)l7bc|QZVMks zRpNJ0ylu%cncaD_$<43+o_6=;d(u17pGtp*Bm(;K1O0~5kCc5U-A%OfzB#1tsODi! ztH$4z@2eXq8%m$h%Q)!aWXzQdCFf#PPf5-2nAv3C3fUX)3vb0HT`jcH!LI&&dWK}W z`^QH=p4X4r>RWL=&qT|oodnUtWZCE|MtJ&^XM?bq*6Cy;am9Gn1*UW3(L~~0Qi*bpf7FxM;cG3+MJ*$vw20U!)%JH zqb_Qaj+*qzZKElj)!W&cAF7ETQoB-ZFw>~9INLKXb*3{>+fiQcsz1z*Z%)bM|7YmE zzT$sH#%|)4a7G(W!|60 z-$#`d|M`Bs|A?MQ{|kE>SLvm$bBv+Q>}%loxkK*Pe=A9Ef{CopA<>)bJO4^==Pz)? 
z=&wBYP41I$7d?#SUTYLQ!4W5@vEK=Ac!IZsQ=<1R&&euL)WWu!X85`L{+FI=*zx>~9lKimD)hA)cxvHu&t@39YwGTr-`a(Ho@Uthw8DX> i?aujQ=(u&hpzqI~p4;ck@C%mzBmDYb*-G0;0000+1zP+7 literal 0 HcmV?d00001 diff --git a/docs/stable/_static/fonts/FreightSans/freight-sans-medium-italic.woff2 b/docs/stable/_static/fonts/FreightSans/freight-sans-medium-italic.woff2 new file mode 100644 index 0000000000000000000000000000000000000000..16a7713a451ad09b4351a550b1d54345ea7c08bc GIT binary patch literal 16000 zcmV-`K7YY?Pew8T0RR9106u^K4*&oF0K~`u06ri90RR9100000000000000000000 z0000#Mn+Uk92y=QtyUZ$24Db$5)lXrftMuoBnyLB00A}vBm;*m1Rw>0tYHU)6B}o+ zHF(ZAb$6>01TL_zVB1XU-43pE*grTMVZp|M@j$wk{r?{m=olh&vq9$7hZUe|RtaN7 zlR5}e<*$1ZogA&I8E^UN59!z^=8So%aH>{^Kmdxgg?s56KV>;B%`m6cXTt1+uGuAu zKsZ_W!ByT%HnKe4P5<1yUXEqIa?%zH=`aEhvj}jfhe)xD?uq{xqwV`AY9WC~8s07x zOp_;=PSzC2o~Tq3@ghUz55YD&5E~J7x~Zsu;!LDfEFw}HtU!C4+0*GF<8^8N(naOx z_KW;20RrUyH>c)s`G>nYwAWbuUy8shuu}>YyuBO^6d>6Juw=s8-V@LYO~NovAy(l3 z_r23xwLB9QL&r{hgfP3Kc%4&x%2oHC%V4#CR{N&|rr7{q2R zc!C7+)nGw(F}tIo=%`ML_G<5_n%km7-lm~C#Zs<$8xc?7%)|}J!ADx(5K|Q|2wa~Z z>|{K7nVg)wR3?>5r4ormvgzKJVac#-YU4X_t}9#Kcj}k+c3}%ppOL9#CvgLC`Zas) z+xx$5%DEya0wV}rt6cJyvhVF?;T>!a_j1g+4-45n%A}zJqY4xOT)!}Xd~y7ZulrNw zswA+ju%=UZzNRVFn;YGop~e6S8X7QqlK9g3SLWKLa%*aTDa@Ojh!1&4!o zw*g>T7!vG@0JHK7T?7$FK#XzFQ&#}=|>Z}4Jr3LXgJZC`9= zlbo2d%yhrfpRJk2EJg@ngb>3-m*f#40KZl}Tmazn#*>Bn2MPg90+s;^1jHX8U=a#D zJYbwJVL%>Ut0)F`U;sTy&4XKIaSQ+?U^d-$Aj`{G`Cn-V@d%z2Xd*NndXe|p3T1WE z>I-E9L&5@K444U416u>z1-s~P`vY>d9=1MblWcRq7H&IYd*4j_#|~+?#-3xJT6j+F zYwdr6#^I%7fa60rfFE$Gb^2tSaAqM3k@J!3kWId;Ps%Wd`5PP_o z)KRh~wvY+2&s1Xo?$c%q`JfOLlTufMz2pKIcYIiWtN>OJD})usieQ0qAWDol33!sE zNRuH;F3oc&99(pz#G#B;fi(}T4(w_60+9l!2VtOrMw)1*g;v^Vr-M$q=%$BW z`dG*SgDhf*VV1CzWh`e!SPABURViH!W`S{?1k-x5#3^3tE9?OQEKjE9W@&P)66+S= zNU1Y$rS8n5{(2g2B$S7ysF@tHEjbDKj)Lr6YMYj3wFSbVa0IVNGc9mt#fBXw^#ft$ zny=@96kuGZ{DlHVgP(O9LFU;qi(=4x2tQ5$5k!O#VVnqpMR2t!B8G?~5(pkAiAdq3 z5g9}lkwfGW1w;{1LX;8N(k|p0dT0YO(s2t@W;)L-u3iEgv>Y~G&|<$@gKnH{`Vs{$ zJ(W~h1%{l?DmWqdBC=!7Od7MGtn_46RRCf*jd)n>02-*8CKCvDevgdMO2Smy8*@dc zL`&ZXx^(89S;EZK3wmXSnrT9VT+xXQ;Yo=SG?zlYQV>d^4BUk!A*igZ6U)>{| zj?t?iefU?KfnFC_vZJ;-?R7BF@Vvz`}2aFBq`9TyCLv9!2NP=XA*4s0R1@uuI4dHVwGHXp6z4vDz zS)gDUL91D7q@Ie=GhOXXT^}YAkT9}PG98uX8rd3*C5#hti1$?n#C8loI|&m}Q8$X0 z-857|_f62LYt*HnC==Il<=!JLzy=vxC6)Lu%U`$Ni$q^(GLX*FeKEjxL?n(I)PJDZou_s2bqQqH!;D z%B~s>@vul%6yq|ZUhh(7+7jz%pUJ}VTF6L=avmLv$ACH_&vfb0XTXpV6Q<0VvtXI~ zJ?OEmy;OFkJzjEZW!*;E*Wp>+G3Vgr!`?U;T3_8#-Y7(ONf9uI6h%=qP0=PXTpKkd zQcdFgJaxKUhLyQZ+uAnz!FcW9)AH12Vax$FQ?rs5hOKFa%XL=^DyHyU>QZ}|z7B-p z>``SLOmw0R@sG7b^!wq-altv=Yp8itaK9C4Y2$L3(NkgNGo zOx7cjJTD#0noYBPAS?{)#?NsMb>$&PkoSCF}fN-LNTLhz$j--Tl8%c{PS8Q3YwUdy${Ua;>lq+PcS)X5{Fr_W) z(b7jF*@A7DR)ZRKM18KhF1ZQxb#P(0uvleexwa{)Va#m1qRz+6_!l6;~%pYI%~;GgdJ5 z#!X;g#te@ND&RApVFH#|#mpv~In=Yueuw>%0+$6*Loy&Wkb1}>=xOK!^enU& z+82#Q2cijRI+}ytg~4K7zbMZ8{eJzzF6-=Z$k7nPj4J7=N!_mfPSx#QH}o7d3XOi4 z8y)fErvJQC@8mn)W8K&FOPWgy)Bf-OzuFvg)Da_g+iVr;qlt%CY+nq$0P(z$VUTtz zHD5gtU|`VN#`Z^+WYP~W)2}i7pMt^q2jBwdmgM<=4*LN=LK1SIw6WpeD96Oc#V0VB zEH;PB;|qi$u|z79E0ij=Myt~sjEPCfDXD4c8KAkPtz+TvinVLjZCJl?^VTifw(s1r zYZ-R$-M{a^p@WAdMudx2|3P4V?J#_{qaZFJIlQx^eqKV`oEGdrxm)_u!)E1Iv~#9Xor;GIRby z_2rA_0FYk=0FD516UZiDxdGUD1pqLw0qw23`B0lt(6$fhFcC>)0Hu(al`lCar{s1T z;Z`tNw$<^2hyey(u0E>+_Z~eHI<^g$jZ~1HXB07`V>Qfu6h*K*pI#z7I%j@pf@n*2 zVIzBNr#z$DWS(UF+zh5??Ju$$s^kW zNiYN6KYDK%eR>4|rGRMPQa_%Iv>Y@jbC%H+5`U`|T_ zkQGrk)r|SLZh@DHoWCN;I^Pv?=_VvdFzi8l zplC$v7$3!ayUchMe1uN4_O;*~mQI>Q|iPMNo2J?)eyIMw) zRF&G5O{J&6W5+R5%yke)qN#bjBS=tE)6T=hh0OIRt++}plNcUH1(Pb5#Fd@^fzd*N zf4mdJi)___&D#WC;zVR{bzXwc@V%mMNI{8;Z$uNb1IUT|KaCWU&TpzDD!0kIdIO}Z 
z>&r18V{dg2=d(#N4Pn41y?u{>%W8dekTQ+P;a*@(8-Fl z(I9eK+0?+c>}Pq_3M(i40-f5mPfQtp&kF9(+q<`UW6`wkTuT#6^x2|HJt#!W941pO zz9=)gl(4Fyt=W{G^dhJ-f;^Ige*G%It(UHXM;Q=Gssj%*KB7}C&#g82-u+?-Iw?@f zX&!;DO=a{(pDqIl9C!;~COY}&F)-S?%mEjjFT zYMjEAQs=7*A5mSNMMCvT24RG#aC9}`xAr$kMG2w!XX?aZto|^n14XP4!4qZ$%%Do; zue>Yuxv1i}APX+=i}SgI3eLq)l4Xm$Lh1DwV=!FAJ0I!9(nOoqBN-iV z1IzwonYrY$b8zoj-bCh8t7i3XNMvN5v(PEp^}^lUWDd2FMVus6-r!(!{{~*G$DJmX zkIG|DbrTdx?t`>SGbc*;Ueca1EY<#A@Uo+Q;-;)Ovpxls=kI@PmUdS|NS zq&Hdt6c;<-vXkj8S3AEo9x5IQj)qyy=fNL0C(Mce(^yCAHa)<)m?*N05iq5L-E;{F z3SG{=sJGHn%F6?DQcZ8wIK>f${_#3HHZ)9-g#4b35iu4br}GN8w!{;jpVM4D4rkph zLh8a%r}ZK3NnXPW$YtHPg0{K0uHGqduEhS`JyuuTeCTuv)D5wS5sKTONI)B<)i0=2 zKZ%Be=5AU-BSZ27zTTJv8OK%xtk5|0?8mTqkRnDJ$!sY)5z;=eooYnNDL?G6hpVpL zlKYTgfWg9izyOK1056u3EOkp({Law2~;D5??#N_I{6`Z5!eifb#_=d5S$jLN+9-M)o=jzB!sT z-8REo1y43?s69F@OXPZ+0EC;iQ@UVwO|*Y|zYxCro=cCkk->sR>BRwnn3bdfW6r2K z-rr%$X&rGa5AyqNdVL(sg1uqLs~3R!DIK3FS&VIydbqS#%a$4@>GeIv-l;4-V?w`m z{1kGG@5zz`xQJKq?UH=a0 z5PsT>LV0;44UFv2VZNTn)jJEqu?bGiJE(&uD7G}EhIYC&01&Dtig?=D3Wt66?Ti_6 zr4Tw|rz?z(ddth=Hk^LBLK!_AFyc429xh%ks<;dXE+yFu1p!DY=W>eId;!mZ^EpLi zp|(x0Z~$#_jEU{>yRELu3X!`~2Y-0Lh*DOeHkSX9PuKF#JVg^(O_Dr;GLT^)rj((e zi*@&&x+N3tiekZ1*G42&0Tq7N09302apZ!6(ni`>o9NcfXgjn$nylS%$@PQiWJl^a zl*1*3n~Y=3fysDJ=wnBxr_%|FT%4Y2*o4~o+J5++lXsCQyvSSF0?fiGJX8#F74y05 z0GMn>j}z=lu}`KT9|O3_Ir_AH9N8wARqxbm%$Z5*A4pDus9!+Ri%G`so}2f z(W&xUJ~EM8`*01#cII|h>P!7T=HB}E#jmqyH_@PdJcBGY?6!?GJIjiCnmFo4m#TGk+nB`?{-`}yA=Chf)X3`dhK9_75F(Ma%$L;EWa1 z8`b8m#_!IL)fPzaD4^~~9^ur4e7EdaMUzh}eTr)uPjJL+N6olFLVqQPI@cMSITp_E zm0ZlBd2_wH#3(&u*x!XixJr&#!O<|^UvX``hVh1{0!O$`gC@MALIXGBv|SVPa*bYh z4vLr3r~}R^aZgUzFsr(SYxpv3)@lB>gmVyCS-!V$;lcT!Z32o?;Nxlm+?r=iv^C?_ z4;(yw7Gl5yhvJie6-;?lb-i*!DJTaM*HE$g|Mn)E$xQYHSDsHbCgR!nH^Pd&6bacW z?NFZ+GvFiitI%f_GRj!I5v)VTy!!+^36?E74C5`S=;CALlMH{96C7`^BuNUYr1S~(MWqj`-tg)Zp*uu(B%`-vv`9Rdavb8g27vNx`duP43*06IR%@(N{ay@D;tD-v^z@;N+CVIK!}M_O1c;b;|XS#E)p zZIrb_3EK!%zLXjR0|Lq~Yg-;;>#po>I02xSZdjlJWDxdc}i7&r?a{S1D>u zC1r!bnDjH3toj6uMM1K~XxKdcJ!vZ}Vxcf7v&?l7a8`9hVIl_9wvJ0Mz~j^o-j3~1 z-j?mW?VF%HV9$FRiJ8F0RTS_rhuXAK3<~(qNIywIuY^YR_TkdlKHebm6y3|JjSf6r zE0^yoGdGlFR4z;Z*J~_O=dlzrmNa{Tk|me1WZAV~vU|ZCN8$CbwAX3?b=L1U<*DMr z0_?+?f>39^BwQ~`lf(yx@bzpFeriMo9Hqc&y^JZOCJt7l5exn9`Uur@WsHs!9q6_$ zw%SQR<{+fLpKu}U;NS##2!bbzNEVb!1&rimP-%~wR0mFm)-2s5mMzSJODmEghznr; zO&AiERGX4!=&VhlqMOlWl>_lOLMh8Cr%<>>h za;wiY-h=Ad`jh3W%qvo)cW(|L2am@L9G3^RI|R3ZKn#r6-)-0V8;sYgg9-UEikw?D2?M0@t<41GEP~P(KNZSdEvRMgWU(mx}sQaoA@4#l{42?ZG7^2 z)3RHm-sS)*U1EwS7lnjw0<-j98{E@2q*RrpB_!?ezXy70dDsj^t*Q22jpg^GR6RRJ zsBo$aS|3>wFkK{2L}H$X^GHz@xap2XKyGIW(bWuZtjjDf&5)~uT}7;d*`ZCFmo;rT zcV%r-E)Jw-3&ZM+Swqq3lQotlO*?yZ9pjH-TwuIvsai1r{A(<|2w7CX1k71xg`0{* z$iPZVGeV9JJon96_LXF#1zvLub90-vENj|u=F0k{T%5Dq`{oTjCUa$(iPnr>x7k_t z1)QY1$tZ5x8jF_VEF(L=6o8|kIX(~kZ2_Yj(r$Q5STsh8M~(KV28?p!LkbO%&!e(# z20@~O2(;*sa=e$dxxRR3CD;H#e=+FZm3}i0Su80(1AbOzS@eKqQB_rCKXFd8S65o{ zAK8J?q@i=S7B_5K-!gpm#*&6j>vY}b%G&g@zRjhXRki7*eOrLsBp1!SJvRfmUpn$wkC2~qWR5;eM96{aL6a+U z=GP1pbe@3ux6xSx>{ng==DIey0=;c?Jv6UgzT~Jub`;Qsq8B27uq{56!#f0M53kx7 z(ZkqiW57fUJD$T0v=Q@g`s~CEw+!@3%XuFcj@}y1g)mq`x>!z@EHDNCDVd{L);Fu^%t1Zhb=v!B$mxLl^3Ia!> ztKx|zLMFukX4b3Wt$S3d7)hq*kjGVwgCeusK$W-{iA6c9E>LqtEKROB3D5rtq}EmB z=iYrg!NKuf1|q- z*I6pk@EM?ZN3B$tpRb4;8+3*!Qf)oMg}M0(vhT@Ar#XQP_|w&X*}P_5L0+|l9?InJ ziS|cbSsxs_ixNhmgzP4TQN=TQtS?TFXK>{q{^UG2y({3*ya=;J8pcMJLDjro1E+b5 zIl?$TI|eDHhkmD#MHhevBK#$v{c9QgJMwP4hBEVE_kv7+Q$!Vn`E=hI1ucL977v z!u$;_>0e)JwA3UO_iwT!S{A5Mm~2Iy*swsvWXt2l`gve{hs62&b^YO=RBeYeaDhc? 
zXNG!Bw3Y4UYEd1Nej+SfRD|c&NnFMY3ZS0!kaq-gBBemr%F0rT^x9AlnSsc`5z$_c z-89>XjC2`Wr(Riz=m|{h1r&0{>c3lv)mwqbMPigFGSbmReJ6tX8$3KrGLPI&sBj7O zbDDERyl)+tI9u%x!&M;BpVP!;#+)z%bBpbb6M8up}zR$P|z<_Z%^x%m3 z-dIk690uek*llb;A7jE=f_)-dLfJ)e7Fd)V#_`8`<7HO=%AgwQJNx(`Nhiad6;MPDnm5ABmE{g>D1j^818ai*C+BZw zz$Lm0ekKnKdimatIubIq#-vJSM`JlR__=dxT5Oo()G0fDl~pq{fgRvQIp`)ZNC)IV zp$cWyA>H8-Uz-+}+lh?sfM*GmDrUB4aj6(?v)&xp{5soLdLlUqhXadV5@W z0l7EEchuVzIhH=m+#mZ&K|o%+A20I05S6xM`o{oyJV6VvAie}~Q*7s~ zEN(Pa!uHbj@FjHut4#`!fG!A$tpoEtuRCzSqCZ3hSrWEF$r9w}h&U<*g@bR4AoB!d zeC0s+0=l(hogM4rbadWdzaxI>@zQu6$r5j>5bzUTGPLBjbOiwa+a~lL$^lDe%_=RZ z$TO*EIN@(9UtBy_jF1g=De=~0kwhmomlkd5+{Xude_Da>pG7Rr0!&0o zR0@<*X+^`A*@Si#vBAi7V_9ROz>p?1l(r=D6HQDJg~lMp@Xaxl5@IYb19)IV?+rp$ zcg?!L+{K9B==_}9V}o9;cTENLLydTBHS?)4I{nxigb2c#D9~8ebIdoqNgI0jwQjtm zx=MF;;?R(A6y}toh+AiXdYo`3`j6!f^$siO#D6U}hrHq>dFm^xP#$~KS=hV41T|(| z*uw~POrEwuYUG%EVvo?)#rII<*!BA?os}(44ug<3r{=nSc#5GDs1hH{2pUy_!kp@7 z<;GH^2~;ANN)QQAIYK6-LGQa5!VALayYz>aBTn_YR)~_lz8h|W5XTZk=b}p6oOPqE zuKNuX1*F2Dc3YK=%E?J5g?ohK1SPS1LWdv~z9)1-Wlwuoch-)V;xl6+*pdMo%-T$q zjVJPx(lg6R7ak5gQ&PQrpl~=+yC5U81tBap8+ts;0 z#;%hrM(_QfV~O{l??i(CtD@)WQhop9y7$D@3(LArOE2m@o9xxpd+I%S#wocJX*`X{ zBS%nW3>uM3m7|J%(5L-SB`EZ5P&3EFyZfZboAx@)5LPM>t`q&u;+nj`8TRADC0io1 z4cR%dG6makL;+R^KTux{PTC0&hGK(9$`YQSkU{RaJ@GzC{ac0bBq`bawh?#{r2g|( z?w+K`K|;PiqOTV!VP*i>ORRLYxM23+lmYuiA}^fUh1N0%41HJ?7#^+8`=CGM&MziA;)Li9 z2fygT-36&$aRPLQnZ3MG2=V*er2k1FoMtI>N2?yUzcOC@Oints9M^^(C0MD%J1bl8 zZ`m$Fl{=lJrFlVxv&3$~En%QZwKjq0D$#lX%-;BPHE@}b=@o_gEq;l3aYresv6=R-Ik@v5bI`2aYO$jhx)lPgt0&SGa(^?LQ~mOITx z5;vpjWUG$A^SRB@c0=Kj_0WQQLLd+TSK3KMw+(sn#XnU&!=EFi;~tk1^>P` z2@9>S6}ck>2q9BgdjJ|quXXA6MP2QMbE+ML^KmEPEU|#^;3_iMI|+cUYny$M$yfL9 zlU^EgRyRG0=jHjIr`J;oSX?(yJ%Kwfqap}w5}wyxD7BNg7{fGT z95prw22OWi8)640`P2OYxwfLM`}F#K!hwA{;CC^kFXRK5Z}#EtoJ3FbWlVf|0slK9 z#fo!Wsptu8-m#2LSA)FEFu+D0*zcS0;Tas-eKjgwmK6bvD9%{SBuoCR_3b+IO+cNx zA7~hjLE{+HTmIP+fLz;VncuG|YWryH=#HozAQnu(K>4=VZ32PD*4VAvuTP%<*8K>X zYyS0*ImpA^9Q@#Muqh~P zeoUqK5O4DC901##UN43081Q_#a!pMI^|6G$*x9L}iRcgGf8mFliq5jDrUg?v*~H^VB~QlN;`y zMeFTL{U6bROLO*g$Nu|rw0ivyJ@01!|FC!7A{_24#z(&QcY%iN8s|OW>a!4GQdP>8Q@36-&8cmBshtP>k9C`qN@+?vP`z z4(Vq0LZ1}YVX{*pfVa~E;KzFBK^iN_hkY_@;LT&+Jm@Vsb5`aF2-8C1l3)MezZG@7 z?doqD%*`4^ctk|2&|KX&Jkm=t+%Hvg=NW}+E_QPh`+dcELV!Q}y`OmH$6s|n_m+7e z-3q#mZn}^-oXQbkqocbma321-@cwfA^~L|FxGw^?#c?xXqW)E%f~F-fG@EIbh#f%p zwXu6f?37~}2P*OemTG|kMF471Ne#5v(-Hvk8Lbs0;?m{KdW$7Qo)#6fe8DBBmoB*o z07XFZ9!2&?v8M$9H!(Op4p%6X2J<;d%86(*O@EQ zDm87L7ofMXaGyBBy8hiw#1S^-aS0m45o7C{c7>Hze|U?H$E;e=-LT*|v)Cf*MKS6FW)@$V4`YMg@<0hs?83@^G85 z*_^$$>qUWckwv>``u5#*-Ms=OEa+&z(?XZx{X^$p*bIH=!w=&~DC-H5)P>Q0{Qv4| z53wEd=3*~}@z%vpe!O*I_4Eiaer-ZAcUs2S>X_-#>@9(iXK(@!3on!x+?l|Y}YWkuLD-)?`FldRsqAUXJA;N5f633_?=b2_ebksT*rye5}6 z6=w$L7yT21Q_7E^jt(>{rLv!EDb!sHe)}K5#?mMjd&RfluD=1=&P_sI^#5}EEee>Jow0m6vTyZhXmEqU^3fvMAh+D`GJn88! 
zk-HLbX;t!-c3Gc*S4~|PcevsNQ2)EuMi`IcdGAma#_KJWne}K#O87Qnat%?Ad*slS z&u*dMqvUJg`G9x9mS>L|!6$>HiWr(}rm^m6LhlrsH+|F--WqI1Lq(MG?kowqa{_sa zcVJVSoCs1gQy>x@oA#(6Vf6KN=Rg7;iKm^~pQ)RtEg&O^0$iGh{Gn$B0eS8>e-$84YW2fGw&(FO5+b^%3wO()h}9d?*T9J70F7G$fv^q z86Ok6p8DRP@St0Qu^XA`jkVJ{vzT!73Z<(Z!`QaWw|RI*lb@X->u%v|Df{YpQc4+q zqeuC(+qeg#*%%oZKlDPg)K3sKSn;84JA!GWqbl&73A*P4+Td$uei4u5GW+M&4$1Vkl_t2+?!Wj zp-pCQ7>N}R!dAgWA#ZIT`keY+;&3oFP^Oj+T4SSYwSfBto&Y%%r6n^%I?D!5EnM5> zy1rrRD}@*QsaY%z75X#{ZCBs3k_8ZW1$6t>$HJ<$O)Z<;)E%` zE4$FxG;*2`8oYcg3ry1i0wK&ztQf{=$wBYbvW!~ROMTi*e+crgh~g(xFW#Pd0K4xv z-w`GA{g&du(^YRwWkF|aTZLLjJB#&{l4l58ScZikR+UP8CnTsVmq4~qN9 zv@-o@P0%#bPBR(;w2Cj@(>2W*St&}4GZ`K=UIyaP3ZG~SVQuBKj}eUu6qg}x>1zjt z2_uEOFc+F;(BnU>Edh%QsFe?H;LcD+sb);siweFPcUZMJfIbwHq zSY$NaxFWYyqc)V-c)EE@fKlV+zSkw{-*UHS|1;jNRgGl@=v3cY%gVL#DSDqMI%Pmk zfIceW*LCW0{?zr*MU*izmbt{ZqOc5=?jdzk7(U`QbYw0GQ8H$^1=dRkZCMP#pwlZ! zAfi&gvF%KDWU_&KyyEvYphyiiif?QU(_Omnhh$syxCBug#yrTsa!iw)#dq1In*N+L z$L;TwCTk(c##c54Ugc)xNK4(Z6iw9=yYx$9n!Cc?7fp}ll5n{+Tf7A1U6teku1Y zrc=e%XFU?~_Zzo1W{WwmJ!D5*c@TSW_5Zn0T(E7)TxO2OOVHhN;<>t3ft8^Jg9&XtEsbOJ zTKebCg577Hn6}V!w`yPF7Ohudg^8V7`G=Ug1Tw5mdc3JISMCURxFgygX=`;$!=Z(( zk{1K#lL$UXaNaDjj4JR@M33&u6@=W?Gs`Rq(#)llUIlFvP|$$*&&#a&jVH5;m>-MZ zVitwhnBHIStoy=kDlRjC#a){rKBTVr?q9@? zzc+yM(g2PF$*-gy+zM<9VDaK%`@#Q0zeB&&U`}_FX96{EwLv{&11P6Mk2^FiKY!`XyK4H*^_R|IC;@;XKjTm$I)*HyPVE@KCC)c z0MQ$!my2bkjIl{XGPIf2#l@1s@ofY~3f@35QM)-uW%}xgVnT_X)XFixB^E0RPp62u z)vHb82$OVCS;EqTQ0vHXHHtlfTsi7^vIB(BLRuKi#ABwHWRLy`2iR;1afEFoLD-9STbq`u&;1Epuy9>hzRf#(YrlsyI6c z0Ffni;y^omZHFsEn5nWdCt)c_H}}gxwsO?v27b50Sh35{nCo--!rVwqc?@MOa9f$$ z>~PKxZIeEXcU0JLAj5kK`spn`_dz)SX0!!j*w0CP7%%;(@ik0effHOyg*lTv0hNjr zCMnmA1rU$&LK4OSEnhDkr2ti_0JjV%$yTo$8<6vwPSD8UNHH}SO{YL z)$iGs+_XCqtOUtMPYDsQWHouLQPgL1+i>|_K|ui$kRV8u3gCLmG>Y2LC7z6 zY=(|GMe28IcM;6`8Pu{7?2$t)+eUyELST&=i}q9e{pdAG@b%{{{PM1p{s~0L@(>Td zes^+>lW+*5;mf9}JB_swtx_G*D)j*bFzVJE@Q|!~;NvCxZ;?3F5S+;AsCn>hTc<^z zlr{WXr$%$gP<4ZMZ#`^>ly>}Q#+hyp+~7+EcbK_f{*Dbi-h|=x*Y*9? 
zU5Lk+PAk)|U-ezye;rER&%&!jNj~K1bN24R6HtH&4rnRjXacCMEL$vG#*I^s#Ckks z_y|o#A@HDsf(bA;UBCm9OM-(8mcOL4Weh4y8KO2i3f(bG$uTqmCdvebh}0Om?SeLM zFiAKRMJcFbrA&aO>L6xY<{cxTl`z@n!6;gBKT$PTmdWknxjKLP`^*};*n2^=rx}2V#viLYNN5M z+;ZqJlZ)wWe#wB3BiOX9(WeHeXl_oM#l@^a=>{$XS?%Jp^4ax|Kju$xzidx}F6<{m zu&Jn-WgFc)z*&i=shg`JTLsy&5F-Op*CDK;13{~~Rv;GJH04Vq^x z@Z;qT%!8*iSK}u062*Zyn>)cc7ZGDbX9pqwf-Nan(&f5^f(TWErxoCsQ9(QQZpEv~ z5B>zRif@Rtw%5o`7E$-2gfQAs^;{?*p z2>Y_?C+wi34X1bK>h51dOu8D!at;m~7nxUrFWyvA8x3?Jx3vj1O2(=5W6|;fYzY|^ z0(0>}SvED)qpQtW!C7kJnLfih_i5gTy;ix-oIiS$7Km?c4m3aqGb^LAY91gt-r3X+GIYmIquekdJIcT29}2@-kPp;4v$zF{ z#-Fl66|dn6+JOrhxN}FN9ksT(W>q8)YiWhb`MRNLn2GIvqxF!aQrr_c8+2fu2D;}# z+fl5QV6cNdS#QLH5VI*owEnHtW|0|MoMSzM0lKsXw0f~YI0(w-TnUuXK@WijM^5;v zA(&g?26yk^#f;+EyP7_h4=>V~wX*Ttrhsz#l;p&)C)XC{3oQ-K5`7M7;g!^bnW%9f zPhkpyw6ka=>n-Nen@~~2b#yL1?hBg};S=jDqqd_;QyhI|w~}(FaD4GWJW)Qg28K{SUwbAV5M1E`$jTlH>=_>HT=R!V>RI3?%jI zlZj9HB+{qOSV$FZ9%O8hh|{M|s7hilI`r{kgh>P$C1IyiYQV+nEJRgXmUO%wh-REo0*ALAFgQODzPxX{1{yuSDV5jB}2ofl9%FK-vT^t|2J za59@+Cw!XeOEc#et$XvdUdIL&g`A%B$Un^xzzSJ&3gqA7+({8EF|&f}aTJ+y(DAge zmkBCH804OIs|s~pew%xhhBA5CD7Ads#xTCarhx9AIVjC)5+e$KhH|_lg?^F|uJa}w ziYz-~0PoxAyc})(L}5yY-ARi^D!l+4Iweh$u+j6Vwz|>>@TxhhIFM*M-o8O+`pCwE z1urU7$(o5;hBWAz;qz4if+xM8YtMjCi+V=f)gshrw|@-I=}R_6gN1jBrn;b(DrF93NP96#xZXB?ZxsCR(OmPRZ5Ir(shGN zzRcX(S{%|w?bCW~qYi0n_IWGW#+Pg?z*AZf^Q?f=Rn(7^z39%;`m>HqJAsM{Y%p_b z5@`TCqH!_9xiA8bd|(d%n_N8y4}zM%42cPeJ(#>vLiA~(M-egabQFP3>b7z+?(le3 zMp>uUh$BmF5Zy(;>=tv-)g81=qM1>!xz955+SY(ajmI`N=gXl@c&$d5Zk|abE;Mo>tODW|k!wqQl6P8& z@Ubezm_K)yH2VvB?2qKuM|5=;ypDY^=i4P60&&l`HhhjjRu3mBOBcpE#LAPyY9ih8ED#Gccl=oH4V&tZeKYoLt;IyohrbE?v3C z@bL=>3JHr~aiU`45_m}|X&G5Lc?Cr!WjAi!x%c3aprWd#u0hn)($?|hSyxZrz|hFp z#MI2(!qOXWtp->|pf+mN=7c3)YuB&GYTIqn?XDI>`YhFH+CyvX@X-gKY_Qu|r|n^4 zHXzfCEN7f^$$1xC^hUO8F1uo{9B&P}>AD+o{poLA^5rQ|q)@SSN|drFQ?62lD%Ji_ zW4?J7s8j2(^%~S`)Z{P!IO>+Cp4n%==UzDGrB}AN?TLHtd*HD<9y#Ek5u<*0$Zt++ uHl@Q(r;HhQJk#r6SEEdr3}rYYBadhFU<8xQWk9nRt03g9qlHlRF8}~ScC7ON literal 0 HcmV?d00001 diff --git a/docs/stable/_static/fonts/FreightSans/freight-sans-medium.woff b/docs/stable/_static/fonts/FreightSans/freight-sans-medium.woff new file mode 100644 index 0000000000000000000000000000000000000000..5ea34539c6f54063a4a89213bfffebb244888eae GIT binary patch literal 32072 zcmY&eb8sh3wEe}ly>YU!ZQHhOn;YAZiodENedCV4~5@O$>|y0OPnH4eNhEn5_F{{zLw7 zqdz{u4@lsLz*^01oZWx8lb`&;002mt$EyI7wVlzA&W!q}?&J@aDQZ8(Z4BIh>YBg& z#QpjYzz`5}wgxsP0DuMNPwhYeKvX{dLy_Cw&dC`7@XY#&yZXtw$xg=cXzyt96YKrc zAE4=n=U!s#iE}eC{OQ?;|Azzr4;xill|S~Ue*j?VD(Ue*JNLde`=4Ezi~8-pju#~b z?N5(7IDeO?$(@#4(fzHP-%2kIY0Ll%rQT#wl0d;JVQSjU2IUIhDPixp)(VmTAlgE91NlVKbbp`9obeVd>5JMeYXmSXCI0P&cQJi5IQ^7EdP+Jk0FpZR` zIz!abyxZcCTb>fSPoPP{!ij3VDgPH=Q*wumxm30CZ=wk)d}@`5c^M&8i(>vv%OR!a zyNw4fv+2p{i=oaIHw-sEA1Bio#Yz*MJ6qXj=w2IZ8N{Q?n=4NN!YM|&SDXdp zXU12!uh@6(Ozlp5g1`>)4zyvkT_NkiM!`^8OAw1O!(c~?vk+rH0g7}nqy6RX-MNjtAiOw^sK?UO^UR}AzdL1nG6<- z_Z)+$L~2wMn|;TLzh#(A^J5$n=LG`a;e8?^JSCIQ`A$;T|EO> z3wt_eItoLmziVjE4P&vM%DiER=rl>QZFD9ZAPm?T)3rx#3h_!_5kD(= z`tJ!a_PJMt^%L!p?Gfl1YqE>COXMS`OJKOe8~zOZAbqWAvZM3rO6S>*;!9i^?Tsruct3DC za}N6O_dxOP8GC4FMaYZvU6ZNZl6E%lVva zS|rlpr;RHQ0GH@wMbdQs$LJbW7|$js0YvGBpT&Jq@{UaDu77Sd1Ez_!TL? 
z2uTze2o&T7bVaJ-^N#9*$1&*SRS&+mMWL>~&KsCk7v zk4p5DMddd;arU&$)ORNrr8H^ghD22(_@WY}YJD45sn%iAt-nj+a>P^p4 z6st0uu@P|$dnQ<-Nl`?zYuQKF>u+grMk}VaU33@SeH1kwM{&+b#BxWv$BdZbMk~H{ zHfQxPI<7Q3t`yJn9AbUi&ahr|BXwUf+c*VWVzh+K3lYzYmeSm8ci_acHe98PYD~cI z<03sZ>MzDqjXKV+wH#l4WUuA5FRt6o$fIe#PiU+KrN#3d(;yE$4K}-nQi+Y6bw?)6 zx%G`z3!$w>uXg?sFMkxB#t|KI8Q0R7)2g|`=c~xraZ-g%H4oUc6>%+bd1@DpcI{+x ztyX49!`{ zSJk6I0=-V5eXE<<79A7KttZ=v4Te}^m5=FyD6MKpq zCwVC+p%aP248{j>SGE(_>buEo)c^N+O0h;hkNX-aLK?_Fnu6fk!)sC5b~j>KYw?1! z!rnXrnfZppYwIB`^RKfwE+!21cQNP-QNba#qX?@aZsK!7Nn$UV&g89V)E3=hw{J&O zv>Od8p8L^AcllR8JSI6P0ve0L&NZKAqm5hbnH=*GRR2Ru$k$CH1P%%**Gxn@owNaQ)k1^93;&=Z-lU{wx;u+)OY=w6O=eLm(1p40=5 zq+0p?rcF1Ve5wV>d%v%;*6TGt`~=7Jj*H;PzgqKU`@UX{ajgAeDVcw7HL8!RQmgp? zOb6~!P5V`gMq{KY+N7x%<@^@FT2}dTRbhm1$r;;{C1>7r4V==HWnTQKI$KgnvL42( zS)J`tO=>zek6xbyxB3Sn+}E1K+dV8TdTJ2b8Of>c4`g0uT4c2_BF`YQHSyUY)+u<( z)(HN#4D%k6A=bpXS>>R{PWGx4^$4#a+Y?IcE<*vF9{He!t3%40gCPq%3Jtp;cNBTp zyF*f1EP1#GyD0TvKt1wE_@6NTkDY!fk9Ezg;1zrDLgnu(^~Y)j)6T52jWa!Qy)KTbYmrTo}C5)^Ac-OY~X~7?gh^m4Es# zzPE9I66B}k?iDQ_t$hQvL1_MbWV)}r$WKTP$RqMRmn5#DoFPj|IZr51Z%^ zfs9Ro55LZfE#{Dm*(4XvI9zh?TNxvzr$umZ3EjiJ{>bQoXli5?Hb; z%h^6%=a!gV%8n^xD|P~zt34Nxr!a*J6PTjaep452(^MA=Ab*?BZ+%F2-Kz9M6p19~ zwmKB~4bLmH)O5sxGRtN`T9ITD7>nT85q3k%JBXt~)P*dxRxMR26w;Og5*{%>3af4y zcn^)A*~(LSn2Lu|GEx6ZA3}87!SMqc*`~CeL z`%Unsce8VR{oPxOkLzcD()R`Nj(_1>x8D~6F+sr0N8jsrvMjY+DzAnFhPj5hJC2p^ z4~o(OoTsLtzP8H2-rnZn{`vw2`X^XKcxaH6q`1h$`0xM?H91K|d1;Y_xw*;3`SAe` zHabd1dTNq}y1L57`tkw~H#P! zS10*QTC4k9f{9dWt#-HN)cWH&tMz7=%R~#_#`DE?XKX$NDa*a>P=DYBxrqiWUL$4% z7Uuj!U(H0hfqH!<0|&z3bai*?;v(ng>|W)H?N*Pk5&1wLF>EXWxA#+4{@oE?NJ`Gp zcFh+Q%6o~dQ5vJ!m32tr2y{S3g+C@BLMqJnzeOAXqyh{8C;;37JOQDAI6x904Uh#W z0ayac09$}WKrW#0=coqI0O$Zj1DpY_07F0uAP7(ghy?rr=mYEket-;sFc{K*&xnpf zr>vF$;uahNNWkFgFOGH8#`L&huY545!KG+JW3E4kkQQu?@sSQd1pEQQaO<^LdHF@+ znc##N;@*P1gvd-*VFk=wT#ZiC=}c1N@fB{TRx?V+Bys&);8U!hvf|B+fajv~^XRCi zAP7xs(}!`F_b(Xm%()~kOTZzI9VG&m3|D|=Q|Z^-2Xx5$2f;lTW)t(m!y+ftDIw@l zrh`<_HP%$e1j1Y3(w-qbeNkG;KU%&L#jjT8W6O!U=J>g;ASR^OF=l=xD;v;^uSLS8 zdk0jhxQUW_*0i*SFA|i#KV!}+O3jlm>wP*6jVJm+zo!BqRtHo)VDaG*$WZ57Nye$+ zke&=H2O*IXaiKX5H^;t;_+Q15hr1*sx%NYv!H45l{^qgDHGXqv%GlW9^}lG~_PO<9 zNpZf6hOA>E8FCp?`a=l|imJSZV9#_@gC8iq&+sStAKu3LWkWVytqOCeHoL$xs6u-B zZfxF7M0BdQPe%&6*;QDU%~MXj>rVHKfaKuA3kZ{c&Ub0K8Oa%Gge|58 zW_tfo+Dw0I;Ft@dh6+exA^}igWoDvZ$iji*kmY9<@(x0qL4%mYzQdR-;hyBS?Y`~d z$U)d9QQuUlOp)VQRKhv|7_*@&gc~3gOq-zmBGQlR=bLk z;+$`)J4*L=!IRyFUhBdhd(<3?6=5D{s;_O#5+-y&`8TY@ni-0QE+bAyF6pQ)wzT_d zYuiX@Zz@%6gWBGL!z8K5ppar z%J(8;yV8~oQsWvHAGWzMPvm7~PyZfDGNI8vzfGh9 z?FbGVnb*+#Yt+X-7v_@2F^E$T#O`Ax@eI;LnX4!wkyB&zZJ986z+V}1%3CCa%H42x z(og;+x>+Ms?{NzLBX0bUX}z|1@5MmvRQd%^WVhLVXHR?4{v?M~wAqEM1$?Y`lRWavr zTQzFYgLQy58AEyNsx-}2zoC2E7G=9WEkcoQZl$K0n$}LND6wemjJl?()o9);XY2-I zkpVlbu#}GNi%Q<;Zbb0;xuw*Ep^u-llhC(?6Zpg1zx0Gj4i2tmX6i1M6$868E$wZg13`z4eaPI(nvqgizdU4!;EDQHXRlVRjj$hu6wfwyAv-Is@phfML|JhzHqOA zcKsSqT!HrfW#ngvm!**bwbp7#&@U!VnlZCC}+1D;{e z*n)nU4YVpgYL)+j*wZ3>Cwm4jLA5DpT$}W2^mlfwL+yFF15UsBGrFYv%gNP%nyiy) zca-0dUljwT2cOKR(3Y+cJ1$mqvT1|LL+*0j=U=%FSFWz(MD5D?1llE z=x$?!v@?F=4aXO9fI|!gSl$j{?+Lf;gR7mY1ReX-8!kn@*&)ssF6_M-igJO(iHS>oTv09%*_xsVe;u#)q%WJ*k zX|0a~mxWTAg2D{hq$+pIzi?`gK8~MRp%sqHx}wD6jwVt-k5mL*4rLAlrG8;Gd46q% z9*-|PsdVb%6lcuqgU*_SmU&5>3kC zEj1;#>+C~&&MHpH`E%-oFD6V`kAJCSDsi_AZJERXGWnULK+)ja^fpl ztpJk~mOa!NGh95jq0{P&&J!W+aBxm+)>xdD4>dX~u~E`GEqluW<5Ba;5IL`BN6mUX z_X}vn?)R<9GuKvc7wtkg*f8pPo0~*eZM>jzW1+`wOCGzwMDzS1z*kE$5X)F4&m;?a z$|gqMCAmpOQ{(BFIsyj2S-)kJ*7|(FHA}jzR(s?7DfMvP2`uO- z{NNTmuV!Al)`|#vb9VqvuE33CQF$-XGzq-80ov)H*`F6sxa6GBTq%tH~L0Y3p6gX+-OXYC0F=^Ls zD#*cGt{I155lma~yZk*gE78CvFggRSyYy_P 
zG?}J#-ig2}aJ>l#A`c92_AeO|rr3_eWs!I7*V~LI%Nn z6R3h?N(#_GVsPXzK9~zKC&5VJfd+-*n48sdPE%k-0U~|plgXVk(ocyT@ea%1C_dxE zzk%Qv`DFOkO1Wz{8csWV&qAb4_XSM-0ozawtzocaw?swKK=Q+XlIqj<`U2vNd8xc~ zXdLMFZEFeIIx(ZIwE|9}5@GFNsXbCDp!icy0ENHlqA6*Yu;4_~08EB_>8|^ENFwQ( zHF&Ll^50UrdPy(zPTD&@3wC7_hs)_&dVK1BJHcqgrZH*i^}EY+aL#5EMwugSOr@5ihp0p;2{AP3;u*`)J`hWRIt8a=SG*!pFfPO8WxpBIhum|-YC&`u zs`eGeZadpQADue+K91nj;(e~>2@L4vQgwTs+oSF>mx z>G?j^`EnT+5^VK+#8<3atgsCF5g2@61v0!S+W~!##}w*II`|J+0>j6>gjpLfh6f6f zrcX<<>I33Vn6aky)+7qMw*BR`b~R?#p+uoyfP6p~&Qk8cZ9YXuI&zAN z=glRvKp=4gy1uzI9qkX4@@25t0^wqd?d|2HPwt$UeNr)BeZlztnwn6{jku($?fpDD zXGJwdw- zo|Zmu9`q8IWO;?yVSeR&46gW??(~#;Z`^a)YB!%IoGIe+)d60D=zYP{`W$=`oQ(U0 z)S?L2k{m-{F&D&mu>qumY;EhZJmW*o(eEue8x>Z<8h;aS`AZ8u`P2!MixAR9{=JHq z8|rtz#aWA>P!EUgL;Op#msRNt(9=&z1%#To)54f9VaN(s{6%f-xR3NYO~2L9gWU<2 z7(G)dT+`Q2Rvy?4oEthl$Q$;?U(%chH5#85n1hy*?Ymtv8d0Ouye~aI=5|)O-eY=O zipA5IKh6-t_&R!icdKnbeA<7W(#)aUVli0GhOz64hR1i`dMRu5J>L}bMayh-eJO)W z#W-4u%|gTT+4%6S#mi_lRoL&UHZf^+SYu*KNn(*n+i(WC8$Y=x2&z?&J4l8t^4Fhr z76~sW-J6)$4N&vO^8mvvhBOU?nFNkjYX~@T3;{$bkc@XBc#zIOlxmOik``Fow)SZB zdKhW&gbRi$ea2dDGQOcCvni*%rm2=uJ9+kwlXu$c^`!UijroafrzIF;RhUuVlk6ul z?qXixES!E&&z{=e1S^eeJBAo@ojD^g*@&|f()+*zlb5e`XESr{;MYt1ew`bWQ`vB; zw%i5Jow&epui>_%!}dy!`YNefQ;D8VKjhS2*F1n6RgZq)L-d=7D%Zy5lSR+S=JVjw>-rFO zW6vXT&tqfpSmd1I>o|RK%64O91?)+wG_>QH%1UjZk)O;-i=R9-o<3dZ9}ByzK=yD@ zF?%C1Vflx3fhMUAnP>tiod*2KQXnq>{m=%{%D*aVh?sq;fc=W0eb!cTIzIIQQzHw+ z6U$#Ym`44aD*eC+f#!E@>8bp4d=UjYdg}&V7%>?)Cj^d?;E%M~@k*-N3f#Uo(%-wj zOCm$@)+;)E72{Y7HiPv0T5 z@1O8z-i4?0Xd5FkT^x3EM~|^J?vEwqkyjb}So9W``6hC{F6XX?nWd@td3`8s+gF`l z_d??sGjw}xhicN9ocVS2wo5_}wY`OGA3e9n5e?@lY_>QIu7hXC*ScO$DH+&94pR}a zx@#}TI*DrCMISp}Im)_xstlaY`&Nss274V7Cszf!y}=Ikhy@$q9Yc^}$t1F~T*t%} z;fC}`%!+X4MVdkl(Wf{lvxC5>Vg4NWyPsVXGg>DEdL`FGYOUSf9qGKcIXPLx&Rs-v zbnP0_&wrU09z)F7!%|Uyt)Q3|-_um^QUSCE&y8a5{uM%}A4~!Er!i6(l{2@XcO2tr z9%|5|vc@^|>!rvrsqI3r-Jim5RDR1F4eX|ocKa#Mk$ws4KmMv-XlnA~<9XO4NYU|{ zxXkqfL+?yI;_;L_$H=_iX;Xa_mBaTlf7&|y<5lBz@#<5nyU^ygrowb=!Fc!~)`7$C zXPf{;a%HqWP%xzp!^$1$aqMbtz?vWlNdDD6_XVUjd?I+Ymz_WuV#^iqv?mNY(hmZR zgBpm?B(z6;L)1{~WoMl`#+QAcB6wwp2hyTtXC%V8vD&&O+Ick9y)H9EcWz0Lhkn(Dv9CF>j$1=_osd9 z^pq1*t9xA3(>jc13DLPW{oJM{BS`e0B@alqH) z_3P=F7ifkhQQ*vJBiCZS(JaG#Df{wH+Wxs2LX(Tfp6}XbLjvv3LL!Ld{(ucZYwV{l z<~yzD9xYQ?{);(HUrfs7iVvpVa}l+76y*r+3#WQ4Ldbg2drqnUfMC< zPvUn=h>8m*sA`Y4OX*$#Y&;k8)fc!$5`TqcKmu>VE=^QI7b%M;J4Sn*G1@G!NbeHl zWD+i$G^@xg57INiVGfUDkxgArQ}>WcHnFzOhDCA;#GXwcK4}o2&1O?rz~sQzO|jh8 z?Jg23<=M_rz}Fx{tI-+$Q;cy-h^9TcjxHx*HHR9JRF^B8iK0vQpmM4>M9$N(%zcs|~24;@q<@$DjoBqBBihi{3io4wKvedB4LLMESQ zw(R#!HtDCnuEm)@9_c_#PLSbR@d>oMkj(T4)RCNcd+ESY>3JD&qc!Xh-N4+V^Ns$Ax-aly8-5H)xhLmZ-KmJ)eklb*pTJQ7Z)fnrH+xn1{>1JTHbeqJGGNlv{y zAizeKjm$(;&y;pNm0kF~XQS=ixs-kd90Y!%PDye~j@aP1%+)C63$*pKIBaXm;U<5` zI@J?tW{s`X8|ofV6IX2y*rUvglCq}vmrZ7=vBB6RZ-UA>I-|4Hc)vIDQQqNyi|KEB zKTK{A++{H)<5cgrZVI|YnU2G}Rr51pnD2*Y5F9XBF;7^j_1T*HXBLxSPRU=Ic8Zua zPUjd1*np!>IIKLKdw z2?hAnzQpenKo1pN|3tfwPR2BB~uD(@fAY)1+$Ki!aN>LzKAg?yda!k~#Ur3Iuh^_1j+ z-km^d11v&xNS3?NbY@v7$W(`o`C-WEnvYjs+3+spkz)y^RF_kKIxs(Zr~7T_pugfdQ9`A~_!n(Xh$~CGaL`=8uYT-A8i8fZ$BZ+D06Y1_|(U2es zZb2N8?biQo%mdyj`LZh!w|TSmxSeBsTsD32fr@@w>~B9k(mdC zngv3)J&cvpvd(WVZ&wc~pCUYG_KTz5s~+e3xjmZ8#(jg(?@ruepNE&T*XjYUyTS=_ ze09y<57E~s=ux*Dxz?Pamk(?`l$dt?OF~{le~Hn)EEENd1^z7CLN`)WzO;{H*^`iL zqYo}b+gmj<7{8h|5mPIYe%obfkL>)_R7S8e2y1}KDCVW~YhfIR%EOY-q#0SDG1uwe zDiF6j;ejbV)?JP6LA8k_raq2P>#=uLIt{rTc9-2qy?QDJHT8Q{*IJqs&m$|FAf0x|_-aQzc9HerR zd*x@}!auOUn772NRTf_==1Q%bw;(c!n!s0MW?QD0&(QA+HJs2T6WT4*JD=f<^S=#u zj{6=G+JdyI*3b5~+E{6gajf?YYm;&p2D~b>RNyV}(wd_G65ALHM30J(hKEg5z#&^E 
zg*F`iM9vl-$yE;VcUX2!%iLVAI4LVww@$-w&s~#P%J?)pV1b*gl!dvM<}YMCg_Vsi z^900mLWB2)zVQ%VUA9p4_EtxDIe~m{f5}v%W#~DphdW%K9tTyy)|;nn&jwluSnAJF z8w1h5{qszqwl~C*Q{lI?oi(Uwofm(mWH4G9w%geeKY2wN7p$pLvW=}smQPA|%y#rD zJ?rf#_>XAk>FM0WkNU17dvi(jrIWe<`picdNbvDb=g&j%7GJjT0YFRueDb7Ml`cSX z&w_G2gnikXdgEbQKP5eu5m#TPw0_%=o&Ih^cnIC?5lBHf()g3EQcZ6JCB;IxRliIf zy7m{yciwD-*UB@QtGY7pJn2!ty2)_+e&IgzWktP+n9E|~Tx%6bYNR4Y8bFab^j**5 z$6__&YA&YS3-MKw#Xq7LExrV%-gHqd?NJgGU#lH(X|`=Mf348M;N=Lpidwll<~tAL z?e89-s-a*wI4ZFz3l+IV(sl>Tl_r2oHXUgF61hpE82>9(f%U-_5m?<^AsA&iN>wg> z)w=E&Ew97Ps4H@m6pRR8{IK8x97m4T{zXeVdQ}7+J_4Hbw46D2W&bG;oh1-Y9-Y3( zlI{%G84{zzbEqyC{uaUas=};PS7BEFwhcZdBW0^|PySg>2TQmfP8xkMm!M#ZsKaUXvp|C``+B_8$-K*-DR+psI!umdPZV1MUEAn+vjwG3*Xc7 z?R$r_v(WdbM~&F6@=vF)uKfN}kv^Sb{!^Yf7r|2*lh<<6I_@8bY#{GTLO7DE{?~)Q z>M(bWV+jq1)fJC18saR{R>=EBu1D*45 z2Ihih7^9ZNqK<(0Qfe7>W-}6xixTaIVA_i!2$w?1N-R4~9?5ZNwV5O6`7B*I-g#Tp z8fSBOn=7zN-4T-W6Ta8=y|^RfyjO*KU62qvVMJ%!s-4E5Cut9QyBri7E@KIIK=S&> z+A2;Gh0opGJjk&K?>Z}r-b^+;0iDe~OGg-WNH6T_rSYQB5~&%s!xHE8HozNK>azNm zTiQ23gE-C2!BdmTC1-=F)Aa>W(?EWkZ}6C=LQ~{e&B2In|F@}5qk-9ZY8POAD*xmK z!+)7+l*p0TBc-ln2yeCLT%cGEVJ!NFfsX`3LzCXC4HWI8BCd#!N@KvWXO>6@C*=;z zf(-l{stNrfJ>69mw+$DfZ+o75q;nHBsWglx0W-xeF!9xsCXj>%BbKXyOvWy&7ZGCY zJPAxSjI&q4dAKw#oq;B(6LCT)tAUq*H#Mw*5`wqZja5rWmt0kov!oHJ`t(xC3g$eq z-9~Rwt;7^FVjLGW(dx6Gy{F@Wo2~OmvJ%mrx})MWVy}?w;|zms=jD3Xh=bJbx)u*3 z42@NHZi-sDvhyI;HtPlIe`OtWj~JNMcH}W!c0NG^lz1ZW41`Cab%)~wD2-lQ%7)E zMQS(xF76UPVkIA6vBw#f|lcX9t7)1%hoGY}Z~EGI%FpbHrcOe}NZoa+n%CjYrS7#^fazV~FPrM9X`18`qbbwf7jj#c&+k592tg zm^lW9I?a(Sjc+S1gQ6Ad`@&@-^NEII!a2M=@fbF82R3{~g{|Gy+OmgkNtrHrHJU5) zh2w}r5%HP+K) zG@#`n&@w6)){`Q$^qkz$f|$-!a@iegX40?Z z;=z1Bo&|4o_BLNQ(s{T7TTTpvxqxHSPx^}Q#JPaITGB(tXEjVU zh;U1dw<7+=yRqas3TFj3$SUz%b7W!0yc3&LL+Y1c;40TkG#F8^(AZ8Vq|w?S6tX{0 z;Ckj8K^~d+1okvHRftD_Dic`VbdxmOO#~31Ou%5YEq2u@ zSTB1xRn7)n9VRg~s$2LaXJAaDWtLM_rk-g_rJu5VEp;?l_E?#wsYQN5fWLnk4+dKW z8-DxGC#)t><|jULJDSly@qf=PG)91n1b&vtRC^rujX65Ic}#_Y>sj=SST#ZxgW%xU z`k1d?_0Na!uJvu=_We$R%iuBJ@55%bjkzaqICnmmKwoaHRKUw4RVpPzhY3HI89S&m zwnT`Or^MMOY*6thq~WHL)Iice7%8dC3L40aI!P1eAf_q-7bOzyotx?8v89PpQpK*W zcH)5ba(;(v8qDiTonx-3*Pml?gKrP!eCZO&U%0-oeor`F0rS;n_?WLILoriYc@EfK zpC$m;!+9wEYsieW=3mU8bUI(u316pz-Ix;ag}VDUKt_tJwl5ed z8nc{w%h4R$b3GK^2B98gh$32armKci@%Z}9^q=+TaPQmFxQV;%a~MLgVpI_|f|B+4?x)ie$GMzu zm}m$V5Kb%(DYEkhlejyS*JMv#OOvTZoyegQ+MuqjqQwoxWdI$5mhA|16-sbvu??X& z3b@+=$}*YQb8(o+IqS1#)-HO7J)mgilr`2^>@NX_Ovwresxq3***krrW)JzfcB=1a z!M?A%?H27~RbTmM{gsj9Jm4ID8@*1wR`34zJ5$v=-Obe&t*&;Yqg$mrf)%IQn10e` znvXP!4&v~C#azToR$Jx8p>vWSyZh<;g3;HKao|Us0@+!d>M)rRZD}W={{7yKLNBK# z!I|hor=2v9{{#FIiEVScTSOOA4mEsdN!hCT8{+^t&aXXiQ|jeuu>8Ddb9{AFou<8l zwD1?qN2|Hd*{hvgm;8qZ=E~T>scDeHb-&2>RKczzSZ4N%Z^3u@%-5)|{4eHXv;!hk z;GTWS_lP^)U-ElAGI{+X+G@ENkpnXGDVqbNZ9L=a0`T2^kV=bxyllL8F$W!GRD#2UgIpw!aUn-OU$MGW^w%R>MB;-L0=qDMi2 z@8(6Qd>RQXZpbrO7|+Py^Z6Ln`Pt+7Y`y7@;Jx`cn)|J-)aZ!f;}+;KnAE{VfZO8H z{RqBs)7OnI-O=;&o{zG<=V4o{&SGx$UNuQcKGJH1^}3nvpb}MgsFI_AIYmrCd5TY( z*tapDv)QAB%tvu+s$D4}0esY>y%Z+IkU1dSg<@b9GLoRYP(Y9U86{A$4B-J0?F3yR zOa=DCl){V&!FUM@&U{5Cz6pgV#QVLAv37W~U>VI&^YPCaxkg~TW*GWTd!n^1LaYg3 zg!}!EcNjCo(ik`BS8BhAK0cndx50v$D(e+^zJ}c_A-VNORYdj}V#Xg>B==ux=F?+S zAyRbG9~Vfj<;v!?R5TDZ{scu1U?0$YyNc@)ZB+XZnX-1Hh=UhdoAV8K@LjW27>?uX zul~xw1fUve>*_%Jx+h6ftg2~bTyX?lm<z^2BbSPdhqF zPREz6I~qz(&tGz1s8>Fpra=bxkKF@)U+QaCSS-xm8&6VpT^kpy+fMs&UTn*{jQy@b zDtIqr#8W6_>SEdf-VKMUOv^HJ=Pw$YoOkr$NEVFd&_Yt1%vho0%s?S$l5wV(b2|hH z1OoAf8Bs7&r3ME&N41NC5il~v-tf#8y+{<(_FKUW?x(yXQCNLz10%+d7@n+#orCI} z5Oka$OyqWJGfN9J3(>0&@5MXOW^_4)GMUS&ODfeHZ7mHoj@col_t0CFGe+*5#NnF1 zY?p7GIq52FTd>L~(_#vY3Mo9XNgwFmO{Fy{;dZ@+7Ca3tE^0wFHGrj+iLr4 
zoj4);sSnK&ZcLxQT2gWLz;veJD zUrpQO0K*=Sl+(WyqrMOm`mcQ0;sD9mt*@0m`2j31o=328I!JJdh8brI95ILW$=2+e z)_Y{f{g#a!oc41B99$zI@@Ko4#|u&@-KA3q$pd&Rxmq?}?0FPx=R{^^Y7E!BZT?}n zgW=e$;$PFHNINmqy*XtVMWbJu5wzViD#oj#aAw@;YRA+$@(d2+Q3SQqTlVUsR@Ce_ zoB8LaGi}ELZ%+he&*2zZ@dPVe>%%cy3{RhaF!BTC8^zwTocZI8_t*CkJ^soGUQ5jr ziu6S9Q#9&z1^=dZkxhf@Cv6cVdFd~$`Sz)7l+te0v@Zn8dl(jK9?jurruBOdq$a0o7=&+rA4jrX5>(Rm&XUe2dqmhvzi0 zwOnbK_rse7CREt98XeB-Tm-UP46i*Un`S9BSoFN;csIJ+U;AFEqH*6T3-uqYTSN&$ zmO)ZYWV94fpMo%+)$&@`mMpuqZo8!Xu zLaOvVGThhTJ3ZVbQ_X?Cj%BH|6kP%>-O83?%bUmB1< zAPI-ZNBxEEi92AY;ur{X3Dkuce!hq?)=3v*>hv(Z>09+sUWD>PjjCtteMimaAK|O6 zDq;v~2%To3;6nJ#1?ODTFv4%-Iqw=0*<_>BVpo30W~tR-myX@Zp8o;m?B-wXF-fd> zi$Z$@IHbW^6{{elCc2Z3-Ej!LBfm1=0R7VSR=ZYm^JVel*At{AOkEfWQ$|X#`n%*9 zJiA{o4@#y64ns5b);EZn^qaT=GG<9%03S@}Vc=Eqn?n8o>Bf$jupQNVBRs>->uoW) z!j6kS?ky`RRI@iZB$xX(`Fzp7q-O+bJ}r@6peOlGHjsJ@WbU8Q_5W?4Y<$Lj%89(T zZ#-J{=e=pB^SYkd#<|hK{>_Yl!-)$&3^ipm5)H%vs<)ub_5mH;@x@SXCmk{L??(P( z8Zik`l9G$;&|eHUa)5vd4}p8LwRa>lJvlpbg?yP_|DWbjg%y+WODgLmn zQolxQ&2fh+v6{9#d#5!$L}S+XvwD6vD9JC>gr4f*GaiGjIq44vL|(_HuSLHc#5; zix3cdO?}SNRtgy?lg(b;CikazoXyZxVn^Sg z*eju9Sa~J$U46hD8l`Fhv^q3056^(b>_WzHJ^c4-0$k)bo&nLmy)rtFFfvZ%mKYyN-f#!vUPdcx@}-Gfc%6`0TC#ggQb_s4r_ zJoii8m{p~<+PwEm_++O#9MAh!+MBg{I9e$3vwT0=?mP_@{^4kX@qHYW@)fV$gmOS2 z{052XT62@Qs9|CIewkmYUs+32nNy{Oe`Cr-P1ItJoijJ@Gv|TT?fIVolss$0>J;!u zp%NycEZ{PF*&+$FM)06F_jvE-%i6{7jfDPLEpbK?K!0 z;4rc!n={a-YM&HpEWS)lkhsxkXs=zY$Ku7R*FU^$$nqO{-?{6?dnRqG$l~U%YFxc> zXl-Ox-rg zX28jwXF!EI0}wH-q7vIxkq@3iX*f|3W)}*2nh!zMW~c~DL4nyv0pNr{m)Q}_hvClk zu!Zr>c71sX&Vzt6?nOtM;lPvvQ&b>enhO|3sssaql$7JI!p^ueD-er)-;7b07aLsx z-ss^OmP3$!?~Hkh8aHm=8sENm2iJ6P!lb6}oiD9_;NHGz@t&yiN6MMMrE@v{)nGVb*;??Gf?RSCQ~NMQ{l0Z3&Tr9;UA!3hPvX||V>P^_5t@JE0W-uL zQqk!$JE7A#Q|NTvVkUKqGncZhq$hUleBz0n_dj`E)BJ@^O$!&S;osQy!VBB(Jay{M z#@laioV#%&&WYI|K=<<-p+6bqYAqs#2n^62Yed$=kvSaKvBf+(hdG!A%}FJJnNDD^ z*fY>{irXD{&5jXXOE~VZ<2PViwoX-4)36AX(gD1SuQjxuY=`jLzMa0kofaq`ZC5^` z1wij87vg5=hXAz)6L6K}e6-_2vWWC5LeL)B?QMy>S!~%|2y-leu5Nx|5{pQ-Gguid zKs&&o37K}R3xm-Rjz)a`X!%WjFE6QZjK5^g?5gUmBiG#BYwqefW*z_X+_}28_qn}A z3&NwSV&RH5<)ZtM0mm63ITw18k&8Xaut87CSqQRN?TJ~GsUI+=NAuOZrELaF)ir zk$tP`%8RZks+>FRj{3#p7H`PM{8*r%=Uo>AV`RnV2JTps>@ ztrFlj@`p(lu2V;^n7PBaNL-D!XGGwxEE%9V!(15sVVDAFL5TJiH637bYLY{XwqxXC z0FMic19i-q64xH89yv6ST{EcKf9O!r%3*hpRRa1=ZHF7C_Hr7tg5&DAsjZWz_itE* zE21fxaegg77<`A9$uxCFO3y_SFEXIG_=`+(zjzXP{T0^F=hy-T={Z4u9)|4s5F90l zdJbg4t1wBQC#i%s{~`?CDKv0#XS|_~ZzCN`=W~;Y1=m!VSe>A1_gTAQ88bACq~z)d zc0=Dv0hdssw6$=X$as)UMeLpoehh?;?JNUg!DXZdo9e06^!7y~jM?>NY!Voizpwt| zs*Q?dTXJnv)BdmDyK>Bv!D|ius(Y8a@!l)B5IwDL0R26QNP5t*pb-fZbA)R8TLXsk zrUSz{DxZI#12A0S1%Tm#jfex^Mw>m`A0d76h0S@%g zeXO+73JI03z3HI`*53S+-D-QlCfby=iK4WQM!^^=(LM*$ZOTdqm)Z7jTm@}X zHVTN%aFm90G}NRdPAZ8l)UTudmCDOcD6g>PbY!LSp8gP{E21I!$Z^5|0wlr!L^==| zt5Y%}Oat1(aW{iOb2~xDc-#!o*^1+k*>N0j5fjP;Z#W^U=!P~{L4DCip4&$VN1z8@T@5^PGjwdtjBWX|=Lt zwUQj<@9x)s`JMAa`hS(Y`OiN;pUy@91ZlETqe+lFaRG|-3d!X_4o^tVv8fA_yXm}~ z4|*csiGP&e3-m|8?^r1Np46dow~YRr_KYN+L35|E}&p_B+B>-^=mIuNBWTGNW7du{+4Gs z&@^i4taR%_QcXmz2WnP_<8}2&t-1&>SA88G)>oR7dYPfGU5rB3Alam7I1yHPuPUrU zviX5_!OD81U09ULftMclaq@NA0*g|NU$x#fg zSBlQ#228nB*CNmL7Ue*B5|uhm#FAJF-;V*_QjC5u+$@JqT|MMMVx&dl$L5q?(z~=c zKjLu)Dgt9Ky(%lv5Gv8YvU<-PHu=#%yoFHrWBT@?!P<$n)!qz;E!!LL=3i1(lvQ@q zi1JH<zq%)3nwr2#b=zE-xiE=91W^k-VSd#0)TXLbdJvQk%sb^?YPfd*MRIu!g` zHO`XHRhSNNg8$g*xkBo>CUgqg)8iBiADn5A)|P5oX@@S4{XFq`(`(#mZ(6tY>ynE& zS?7~&*a>YoOny!-@7#u;rP^>+h~WkbSK+i2u7qhhH-zA%R5}`xf5DpdGicV&W3q4+ zV%*K1&zLJko%Vd!$h3S{MI; zCm_6b!jj6Y$v&NkVE9;5WuzWE$|wtBZGb1~5}gU1oqDoZW+OtiVH zZ5i76_9cs(n#9fLm6y1+G}IZcZH9eGngcL-lb0?`;kWT;K;~M3;u9{6pavqBxgQq> 
zQA3;EjPX#m1xq@Z8VbiWK()4l*WX=}qAu!z&g~iXJ?*hwWP3Wd1GGb0I}}b;4O&A$ zGOyIuxT|eZpe-(P#rL)aZ24rt`30Ke>6tt(b)A0)5`Ctsj`Y?g7yt8G`!=z7nx8a% zPYXREgt5*55tkiiv7-w%l-0l?0H{-yf@W6vpU6|CASQ~P2%DARiFN5mx2cTuUU}Qo z8c9)?mvqr2vZ~XT@^OeU`h=j2fRsiUo(7|EV4kuNwf1woP6L? zO02w{;YUzY3;4FIxj z0<2cD&(jtGw}aC4P`8xCs?#N{t^bDlrM@nDkrrG}_reuZ>1cW#4LD|#Gi*JWpNOqj zfSRBSx}H=JN)``w0@(9w0DH|=U2Z)=tdIo=E7b1F~Z@z z#{Kg9I+Mc6N%m(a>(8@@zR?EAQ`dynkGy@EOdTbv)NfVoqlNnklozql%5wcPa-my`=E4JX=TKEc z4yBK%3i86_*sE&K%j(K!Jn`>Joo%6{UZGqun-GL1xB+37R^dhHQ%D{Xaa=H1VUyCte(NI9eBlj}!aWm6Y71cYR1L%!H-BRLgn8Juh|r|22Rtb|v~W0$3hA89E{D=#B( zY1v#P+?huAt}4XD?l0rkc?Ts+q;ST9zLwFCA#)W~8xYA?UOU4AEV{B6*J%hKYO z35$b8dM(zL1h3|`*iv3k5_?|e?BNvpJbFW3hP#I=J&bB($rI5a{XJq5(*J0p6Quu} zDyIKCv?&Scj~IaGKhRyL-%k3IJtW=_Ta<@RMvE_^j%Sn-3Qj)Z7(Y!_v(JFKT;NJl&(uF)&?W>xSKWzp}0M-6W!JIvq+n(xI?39Ri){kOy?gQ}@YB zqeET_QMgE@LTOorN(I@+YA*^kIt2bxbVwlzd4(Kqr_HI(w0S8>8w~1| z7+T^bET~ta1@#IssP`%(+H*j(=LGr16w#z*Vfi(n+jAP-)*zu?ROwb&vht=RAC)21 zUOEMb-O)=f!3bcU76H8Qtm;0R|NW$N{;!Yo@3`i~2-3k;Bn8^&N0l^Bkej_>3q98@_VpzQx4ke;$ zGOPjDc;u<;dctC}A=_Sw!ybiU&%|L5RrWCK-#66hNwwto(v(5l<46}LjoZ6af>N&< zSL}hXx@fX$WTQ$`etLUoF-6}mfxe|A1oVBfoxZ&~)3+)`-;z)w!YWFnSSV4b)t0Da zE(0=`2~rIbGhbc8ksnIbs7Gb2N-jzBdi+h1T780KuCKxJxa@*Ld#@68<*i2Js$M-! ztyl!U=jDIz#A2YSK5w!(7-^jjhub5yuiyH~7LL2~`jul;YKO)RzV!0TFC9FtCvu0M z-SDHvt8Q(5Tch^zzT?LAjbR}-%B+v{$NASluho$Tq%>Qr?T^S+K)U?rh^AHb;G3#i;nEyDz2W{GUkJeC-G#1?rVLNuk+UIc;k%h*D^ zEN48UxCSkPsMzex^`jXjc|h>z7cjwJRi>IxnPP(E7qg9YWkWsM5gDvbxCfm5WU*!% zqI7A8jb^JUu3Do>T3@tJK%J|txvB#Juv#jD$tlSCtiAgxyAF&39dl} zp#GBfb9>fY%_zQi=DSlOJPnIT=#|dEL(M(BHNc-yQWFp z^!#r+V!RKsrHXOFrLM^voz%+oKEnnF8^}#E!5X}wGdg&_sU?x6qJw8)sk}cO9Xwk@ z2gmv}3x@4Ii2Qw=w>!PQ%q*5+cJ=@hp2Zf}Pr z9h>w%qQ$%H`w-Cm-65bCX$a`pJ5OGIp_)H$hTf@Z#{Vn|I+}79Ktg|zMPz$|L#JC( zc~iBIhgctr$*RtM1ivVer}i;VpW;ickNH|3OVh?NUwoX6V^(gSI*{1a%x=1x$GTc{ zfv%=1R4*`^qcro()zgUb^-0StIj%ab&%L3~g`ly+6xIZ zgJHJf(sMSEqm`#%1+_)pjSNfYsvRv-wICVrbdkswkAz{R*oj8f+X#ywpXYXoW6#NCl5RrfMqqfJB$G*cYA=~AtR39d+;64XX5!doJ>RZ@*s+$505fqq}eWbGF9xR}+%1}~bZtn^1vf7D zT6E)b7l0eLEF}BTjeEQYnBOPh(}hg$UV*S>V#46dy>^*e-7M!XjJ1RR%DLBqx#M24 z0?Vy@%ivsg4}idY_vIQLaDq4d@2}423T3Qo=vaiM>amYOu2@-B##jy6*p);!&N8>} zaB5GL#b9|G8dri^!J0daXc8A2Ar>!ne@aD{{9IYHL`{FQ}RimLRE^~re3*CV;)8kCcCfj?6)fX3F zg|Rug8!BsWp1FNDRC{VSRD%Y%?bhg01A}foW@GW|IPbKffQ2>_CYq0rMqc)Um~SDryPn@`T7vu*J6e zvyvm6Fgr*FVt=T+6RAiRL~CTAh}RZwgJpsE4Q=m^zj5@KzWv7I3l8bd?c0@imy8`7 z?Q=QzU7yP*EMe=pc{2U{D*kg_9oRd5Qh}?H5UWcrPL4={ITg~*hAENFHpyd#7RIx) zQI9#jY=^vK23~aJ6{*lqvC)mYt*7 z5R`3y^U^cxU=+%>+bgeZxa7?GN3P zSqIWxgVS>QYMXmQoAbdRT-$A{Q5ly;WG~1cKggeoR2N(rAk0$5Oo#4((w2(xVu5R8Dabfoah45&wjPCSS4{1DRJzo?u$(<^|FC7{tAFme`-90iBd6_yYn9L5 zW7|JCxWo_JKm5;c|G;bfm7H7iJ9AjqY2`YboK0DRT!7jdyyoPLY39wM5k*1)MtP-T z1cALc=InkF^}w!Iq)(?I;D&d@xw|?~CyvY|#AwnJB7oMMt!Yd*k64!K@*y7_>%$47 z#etC}BhD`sLq~LBa!1?g1z{!QR)(-)HJkC>rou9@WkW^_s7|6T8^ZNM?F&N-iOa3) zN5w#qZZ2I&hw*=J+rYWnzTnoja>`4}{YRGBU9rRTvknQ?vYbmgPrN zKyWiJ3(?1HVvAf4pJjdw5g}NGAj$^ZJFpteFjr=ks*d6gkC#P!U72iEvXmjp9{g36 zGfw;rlB|~~+YBiS!P|3Czk}DKFKIWUZ+X7YfLB*P_Kc_Pj%29pwqFnE_hM6;H^g&V zzzpq%Weesld-<_3BeY=I(z)}Nw0lQ=lABBNT8+&nVuGW59Q_VeHqMZg8{EoZlv==`ng3dZ(w9i63% zR`_q(`NdJ4n8o(SwcL&Lv*BqSeT*ilmuY-0K>hJQVyEBBDy}ywAF=Y%o#N7 zje!#lBO3-U=3i*Ie(v~Q!*(nw?N?eiWU<-@A8(`E^pn+l3!#Lm{ob4!$<^+SlRFL4 z#`o(%6AdHaw&5#h58@|iH@$XT!}asV_rm)sFVJn&_PzJ@^?2VIR}cCo?Y7W#hU?3l z_}PpWe$o%AAr(Y4g0RJ*MM;|AMk-j&mIaT(CCMW0y{R`?9qQhjooQ8aLE6#B@#|3h zI-(m=j_XWW4(ZAdVRU8f6he|eq7i|?NE9XMfC)zz;!A7obXlRDqBqO0wX+j_N-i`G zbm^OeCb0&GXLKEvVGmr_VUU>r0OQ0DVdM0#wbOUe?Sygd*v?L)JT$GDa~UrUCgb_Z z{9KUq)r^s%52vuNZJ9Sb2KDq=|(EXDWkd 
[... GIT binary patch data omitted ...]
literal 0
HcmV?d00001

diff --git a/docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff b/docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff
new file mode 100644
index 0000000000000000000000000000000000000000..cf37a5c50bdb70f837f65c6233c0be5e4fc8bfa1
GIT binary patch
literal 50680

[... GIT binary patch data omitted ...]
z0#rnUg%@lxfrOxh?8+gre^$yqOk6DOlAy1`Q_FBv)IJtuK9y%b(mqqb!yXlF3lVbC zIEJZ^ds5*AA!L5egBFJ3wgNmf?Z!-k6|iG4@g7 zivl8jodAiy*})~2of#U)U^h^dkCA*Adma!aIgHHtNFpQ~Y^rIYhj9#U3hpN_`5p=b z`GkbOk&FD8LC0Nco@vs*?shZDiL!ugP(oRH1&^5xxJkyso9XiI<%iYm29g%HYYRKH zvuJ1|P1?11FL*!|i@nq65zZt-W&R2qy(22SATkWbWY?Fj|8D)CKo&ByeTe9y_u*majrwqS%`?e?C@80I;Hjc>Mv04J z`9e~7U7W0x@EauH4i#!Ae#NEwYh{lsAgX9{EXJm;os~iqNBru7b@cAYd(G4`r%&5d z$8yu#)4AL!o8Nl_OygnwiQ%rnH~GpP>={S?*TPW$M3gZd%kj^Af_V)aW>;+KyVxde zUR9USmgT71AEVLdQ(*SrrNVRffQ24})-jy+kbHMtHg;^e79|9la~h4!&OahEJ9lh^ z?V!3QSI!!{H}*HmZ*}>JA_Y3M75G^~mzbpVW3~;2e^7aQtLT4${9Z=-ODFz1{K?bq zloV}%phB!aQ$fC1&MN(kHPn8SKsj#^{(li-yhHw>2RWOseSn6eKdIDs%UMre;I}Hs*rr zU?n9oa_s1;bsD{WA5Jl(00Y#zb4MGd*ygqT!`afd+4|jwOyDq}Ps(xn^kd54Igq#B zp>~LwNC%zO_SZQ;Ow6S*^_5zDWp1xOx3!FBOzBc|qT@}(%Ce;U#~9a25kD0ks>fk) zEtJbw{Rj+B%aj#}Gt5e~K&S_EHKt}YsjX^_Jc)O~_pbfkxtdl7$7-#kFADikbi*q% zBki^mJBIp~r-z!o*{}~CCr@D-Q>Y9dhO((;bewAOveJpsp@pw$-Nr#z-DIx>z7~EV zI23{eaJM?rjDLYU{6H1g`wSp^@ID!tF<~jC`k?*MQSy`zK{0yj4 zn(cRsH{5ArJJ(KtVXlM(jI^I?m2nWqtK~-{i~|m!tIT64f`SZdQ=@FzXJpG>11%a8mRInnvBy z6w<7Z%HM`0iQ!zN1M_U6q!(ZNpG-RwvK|a zScB{ezuRHE9D`h@Rbiq3i&(xDm0WsjDBU9UGW(SDe*uCcQbRv@h?uPZhh8*33sDMO zE6Ns|d=*5K)V0w-gue=fW3|X+bCh$HKZIaj$U=bC)pOxNMdbumJo!ygt#6<@R337t z)u`)<&Se(~{b3=UucM7FK=}f7K@kQEa-pN8I9q~|SY9TAIw3s`C4x@b%nu7$OFI2X zM4aq=qlu`sf2l*wQt^wzQn#K;@B1;&t?!YUFA_0m|ET-Ec<5a->iYk)_Z@)K@>gX7 zLzeX|<4fi}9nv(gb%jD(_g1Pu>_{1Qp`IcKZ|UZHs7ld5m338N!4zy6U%M05a@N$0 z&qTClZH+JnNLsCEXT5KjO$9RkHc4oJd6XxnKq;5azqTS{UDcMqxqY~mEPUL`LIi;VA?7Gd<^e&;{$kS9euKCjrr9KR43;gO| z5U#XMKmwlbT-&yN)g{yTF+v37vO^p2q!ww;*12!VD@EYg2izdl#PvxOpgCXtERZzm(#L%p63!5pQoAU|#=?t`;Xs7*`^lfY-2A$6hS|XtN_m{EGx}jhq zwt3?Pc)`BNb99-GdW*RVW^~rdH&jWaVlHbh1cQ1a);VsY%e^pN>cCcamYt*Ad|bN1p@=Z_E91OtqwwQUZk&@GFVq}y15K{f~4Tew2PJ?0k= z7eOZT#*)X9#UMyy`IZsHJK}!mpy#SpnYrK@+vgHi@TWFl2@fw791?e=y>fWk03Oo{DmYz~F zKXklq&iCLEJcu-HGSH^tMK-4yeeoi7DJLldBDLq^;-HPTDi8ZZF<#}wLg~JnzHcLD z@~x3?e!~z!$g*Xix8otPAu6Ec*vy~O7-rLQVj#{W8UzQ`b8Ke5Q{5i4z|B1$)Kr8P z?V{uF4)W$kCMf)vS&deCJRhl^d~yzqZx*Kw-35~2B2#9p;;oYOk+(}FWx7A=LMOuk z+oxrp=u_rw>Y`ohNl?g|yOKWE`m`0+?;lGevtk1E$dQe)5}Puvf=A&{E_vFoenz@s zqip+X%L)eRnXKI2z--wo8Z>hX_NoWa2`ryaW~YXI?K|d%MT8HIOgJp+hxIOmh&pPE z`K%s^ZSpIYtK~Mi^ZqPG6gpNr3OV(W6GjUV?&)84j1@3t`$2R#LH+yVma*Yo-x!Qd z_k#CE_PK*l=y&^{Qa(Acs@7U`tx=j3MaaCjj0&bJHBh&B9r>tdF;81*(IX9NMJiaw z79r1TiK8|?&BB-+j^oUrvK`bZA8rdwX$MOvNXxZN84|vgx79D1FBs_Ef@aAUm~n+O zNUXt1%QY`!M|LUAyLW~EAo$1DL_**__Ju|R=LZ)diCa5Yg!OeP98ic*BKYq>XA?&% zg`&AA%C)it4O5#%F^c5K`#2!WZN|N_bV+gz#Ihw+8lsftlOEJ5Zoo6xi}Ny71c%d-9A83-7~(%NCVNH43a0$x#`dF^bVBYcL1pwJjm!*rntO_D)e;9j}Q z>p*n|GK5`I0+PX|y9pWWL9Gnr7-W?Te8Xx_fuQ5f*&ql9GOvUR3#ftz4UD#`wxl(k zYGxD5?Q^!GuADV2E4tcp;7e!9A~x0I;{4_E%b*#x!d@2uGmz3gH{eVV_?Uij7`rJF zQjp%`EiqmFMT-@zt|r?`S1uaW@f7*XPA33%M>&n@4U8T`I^zh~2|9#=?+RS;y1>n7 ziW_iUK9kt&+4(Vo)Mx|1kZcnL3G67)g$=Hiv!UTlMH(D?%UqpZmbP3fnDLq)5h!EC zR3yu&KvXchzU5|cS%_y=v)rUrMhitVigYwEeQ#z^5Z8F1G##_o`J=>sBLy+tXU3Ih z&yeZ|IyI)2g}$d|@l}N4J8-O^_>=Q$`O#V1yr;36KD8Ui6A7WlP79EVRPA_q`BF8K z9MQt^!vs_mQ>x6mf;Ee5D`8rrof;cKo+M#P_Oxw6&#RxpuK%wIFBX%bNRzPG<$UDB zo?J1~*Q8Z5&@IomqE;@H!D3#GY&J-400b5a3qVM*DPj^5IlUqWuB9xtb4XMrjzC*z zGdU5j95}VRI2;~+nF!Yfmr~T3Ouk+u#L6q{0!e!UF(IzmLt8}tdcz~`60tDoK?JT>VV1N8gRsXzH*pzEjKF{PTCWo3Hh4mvJeCbYf#Q-#Wdy&A7vYU`y z$~LX@v<=loHd#8@SeC@oZO_>MKREuBieIH@E6Q{ey(=#6V^CF7bTW5F)eQG6`SDju zWs%S+N~fH=isGJic=3?0I;(|cC~UpQ0pu~l%`@sUxQ$1)j`R*6N2uG#*iEah5qKx9 zQEqUD({e)H*?g*!z-F?zt*Q!I*c;M?Y4wiGCr+!jVJ~~K!U8XPInpF`O7E$aAFEz1 zah9XX`D^Yf;_qWnS5wMKO+@5v$ zUgmZC*EG?QP0xEhxYF8{KBEo>Nxa}GFU05xPwUb|j6eU`==PNm-_&}`E?oFV>In@X 
zJufVEEekkB67>7vnDNXDHOhirF67UTwR`?1$(K^m5-v=ad6q(BQ8A%hQ^WENaxs(E z4!B`%)rP&lA*q`Q6j;uToaL#ZTI9y6fYq&qvl(xg)8t}_0_DK3S^d5=Hcgw4s|!3K z^_rguX;po<_-i?k;RYM@4m`!|^B@Qb%T#6MW4Lds z`%=6&{--k=NsgmlXoCKORqU{ycK95*q+96$X-NN^mD7XQQ{j}G4)(*WM_z0--M*S#?qUi zs>pF9V#ZDB=vSU|0+;dT<~}un1o?Q;m(Q(x&#Mxj zT29t$y;C{}TDG@h%X|A}8+!%r@or(<_L{#V@2^Oi{benxeLUJ~hl}kax1fQ*8h7Om z65$Uzz@MqehWKeiD-Q>qEYbBW`Q8PFC<+WaSRw)_P(7#~Ec##x3#`VH_;T*JT)j!M zX|Y^Q3tP0%L)nUo$jzdc6(L zYtt);Iq||t5}8^esmZk0LLSJFUsh-VOaK;-o4les#XHHOW>He;a{B6h7FdCqBPyw# z+23C~vqWU(6u_(?9Gpu+OL}WjS5URR613S~;?O$mB~xpjf&kWak+eI-5!kW`ma4sA4v1dgh3e+U=#&>Eo=92Q_>bqrPn- zp+%6))O+`|VroIDEuBrHbx%PS{HZ!iW_SBMCQy;9)X7ok+8%T*SZkP#s)&hWJ*C5%s-K09oo7jFQ208C~XI}eL@DI4pbz+<5{-ysw%{^pA z;W4nx#yZ+}l>K(!5%zA47|w%W$Nr?!J~{%fWiud#+0enOm*7bR1kh^^z@~aQnA@^Z zwbbyMj%?!Vgle`i&+C@D$9m)M{ZZ7wKx;njZVGtuPxqYwYIxAuLMGj^X=5nbQbw$8 z+r@r2Uan=bn(9h(!>?b7pUC7tD@}2=3`!ko$|3ehVoc(ktb_lt|69vD#J4A>hwjO6 zusY-@Ot|4HeEn7MC`^fkakN*1E3e8cuLf(b0uGp|>|#*)MN^OV6#Ur#s^#ObW#h8r zU(N)o*4%}yyxaKz21d;Np)S+OZda`T;jL)rZg{>sD{r}(JP-{{yb5ty)U5`i(p_GvGBxu8{FF`3jl*Q1S*n^cN?C z^Bme3T(1cKSY7>*DnlZP$4E=I<@jSgt{J_aUe^o{)<0+KQqnOZQifENjUJ!ewR`)H zu3eLlAK`cl2RaKo zvwyT_Zx2<0L=t!K)71(|QNfH}*F^j5BI4o+g#_Db)-98u zyEuim8NHr~wxPm8xF>pggG5Ei*vBGomCJ=|QFI)rI3|%OxGb(jAvxywWyt_#A_@8_ z-g>xTeeAYp#o@WeSF#eCy$cR!xg8#Tl&3fZ^c@9g{%bI{i5kw-#T@@Db6-gjS8FfO zYh?OtWtb^Kpv2D$$9j6)RgOA-^fz~$bhX(jmuG9l>M&!3Rz(;I#}!YgEYl56gQmUW zrf{?8k8a2g&Hs3W!#VOXG(UR-v@{pd#L!uBa=8`9&>@;~m#Sg6Mx`evz)w!dzBQ_> z5{>dX6N2kUmGz@R+zDLKXhG%9pvs+{eAOu)>^x8Yd+B|rYLo~&LX_XPcSfbWU##**H>`Boj}0wTiCr0Wjn&S> zx3iG@Q(@qVtqJFMyQZDty5i1f|Mw>|yEH3KrZsBSMrE>mb+SR1%mR$5iWDtfZ|M4X z@5*#r%yhj)4Msv{sYI*;3VuAGjh4n_()37onluxy(NNCAf9V_?eFT@_47+lQ_jM?&j+CdLhgDsLBKGkI?-l; zlV;s&!*Da@2(zxedd2)eGTZaC2%XhoPF>=+`M<2Bp87mmxWG;T<*6RfLYt{=F3OKG zZz{}D%jlY0i1+pUS!@kkGx_K~i^m{E%8K+>Ymr`t zlzI%gUY$iQj1^ktTCEju@R2bw`kaL}Q`hXyk1{WI;<_Ehis&Y))kM#p zR&bQ^mA;VvmBQpm&47s`owhnwlq5om5_pe!$V6T~FH*FU`S_#IXQry!pysx5QS z)6`Z?+F&=ht`Jsk)F2?eTrRVy!mMizH5z0>F=p$*<}DcPmTUg4(hnDoZ~?9AILII? 
zBF&2T))BA(9q69gG6y3~MdD2UVNWaeunS|k!#7|D`7L_)YG}7HUr>YXl7f813x0Zg z-VLqRyZJ5H!GzvDPV?`fzF_*zjGOe#ig&LM6e4#B^8}Tfy5(hJch-X#ijkz#)ATwc zi4yZ5t2^=KmegF-6U;&S>7t~P!%wTgVH9)59f6MW^YS^mpOQOsZZ1 zs!37ap+s9Bhno`4s-(zK?p7-I+Y(d4wzU?ctVxj4Xf&oqiNSE~DR6%$SAZ0-f}Bw$ z%_MR+Qn_17^zJPQ$JqB@c_H^*!szt5kWQbQYg7o**JaKU|6ygY{vi^d$cQJ{))V`l z4$bQ`AZ-RWogw_HC|J}Sg6M!&5n)ag{8^6w#R!$`=TJC{$FW0wKwm=ayh@;wouaNk z;=KUEVUjYA!m_143vT^Dm8iHZ`r|ZdoGHMk`7PUq`wnMivvJn%EI8JU93Hj` z%i~gG#C7ys2ff>5r1#`H>gZzkSv^J*y}Q&czB@y%Ar=b&fxruXD#+jHS3#%3P++kb z3l#L}KiM|9(6kT^l^a6C_Fm+@y3|Q6AjwhDC^?yol1KY|(%qZn!6?hiFtM1jawX>P z!!IRm)QozLN~nor8q=v-{30wm7>%XKzIxVf{KuXCDBb5l46!U|(lj z2Yc9ep_ZJ)iH(45v@!Ypw9+|NGUYHuJ?-~R>7JW7HT1bx@s$~(1;edRSWeBed{W9gKi;3uXDu8mrW z3m=e4b(C{}{tTW-IV)MxtI`!Y9L6FI-=vOH!y}a%EknI|?kh;yYG$k~TTY^6@EQCJ zHPlnyx>C)p^ZMK>dB!hE#fm<$AJ$gd?;Vl>&&-> z7t>tj4)e|I=qR7*3aesN$^vW>=FQ(nvr}&q z?jC7AVtxJexOT>C(l4T#`LQ{YV!(PfNl#~D3>$1{j=oBi@bH0!A=Kv?E%YSqzH7i) zI)upP6lB8Yf@SD12dqXS+n4R&z9H-vDDfi^IA?>q7*;{A(85aiX(tcpsRMD>wJO5Y zNL*oquPm^FzHpLQ1IfboL~7tBscXJm4=KZ@rfT6i+I6|?{HunYZ5G}lx9#)Qxgh!tqe|dmJ_SgXVfAHlmyb3e< zWmMp{fb&b`VdabfPGCqxiNXdytt#KQ;7a*WmRyWKD4gUwlyFgRr>ZgV8dkOFWv7`X zw#`bG>s1X^5b5C~D?`-xq#oO!th+N_P*1Qj$xnGD zop$$2JlYRszImn7?^yO)WR_Z~(buW|2YM@ipokeeIstuna_Ts>L+GP#IVIo!VS96Vh|%PJ^qtf|9&yC6$PPlqpCMPDW7%hh!CTmAqZ z^*`iH>Q8b)|D!Y}gG?+p8D^|wZ)YFc2HA3fTpq+z{J~W|%X^4``St3|ubQBC06&(Q+ zvE)&5r@^qvoIZ4&zk%?ooLIhoCja_Sy7z0v-2gq>ymjQz#63N+KgU)wcr){Pv>AztwGXW=j7_gL=3geW9%5x;R(4#=VdrE(FV+zI+g&0!I36$# zI%lVp$416fq~!5N#Fo$gyLAzJ=dPWM&1b*xDfkOG2a|AW#iod)z47roXFgc90Y~wX z(uJIZU2L-R$jT-{Vd#fc2>*}7wU{! zvrCh&R%kORi4lwF#g(n=p})}#DeVyq_(0W~8r%3miC)md&LH1g+d10?Ph)E!6zqr! zT7hcW^B^T(DwETA2>L?d84j46mv)noFVNi(M>Ey^ZE?vo{s(aJG3a6paR~)~bH@+E zUlWmg;}IL|ucK71u=1H5Q#xk1_=kI{X7=}Uj7MPPfbdNA#j(OcBc}z$te=3of|^jz zL>bIoz~kX9KB^@hqAklnzAcQdq^~rBMkQTYn+gAj*n9FvuQDn-hgqE3s#IWnpu&UZ7(oN->HD382GAhSKU19NQx-Xx7Y;|ABY$M@}$Nkq`kK@<~A|<9wD&g7V zt`6()v8e;d^>`QNyyV(tL*{+keLqAn^2Lp;y>z8+NH+gkCeEWZ8nft!y@o<9 zZrW~Q(i2;@W9U3biL+$%(&_wnjPJpp|34w0uVVoG+`P033QfUHga!Re*Bgfh`i(R7 zOqt*vELF*2sP&C){Qpyn1cj`OKS$SAPUQT0KyZa{bLH{nUlS8g$0IiQevVQq;AMb| zGw(*afX+MrWfL&^g8S=S=+z4tr!IC(sl{WTGBTeqcpy-X_juc;e`ZdAZfq3a$i6-b0S6*C1DlZtfLDast<#oHpQRYd*H1)vCq>Q+ zP`HgUlsp7(`xY{X?=RKTnCE0VIeRB}?(8tfOy0YT<8kHpaq}j4+P3c^IkOfL$(TQEvQ<~8Rzds`3eJF01Ksd}3p#mK2N zMQM(!1(E(ZrOurD+77LT%<=P4wiLDvRS&uEsexhR_s)^LjQ>?(lIkd7EW(nsq`JWR ziPZ3te1>UrmS=l%pELWM;d?R(^ev6LRCqL#wRY?%%VN`CC056uL+a5~GGiH~oF4aX zs;IJw6SW{9-|cE4D%KX#dnI15ok?ETKzCvHxLe*z-jzN&iYslfYANeLzyzMQ@``|i zeSfR;8As2Gc07aE`88LS`|7GZl04d<^GDZ{swmNmuBCT>F{( zKhGgFy!z}bm6VJ@3y&9hs|pJaeAKSFetd|WvpaIh@v=FF3&;v`#K{#mMt)?7W00o@ z>?5$8h}Hc7+lZ4{(&<`6bI#Jn?T)**iBplNM-y6RDbB-PbKwdo%TX9*D0FQN$oA{M z3ckdR*1+MV-}WV3>(9B`&;R#xPw=R^N9P#bJ_b$WPJ{G_3qSD_X26FR} z)#te$&x_h?SUu_6#`W#(jqAUiWLUj7Y99Nnt?LR-7H?%0wwOBeoaWA!LRP#LC%e*R zJL~DVR?fz;eQSMe~0xBF`kyIOK3`~^S zg`lr}XflK;0TosPPFLV56crYDbnk!gmAAs)Nwf+vK)wlU2c=w@$)fr6lFrJeASv1O z(F=RtWZoer67OX0-CsWwR>$UygWx>y6Lq9zooKw^7;o0DS-fdS*IsS|LknYcUB7t| zZ)$aKK`2Ot?IT z``1~VUOIu^%UN-^E*l13Y+O6f<~8lB?$%rb;oz)})Ev9!`Qz~=+I+Y4EcSh%&F;(L zs~IUbfg+g!5%}9@*XbYM;w9TfS|BUe?z6?d&zllXK0KUI;-{_smj~xPsq>d76n1PP zXI`2UUtu=KSIoII7uB*QQpvG$B9&WOP$?(>2$`y3fzk;hil1Tsm^50w!>oS*2d~X@ ztUZ4V$uGLLoHI)9so`(e*3Uu-$kws%J<9f6v1(e7)%5`EB z!*=C$1>aCnTVm50bGHP_g6&o_1EXJWOD%GaLMKBP!)5*N$HB2vSoiP}&)JYIS(tXG zAON0T`>^qak}E+5-=7jYM+*&~8hpC4JaSP|JwfM)abxgYSvy{dtPCIVeP1{R*$YS4 zjzQBX(;)q9PA$3WMXGGX&g%=HO`Yrzzv^uL&~P@+m>(&7?V<_wD4$4(DtGvSD<|Yy zes;M~gz}YkLw+8ACGJ3+iliis`*lEx^B3MG3`Oi4gQnr8@hq}l7W9nbU`LHnjPCN* 
zu8Yt`AXYU&8pm{e=Yz%|JAYI-ZPm1Y5o6C>2@l6=GzSX7c&*!lvBBNk=NWT*Z7JVf z;^vsFE5p0~sY{n|vfp;Hy_EJr;h1oOZxqTH@D@v4I8B*8ab`=m=EPt`wXV%rl z&djvPFypC%8zSABnVLxVhQZW7UFE5*(y|{9&mh2EFngJ$z+2^dF$VFLP4K@TmbB=LF;b9X) zg!W%MDZIIPfm3<)@jA`B*+=^HaKqz3Zd({Gh~uGSyzVEj#iy^bxRcRI9eE_~zjVFq zXLx{Zf9ZCRofUsBqW%PR0%PG$ku@EU%P$J$;?mWgD`daj=1k%V; zoVNZWJfzs^V&prVmnv|}_AX&8{#QmON)2^h4NQ_9Hu%O_yjh!^&SN&0nmUmKzGiq- zy-09bJ`Iws8p)W%mH+!A^T#r%tdd##oA1Exbgi};h*b^0VNeWF-^-w}>}5j0ju@g@ zUFGc^HH|st=K1rcYf=UwP3Q5*kyL1(sd5W|AFUkXEo>7B+cmjbp*EL0Bg-(Bcf8i$ zRG0a0{6jT?rIN3|o{qDowkUX5Q2%d?xGg-!jpNOi1xK!E)TblWYuIb}tFqH-+NJ7C zrK(;ixnd;pN_Q-uPJm6@2X}D)6gPif`(`b)P%_vztu1U_d_$T-neI-D=W-M;;;TYe zbsT(~f#YbCxe5hdJdV;6;nd^zXFb3&YEnJ)|WggMg{26?4+OM(Jx0t?%kqm^7@yFO?ad$+;A zK$)CMIxNu$pU0jQb5Pb@h_U^P1d{@N4~%9=CC5l3^`(hq?;~V0cNTZ^-|(89Bv*cU z8J$!|xj3+QaJr4(q2JNFJ*i|AYRi1GI7UJ@Rj!kl6uDW{Ila3mK}!TO4Yu+!Fcz{Y^_kjIh&O9b~%Z902d^4vWNqP z+MM&`eILdDLj)*?s)j8cUs=2Qn?qqEu&0Z$SyG6XrIyP$`;$U03>0k?AA%pEp@9d! z$nWmu(dA;bT9Jv;u3$7sq?x9?FW8%3@O-%}tCfwD0yBa$0!gH|)UR*zUTPN^_L(wT zLdXkr*;KhoBV>^7XTg){X*4;Ioh=7wY4l{c<_d`sEvLLeX+npfCsQbsaVkxjd+7gQ z!?FL+CzV=v@-kDjD;Sd`AEM23>v<)KM5UfKnG{G4B_Gp7d+}Q@HL$Dm4^I7iL#YE& z&qKW_A4)Po;Ho5HbpHD3H}mOc(m?2I=9llf`e&=0E(OVx9h++ z9ix^E-TD*6zw?MbR@;Z98?JOhBRWW6&L9!VG-MDm z70cWFKDeI3Ii7*7I0sAU#M7m`8q4k-)D;i1f8`K3YI@Bv*x=DxnvZ4wBjiRQmeiRR zh_Mn<{I|77j9wyb2%Q2SSE1P(*c@55jp+lWGKiQcQ(bP59@d#|Ne>>uu{NGMdQtGdlsUdZ1Es|9L(Fn`G7O+SAg`TGBKujdQ`<_iTvVLrqeuP3sqOyAfvdlv>@ zczZc8Ljm{KDND;@%1V_w1Y9u#NI|9=8Z6uhrS2}oMB8*qe+67GyKvI91AOn$_-ZuT zSY!ln_t%A%@oKhjBfs7SB9M)Hw{sr*0R(EP3l|bl8==+$;D_ZvprZ9zDW;>68nYKw zZM1Z^?eH8U^QH|cv@L&*VbPYZ*TY`fJ>yLukFroUw;`QH*QYtrCT0*Ph*&q&fg>$z zjF*#-9IhGn%!52%_6Rv%-nf;B>lmsdmhTD^ZF1_<=qy9J8)ai1g~mG%T6SUgq_K)~ zak$)KRvM70L7LGDDh(|+`^hr*wAt00gCx73C8tr*iX7m;RQ1OYXB6m{!2`lB&Y){j zU66vSfsL%I9zK6sKJy7*a@Zv;vE`7 zG5{bg!nMGF1VOt)Y)CM%1!*NhjWvVIv5`b7OtFxeU~II%+bj5Y{6ATnr14!TSNErL`W(64pJ7iaGve)|(^fwRR2lO$m_{;WE9i7qgyj zO@(7`ST~AbBSh5ZFUU>>`D1&s5)pw=CRh2Zon+!86V9_1$zJixJCI@eHq0+L`6npu zffj-JUHiWo>89vKI3D zOsbQ)wf-WOQaY9q6O%DUDaBr_-J<{F{v139EbL^ zv05n;Z0t#E(;zB4Yp;gS2-k}m@+ z-(>cgkn^@HwK66;!;QhpnkMsPk%>aIjcjS+U{@t85#e7AT75N+lo(}Wv|zs+d=O#E zQbdPuB^JBV*MZnNg`0qBWqZ~p;r+F9qLUT_`@P+q+L+oAgAlke2x(IXy*ZY&2gwrF zf9yv`_tQ_@vUcp=&~t3hy3AXjKDYc$ze`at*E)xP?d0_^sHk$ zynDEZ&f#RK-mh&1#v_OCv~vI6Z;#R8{o8*} z%eM!;=gR!n6=by^3V`(%!dr#y7Wq}JKPY%0VmG0XbxpH9krfcvt=18Y9*9^^Sul7C zr&(P0*FOj!NV-rJS69cMR}g1+S=OSF(PhYJBzo-whwT*S@43Dqe$&XUx`ufCl+uTf z3#doliNZyh+){hxJ6qz~N`cb0I5Huw%4)P$#ZA*$-Ej}&+}3Fe;)>!L8hD$&v>u=8 zwCb%#5Z5N`Svs2H0l~R8E#gvElFrI1^($prAz{FvZpk*-$mXRZNZM_VzhMQ_`K&(zp* z702>rjiX-Y)H!PkHq{h#T@2R-KM>&Z8)%>azxLt(AJlyK0|ONqe%Sh23Z6dH-*N}S z0sA-Pf5cd%u#DQP`;LzgL(Ng7fxOyYd=wtX8?iPs-8gzX-P6qS^{GC?MPbr}f3+O2 z4{0D>VzpQI;2CPo5vYN5iB11CjzR0_joz;94ZcOAPxTot%9o&rA}ob$*cTz*3x$CL zHte&6Fy7ty=wsPk-_NHJ*ymWp zZV>6E|NoPNC&A)%-CS|NelJg1k@^?!1{=bl^@3-}de_<)>g(wJV^QGskY_Oxe^3DM z>u(Z&0}RF6R|5X}-;$CNcXV!H47&a36}mslyis$&KBT1{x0n~}EK6Aop=XwRIa@2EK(U+^kso+NHDs}t3T~=)v_@;EMw(e-ZPq!- za^gQxn!c#uEumP#VgZG4Q}XPJ>pwtr*N+&2fu$^AF$)1&A`u`&w)KE;60tUsk61IB zILm_RGDMD}b6_dt6hi0{VmqxP10i6_=>1}5?s4Yw%q0r}dVjh`c9Vz5tf1%L^561h zxD0*GLq380whCnh3bP_!P*V|kq2cd*Z5P@}oIx3e4X=Xz`dc^bGZGyYBz|Udukd%7 ziXoP=7{W#lW`fx$h+LXK`=5$sGA+*f%sRb$Ia@0iSEjT_u=IFVkIvgg!pS+e0yM-zvKbgm##3y zPP;Id`Eag`W52$`sxiVoS4}$_r(2QxV*0WtA$kn{EAT5<3-b*GpY0H+cY#~iuW^Ly zfyYQ6fy2Hq^Zksbp93lP%19z-IhPfe>3=aP{C!3hFrCGzi!wX)=hMZa+AC90PEV(s z442HiAZ?kc=yy*^cLND{*_HEA#0#kA@0sKInjv1}*?DyN8a$@RvRCah6iWjUbXW=f#WtR@TEB|`}uQ)dQy%%_jLv?e@j+7GOp@$ zAuxJYNjt32C8)4TnJ6u`S=gw$UfNZMPI0PYeZ`Yd&w@UU_6PBCDLzTS3S9!&q)e0+ 
z+qJM!b-lE!4xQpD)iHkQD_)qVAO2lIzXt!b-BK8-E-Q;!+P50?5O@(5+g)r+D;l6n z(8DG&UgMrTeRa^jWYwbuzOY93>qclFQp6dvL3`PXA1E!y&;EMf1o3kg8jzEtRMKv& z`P22j!X`3r8;!i;Zxp{UQv0+APV*&rfquAn{TK6I$(Wb=m#^m(Ih7hDOY z61Kw%U4n`>iR3vWrCrkOhmr6dCA~%5uos-ZGH73->d^w9tI-qW;%H7be{50c&YBa! zVitCACYT)bu}{8{Wg2-L#QUZABmpaQ(TdkJ{9#FL1jVVw71~!D+WM5Vf}N(h4WuB~ zM6LGN8eQdH>5J7)YhuQx&l?Q~@oy9>JHJ6F$Yoi|nT_V;@{nQ(Ge`PlEz{xUnAV<=gg`6*f2wu`AjyBta45$XOArvoXhPLgIn^2*NyPvCM zw7X6A!$|m^h+a>(7kqLXFiQ;C$SFMc+ZbSi*`7Q0Wh5O*YJuBfHmrE@<5qKB)DE1b zF=Nd^m9R@h<;>6;lSLDEs|sH9^zA{_ro<7gK+~R%Vh;#3$Jz+q@eYhR*9=VL(q?L+ zUoRGO#ksw@a@1CX+7TMqV>O5$ZJE`#1)FrG7ji_$f?0Ih8RRe_G`tVN@b1XHRevjO zZPShJpmrTID_c~-*l2IpUXnlv!s^tn2`6#Yg<8OYH`7Ol0%>|kbcDe_BOHUz}yW0ehZg85IEBPj@u^6*|4R(yhU zt&cd0?PImL<%LTzn?-|dM3aRw7RLTFtH5aI72xu43yi~)lorTh%eT#%yOx;}=j_3# zeQA)enU~6q*6bc^<87~dlUxwLBc+^1zSq8}7jIK$)a{oZKG^>CKJ9T|A7QW1TAw+h zz^c?5S+DZvJfZBMf8>hMFi+#@s8G@V=YK4ckPTOcR@GQ<89kjVguTmT@>14_@b9l? zpw(l?@L$QYA49C-SJu$Qq!m~HPxa7!Ie=2J{`2DYCT9g)+wh#e4UmZcq9pHOzoN6j zBfklr`jTMxH~{AJmrx7B->86oLi9sj*~;DtR%u9X;FR(Q$+2Vz`tZ-mIVQwO$eE1# z6=9up{NB9~UbAYH{~c1R-}xn4NA4O%JwHDAC{GC+w6<+oVs3#XrDUfqseN@xbW9nF z`|%SsM^j@-y-!*-K^n;%6C=?mwWmIzF;Wws=HyTN6!jx;u2O$QRIsL+7|!@0njqK% zk!%3Zk}8nGk42~kY>&(HZN2xYx0Z9a6}{PU5K zOjYE4Q%}7=X-VlaZH@udlMi_C#ivR`o_L6O(Pfmzo~*sjCezV+;>NzkCy%I@E~wk@ zmWX_Suw@5svo)gVtv`bw!+#U)lTT56h61nLLahJp-|F4^)oaeE)6VO0)ADld`AHPv z1vSOS?a`!)#?xcUL%vk8Vomz%qH2kecOmB$46nw{Dw}wT8I$!%-r{p@p^+Ks$|P8V z?oe=b-`GxDZAGGwe;t{EdCu-8&g5Jhj3w!N-QDa41F2L({u`Z6s^ObZGQ z2w=GCMya7>nJjq$SrYVL??7+&h#io5l=JUWa^s~kgAiI=<2LFNxuoP;V8S$%dL{rB zCn>L4>?vR}@RU$Hb`pG80HrhQPAjp#NU;guf#~5NoAy$e4x)%g^?hj<1=Jrl)Cp($ zs>dtEpP#yP)fiQWRL1aTHD^lNhEq_&mc<3Ni+q138$xAc#%5}>XRifFjYtnQ*PN&@ z3p}x=Ou)(BL}TnKVsQRcd(bF*DPw{es(~SGi)<}{An1BtMeHPOP14dwLlJ=irSFw} zaaL)JQz%Rh{g*f?!9vyoljM!IX^h%$SM#F1Lt!pnF`Q9<1we12pKdp}o??rj z=*u9Ztk7X1^+J9{Ms4gTo*5J|53!iwsdkI&)R6=T)DFl43XW==bkP&}F;}#};&^FQ zq!T4nrG|$70N;-)&Zb8AdRigOkJo({z|{Em5JjvNy>N5*E$5)r(15|=7gnQtV657|tc zcCP|*HE4L^h#?i2#X3FWRFRri#E1zAI0`Ewfc2Ihh*1d=LMhf+j9p-ibuln?gxV1i zmVL+y7(x*O{C!;retV3`i?I9Xu^ko17lkfRptU&5v_5R91^fpE?|c&$@z2YfdcI4T zaK&pqY#9QLAH6x$v}W{5JLirq- zK*{gMB;g?zN}v#}-g^}=Y46j@s&FAo#$|x)J)G?Z30rZ9edg1!o#WC?QHm~ihtZCq(x;JFj!_>Cj zCWI(?-BqZOj-iyy0iOd$A>RgFm#jJuW16ur=Q#`w+H!ct*uiODIj_g1s8r>}Dd` zD)~W){d~pO5_~k#ChzgVP1||?v3B65U9#K!ah{vq#Lla-;~ofeZ6ogW#Pvs*ma*CJ zQ4Hp%d6^xV+Gi&mm|WSL3fdYxP9gN@;vn&tdZNJJKY`b*m5j9~uJTBl)pGi%F20i` z&fVE`?sNkhgAkwLDKP<{*stPuAnN;J?^kX6`yjNPo>N_#5w({MJ_)RCZV;?>-rCyf zol0Z%++{vr_%j*>)qdeL?YE*VQFSZythoJS7A8pGY{1oZ!8$8NNp=`!Am)LOVI*3A z6C6es7p3OFE!qAO{FF|C@IiR=lWz|uPBUIvvn$MxCd)~!?4Z70Fmj4=S0gK|!R`9V zk(0R>N&j$uA{LdP`$aZQaFdVKvZq%dV!p?)pGR6X99IPf{eC)9KMZ?qb(7+TZ1 z*FOU*sx^B%q&HH6cXMt+W$W__7xI0ao2N3*n;gNbG`!$))0jI4zNYI@gQnQg$?Bwz zuX9a$Ko7Qt6gHV%>-{vt!RkIk-vN!kKF$UQ*X7Lo_$DU3C{Iq{Y0El)9KDpz*Agq_ z!X>SHuNfkY<573w=;EmxR&0n&{JH9v!^ zf?_$+xd`{ZBrPByemX`#3Iork>*v}k4b$tV4P7zmUh z{|da%Jw*A7z#o2`tR9Z?5q>{iNp0Lz%oIojYB8xv<$j%&GJ=siMbU}~;*TK>DxmDDswu?+=Y5uk*vwJ-;PlKnc10VtGFnt)J zff+`Q`CwyP(G1#m#^bO1@CXoh^>EKO3llr;`nTcBBbGgz(|Jpg@re zAXWjH9BK?p{kWN>g%DC%JU&WW7BXs_M4oIc$1#sJKyEx&VqE}#S* zW~xcJ3L#>0BVg9U5G0n%bW1^EDo;4UEX=lxFDb4S50|p%l*uGxB)zYfxqg*GfQdMW z37A#Yd}kr2Hlc`(&-X!q=N<;Wy*$LsmxSFzUFjl4f{;Uxc6!M_KTg>~rqAkLt-|Tj z^8lr$$SPl(d$2(3A1wQwCn1=F081?cAl%-|g{BNQgmG1u^uJwzsLVU)grR}Iw$r(F z)JF7?i0%VQHR-;FbT(7boL1>b$@2}Qx?Ai0y0sfHHUeNBz$*NxHR0hEqXL<4d6-DVAgzE#(Drhg5k|f_ zavVR^I)P@6tL<$?N^3SU0TGhR-d?au2(PfT5rQBL)4XkAXDZ&`y5KOS-6<6QYkv#a z&>LYGi%4N%)>+pz7ShqQp$lC|@3b+(>xhxWp!M0RFb=TT8L;RCx*Jc;4ALT#2r;S( 
z5h(i~w4ww8in0+^02IpHSFFDesawqh;l>mw?I>n0;<)7>{JGdKh+`Bp_tQCYv`*xf zNHTItrrUE>t#n{M@EE}he$v&A52-}ZN}6c}-)joENfl%HTD&UdCa!$p-YXt0Bu@fh z&>L-sZr26Oph8V~If%^B@Cf)w>DXLMeH_7gQLv4a7^W@&m;-)k=xm9-uyRJud|zrr z`Y0iFhze%`Bt*QQkQoXRbl+E401h`>mnM7IvKws|xoO&#(UQ9VR(!&sfqzK3midB6 z)(so1@xj=}vf=mNsU6hkPxyo%NZDom3aq6?M^*!ZlE&u_n=i5rRBg7WzWkX)S{(A~ zdl`Tg@8HE9kG<5b05%z&vHiVS0ng2F$6^sz)%8mYy*M8(?-h&|MS6Hl8EmI6v&Jf8PNzdZ1$v;GfjOqG{OD@X(^c zB(^P_VT~5JJIC*I!Rmr#dOTo^1Vw;>=<#35{2!nt^Mezod917CUu++5Wm?k_9i1m~ zQC)yV7Yf=L{vqWTe=m>Vj}PC5_PJj`{Q={PUrSa17IAR}n0OI1(V^a9M6k;uaMs45 zp<;ofcPi8gb)%Uz!r`aUNdyQd0Xw_(bTqbB_d>=s;3R%p$iZ08~B~Dakk{ zv)0Elk;i6A#MC0;Wax=u;9FI+Np#K*pP`h5(c=Vy9G^K4y~4#1jslX+T2lazupG<7 zC3GQG&_D2s5T6KvH_VnuNXT0$!$EwdM-hO}84vKICH?m@**{6d~$u<9=h4oFbL#o&FyDC;(cG0?-F8zBnT< zN2kCZ3Fnd$%+zem)}PQ)EQTNo^eC{>S=;H{F7W~Vn*k^s{lN9&SQv9djnDlk)^Cn+ zC~4RD+7Fa3*4FHtT4!raA z>whmezuN85+8YoEmx*Y#g`)HTw#}}{3EFu|M{P}nFNG%w zIdpH8n7RO2V@Z>^lb)}6=HH- zR5*9so3s^pQyrNB)+Qyz7r-OlA0#2cI2D3=?Ouyf6nl&YJpg_ChOg+xW@KY0ypiG~oY&xSFb@Mo-m^lLhTEhzV z2ioPbOtDxVUG1ihj2chcf%;(K$@$qAtg*_kE;h@f#VPy zM9;?YMQNb_qE%`cyE*<2ZCi^*0b`O{f@BSk^46s|Ypc6ipX1svsYo(372nHHY3!#Q z#F7~wIqnQC7=6N#3tD!W={#bY{Ej04l#tO`k_6RvzcB#LLLrP6+)zobqH+a8)XoTf zF~H*zC&Du5%*AMQ)gdy0m02H+3cK^1GrlKqKh={#t4`bh7a>D_Y?%?CH8wjnA}n;5$^?)H~30(Ty%^_g181FyM=e(^&*9 z0zU6MNoj_t1pqWKbSUy?YUchbYhI+5a38~G3h<_Uq_#TZq{S4Cj-l!f;RnK?^LvWt z5xb{r^>bCR!ey@~!Vffm*Aw6cq}Jy;%`9J`GNOnd)wrrsM8#}^*czFZ>MLD%U4{|< z5t&SH7^*#|w74gNk&{`@81{aOK5|)CJ!!WpV${VF9?t%9W~|0?6wy?7sTR>*D#02g z%)E6O^3wV?CZRthD3G-E8mG=3ZAY@$OfL(ELLbcmnMj`~I8$t{3|fW#NQbb@WN43P z=51L}MyF+b`hB@v#+bbj!;b3^x9?>SO9M<_pdvB{k4Wt*+A-oZ=s6rSkDypEJOEkzGz zj06o4xe*rBLHy287(s+A+9i~-^&Z@s7vFjHPt!I`VXzj)O|Mkw`YBs$VcIcqx}r}^ zXGv8GQp(z@2!Z`(Rdfvoz>JUK}pnx^a1d966wSqB` zF+ja>l<5#!**8v&4vgnoQP3TfYh8^I6I5nqM-KeT6=;?*h-Zl0r}5ggJMPjRdsRor zegvR`IJ~Z0#34t{Io6nQ9TUHD)RG^%-N3IJO^YR9{K_&ye)Rl6#+U*4vGT@BAJ^+! 
z>bbAlg=}+kP3A`45dEm%P|79`@*wb-FD1-fyzqh;H_c))=7hph@iWU3rKHsQfR2|y$P zpy=`O;A?Ba05IcRaSy>d7lss$^B4e$^xY?vG2u>~-6fu;Hs_`dZ>q>4*|<^vNOZTq!39sZ#Iullc&O-hTR8|3uG@e&fI$+v^@;vDKl0U zOfh~KCr8&(`EFq%J%APmMTwa0z8qMu^+jUB-5A5H3eT-J+C|W=xu;TkZ>xMztVB%^ z!rND4U;K=lF^~2N`vQ>}$BN6NTBGD`p_#8UVv)REPDOwgcqcC}yKx6oo zf)!@Vzhp>^wGhOHx_eddsaMXdk#Q&{Rb)$c0yuB$jVaRYy1^Q{qcgRxfjJ9FaH<ZFE4VJ+z2r9m76+Nu-OLjv& zW5*rO*Q=;3V^j&{L^GM~-)|lL zIko$MkjHKGC$YEMf#r`A7omuciwWvc@|4LCe`y zTQ=N~YvqS2qG3~*b`85~nM2_m9g5XG%7Ea2b+xdQx}bh8~^w(c!$Z4gpGoQA2v+KDTsV-%v|Ya@F!#Z(Edh_+^YTQ^xMbc6!}@mB-g znpx-j#HJYg4`Z;}Vde34vLMztd?2)|T6q?&z@UHqxscW0>dlCJ`q?a}$6{BY@l|KFos@CJk<-0p%EJh~gNBat+sk^SaMhLglMQQNGU>1MiQXtT8Qv>JzTw&@oXzCB?`84Y+ zqVd{Dhugbm@F$7$3Dxb-Z1tZgH z5;NY!4AHOoM2=nnZv^xL{F(p-3=9l>GbsMtNY>pjjifeIOC36gN5wGZpe$Ul^C9vY zAt^~hfdU1Jyz)r0Y#{DPw5#$5qk-@G{Z^rX1aAtQaYOnM=tBIu=B+y|R32P{MD-U4 z-Uy!BgRmM%6m1oQ)U7|7Cp97u=*3Bl=BMVpPQ?z9ved%P_V;a!URbM3zbUC3tw*k> zDcAPBifkP^W7V>09@$E$8*zrk0uwhHZ#Cq0XKHPA>qL}o&x5DhVg4&ZoUIB=ebNUqyyyAu8F z;Uk-gh~><##*An17Od{XcAVSryLux^eb4j)1sVa^!kvc(U#QPz%ky!q`HIFVyZKBc z>(Gp25O7Rk2$*H*S_O_B^APwnlvR2f*M-cqA+PU^G-Gt3K%}eyaaeU%_&!1DfFr}j z>31n;m7<@@=7dt4;A-`$x|ccM)mwNRDY|WmffW3rNaU2b6Y!770VFBT%}W!XGx4n` zxO1tLp{ECI8m>b>V#HWr(^y1f4EPa8vn;7Dr403FzG*2)z2t!F!bFFqcDcF@wqSXn znAd=-X{Nt8fl%Y4$s=Q=`2=nRyAxp187YRv*9`UAdk$-LzFNG65Icrt^cZY#I z>V!fOx3%NXY1*HzV8#+096Pu|`Hv&+ZZI_$!JAo9q^#G~Y0t*Z@(6VzTc!F|p6D|W z`GXq~=L?5L{6t`ITadluRQng{%5Xi-<#+*iq0{yYqm|VoR)SssFp$sj3roDTC>K_% z&FcjvW5`)o6&0=3!&$yDS&9DbDJtcyl>8^U075$pEq;|7`|#_GqH5TC=QJeqVBbJQNQ|%+i_e^sb7+=P$S7 z03dVLS&Y*_C}e)wvLaDZB~?-(1u1n6+nMfcoel9|jS{$qEfijN9gQNf}A~G==qh-w{KKMCF3nYZeT^T#Hb8s!jx-!R4 zYlAA`Tso~Khlkr3r-g2mvBL-T97jnSwL%1d-I~h+*cOCj4O|$|XQEExj#H^JN(_ak z@g6_fnchrmZ@T2Ndg=Hns{F8dVZH|?nJb3v(|*g+^E8wlM-nAeonL z3}8LjretY!6;&E0Fh_S0S6r5LO3I{6N+g@)ki*Zl`roa~r;o9>Jye_;vjI?W(pq)C z_LB?}J?UsNESTA$7v!1_^&89&S!|8+6V=MB~37(p-@QbfdE$xzvNHj+WChaDVqQZy*wOPsIV zL<+yGSEYn}L|4|szYFlAyk(^qeWoq%>}dHf-pKH(4ETO{WC_=ou=wyy5qXcmi>FRT z0G7ozEnWDHUGVFx1gS@c0nieYH$`6)=2&nEV0f00hv;~5YL96tx@uJ{kaAD{jZx38I<# zD{p4zue518Tpe@jJcAwR$ z7gyyHTw!VY(NZr5$x;i@*+9ca!fMy`d~7Bl-pKFfwu^x$b)IKphfA(FF$b5e&nIL)6GWo9Wrb zQzF@cq=ra0A&UyB<>j{Ml^y0C@Cp` zBs~07K3~Bktx-RtDa&T5iGp&X$j*?S&B!Fp2ItHX%qwr^=oL3w3%oNkqFt0ck{wTH zQ_xfRdV^f{F%XjbAIaDH+00RD^6X5=aU=IX`}wr9`ekj+K3sr$PkN6%dXuFV9Jx!v ze)f;xGt~vldoyxdeksPJ>Lqix)|RaD-D0hy%=>C%}iIdcrgOgs6JYUjA;d{iJ_Gd8%GQA(g9g z7y8iOg4Cwi!>3Rf{au}@8ODZ9A#mgVy4HnD45$h?F#vh%h4W~W!67p9XH&M@W`)t& z^cl_iGo0}$W^6O$lx5@~m-2^;d^5(Ec7Whlx*LA*D&y0?ifr+%*CZ0e(B~xRCT#!T zp7a5VEkZ2#*+n9HpM-}&m-o1N4glR)EM-AUTS*J}RFnh@MHMd@*(3F@rQVXui+mqOx4~G0RFxL1ONaykJ8ud9-;N~cF^}YTaUNg z;^Rx@f$<7H*`s9N+FtUV1O@^ESOQDQK#=w|L2bJ(X0Bh=Tp_YbzZe$=ZB2_5#h zdCYcEpm}2HV&C5nhEL1S1bz%ZyL@$&UY*g3_ZC@AtR_#FE+2y7L){zZPKtuSrHlQ@xZDlEUxsUp2^Jc znSf7_4j2D$vn9IE27uidcK^?(()3~9_P%XOv&*NR?PW$b>RiQ zq%-(bFu$lq>W8zm&c*SI))hvv4;*Oh*0z@-QXiMOt1npegWi`QFhW3M{c$HCOW80; zrT65~kl>JO)$etI%D>yE2{lybIs=LeZ4Dx{;nR;NFDa732UfWHVvn88%J}RXQY<8R zPu^lcNs;rxrebon9^u^G8u|K7(j*Nx=t{#O`M#L>1CACdoWGZoNrY)A^G$zRW*Rg) z`}0=MkaaEJ`WXNqKP99&_UY>+3&y7nO0ge4=-jK+Ge08~~7iG?=czEMAx-QVu|QkFp#U6$H2sd2q36Z~@mrH6r91 zA{M8T7`9c(i<;Ql_iPo}oplN$J0>I{2{9=|#vbS83@zUK{Kz3TON=F(QxDgj#KwF@ z?J5;$AKmq7Q+;yKVXLDlP9Onp_BTWh`J_y2#;|MjkPA*IXrDgACjk&dBY_ut7 zhpD!@9uiJ(`^Z3NZ<<-PYpkfI>g9XC#*_l*&7WA1C^s>-X@5-rnm)w`GfjEw^XB>3PDf1CK3{~9Oq2Jrs*k6jL>G0&5Nw_`0>#h0F6sSE zcKi{$ey7~BNmu}UOKVG zpNhNpSMf&^vHTY$J%dDZm?t6_e#1oPv(Cse<&3iP_1jtbHSnmKdUWVK%)YFC6*ME>NoBpe z*15mLcC!vMYKL1!QhUl38-B}hN0m+8|B-xFsKfMpm2w&-T?Fw+f{2sMQ_58~9^KQv 
zuQ2+)E2c#6u63?PjK{r>XH3g5DdE#uMmuW4l%*EHNk-CR=N^+b!%1m__f(nARr2Z8 zk$<%|(34?At6s2m3H9C?(RT74FiW%&0i&$PK7y?RanB7blE&fxGRt9*Sd`P zFFL~)$a<$Hp2GLEh<#MGRa(@Fh@V|-#q&9~#xonPVp;{=xn+!_+W8#Hh5C_k|7R8i zN~a~X^MJ>%f4Ax_Z#`Cq8^0P>0scs^XXr zy%p-TFwagtMAzCZ#|qV(XU{V>_n+p2HX1q&$}Dr2vD7B0x!)V_ikxb18o?Oe|1qN# zt<-+5?s)nuqaVZh?SA}i0q6@C^nt(jgg`my8W}Y%1kfKOJZ_}r<}b+ZBbd3U`9<&) zMRK5|5bJcUIX$k%(Q#KmAAQWEFBgsUli(o(_=kXjJR+PymXyI@!nnpzuJJM*h7;ea z-8?*g$%rJKNbpP%1>7@wsUXaMtbD0X0FKWPK;(G<%q{Oiq~_}@&EXK1K)7Tj4F~2O zh9jTy7bkho6BqGS2TyR_iEBu%0XK=g&j<*+Alx~3S(&Gjk4_r|Fta<3iQLCvkoO@F zko5^)DI`9-^+_<^(_ zxD$i`>K)iR3u4pzjEs3hf)g`L18LfYNyyEeoP7~8bIF5&hY6+CX#oqB)2T5(XqKX@ z(ff>B>3RT*L!Q7A#r^E*)tAglP4fItLt*BtSFgi0BZCxf;RDg|%;!PzgjxQ#1w# zo-XvEj6@J#GH2n@wjIiAGlgYtJd1{5%Mtz@P@6F6xx?{~EPV9L!Vui#AX(N;JFqDQ$i86CCsu$rxJ(mKlwxp0onjZwC znT)xD7U5Y2R#{?N@_~rNM6NO_4})F;4m4ZBXAX5N-lDAxQm9VY?<_rKFfm8N=6YP@ zxr4mrOn~pJ}x%{6B0@7B81~Lb zLkM{21^I?E2(UG5O{c#pPG{c(X(C~OQT7*9&YZh&=_()w27!jAmbQ+rp1y&h*WP&R zo%cTYNX6`1Yuk=pFXKp%FZ=&|xDW4H6873h?zx}Po=WpTpo0$C?|`!|IOT*bwps^n zt#l7X5IgY6XD{sgCB%87{5Q`G!)BUgrB(Vx>T?>G4DrM(%l%DQU(JOv<%@;Rm~Yg2 z8>}!thZNGV#6Sug4GOlX76r(nP$ZT}jbw6#QpFjoHQN3;V?yi2WE+j!G&M`L)2c~5 z5;-H2LZ#6eOcop9aC!VJ0Votf*DTB$NVZMO;mpfdcSNDfUF7i=mz0+I$}1|Xs%vWN>KhuHnp;|lt`$2~x$d3`6MHA!uwsC) zTM?`BlyM=IR(FJTJ|tGonaV?@|kth#`nWGkxu)kKR%8%frNyPaMXB-PFKGY6o zSTk&~uy3hko>HyVkrWeH6vLG&t&U{AS|D`jz0}QDhLYiHL59hHTk7mun-Big+9$-l z-&;SsUb2Oqs_`&IREuOZy4NZsdA2sa4Xe^F{hJLP+&TslNDKX+4B2w(knT3ksLDfd z&rib<9Ysc}^i@}=;U-Y4t#8tcBW-2=;g_36y-$&mnRR#bvKT&Ar#H?15ipGgUpC$(x#J#l4o2;vi${ATD3A(La+};p2vIs z!(#dxZfg#@(u0iOMmn;>{FR-q+s*KLA-V~UK);zmau#}7-~P1Y4M@}2i3OYHByAiv zpi`*WeGQyyU2To66I|yQJ7X70=hM@+)JB`LhS0?cUW&1&-MveuAX9rTtk#ZAbd@vx zRcA^syLsYkM2q+-r3L1~uF*2q+#<4p&q7gqqr&AC_a-<2_Z%2TPP%znY6W}EUzVE_ zwSzye$^8Eomk0NxVd$O{0CxqbMscN*R%^dV0;t~Q#B!HFMe65yQ*I4|g7*vw*G06? 
zBtebPK-8e%Qhv(v1*GT(9XWinW>y0EtR(_T@j?4qet;S?&HW9USwxubegbd{<-hAX^gc~m>Gn;2+ zB8zUt%;Q4h7*kGR2)&P&A^{#*;P8^Bw=_0aYIAO>4CEEK_wmwb8(1)^1In?~E(9-d z0aGp`0RR9N=>Ave3^-+642zlx5GIsyA)nM2^B3)voc=HFUQJK8mtue8+jC)^U3+d` zZR06!JiD`atWIrc-fE?Ww%YB)uF0WU6&35Up=}x}8tQUY@4 zDdR#a4aPd|743o~m1h=!2^p3xgPHl531p)JAm9Id@6iFRl%ezR2as>)rp47}@L)e~N9U7Rcp>6(ybD?< zLNxWAJaiGc#-^FOPAs_ec<#tt{zvB&n-;!BJ{9&6s&w}AeytGTv^;<2=l!rk{L#xR z-z1OnRQ)!?Bi=VKCirQ!Pz3$T;Uv(F&pf;guU~j1;u?JJp)X3(2NF`BLZ*MRAe~LW E0I?7Y`Tzg` literal 0 HcmV?d00001 diff --git a/docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff b/docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff new file mode 100644 index 0000000000000000000000000000000000000000..fc65a679c2263361a257094d35efba5431eac4da GIT binary patch literal 51872 zcmY&;Q;=p&u3 zCMPNi00R89v0MPs&kLx#^2h%l=0Em-H!)!m(VqanAI|Cr0XU{O5@PalN+Av)X3GD5CA~p|A`U)5Ac^D#HRLUHa}d@&pg0Ca01uJWHU2x`WcHH_7j8n zKfnYKnOS?7{&0T*fb@F+kZIhBPQbUhiGeWyP!;tP!}0@GI61^S^B?ku8~E{we((#- z4kW}udEL{-Eo(cYpSW6n006x72jvSP?$R~}?mzQ3|HJ_Z{{vVA zh@7o~jR^qI0{N4($4`#Q!cYt9_I6G``FH%R84e8qz(oLsmXfk}H2E1jn)(0p*unaJ z!0et$I>rDUD32&I`)>LND-O{fgosdmUA++b{X<8*Y3B&^IgZSyN9DlZX}2d(peOh~ z04EsS!K6XX^m-6&tNO-8k;0n9w_M50>#?bPO{HS9VO6J{$2s?9Aa;cH+L+hzpHkNd z-^DHb3cx+HGpSc=~N`~FXQx|UVGVNr2PYehwMMnx-nQW`m(#3vB4^JqY!P`HS& zy!hYHI7kE{Bz*M16GVJL0d8Rks08>qBJQBj6GS}Xf7kCciGc$0$~=!dSQ8pAD$RQzs<2pT0$~f0v)6ki|M1WCKQWHo!rAc zsLgpJO>Swnc0qUz@o>_82i+v6jPUp-gP-iuijRn2uwOdh!(0riv#SgnSBRRzLG~ON zh(rha_s|$4lGJI*RMOt-G6orMlU7JsJ=?&VE1{Dqrzw}3B9f!?(C|ifKrjES z?b_LoO`}k>*CAXaT-nU^eDTp2kM%vGdedha}5-Cxu#7*|(zE^gUv_i~E)#t}f zwl(9Kyl8ixg}l{{tX%4=4&5w!`Pgv0x5y4&6y;% zHe?N()np!dqNppDtk!(TS^8WpZ>O?{F^-x2v%B0glT);;lT;9gznSiBXfB`nG|ltY zFTo0vGK#c|ifD>-5RsA*1;dDzREepn3&D_~7=j>#+zAIT#og(7G2Iy=!oo@+HEj>) z6tCO_XhR_IVG-1oGwq)IU|7hE-F)wTjwdG4oQ^U%V1VO7qB}4Y39@)$IApFHsRbAF zhfExp`{#AJy~C){0#F|W<#U8i0=W-J0t{h=e~0`!jpyYn!Dot#KAy$+L=+{Gya5I` zGEcxK@l!4&LFtSS)P4{Hil7Gs%7++WZmqPKW0~OGM~TSDM5fN^8?`!kJ9&>9ma~XP z&mT84v(8HI}?>&oWo-RHd2Vj`Wx->H$H&|6aV0Fa7A(i8 zqzcs|Ej;;FwJoixkpp5zMj%3-W0CZsly_Ecc7V&y>5iOSj>BVo^j+4KyfBGN-x+tYY24#YhspR-r47!n7W`hfvP|G^)8>0nj{G+nz{6Ta^8tee#l%(4{vn2 zj6N%VOc1MxTK0}*;$ckMF69B}Q!=kB! 
zBerTKqsq&^)zrU3qpgqVq6ZCH39Pe>A=$jKv zJbE{*UkT0kY8`>yM_C_m*K^r&jmKJBliRn?A0XGe1RiwP_tQXjPxV!LN72}J00?`; zPjj6WcN<|F;Si`@9m5|2T`A)qpo|G^6md_GCtqqts^5EQdMG>nMvkjxlr^a`9{ojs zDH`^-&e%^>?aueMOos1woM6R!Srbg?DU-z}QFr;eQyI^DbF-exgKh$2m9WNAx{VXX zQaxOR67Hu3T9qJgRq-O^JouU)@S1Q2VqR4PT5v<`Q(o9wDcs8Hi0(O=!_)58qil?Uiw zB8>7Bt8+L-tObl2vxwj53$>IiSJ(@N2~!qzQ>8!(x4-7U2uxm|tAv?B!W$8PFT%Q; z61+g*>4i}LoSjDc1`+xneLZ76Jqv@Vy*)iUaCun!nBG`2{`ni$nU&HLu+kGSn1Am; zFj32~a(m`S)%^4P{dd_=n5KQvz`@Z)MGe5fhfWDO$k2;*_$B@Qu>guPFiZcP%acnk z&S+}D;5XOrrpN(8kcfhiObEp4kXN*Wq%B(Z2v@y2lK#Ln_!KiKd-8?izc)>ataK<6 zQu$#9H?`(6#KulfcP~~y?aZcl?ytOuN(D7n(`LolNOC^M zJC7dUgQj_0nnu8?28de%R1bod8}u-v}bv|nuO@}^mz5N zC~!M{fr1oK_QHh73KRkP6d`*Q{^m)-u#$PqW4Y90dEAt_#FTlAo<@K-ed2V@G=qKu zEJXdaY^bz&xYc+N4x}(`NI}~J0bNiM0>QZLLK;7M)S$pF%z!_CLI3fEGWyrIFr4y8vn}k7q5Hrq}?e{q8roAsA;Klj{ys)G)U*zZk43-;@c#)V^T#p{K1rF~VJ( zgnN>N^by+FnBi(lck<+z{^7rX$|zM^Dyj=V_4x#*;^Z%LV_p*$xsGBB6}|b;4ED6h z)`bl#a;0QFv;_gNheq18cjH>%$;a0!LDeLejVasVmCXyJCv30!&)UvmUhABO?)uIr zm-pIFZmsBlX$DsP*<1p$VJJuLnULf|Wfu!BAsqXetyAFSv#Wa-_O4v-nL<+v$A2@% zC-*%!AlcSzPMci0+H^Kmu3X(>y?S^qvK{5T4sM@~pFuqZxec-nv%#~kvbT24h@C`O zf2Z6ZHIg6eQnDt~PFXb?-IuW(!_ zx^{F9=-SyeyRE!m?mw|TiM-eO4F55$`}LRlJWE^lAIcef7j?3?hA1c0sYVlLduHcU z@3x7>Zj%*1~rFoA}KA86l>z)g;YAVw9kiUT@hLFbeo@Yw;uUU zg;UK^I+7w^XL|pHUzCN@vr5)Cw*!GQc(pQk0##QvIfg`a%7sKshj#x(atB@Kbf+lj zHOaNmwa>NAHQqJq^)EZ?I}tlNLKD-mizJStm8m3f)0#L%R*SOMJ>EEG#^1cZY0E4X z-4%E5$?3ek3??(Wy=|r}w2N#g8##hVe1hC7`;JD3JBUcv%FP$Ol`53K3VVZhJutB>pJTdn{j&D<> z0P#eL`$WFt8`3hHPO+^0Dz;*phxBBY!QEdnkUoSWkxl3N?L8nhH0U$5v^liM zBf1nhw6rd|%&Y|xRXwEoq3=v%?d=j>g)eQCbldnR{oloer)Xy{dr#NOAQAh>sQ8|8 zp*=RO6Ld#r3{|H8bt|SD2hK7(0b48RGdl#IBU-i-Y4$Bh$AM+X4t4?o^2QkTMxV5{ zSOx(D6Px13P~}Gu$Lnv9v`8*Y{dqiuEnZ++6QEVNfS)uV2W=_KRC zkID{J0jiZTv(^-&V08o4LNr?ui}d1iBbg;>n^{$xdDqU8!#Cl`MtQ5p_&&5eagnmH zi1NP?6+!tbiVT!RbCd;jlqGjVlM&^GW99kf<;7>^dFkavYvl!Pb%!;u(DI@til!zC zswPUVhw`#ul&JwpS{^@3&5$EHvOz8fhE@{ba6D8XLQ`d8S;k~hlp3@u6t!q*D+?Ve zPptLy6E!c|V-|@{&LvAPFHSF5N-t^B%%Ad9(`iuFX_Ve<6y#~t^l6X~WknDW)e>)R; zoX37#7KNKNg*z5<-Q#IG25~(ATpSW!9AjqguzQc*vJXPNL>M_F+_D=Mz6TW8M-@0^ zlGrD~*hfa%hhFdTrtSfy?joh`GpOxAZTAb=nu^*Qg5w(y{pq#OHu(LOX7YlOa;T>f z<7;T~XK?Q$ef9%H>Mk~F*CJ)dgmv%Px-Y|eaNW9}%X+-sdRWVP((Gc2{9@?%q8H=h zR7=VErizv`-DSePK0w24D-q}4Y$Y8lkwU7A44bo3vx^kpsoCbN&gL=?&kPgKf)kR` zgL3SnaBQWjDpZB>TEqc{zZ@sWj{<-J5Cup9)B(HzWq=|e5}*sv02Bf!etd0!4d6!u z0Kk5;17Lw90N_AYKal%@0{|Qt6aWj{_Jacetb3+T9FOb(J!0@MJM{s$IC=sN6)IKD zNtq&hynH%81vebuT($5MytswAwtY$-lMZc&enNE9$`I4__L`NOFlXWGW+uUiou>v1>BjMC+YC#;q!f zbKdd&Z%j%Bymh=iJg2(w#L3Js=@q)5Pig%gI=ZZO(Zmjmexcl6DoP$oBWDpK9P@`U z!n>ujze(p}zKzCRI};TwY^e~E7^TXfnEL`6LPAlLcUI2r?RS}m!UodrpV#PP_0|+ta z+oz!qxIf^w%XejWs<{YkA~Uqfa>frFBc7}SI!NASlXs^viU%ro+45)V9R#AnM0<%q zgSpheY7mk89`JNuTv|Wixw`sk7xuX*_(K7BUKR>e&O^t+qInN}ABQR?;d&h+4(lK&3rp$g!s*Tv)LD9;jXb-6)=So+^RM#gH>JX zbr@aNgejJH1o2^_N*wcC_1=3nshMF#RiBHg*X#{*IiJ4BLJ&2PA|UZkS^FqVGGZW8 z!liB=ZmvEC6lXb@jqcnH-wr3fzaP4xvE4jX=wWz!<!Dv+gyevBvSfy6=lSP%pQ$Ga}D5+(sy`m?M#8&)UfG}&Pp-2UpC7o?X}#5;aZ zFC?1l#>ZP!H5=<37W4Rgf2w#6m@~47ygcpg84_CVC(^i3;s+o33MTf>ru9 z`QI&LGWM*+X*3bwTNV@h{B54SHqeNj7m22Dr_aynaw9FabZ#3j*bcU2XKBaq&~qQx zb8C{5sv*?!Orir&bSbS_+$HyD#!1AvNX4VuRiqnAL~zk`sCI>rVo8;npamIqyqIX! 
zOzn;@1m3+x%%nWcOX=3MShSqddifmLvv|4p2qc?4g-}j|w?<8UwH(#uXSUGy;|q95 zSf@%cW4}2rIY3>IF4KbRCe5cG_=@{d8lrb!*+)rPf+S3yA<|8zoC7^Vzys|!@ zK(^iRVhFFJam^k>hZLe8=g_4QR8c#snx`ocShP~BI9PfoFyXQp0Dy7DWAFnF?=Q}m zD-zB0gAMV1tITDT^a<^%7Fk!M{}axBn4=oYV*3c#cb4(}C@Xmj$^Lj-?*1}vvWuUf zp>>I-+iLymG8{<3 z5u<%-gQcapo(#F##u`C%4X>=soblp`TJQR4uYO(O(8fj2>9uQIu*~e!CTo5EGnU-2 z7E~LrK9S7KN{Aw#j_RKE1;ix%Y&B*_zi`Rek(p-2rU2=sK(wM(>oO#5c5SG#^0Nypb3k3bc1u|nCM4Z3$!=o9QJ+H)r1?rw)H$9EFBbpymAGM7oSII|s z6=@2w;bCR)oN3mCrZ>P4IWl&(7{PpA%mQU~>=(@fv>U;XA&x@WxkQbmm~fsiQFX@# zPfuQ%Bx8nhs{4y;z%W5BSf9Wv(GrKR^?Ce+IeuSyJF%Gw=}Vh<_{>Hw+TYOHs45WV zQ=fPcj0Y69oJ09^X1Q|4m!d@rZ&k_vmNH|O-_rWp` zw4BRD6_LdCr4ebFD|1d+R9>NW+Q?F@6@Y&fVX`*zptjAk$JRF0i*8iGBlo2 zKOLW)@vwMxc0nS4UNm#$_rfXPAkzXWg0N&6cO=i4@;jI=MdUt|wP`GjSn$9Mndjg9?U1K zX;pokdX0Y6?k;cnkCf{pLZ|SVd!X9D+u)<`1-4qS_Q$8&QX-Iv7;brXzwusYBsC7P(h&To{52v>(n4mJx>hNZYH^P=FSnEe5xSfqEO z)h&@)ABWF$k%&-JcRft^1dLY&tkfOoBa!%}BoJl7TtgTSvC&N@02A;8tP&+fxe=2X zTCEYtSupgI8`e}IGzl`}6SxW^d6tr&Bo#`BjrP}2Yw(5W?Mz1@Lqkviji!qT^&E;u zxlQRK^&<0vhwx)=MI-5sMaFveC!vNUI8XLPdr|uYc~q3#U+5Q5F}Lbw5lG52UNOXL zn=-=PX27;wzIOTN4Bj7 zZLP=$wyE24*4?25T|(RZb}Y2E3>G~%CXWf<;H%=4gQn#M!&BwJumk1146DWu`#4@}KN3{6RtQPXN6 z&CtTK;0fD|f$0f|swjDXDHA~emBq6w^T!sNT!C-rdUa9F+LhYUU)dL~W81YDUzC zH@KqNrg_*!3wk`Ra&+uX**21(JH~S}4s(v3GDZ`pm1LuD$vqCeP-w^LIBA$?UMI$Z zpLO@g)3I*xJNc6iZ%)#7Bn#CA;MjFgV)6F(9=5ZEJC-I$Ui7xa-#a%c4!nfI>i1OU zBnGOb_NssP$fR(KL3rIqKEV=J9i|*cji0P~9CTRgI=;*z<|%d>pw_f0VRw6uYvx79 z1>?EP2Fk!GII1JqkWrD8yFI=RPI6uCA71YB-;z`H{sebV3m&2s{=-oiWKcvilZw$` zzKy`{Nt1)O$b_c=97;LB6=|Rn{lddrEq=%u3;fndF)UIVCX?E4AD)6ujmbn>T(!7^ z)jjtmdp1?e-K3ehGBt0OaOaA#L1RIcyhOjv+S3|Tir=X6--dQhQR#_=eFz6M1jMlA zg<$blar8LA?5PF0Q%a1dPHizJEs%v&dwo$g%aS%+CJJZpF!T7FOkNPkO3P;Rdqenz zHY<43q3?TDEH;FZm9bYOMi)jQj^`N^QP>Wvp#73YzD33RKz%(^vD19>a@)kzX!jJ$ zX_8Z};X{n1>B_47K4Hq~Wt9P=8 z{p8Bi(M8hG!lP~+jz4`1_Os3+vAWPzp)fem{L4h23#&Y}c*0pNDs>kKpWDbd{j7Mo zG7$usWt`^VfMfqxee!O8itUNuGW^-L!j8ahO83P^OipfiQjJWpY#@2>Ebn!zR#Ikg zLfxHu_TLQl^N^OW1ZRMP9rKG2W(lq3{U0FVHh~6k&l5x91En-0DOH=janfpY=~OdE zKcOHMG13hGdBZW)JYt_~PM@{#UsnH^EJ;%PASMux^Z=GNJ zF6%-u<%tm0T=sQiNOkRmWGCwu`O>CMzK8g*CYN`@VFY&bu4!NQ^_j1%H_d~k~BNh z8T-+g@#^v5k~>pNFA0!}4MF3IYvnDYCdpq~ggKMhBw4E%P!q9-b~NAm8kT`F1Iu!| z!^KtudfVWG88R>jL4-Yi@xqh_mA;W!!|IS{wK1+W?WhOS!t~oft58z+D)@C z&qQ()1ZGq25fn5KJ0Sy;DFRK)MTS-qg?gmc06*$pAyVGVpwQISK+zQh1o9o^+jGcZ zVp)fOkSFNH?4Q#@_+O7>&>wPpt9FqUtG-0Nk4O~UFg_G{K7T2$gpk-&ENz2`b%n~n zawL~l+CnE1CEt2x6h6{@9uyp(X2p(0_CG$}u8-4TrFBaf4ZxR+z0=|``|*1{WISc_uwJgPb`>l!j&x zaLNd()Y|katYpxv6dD>u1CO45c1P<@nyjVl2;K_yYe89gT6UUbNW4IADE<| z6SB1v9_rug}+Ujat&W40f8jp4Vn?Fsw zY?CFiUnBW$VjWfk#g6+c9GMQiuaPDv_2e`*THdfINxth>rLF5?yl@K=d zrH7I$V84Is`fan>;hjlH4__ijAP~HdxUom%U|-&)x?Xg8NDAJEs>yH>Obam$J0;yA zR{RTauP(;nft}{?Vp?W)^w|sB&b-yiqsVJiCt-(Bc%cF-HC`|=1{K=bH3Fmuk0lxg z>VKX=;f9>vc6YzprA}%8dBwsFUmb^Ay)Sf~{<`_vhLrAZ0WhA!2x5BV$eN>TqTWYn ziLjRF;}r#3nhS&I{H5BNDlH5sx2PcerK;o_td(L0cCFcU*{f&nIm5UPlB^f*dp#** zB;&;iA-c|#SeqC3x|E!?>?wwA+BWxj`(-l@Q~mWB(lub;P*hS-Bx6T$JIItR74s{f zB-@@C-*!GVBgQ&%%9UA=z)IhmQCaU~&?VaA+q$*-mdKEjA=NjIZwGomj|z*U z^7|42+A|Fu(-$+WJ99smWe}3~gk{+bZD#>^MWsSVf%wZPRzcC@nsvS*bUoxuG8|^m zX+5?VGit;d3sv#T*6nR41IoYAtljvgwY8>rdJkeRKlVP#2UOR5PU4t=dy7z^~o&8f! 
z{Xy3w(p_`Q8gfrJVT`zq8d^iAxem3VoJh5HYwM)7V~}MVcxkZ4vAl!IXap4}PRn0| z9;1{=`n?evruBwIuC1?ryDdh}VyyM2C*MEMm(AT7+M-9YS-d`!@9Z5tfXnPct9^V_ z)Vt*&;uxg{R!WUdC_R zv6?1LKq^3;Xp@b&A^#)}F&vrT%_`D0L06I%71sIV#D{=te`^WK8WRv7*I$#A&=P&S zy(Uz`>;BbB^%2x=!BsnQCF)f(6&3vY9)<3NrWC;wizfK%5Sc5)2Tj5qmx!U!)r!WK z++C#6kSX`{EccdpH74mccU@L--=osdaLwhh^9DkWqo_1{UZ7MKa=oZkPV@LcQnL;B zue=NIou;Z>u^Z2>XuEP4U1QjJrfb>If)m~4E)nCB{sGDc72Q^AN-QvfwuW_1a>dje zu;S%#Tmqq7IIx1^@AVWrl${?Fxtzo8Ts0}J?LYHIw^O^8@ToxvO1HTZ_eNtWu!+=? zMJI6Fw`GWTL|hnaOkwdJ_0=HmR%baHHR-__O&izuSvOdd%+2^Qfs&(({3uu0-SJgR zx)$@Rp&x92HtfC!HoffJ&@X-A!?66i`e9iohR7dVR6kINNyH<_ME zTVvR2^eTI*T6!>#(f9dD*s*I|VLsu>CmpwQZ}3D=Ip<7P%%^(G+{efeqW>^@EV5HGm&(3%7n6; zV~0fK@BrF?&uCdR@qk3Twoyi5lzaAw%AQBZk!pS{o!+R(ybByYg;1OmzXE|J*J$-P z>=`5!-h}V5?P&_+z)RzMFRLb@S%Fi~mk*(#0_sxCMNl5{_s}mtIs^*Mf`tKKQPKH| z+;q%VtT{Y^Ti|t*Bo>6dxmg}@R$$!w@)kqy3EH~&PfL|>pVD@d^7+Fnt*h`Tw~#2V zF`N!0vF{msyujc5g(q@6;xRvaD}I~+it0zQRE zrj$FYPzQ*4GEPUYhI&a%TxEo~e%Dw{AV^7yX?z8VoDlh>EW^=?x6sK`DFt=FtKE%W zdO7Vb>)A)|p72qk<1U_JO6?UUbp^BY-8J=g#)WiDjD|O{_*2r^8mo(l*;!g=s8wB_ z1qDv5OI=i#t4x`{$6@CY$Y5MSXcm>;_bL7)&B4ty^9xu%qMZIMi>Jf_5|rB;FcBG5 zdbG$N`}!h$ea#sxn&;zb`1*Y|9qigfII#{Mx5pJ68u#o5wK>rregAb-)kt~xu3!~0 z&f5-uHG+1W8OG`O2@B{J5~F42_Cdlg1v^vq<}aA14(3LP5dPhW&8lSk4|=~p8~U^5 zZgT0B#E5pre}3Gj`c&F+Cj^Du7E|1`SjT7l)PHsssKb*n4Zj68deml;#HkqhJ|_Dz z6yD}~)qs{^GMd>=iuLMBQXNI-Aux(O|8N%c6X0Etm>YU`hQ1JRXHi!@Bjv>8gX*hH zZoU#roWO^|*$+m64+}70ngJ9@#Lauml`8`#6I;=gQm}s<)~Ts*&XM?S>t&u9AJ{qe(W+mcW0Z+SEJj#b~D1T4tJ zB=h%o#gOT&*$^Fst`qjn3YpB1tdv?gXxPN+RJNeWj{VX>yevIR5Ke^(ifL%hE5K?| zt{SqJWMC7w70VOa94k9x)Q^R3Zi_VLk5#=_@$pys6}s?O z;;=<)QZD^PiaUN4HAJyuOx=E$B_q=Qlc)v;2}hC$fk{^ozuEA5aQ zpZ9D!Uc4vMARXVcl8+SX7&(qEq_Oed%0ata*KqQDySR*v4+5mh~ zdPbs;r>@p8i6LHS&57gjE=AXR)x~x9ViFs>?MCo%C3VwIUbr3qv2C&IhJ`G?TG>Xo z>C=2(lv>?=-R1B?+GHdi1{n6hTdRE*T8ZiGYd3%Q1lWSP^RF~JFl}zFyK&sj6WCaj zG)JJ#FxMjr9ZvM^D>OLw3-r-$SC*Q&Hq^nGKhO!NZ3c1JHkIF%zCH{1{`=W4cERUP z+r);f?=-X5cK_J^A($?IBF$V@Ie!mExqf{-Lu;+O+`EG=6q7v=a zRn5ZiaE6wsq3q>eVEL7_2=&_Lo|`Mna(Jy^W{mX*kogDLTIyxXl7tTfR?!zlCXpk% z4L?7Kkuj!HFSRI~^J#y~q7rNga)#v)rxov_dOi#7qf zdq17D#A)}=>iy|a$Llqq-r+T!Yvp4L=8=zZv1WSFZtq?yfwTM;^n79M>u^Z~~Jctmv)f$H!NmNC6YaXoy4U{Y~{PL(vx^MgQs$JzeQ@2Khm6}>e z$8EOFyOlx<2(Ex#;FqucL+;X?XV|F7bI7R9DuMTN`yRpfM~ua7>U`yPXkImpyc*q^ zJUC)LV`a7IR<$SxWI6Da_qM;(76oT|%X4xMiA3QehA0VK9Z4J4AoArGKb2w3AoDID z54*1ssR~|<#+FnlyooWfXhHdpay%b zMcTRT%<>eZC5ruV0IqB@Jhor>KHZ=k`>8C(pLJM76tqOBB8ObqsL(v#GF7R@Nq^Pxq8RMU9E@-A_tVnR$9a7_*nfXmgK0cE8-g;_ z3EXV8A)LNqt+NylmwxvxW8j1*W*NWtUMTU)`~C@)f56FmVZ(lb^I(2o5U9)g77zN} z_jJdw-lGBwr7?U8!{VS_=h9;R?eMjKPY@`jJ^ANb{^`9% z@l>EY9k{C)0<9Qms1S_e`n;6?8M1TZV-Qw=w{3d#3hrZT*=zct9(4nnc14HZRJuT+O*Z5B2H9`*Ez1rN+(h{^R{|gmPcHZxn46bBwI@H zS5&gXz(LKLE@n7>Xzzdwi~xqC%YI(=3MC^8x%nxh!nYtFh)6Zv6M&PEfoDP`E-hS5 z@Nl}P`5wDDUSA9hQ0t!R8E=%N4?taAG zYR1}|8E!qYHjQk8C-nvk+0-fk)#x4}lC2cVT}XiuB{C1f9hO(hJnsb@2c;NiRY%YfPJQ3Qn)jdO~r3~};^cQkCb&&NG6w3K9h9$(JW zD=6tAWAhopZF;JyDU0Wd&sX%L`ec8@gB*@gaI&9Js$vd@EZWQmlcxkE}vO z{vNMe^v$>z_*h|yol1Cm-xeIm9I{Jp9u)TyATfWoM)Gr^K<^Vt#KMhdEe=j!h z*WRode^+%$prkP{mH6WEnvvIm7>^`ZGYIS3pfwdRaz+LC*oTuluq-QKmBw?ivxwPe zBBuCrG36PbWTW=&0c05FNt`JJ3S__c!VKu4Skwv}6ddb`Cck>seSBc!HaGdNLG~y@L)l~3m)fq%w5Is(8U&PQV7@oC~nr zTqIn$Zi*!D5Kq2asBFc$lxc?RRgYggocNj}-Ki)yUuS+JnEj2gDYi$f?=fHeKAZ*PW<9@~s9>T>>^yrgPu1jMDo{-f{j<3BMWyIA zFSF{~?z#N=d~N9AxFv! 
z%KO%%ItkupS-vyHTK4&IYP!+bTo7cI%Yf*b*E>*mDxT(KZ$fjdloLW|m&^g-65wZe z`Ad#{GhZ{uQo?mw7B;3eW4vIfwmPq@r<`r4^nPHZuAq7=HG&MK>DJTgA20LD^KI9A zg$;N{V=Yk_dn=s%9SChd^?paiA?TL4%$a zl;b3m*VDuf8f<8weSEMG3{2?F*npr8=Ga#7DQa#fTY}I*Ns=0KzFc09Tl03rwGj=$ zLl%+0Yx2VJhx$=_Fy8RQxTbJjzxkrs7bvk~D4(MYi==n(4G(nv&d_qO>*( zXqy|i5rGG~ETRcv?QRQq2nzW(JC0Rl06-e z0vZeBLikDmOg^Fic|VMK0Qd!0heivtI+d_ zIIY0<;3zNOvU|tvX>L(Oaia)pg=W0@54#?Vp;!a2wtzZtDF7FuK_M>*ni7GT46iZ% zj8pB^dQdp^E;dJta=ePy%;@;-0U*e?3 zeRW@_Fna6{!E}Xs8=u4oYg<^!5_9iv}zWrpA6GE*p%SDQB}%#S5}P}eoaf7V5_ z3&GqkIk2(l6oacX!O!V~We?z+7mEMHFInu!R0gl+B;LnXU+D&a69g~Mtwea#3R8zZ zi#8o#WE`>qF3u^e^1Z{E`BuK;V;}g$`L^TmcM{)#3WE_JsuJ5C*bJ{17|l|&Hx{YC zs6z=vyWj3dvYj{j%g(rMw+%yJ26VbsnYYPc-9M+vQY$1Jh_Zbz>eer4-vp)(vcV$0 zAq+aY1Rfh#$lf%oO^)MU`GH(8kBmvO5-SC;S#ve$V9&xg+q=8Ho`=9^J|Fp?ziHos zmHHTRFO>Q|qj-a4dn6z!GE8ZAxsjg_5E%`c-Eux(X7IuUkU2AeL|9rn<-|-c#Mxa^fRoy2yYIBe! z;#G}lRaM}$-Taz;E|=_8E&S4Q1L~2&9a%Q`55GFKT9H$Ev{5{K>R+}^C)$*2PR*vq z?E3)!gaV3XWw|BnBD*2TiXEMI!#ocOkEZjZWA`J*Hj|?5VZ?^Dh#C2#C3toXu!~lrxV<*9lk^S}F(@!i5e4b%BN?-`pv*TgCbM#VO?~rPj_SAAj1*opoP`*C zju)G|l`At=){#TSs92N(EQrbAM1h0jVFQQLWsTAi3dTos4{*@*v{0-^3rGB8w)YU; zz~zXS&Fb61NAYF$!&J{*yggJ8IQJ}qhu*}N8;P#I#SO;DXVwEr*)+YL_L&)ZNH=r zJOnj#xE)%e?mEEE%xU@4k z?cX0BE+G`%?Ot`O<~81VPV|u9%#eNQK!Y&faMShwtsj04759CL9v2Hozemd`bC&mV zqu*^Qf^2&*C;@7Waw0A14pC_&- z3dFY7RyP?|Z!9mRmOd7G;=oDGqmin#g+r@>F_4$!FTNkGqaPn@3YMt~*(m8$P@y%1 z`z~*I@OlvI%wC|K^wEXY@2UB*+R(!bObDuzdL<}hX~gNGJpv5ezL$8bMHH|_@C~&$ z*Qs0QIQZMKDWJ)v{nl|Nyk8fb#(`mA)tsJvB|uV9RSYARk+7pm$njA|o1FtsM09l| zTwIHnmU$7aDbo57?wT|UYSF7N`F^eE=xx=Ty}n*3?O8s}ukwYzFNuxN=4<#SbLHj zL^r$g0(o@CLAH#=8;`H1(rd@BCcdnVTYTMv{>Z;S@fa4l<6V70+VyLP;dedzwIYUZ z4CjMtjj^*7F)KHvNGVM`5clzjrMU)4`M`BA_bKMa&pi)T!3U&F`$3sPmm+gkj49eM zsUT97sdrt}9kx!mp2Pdp&!bdimS+>VQ_syIx4x+#dR4cb@gxt^(w#=KvN505J)a9s zi8pi~I5OfBFvazU)V3US4|*GR_A#}yl+qw6K% zKS?26C*y;@`I#F1^6C3ynkczmxV>JOvubXEQ{%#4sM_J{agPMlD~PaAL9AcMi?rFg5H z?u>#bKER1E$-4J(v@nk*w>}mUGNt;b{%bYjSoGE**xT%Os@i{W7^5CW2ZTMwre|1? zK4y}ldmYB{7n(;Hs-tx~?lAHmgNW-He=I1i35i1`gQV+h3%oNtq+dhbD616hY#}`8 ze64)qS)D}SV3vlojNWNlHC~h(yvphKse&M-4IJ9RiA(X<{{wSCjK3C>X~EeaXr1bC zbjSO>p`LzEcf*3tplfLL)|KcE1Mb2b?-e7eFfhp$5I#PMO-%4$2_4@J#CvROubASP zOze3x&)U$;*D74<%zQHi&H2rI8*)TLv2an(qTB-sfGo%5YS? zCKNGz!;n4N64C##q=U_Q$*yF^d=n)F_OPNkUu2?G#$XDZw6#oA}a`cx&+wn)7UBZsHMO z0>=k(kQNFvz;9bjQu5RC^SH|4wiJW1V0PxdUI)LZq)%?S9yFkO*Kp^siI-16sU|k3 zJnn$bLp|G*m?SD@OT~3KtNZ~KTOu(qG(>bN2e49eE5Qk7#`Q9$s<)T*8wUE850>?p zdM%~>`cQ{?m(d<^2P$^e%#W1z+teCOR9{n}jp?nQFjj?{y!w)>?hJhV#t%McKYpqD z=*^aqmZs*~tVy5nGEm_YG%(X{5{v&&~>CmpcmL<%%#0!Ea2E(Qtk z>lEeI7vkPz>Rg~lU07pgtx6+hx8}KJLBB-2wz26!^;EU>N#id$h22vz%CM`i}FN+Sdbw@;UGyL73Mxp`~3kgeg87%FuR4o9b>IzF>Yjm#a~=dpUI zQIi@rZIW>lFDigz)?4DfVOJ;oyMKz`5A7?-mK?rt;dfI5Q@`Ukk|fUTtKTC|DPP8n zi1eNj9zpIb?JeWpLeA&uqS=TjAcBAh6SgyQAUJO9w)2pq4A<@=vWQX(8qUn zk=~kEwY$8|*kZL;8tcsZnfbm&+e~916oCK5)q{(@dn{k~2U3PAQ z`W9@mCGnoakpI#arXgj9*kx>TpCCtZ4f%o4l*K&W3ujOu|9A(J~MU;e=lUWQwC zgM2)@cL&*-eF86P=V+rjhH;(q2-mJsLmk7GXjH~|Z6z4w0Lo(_ltLK=n2qd!1j%^? 
zjbZp#-FcROIhWEQESh*gjtK}XOiW*#o3jQQt9I4LhJ#Pv^O@jqTm4*BL%_NqHfEa* zbzvu2&l+OeVs>%W4!7(l$)U#PI>Y+=1ZvO1JVtHeYjh227Cy?BpjO($IwRZ5$9quj zDeqX$;-O5rg-SZ^1XQxoJ{K zHK5JJM3s>_QN%Hg2u;Ps#6(Qm^5RM+vl9LCjW4V|^PSV*Y$IKJh(G%ZUVE}HkS;-) z%Y*KkiUs*ei_$dZ@|c{1IgfJ}t(RuFSJOGnGA$6E!^K8Dn$U8@!pl_hlNNu$?ROK^ZSD)y6fPZ48c568k-Bovcov16XWyi4?tPmN09|s)LJM za@8I1@4V((bZO zco_gcgX?#=L4!Hl@L_}QGQ%I0^i(G`R;m$Bz{4%<4;8FxYV?>Ym5W5+LZEsHB?kUl zd5>F1_pTxW{!k^Tme@jD!8Jb9nzR+jsL~4K6vSF+JA*m#Zc(qM3v9tLdqw<)QN@Kw8Kwt}l4(YMlV#$=pdhUt@{Ci_&xvDRb z=&M?uIrC~swB@GPUc0F!TJkEHm>QgV|F$vs9NR{&^mu+aKK>q$XN1N-zgM(?4&Vg7 z7sdEzH?Vr9%({(Cwscc9Wy|@lT=uk-f0xtQP{)0waa}7r&mww*To2Gv{RugWn{qsi zI?!_7*BX&>XduIFJdgg?LF$bSN+OdZx`-s-gYXKg9a;fH@C4j|GQLJ?==A8Szdbws zj(4iH8smaF-cz-!tlr-A#rn><4o#WHG=Fo+w&DI=>E5GGwZ{3>U5|f(Tz0KxD(0zg zt-Nq;$G!nSxlH`o)rYV9n^?_`f%prA@{u}_#d<-8PY7SZH6lqJ;kJx&R@%leNooa? z;Ug$=vWV|8DF(cZO7&%&InY8Ctk4WToMegg*@A+ov{F>s0AiI@Dx{$bu4JMp8{nd8 zXpO015&D}Lydh#zhLhvQEADJDM3CU7HBn>(P0#A4IQ`V#HVcfCLR8*<2y zY@0}>1DAERZj0CJ7vJ@MV#|KS)Rpv)H}(2Y8XDq5O;%UM*n)b@Tpx|52g0G+mW}` z+O2oi?E>m@G&fpH_G8M`cuBd=GepQ70rwmUKT zDj`S2S2b5&c3ozFEtR)gxV(isZQHlrUoMjQm6&iYU&GU;Bp9!3OfIbc@S81P3if5WaXv87vXvt zom80rP$^}BtTn3h)Lm62nSoU}A9OM|{vu_-z@BH};Q(XL8sTYp_!9ncQra!8P$+Jw zu$53@8C$NRsUV_lE)`8@o&qvQ)=8I3S-UlpcYNpO`w~N9_IIRGGuULPu*;m^sj=o-uqauAF7lC(<3WGowt~|W} zF=*s61)+|1NCzUfh^R456}N~;`U9qT)ze?-FqVdvE0dqf`7cnylxi{Ynpq4W%SM=z zgo!v8By?7EppR2i)SbKF-xW{5zqZ-=E1Z3o41Drmn|20go93E+_QdSm?Dp+*v;TPX zSl@}A5u{(tS052^-_{}gJzAd|`BbwTm06sNw#Sqi^JT=T?qC6ET(&C#||7A>mTbM?Mh_EY@u%N*i9_m^{I~T?v7K;&Uo^A;Cj#93_YS1N(Fe>-3w3gLexL5l5@_j;QA?+8PnU*V|%u%G= zT;gso(gu@`8ny6ACfX(kPRT%MvDB8)BciwsDap>o9sdtOewb$0`F|HiXXI0rm zKh3P8Rh3;3SiCufYh`%F7I1?SE}oa+B{F;z&wu4`oyt!fih6SI)#ue;DwlUD%Zo3_ z<+T^sc=T*U#BtFGGFzYUL5vjY7Y68fB&n<#ls91R8w%d9rS;{07t4Fk2V{6W5B?q*p5SnL<|?IY4VzDY5EO8& z46oP%ZcxJMd}8nt89s_<7jZcDz`5yr^?CJ|%H>_k@^n72^4dSJ`4orw3Kef9C>FdDQDT|6~`-mSDHY|vyYV%j5zYoT#|7flW}Xz z8;cyyO?hMe6B(|~EB{e>?1J*xjVfryPicu$!lu6q<?B~Vupd)koCXE zQ&EQbd}(0C8=!atJvcZ_?I$8#M;c`N35%Y@P}mrcyF;0MkRBv#lUE17BFG@lqpdtL zjjNOa%o3T?)3)H3f73C!Z!zifxV(lD(_~Le+N8E*0@HJEHiz6k{X|J^pxqUpv8eR} zwIP2v8gC%ZVCdr^!_>Cf*;FFZ<^hd zP2}CZoq<4SFC9xdX2^$BnBPe&oaZz_&&yU-A-w!g=zg0AuK;*?xgfy>JYPFP&)4q2 zbIXE!@4CSAIybDj;PbQcd0#9vyn;V5n>*fXQFFy(!W(I6x!Ynv2r^H*M$!cbblVS3m#OtueM8XV3fo%}`e13J&0JI0TvM@pxhu0*H0Gb% zJt(%SRce>V=Cqd8S48xtib{h`qZ+EX=PEp}FNZIfdn!W?UlLsWSC~UO%0qORjtf0Z z0e%}j1KbIW^TUOw-J#%&cWs{}=b552(8LLa7kK+TxruW$!w%blO1qog=X$#ph&dQv~4z6@meGC+12rK>U{J97oq zeZS%mKxtE`j^@BbmA#B_Az%U%BXb($L~Eqh`oFL>bg=^i|MqW>_w;OjXyB)Iq|-ar zAJ3hqKGDa{Q)g#Zz99ravc3b^QetuCvs=w9DFGMquDQTWKot@VuG5rIaNDdD(AmNI#dDB+afZC+kwD2AgPs4Ce6 z{;zWVI=TK)fEzg+SLJwqJ9&A1UVBRA@-EV%sQ(4Iyq4tjEvTO=S|7xGA_M#o_3k`< zpL@I%oh-Sz^jFQ35#jo2;2Bzr)=z_+&&P{dWMpwLN@HYk)$FIMrYsmr+Ljcc;>rny zI;DhYPDlyK$x$jBc8I0*rqtd(uLlSxxtMlk@0lEojaCmCQ$D-fT3S=d8gjzl#<8?tunv8u=e*OoS3HnQ=v(i>4jt0~B#&h1J?M;&e+zM-6~(h6q6*iWd{1S=StXSF;Mv?L;#-UzqC zF;n1 z@-+GUpv8aTdVKahbeNP1{Ac;`@;T+5ji6F*SH*LtT^V0Lk@2Ob_)-gF8y&EkzD~XQ z(SPUzNbx~#qQ@>_ypW#zQ|OJ#(Lx(u=D{j@>)=xO$Vmx%;MhAIT#DjlmOr@S~qHW1HIQ3T*^9>qPiL{FCIP}_Y z$*%sPmsZ63MSLgF6~1Sk&f7{#S7ozo-U3_7lETiMDq) z^=VXJ^enz2K9>XhHD~dmszJ};zZxBZ{7$3*q-mfrtz_FJ^Q z6VR>-)wh9PRY5a#RO|m7pcBrV@)u$@L%sZ$J|eltib&~s zVGbC(td{p+)$$&!R8HVqF{wE2wNkpEJFQmN#AEM$?_&dyud@J!SW(}SiUn-Y( z<(0QW{iU0{7vv3ggST#3v9PM^ZlP&TS`R(}FHj~Sct;^oapDM}$ ze-ux2ooHg~L>^v$B9D(+VDlN_t>U%xtjMd8eMzTU>8anLgw3LD89rM#r+%xfVyodt zJVQ!^>3lWknetZ4Ofl%+U!8A^vBeVw!MUx@$N-|Ib8p6t_ZI(-kMpox@!Cs0;o+MQ zDWIpN@oqS|{)6$c>9KM8!Ta8tn-e6gpk?Sg;3`~1%d|X&=hJd4m65|mwA?EG80y#j zcYggB#38PGH%rtk%^`Wt<;^7ey@k#VrhDiA?twU 
zpye)M6-{T|LJu7czY(+bvU|?j;=wWWCj>qLZmXP1$o`;97j3W1BucA*j zc^{+|ATjR4*{=~E2EfyBVG5{fx`BabjC)M%*fSju`cLJNgD=4!&A45ml}wqNZD>5} z2?RWFkwZ;>f78xjzfIjVJGEoS^rBZ)-QP5^{f%(Y=`Nkn)%aqW-l5xvdg;f11Uh?r zI|JEo2K-ZK-<)r0y5VGxbznz~ZCe1&C-FQ|MpHo?2|FJ<2B+(6J{;EBD9By7&QdKO zzoEMS1<|PB*EPI6gVTC)aC)9IdO;Y4G0ZqnEsP7>6l)YeuPQfOJqYMw!`09BEBb+n z^SYFEYpJKcF4I?N8x!yW1IXscGv@LDOwP#apKupd+SEMY9&zqy>}_kj>R_o_Z!L4+&dGNbdm#gw_NE?R!0R_wSCok5 z>MB=z)69;rzt0vJcGalMHAVnT?9${mQk3J8TQBADg7GzSIVvoF!(@tHke8@eQyuqe zDv-A?l0%LQo;l4<>C38?WzYtcR=K%T%7~x`D6@Q{Da9g7t&QBMBDpkKb=295W^9*E zgG+f#bA7|_f1J$y-SV+}t{+HGPtNW#Kky&o!LIRzcklUO*@3-BuUhEnwfDpqzLVX7 zeu99PG4c06Z1N4aw3H`Si1UpOoDOjc!` z#nMu`Gg>P9aY;tpyv)9ZnSfc$HA{$S^2}s1j?NwJ@f{tGc&y9otQ+dxzmQsJ?5|EZ zJuNlNnOd3;gbzR>G(^(?ec)CU~(`ZD`K_spEv-&1dI4Al6`%gXGo z7U$qLZRyD9=*as2hMqO?v$yMnJ28xhNz_}O7FA>_RUVOtsi2tpp^He4pL*HTOV2%E z%Z#}bYO2?mx>h5bRH(BNJ=B@IJ0e-+f)wMT@#{l;o$}4Y`%a*U9%A=R6^Yq9Qjc zrIT<0wX?L92H9eP`v*qunZm4vY^}iTTlMELe~#8;+6+U9xH#L`ay%a+dzZyZUntvG zjD(YNe`j+vPIf40bC$~DJBnzNmDd-PM|dgl3rdyj8wt*2@i&#_G5^|-Le6T)gXh-s zJh(#tZ78gVLUOu60QgiJg*P(2izwk*8E#O*sb3z0m*m0K9FBPS!KU@=^Xe~^%S-#N zte&69<)!_=DQLf5fd{9cgy-bVJUHDua?*Nq3Oo#}z6@{|h4;d^`Y>UmZ!Ka^K~MK0e#0@X0OSpWgKSx!+Ca)z>S-XY$}F8NQv?B;rYf z^z7?OkQ46CWjkei6uEenqWo?}CkE#BD9~d^!;exjMWjQrSu0TYW@2kgI}j zsX>pLxzp*hpeu zlf}8NC^nKb9;2LFMoak<8i}Z`ib2OUAU4!cU9}Z-)u!`kS&|Nqh|*&P*x95~+}&St zuanY#sYqtqkjb6&If4#L$)yHQ`)D+$D%xAy6?*20W2fI0pGf))mzG!@LGkFx1J^~T zl8!1z$nN%}TkAJ>w;MG6<3mq;zAlpSy6quHl@f<(ne?6RbVf_+= z6z5)s^M7aj%Y<-omF_0Kh;&jXyE$P#mo2jvDg9^Ea=fDecfRuwp&_UIV7oQ;P)?aY z9(RXR8^1jO*9tc1YAE%pAV^0_@ZUWxJs72jTj_(eM_$e2ZCJh(6kW=>h9-wrrQ~!U zNYsm?qyK#7^yxGI{MD(6iK)3;ZzUu1r16=@9(!irkptHpIP${me=vM;yog$IJN>eShWF*c&&u#jVR=CC=JhBsT>vJZ-@%imi5!SZJ7 zni4N}$o)?$@IFR)y^pUE(H!38@yo+L{oaoCsmDQ{oTe~30=&K^buYkYir{*>M)>#| z5f!97Jk9j1@DOJH%bg>c+t23=aPu?n>HeE6jay_^FUXm~wA9N#E9z1u3*~-c5bj(AH291IO{}aN75wK(%RJ^v$x^ zE&M#p5X^m@R`s9Bu@Q5Tw-l@*W~2N zH@-nSiMnQ24cVXl%E02l3k-jZ7oZHmJbqdQB^+g%RwmO(aD~maS^9-s9_W!$BCiL! 
[... GIT binary patch data (base85) omitted ...]

literal 0
HcmV?d00001

diff --git a/docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff b/docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff
new file mode 100644
index 0000000000000000000000000000000000000000..7d63d89f24bcc0c0f5dc3cda086fee67da266ebf
GIT binary patch
literal 50664
[... GIT binary patch data (base85, 50664-byte WOFF payload) omitted; data continues beyond this excerpt ...]
zC-Sn$r$z=;l1Pb?7&D6`jd0MPD|5Ns_^(OI6C5-R2h@OM4g_q}3Ca28+%8ZUVbqUh zy_FE_#EVpLQ0Zkdb=^ZP8~t@GspWNjp#+rQ0m~W$m%o<~{+$L|ysCKniqI6nRt2~ew31flJ7TD#&r>X7dn?8xDm=nTQV(Q$Xe%J+Aeog9two)yrst~d*4Im*>qOOKAgPu+ z&hCY6IaR9m{O>{<@si{1uRYm5LN99EM~MMpa&*x5CP~`h-}?cZm%O%cH&k+8)OSM$ z$L?dhu@hOHiYM?_`f&W&h<>|RCL6Mbf^{L`A1KQICJ?A@)VA#&S86$E`F)2 z3^}Avi6XV1n|elL<_u#T1iYst#teNL0`gHTmUYzzq-2?*l#0%2sWx{FQnH)XMrO^L z83)uO)qldN(KzTW?Sw>>jnA@;Pg9Iz*z6IXzfN;vIeb7lKSrfb3&YpVRp|rY6T*waC=&!dTp%DaX zKsuJjWpvHT^q;z!>H7M-_+RypQnh^Nq~|-3svr^Av~79JgegfyeNij2DQR@|E=8KU_3(U1 z0BNXa0{msDmI*TYcRjZk*%AjJaO)W4XAT#VXy^Xpz89F zhpDGpu}z=ux2M#ZG-XL&m?+}NAp46Y*L&Epr`<{xqQC6&eVn&?F#BG5)*0GC+xq%l zrPkXYIWiHmqvA~d5Kta^z7RF&YC&)8GZ9qg;;bZo1GrK-+c1XpL5eUfj#Wrvg)9_8ZOE)cP+HP^qs&5&!T_){P!-qeKmr2cAeLT(}g`IEj zd6bYb|LkRk;l~9KpP0<49>I_LR~xZ7JDJ2Ic?J0O-g5nF+dsy7$Tp|hG)yfdxzs;~ zm80CbLyHl_R2 zdlPU~Yex6=X8y1{{u@2VGftt!a;pKJqcDsV;PYmH(Nqa0NWvO6(NG~)GqEXBXg zTP6awQ%1vbbZit&@l)1_`Y4hDgs12`L7AStX5ZFg5a+MEVhh^Pzt&~$RCFnp*Qvum znBh~va})%|nSZL*slP*f^EembS_HSsAlJzLKsrH8(TbRB!j~j2^3xQqdI31JJVgi* zyg;FwYnWN+Xz0@{A8AnV+(ILV@z;9je`~QdEV|aBD)p;OFH$2YoLZm>P_qRwVLeHIv0u`uwtfQ;v2PCs!H$C zJQYCO8Nu*B7YsuumHA$}Z9Xa9cWb+^m{{siVTFJp&0^;IN6_Yw_xdJE)Ek>Mt;yu7 z`(Y=oSOV{p1wzfQa|}WaeTFL>xm;DL<-wfZmE3hy{&Z{kS+shJEY#162WN#Ys{BS4 zIhx_J8iq=!93IGcbS;Wd15EcVhyhIDBvjP2K(m4(O7HgrGnPv<|sMdF9jMGRFYqJX z=AwzFbNhCqX4QS;TXMVF>b0v4R*4hi6f_BOD1f4wJsx5UPJL8f8xtV)vyB}TtPc5G z`mSt8!g%<7=VS}+`hi*!;O%uV8RDPD+d8s+dLa@P3z~?J=SQhfykyl0s`dr`U%^ zhlZMd?u2Cs_Ba+@W<-3LvIrKVx!~GddqAnM)~Z` zE+Bub@%vg7TWLgK=L(LN|K0aW3UQuGd0v@jQE_17T}2l5Ss{l77?!!3_ZQy7I_g(H z$JvimWGILhP|?u@!lW=Nzr#GbRq5|TkyI57FVNJCs^&B&2PL38DUCm7Nu{XPM#|w) zVdiFa{4Qc6uXo0Px${L`Na}kwhdq$g@PG`4Jf}%c@|JVeDQ4EexOJOgt~8a_&bv*n zhwb;GZa#--jKc*4j{rPCU5s(+2-wkoi#_>oQOGius!=G|}Kc>&%JIt4d#h6}igOR`z31Hrbk(GqzdxL8x+%GX!e7VlcZL^}?c#fv|aDNRWtGV85(7fVlU@UpO zyk!32$@TGa@ER2@Y1(eem1inlCH@aYVjvJ;T3Wt>gGV9uAjQQ-<%7WqrV|s(rZ}p9 z_T>^A=l&#aul2^nY<6%i7RBrOBs;g=z<60%<97z7TEC+NynqrU={J@~$$i=gAT@t$ zWbkRuil`iAuRO{+Y2TwVQLy8Nvtx$CPKj0>c;L!RM>jbsZTB1jCiD0=JiazT$t~KaxVYUj+x^cOmNBbHX+#AcH*ldqmh+yDu+(y~Mpr1_GWSQN*pk?2$r z#fnGLo5kTo)}Z-_W1xEp0-I9D>2=sO&#RR=olWIr=&(eLs(d`Co3V6D5+4ed zl9DRzJ0}gEGi>SbQC}}Kw6*`5meVnHlDJNkyH=`INzA~<>9jRZ*s##Wh+Y&X_>bPb zFbc^@io|~Q)P4!)GkP8e6aBo6i%T@FpKxC296ED~! 
z6VNyq@Oh`4i)Y(Z{r!V+E{=2L1v~tg^CTpnL8%3|lcP$8fKB;y3ii5}`d!_nLv8Ny zZ@<2J!sFr?agO>PD>(ykG;M=dHpz3`Y;lJ__*6U_DqGJdn`Z5bekw-fYx)Cf+l#RQ zqJFP=Lm6QBb*L@=n9B9Y)KTIyM{!o@ZET4Gw+~qC;w9`I;uTW{t zOgo&lMQ}tJLX5d7aB%@j);w2LCxma|zq>603X_6yF z`C{rpWl8?Nbm=sxLi~T|`o`eSo}k^>8{4++WRs0;Yh&BCZQHhO+jg>Xelc(U@2&UK zt=ly-=bWyo`7l$}r>AH7c|>C(#}p)^fq8$MyI_2p7}bUR8x&L87+KDk=m>2(d+ z2qG{j)JxJ($|3bcVB9rEw*~i3S^G;bd4(^x6Z@kh3eP;V>^nF7{X0FZI32|zgM5_q zLJ;0P&xU#K4}Pwo0~AyBBfPVHYhSrRu^u77dGdv&BbbD+iJlQZIX3NHdVn%l0@Kh4 zQ}kd^Fu}8$dAsD`c=TQ`hw&T#Vf;7I$7%?KA2H`+%*8-@Y!ypLRpg@jbA*JYlMzy=wL| z{_@kAU`CCHgk!EcIUaj#A820X2lbWbtM4{&snY5CbJM1faUR>zRzFQTV)oyLuROzL z=+Vs-*b`y7^#iu*QEdO4h`3OCz;Z}Bsd`ZhlA~?Puir%ng>rre62Pjf8>TaVoHQG< zFqV>oe`}Y3$2&+8l}ibjB>$P9{z_9qr>cY?O%e@oH(+BB!7BhbA$`L&6E^-0|4O@a zpGb{VADc1f?YnO5s(X1|O_&{8g*vbk$zZUNK--pk_;OQtH}Y@13V0gFnJ-{Eh;cfd zblk&78TEb%(;Lxk&MJJ9xD}%{=to^|jW102LGv=4d zYuu!rf|pRemAy^i_V#e#;SRtK`EJfVa_#fBP0K~^CRSE{W4)wkM;Dc$-JD5c@P+0| zsUaF&U~@Q7XHnt>%9IYmVLpac>>z?^$V8sSw0ZE4X-Pi`0&XHP5&y>x%@4u|^6&RK z$+_Rxg->cKLD)dp7}dG*fOG$mb7mC{1p8d#{DVLQMd1g!FWm|kM9tVu) z9qdd@;1I*K;*@Lj2IJ{K3|OOHfFioF23scpK_23 zS>!7r+9s`DtCN0@g1CtB@z5ao#1t1KdC=#I$7HcPXhg>9$5lpxy@+-FM0bSC4v z(r{g?{@toBYMazvr==#~c<>FJ-kU9Xu-9TikLgPEAzvN7GQP%WOuLPQ>^Dhoby;{d z%9>t6R$Ql5*(pv6IKgHBopO^vp;xOvRC&|+1?6P5ML1w|qinL~S!3zYU}=6C1N$|B zT8CXl?s$%Rci+j`g6cvkJ-F?981BZRPG>%>Sh@O&>-N4)XXm^1NL&>1`u8`F36gKB zW1S)PlFQU!R-f&()1gqHC6#&44cIV<8@YyCp%?qu4Vj~B%pa*e|WFvdOzH2 z-nh|krr1S^O|)Xw-05X}_8}|$MtqIwy(9a15j!6n^k?g~b1hE~EBeX0YQs@bj_tzx z*F7H#@_k!irpBu@*iJgMVrSPSqY|$cd9Kh7(o=QUIj4UUL0NHuET3Iu4Q2%Yj<~oc z-LYp{&V`lYQWA|6Ye^Squ-O4wS(3d*cl7=%ZEGSH+DnSLRaQD4{ZmQk>n!7RYTnTG zjz9lF9TFe^R}Sm1Gk} zjfM7@MU}YAm%w!3^S9Sm#{W?I54E;jr!7?N+V5-WeHd&;Qayy>`bX#%b0aeBWBT|g+- z^ThJ1@uN&^))QLI8PA6vi5-JZj#fAi$g!XnQRV_?=lTwh#-Cj?!iMXS=N_>`l5ihC z?UOp~oBZDrdD=$<$o0~lPC_%^E8CjCot(z+(OudEiF^wI#j)K zROwP8Z^WwJJ{;pp*_fI}*Z-ms5Uw53uq!%n#{sJ6gX`}$4TI!k+f}5YuwQMF2T&k$ ziy1zRxAYW$Jxz>DRFhn5aZ`YMGN?PeJ#?_W)%xnNCi$v5JiJG+~#P+;V) z1wlZhVoNR}{I&OZF(nI`Y%SeAQ-?7$>e;aAGxd)7rPmxo&0%%?>Ne7>%>zrEIL1;w zGB!5xqONhM@B_QRCEjLAQrJipdL*0=MsRUA6-IuJc-;SS!r|oP6l+81PM|Nn3hb;w zm?OnTZjAMZliG;|@^6$IqSXhgPqT{m)nZzAc)JL*98c_qjRkEYtp|HPtnjW72Gv=}5YG(m`%f z170S(ltA3_u=J0tBKBxz6fI=<=rJ{YC=r=LVd27C3%FqHePb$y);$c(7H^Nmr>o48 z)=`Y+Urltxww_VOAAdJuo+a=Zle3sNdClskfH zRd}Y7W4C;w{&LleU{XH4TWz~q5iBk*Z8j%ll-&D79tpNJaKj;#i(?vns*LD1PWeTX z=d4Wcf(zbsJ<2h1C)Jf;`dT6ZMk>?@cCb59NsiJKr||ojH!ax%n`USXrTmx5d%=jP zl$__Yom5_lx0QLrDmsg(aC}F-N&U#_g0gBm9$WJM(}`W6?gE?0Cb}3Oo2+eLsniIQ zkQaXKWFweBRNO1*A95BgBV%2EAx&f~rmP@yTHd#U063YyQYB8&7N`u8E`wT8IZ#)#zcV-@b+OW5KQIqdlMUf6RPm4zF(l2wzCc)B4ZVe> z!_h*=)5K+I$IjSbOab>m)(Na;BeE_F>FU>gp&p4J6^YCC+^bHyT@RRlOh=k~af`3= zhmcZI(f1#O6MtJ}*jfyq61z2gS5$M=4^~6&i8}q)>DH?PLsV6(2i`bLBX8$V+oj)) zz-(zCYEPr?Z;{s)eUyX0YfY-@7u{TjyQem1h+Y`qeDkkw2d|>v@q8j@tt3DGMBdrD zqLx8L3(p1{3w!8WHUg}XbfnhAh@?a0LjKEBL zBJZcUoZBVCsB$7qr5((3^%e*H^RX*9^C4C62~N;%OyiN73g~knQzt)`wA7hzaK7*w zE(YCyGnS?D07ZX|CFU;WcLVowo8PK1U8ud@2v;-{Qh?ZfFE?}TVJVg@RDc59Ac~uA z9Wo?^`ZDc@uv8bhM9w$3dg)>GKo-j5@p0CblTB}cfk3cqkN4hl^!OfD7_)fW2m@~-DkZtUtJ?dfht7zOL&tVJh&U))5kezH^v)+i~qfXbX z-vatx_Pq8^y6+0FQAojyC&H$8(g4U!RKtb_`$|DqqsDJv0APmJg4R)y?Z(K9B|xK$ zUf{xN`gSUm3ZuBqfuS(`bcAQSy=yUTlIJr*>0WzB<7XmIZ87w--S~QAOy&w3@I3}0 z7lp52&+~`gXG!wBN#Pbq1iv8S-?%((QTGtuXY^F?ei)mB)T(#&U>RyEW~?-M>dhC2 zHe{}@qWPj9tRw5IP+u&mPHRW!Z60K5DhqJGG|zBnWcdzWI7LK%4_9FaBR9TZNZ15| z8P?`gFm&~KYgk&m zj74>rLcaQGG9*~28V4M1j6ON5y7PWD1m#oAZ)ca`d4A|TG)G4>RaZ}(+OG_C7XJF| z8HSsIWXjo+b6Sf1tx(j(RRTB!4<3b@58Gl&dJPb&sI)iJmS9d01dWj=MTZe<1w{xZ 
zNsGsM8xbv$70}tNAH$_V0Qc&*V?>2{spPAlfUq=GGWG3dm}3ny!S4R8=A0~b^8-pq z=h4~OGlCK(66mZM&5mtxP3rks>1K^Tc~+S)gcSKGu2v9=-uvQU`HeOjo5=RcuA3Md-qV3j&Y5{y zaf$nnt$bit31f>I0w;MP%Zud1paBLPovq0gE0ZRG-#OVQU!I`z{<^ulH-V|WB_pwVX5{Pg zD0K5lqF(V&E+48DRN*VEq`8$&XL!j+S2fgX<&_a#NL}twvm8^wfubjz$78srD?YT$ zuuj_i>=p!t;_}kl<4$B03J#3Rmm(Tgscj)f2rp{hl@nYm^3n;M_m?)fkt|sTW)FVI z&VDz4x&A4zI_q**0Q}mx$cxdo@AOjDCL|_6eSK{@0W@)N?ROLB3`LN z_Hq8RJxZqZ0&F=50$0MPGjbTm2Bm4K2hF_%gX&SKCwdpLKP}bq?e8BeX7HaCtDQ6i z8RzrJ37bbdFfw|phA@(6Z9UhpVM5Qt0WY=Xd?P+_DL4%xIsTcFA&KXDRkdQ=My zF6IiI_vPdB#)~=28G`Nyh7J1RS`Rh+M+>&9E{~uKC>|`F_<7X=y=vr5Y95BvKu;=KY7mys1HO<6RUa%M(p_61 z%Iln^6G-Xf3EbBy%?O42$=$--Al#gPRB1OgDK<_K78-IG3mrr_hi?q~QIA zlrEJu%mI6hycyX#lbNcdM|yFky5G)xLGgO#dCLa+J?R>T!{}WZ#N~3AR;MIiKpKg` zDF5JuPf6b^c=80yU9Ula8JM@%oo5D%_2!NJj1?HTe#yz2{B28)k|fR%TT0(|946ku7hSezA_FxT^}D;nj3u z_A-@N+qPOJREF8NFEQWFF7q_H4IyE@X%u zdrDCF;QbpYxDx$>l~sI~q6!|d%*|2miX_G@40U7x*R2T?rluO%XZ%-r$~z0=S};=- ziDwYDA5ov-zN|ln6Fa_OvymT3zYsY#QX-N#MXQ_UpsZ=z@*?8`9-K=o>y}Wb;I}Lg zt4HSNmq{(tam26YAqYeI^b_O9SmsaAU*(Fo8tnmTpu{Yf%Ljjl*vElkfNs)22Y@h1 zK0EDl>4cfoB0!0sZ_<53Uy53VN_0^sDYfQqSZJD2=D|f00)XzBBh(SX_jy=D7|;*5 zvPvjRZxbI_dHJzR65{}IBA0CykRp}Hk2xL3EZtHrQA@Vi;k0;FvNcxW)zU9j{v{Qn z5}C+yB~6NR>+N11cY7+9TG*?XT0K`eCoLgzHCH=JdMFas|EzTD%cjvpx8D`~7V@j^ zz#T>~0{4fOnvcg`{<LC!xJS8=+p5siAPDHi&C)={&gRN!-xPN=YtkA?nSG} z09P{9Gp{r-Yw)$PGB=d_7>y?Eq#rxyy!}x@ebf#HIlKPcro6P!YAR@VRH9Z~>@D}a zqQKe6_=){^?PiRmG^_oh&(YmEhdUV0JM2{0^4CSx8wfh^sC!_{aKNj|&RnYm`)7-D z7>(gJID~UJwyQb`@puvA_f|g!Z@5EQ{gW7Y{c9VVTpb>)*+-n`tyNm zf^&QP%(UUFaQe}uFKQ+WephA2#*Q&Wu5VQ@P7!5BGkn>&)q@VSeZ5Aht*j@PKJ`KGsBYWn{c;qqa0O{^H>>ahJc2=a2;^-DyWiDIy* z5jGog2xi|N0(KAY4p;dusju(XqFv->aL#~Z!%5C$%Ha@H8Jzm1?*>%#JC~5PB&x<5 zgmFPu5^Z8$I?Z>4H;6t7H$>7yg1y~6 zRP3z0HNOY%)P+Syq>4fKg+qun^ATD4x%tq87`8UO_5=@qFtX~t`+&a*P)fC?xieWlZ+29`=S-PpSz0LAE^D z?qX<%-Jlxd(sbAW*I3kvW5=T`o}|n#F>6>UkL8)DW=|cyY|)s+v+r(Xr>d#8k!{Oj zW3O#)TKd<#%&8M@QkL$Ba{Zj!cPMF8? zNXdYJSwPcu?PG84XxVC+DvHY1sl8o2?UzU~x}}dg|@&us~%Me=XX({Lj`2K(lsmD|OlWf~nX+`Su9wN*(YF?N+rk-9lPszH&hh;vZ0`;A_~^!vDw$K%`<VlFE94j(ZW2fW9v|X5Ri`&>+?n(|Dfr>4VC0@V zESgMX+HbmhWudi9izBbr(&e1jq?bf<RnF-T=beDCpCuT@#mO5Pr9R=-#x7=+H_l?=astDYFk@K9Q}0Js5mU{vEM;KnqDdKpEb!;6@M5t6)u#RqY+ zq@;|OUA}=EsJS-eZy@|Tm=Apdup9dkde7q(GU3EjF%bOJPUHFI1MR-!%UhLB7m%pg z@WKJ?sBe$)iGB1qb&AVj&gxVvz$E5YY(7SvOFh9e(%79eDfD;|OA~1tKC)FDYth{w zuPWvS^DBq>vvL`2U~jx!Ta=n$6peBE*-?a3is0+l8w1o@#51ZO!ZXIp7}}g!_0K`c zub2~p?A3!^e&IDF!oQZ8_&2jLlWI>7{Lr7(L!VszYN8HxUqi3aZqWpBHUq9YLMWwe z!K8wO4TP)fcV(m_`k2EUl79>$T$T;cHuh@}ATG9J)Fq5{QhTi}#BpmXY#lp5_W%8} z>pQlIRrOhL?m7W|z6sEnzS0|*LRofRR!K2>TkZVUagCp0ff7nncxAKG+gRNL<8Bet zdGsaV|Nab{eY(+$QET&v#c4rtUjTy)o^u>{wigWdOg|2)mKhaoKF2X2!#XCs4H9kkDWDij*A<9e)m~Fn!hAVAu_y|gtj!K9K zn>@zE^PPU04b(RBfj2aK6S$w8MlLZ{8elp{OnJOi=QT%E51~Vmr~=>89BpGmTXokp z>JkeRHx&};GvC;?+Y?;;8f-`Wd=r0EQBxskuSb+VWff4&d5uR0I2C&fY2A{<#e$567GzL?8q;RJ~Jm`)AbPT?KOv- z7W{??w((p1k!@9(AphMUvrUMTS_gW323(^iwU`P8rJ!Nh{TDz6!oE+amG-gNvC$T5l z%K-luSHldxsLN4)BqGX>j#c7^5r|(mBzI1NuyL1C4kX=nzsd=bp+N-`UA^ZL^Jucf zJ6%F=g+aV?W9XtJes`IzZqO%<|oZUWGB;|;C3R!0Hx{E(u^rN zpjr`dpLL@?YVioCD;})Gk>;Ty`vkjb7)kDIM$cuVKv0Ql6Lv0wla_=8#*idZl&Ew! zel=9msON^)ZdDn00bj?T4Ao_hKt4(40*qpu+cp>KOZIi%-;reIz_8LFXemM81SS;Y zX``25E)?StvW8useYcB*EeE%Y!Ef>|?};? 
z;o;CdNY8hj7A%QPnd>;aLl>P|9Y_cjN(QEX0VH;?1GKqi)ou3R>6Dze^W92waAa6$ zCX{u;5-tyLx}h-GNTm(J@%YKJquuz22_bNhY%udR0$Ib74H)`#Lne>#9!efrV@>sr zbsE#m?~=Mo%RF1k;4T%d^W6)s=ygi-rBqkSwv+!Uz(1bBuaq0PT&T|-v3Qnq=jDna z?@}&RH_MHD*fUde(HP|<5F8Sbc=d!{USs!quM{>*TPlu5HFh}3?cSARR3cpbkLbrR zPwWUr>yLWWty8)4{~fBkm=X*rvZbynk|Am>s@AoVIFsN8BXPbzKyCZ3g7(N7+{Te7R=Lc_*b*?V>e5`QcrlIY~DLuh+NG zS&eQaSXU7T&Ldi(og)e~L0c+VxtYlkp{xjOKB<)4(6dw4QG9gPf#1*s&Luj&>B=PR z;T{vOquXfT9*Ra`VxYI6i6fXavCQG^qT|mEgf=JZy4xcI1O zTysR7uEaJgBv|9T(Vc!SNN>t6Fc=X{3uM-4o0S+jyXoMku^R9n-yDI68h5t_L$ax; zmzbfqz|x8`Krnfrk|X$2N$Z1zGjL1Q-*16OSox2s%ID3U##GidwbIOT7Kbhs>_R_= zm3Q$mcxj-R-airUW;aqF-ZSm2y$KYsOR1qNWQOnXGT<&YF?GAgT}(IfvN%U^#N3Un z#EKR}XlNEIO6Ag5DEfi{-W19=v89llGtYc`#jrt4`E?vy%6g`qr$64-+O}! zv2B69JJY=Gi5{)b6JgIbdq>-v=Wx9Uy)&hKTY5e{_p8VyAFhu7>_X__i@VjDD_iU| zN&_IGv72s?OS~QNcnV{4Gh3>|Pb@a!#ER9hV(97Wh%J^r!kTZ?V9j@v$iMLykD%sz z8R+wTQRHVukV;8A+K9gt%62lu4VH0~YE=LR_0j=ZY3vCj9in9z$OAwn$b$oEb)sp= zKYM4`%^<9O#eD~ZTsy&+HoI)QZo5RgNW0G4-XI1TS7<#ne6$`KUIx`1)N4c^+K-^T z5j;0A*Qo{E{5kf<%0L~OAG*^TA{T3uRel#dqg5goJi}F$OYZ&}jJgZfP8)+&yh{)0 zPTiRr&-F)am+llDTccI2wMSJKzJVG&qt(u}M}C)2?oR%h8o7Yo!A3WiB2h%a8{{0a zC+fzn`^jp*t%$K|?shiH){vu>pHwxnR`R12@Ty?{AwO+asalLf?+&T2%x#AYlXhou zn{Db=n^Y~=v1$^B3rB|wfWrk<`y;MpbIWpo=mE$8Vvz74e~>dWWjb8FphwiQJqd_3 z!ec=ZF|s03TtOnTy&5l;>rz7{zl6`_0)?3dFJ0%#@FS7KFauxby$iB0LX?3sK{(h6DnQepD88zElovSwWYw=(=#;S$$6<)=*t^3~=`_^DPJ%qkp{_LV- zo^{LQT4rx?^)%hpa*Crnwq?Z8Ej7$<;J$1w{ob#R(V4xrQuK@u9ntilE z(L>;U1cC-V`%k^{_e$;mK6holKEA<0!oc)?zM=o?`=7XF8R{G9>l=JLr}+C*9qi`% z`lfMX`Vo)$ae)C9UqcfAPoL&hDRE^5Ovu{{F>os72=#JvlVc;<@?z*1V-~iwULKqki!Qb+0fpap5O=ZD!7HSt`#&Z=ik#=vsTi@n0+P2*AHfNYyR(`69}Hj~%H_NXEF|-s5Ri zW|gk5w7^f!JKHYkmAKVF53aop|yXKN93 zO`?qzIrNV?UZb`xW7}?!v9>%zggd z+3YSl_Rwv;)6Bg}Txt!hq0(7p^P(p&zX3}geW0CWt-X12n5oYJ6dqg~N2k9`0Td;^ z@i`|t6h=BiylG9cA3>!VY&PkSB1*-CrM0Il&K1$c*`J`-`_EBExXI(kt8tf9jMlvC&157Oq zqnfVaZp?8nNWFLeVDV&Xqy}3R_Gjc0o-SkoUnO|Bj;xpXByI zt8w>6LShpA^Cv1SEG+J)hJ`_ehlSnb#-9G56Xk#HK9Efm_Z2U$ZLC#)&mG3u^XZxQ zW`vIp;T)76#`d;&b%ziVd8I0(KBhjSIG}hSr_{vNaMo54Dv1AbxTF56xVAX^XH!6J zPkeWa{g`p3NlC>HEsZT3;Zyi-f-fR=u8>$(coA@se<5~e<@5saD0&@zf^#w6aI`l) zl=+moNcEAFZcB%W8WNQMS1<~~aO9<4+zN&r2UZ_#&7ukvr!S;_aSiQ~{E3#03oTbt zV!1SA5qTlX{Hy7>De+`4stf{1JCP@u&uUNeQV@qB&I?`=`ViwAkr~f7OlfRuRFaaGU%a>7&>5kw z%ksbWKMaBd%dWp6z1Y)i0#L3?p(|26?i&45#90<*9OAh4^_+Tf7t0@RYqRu59k|+a z0Z{HsyrTGN6w{ScEJqsmSwO!ifnNOJ=0M2%Ae?~k38AEcSo(zgnKzGdpK6mQ9_n8ICkMFO4B7~?CbmMh?|t%>TXY6KT1IiH(09R{DJX- z^?~96@qL?p`+aa0|MaOL7SnX8DJaKW_kjpwvIeUv_>@pIqv(46dUW;5D!P`4YQy*S z!b_?rT+j6G{vAWHo8c|X&eH>ZPWBm6)0D=ca69NNZTe7}1B*7jHp?yRYoh0<(nGrY z6?dL4k{x%X0w|pz8e)w3$dpL-h$q2cWsuW(c$Z+^dx)ISGXYZiMQw#^O1QN6G+)YH zeXRhV*PUJY-e_n>kN`hweW(%eTyQ-w%K+>C{JkpD9x#;z-2)ovWN9HRd3G>CUU9f& z;bwWzBfpM3A2ZAhKa)HWGrSG|m^?=!p)mA=AR{83F?1oJWO<4xDA(Z80a%3qC=%kZ zGU5;@RH{GA1>6daW@=8f9h5SZR}6Uc*_7ExheLmuWV8uSB8~r6uTi!{Y!1=a>m`s% zBqNeJL2H3XC?_x^u#LAI$vcoYi)Sy`l*?f!$V!?POD%*ssd;FV(=4XQBuyvNrCKLl zC5sp{F&EK_m?SrkYcuy+Dd!N%r5qlky4PfpV2ralbZ=FOtL~wpil{SHr>Rv@vZK96 zpcr={7fY)pB*_}LE|kJiQ6x7~XHOJ0ls_%8I{RnTrj&~$#leUyC8Oij0d@95MS?uvpaC9!y0zPSE){>p85b5QxC(x))3j zTF;;4PqzE0eTsTHC%KV}Ip>|XptmCrFTP^_)*KCnvesoWX^_MLW$Ox->hu= zn$ar|7fsG99Y#8qimWQ^=b8U9GBP(ZJ2JgwCa2p>)Eax;o813tys<1aW|=BzMOF8k zG&-;Gw&Yn~%Dv^XJmJ2dIc@>IPAOv4L&puAt+=1?DCp|x!s*88-cq_#$`;SI9Pl{{ ztMx0(R2DV2J9b8_aBOfq{oV83nbu`3TAONn!E~{8@m<7uOmv5P61p|dq|aGhMRiH^ zT)SdiusX?i2zS9*5lLa^MiA2cN?DH04rF2O8HSufaWKx_fqlcMNSSFz&g#40odW{}5Em z?VeuXTxDKIKAk>ky$d|+eN4abTuMDRzsEq{f@T4q_AT|j^_Ayd);_#{Ox}p!@ZOZ{ zT!D%EPY7D~;nsm~fY$giFydk&c)?6Tra&q|+yx%}?$5KD<2}Q@#MXu<2cHbYxw*Ts 
z+>Pzs#VPo89y$^_^Hu(3>XXJui)TZygZ(=uPV9wbCH_{N^$(15h^&A-|Cs=sHz)RT ztj|~4hr;*#_Z?vr!o|Six5-E5o!;FyPyp?E!+*Xds4~zb5HXNB5XAp@nm|A>|GAqW zSU{jaz;0RD$}+Y%BA7m_E?FV0@n%JY6ULe?G<_Ew~)aBoL}Tg~GW{7%oqBK&Oozm)o5AAq~z+~Rji zdO>?+Tnk!3bPE^?oI_DACMlNI@9Q%-N34l4w$lh4?$h+6wNcPaTt-C-^6fA)^ z<**4LaQna9`+jNKc%j%Ou3+y%Ze@h0gMRVtu~)Ny!MNr0Sk!=dMXF}*f_DiHo=o~- zm=`YpdST}?`vsyK@#mt^FZFzWYZbUQF7v&4gmWnH>31Je-LJOaH!ZzzZ4%F$0uWmT zVpqE^5qIw8i}5Hp+1G~kt=$YR5e~SKbcSF&Rg}pp$LW%PZ}?dM1+OvOAE+0jkW5Cm zEuQn<;eX%j-guQrRz_;qxsLEx-&W75itkINmr19T7GUZ==?V`jYn4hSLuE%wpPU}N z7A>*QgN9b9DvOvWNK#Pm&`Tejliqvj2_eBbhQ|gj0*iP2iGO%O^&U z22U3$ypFZsoxFLdsMr$(GuNOXL|BdFzo)nI;`!g%#PirI!s(Dn9mncF2p*&JQncG1 ziArW&J`VM(A3NLC!mFbwiFDX=plj0}H6WWR;;eH>DsMy+PF=6W$Jr=>;k(HHt8c7d zCa0fp`qXcI_K00x$4r)1D0d*77yOz`&?LEnY63{CK;FQrpc|49Jw^S-9!r+u$Sk`I zc8GDgt&cg+lKkQS3d4oo$b)x2XPU#eUgTFLP$8y=*dY!Sn=(@0p{jzpz}kT(E^QAD zQ+)t2BT#Yph^dz|2kHFt0r8jTgSUMuy}PW_wZnyO7Uyg9VHbx8qR6@zzi%+RTBYPm8V zu+5caleqG8fo14Sm1V-@Wr$M(cybyGn`_;8O6kus_KgF$fC5ETrErtcaY?JbI?3?c zl80=Y>GWoP!oek9mh2obHx2)|%JB{j+Q)kBtT@J02XDLDl|V)|36#4;BTPU?(bv((i7TjUJ>lmqd!Dicqp$u z*8OKxQ5e>M7SJoouYu|J#+RFg+31_SUeVk;DhJDLr7edL^>KjpIsWKtM>Bi~mYXpo zZlQYccg!6x+>L0y(u~*pehh;fU0Vp8-r_WpkHtm!jK=q>^TT&2z1!Qiz%|q`%zv^8WXaAQN(ZFTo1``wsAyXC$!m(#Qu)j*|0{!P*A&}5 zjj}agUD?~~?w~xLr6fep&$i($RX~ezxfvSS#zZ3QG!B$9DCWq`i$*qk2CTs)3ofkD z+-9B|{w0fW84}q|B+~`iX6)1X+2$A<$t(HJ=6ssD?0LV;NjB2h^JUCItVM~K$BGz5 zC?%~?3pvTm95)IM##LEUKyi!gn{esmXyziCkgKID=PH)OEf*>AWjPjPjONWcB2)|G zo4DEL89S0~3Nz2qJ<{;=JE(65b;D^=Z2l2 zvt=k8NI2|SvSlG1VRa-!<>O?JRq^{t+)-msn0Ql0?;N^#Yx`*KCA*kw2P*F^y0~LY z-_L<3?y#Hu-rQs|W-aeFy5Nu9b^e_`x_R&TaU4av8SVse97A_ug|6u*4&v@pw%SJ> zX6;S8!|o)s9#p&I?4%@G*+*EroO#JE3M2=hyAjJBxR|^ppx+zi2#H|tm3QG?jW4zZ zww~g-anJ6hZ;kWZ)4pZ99<1|*x}NB`sqI9&p7NcsQQL>0&;QMlGQK{rnYrh^-q)SE z^V03}-kNo%zZKd^wM9eL)Y^Q@ z7tNlS+EV%+|C(c*SMr{pP`=hHdr$Vw9?vU1cg{^o@44Q;6VDgjp4gjG2Fgi<+rS4- zzHx-R!3R`fVuB z0L}+_@31X^=Yzg?^acR=LF7F~a5wq^J$)#5?*0{}n}`3ErF+2eBJ~v^yH|V8{T0%C zeDmV@z?u`@r-$?t8cUf$&X3N-MNGH(79o_AeR2LnTB) zFo0PbyI3NlSVYq}C@M*TS#xAe9ZO7a+@XPYn~Y{Gq9LJ5I(LHF30p_jbOFl=Kbx#_ z!lI!gDwWMhMnh^P#o0(lLvAJYmyz(gID3kWky1Kgri3^g6B#X1xMb!KDLwSMc1s+c zG0nPWOT0B1RZ7tiy4V7<0=Yz|bP5%-N=u^o0@3CqJhP6;nDluPo^i@0X`^w>C2i+e zO+)4-y{EY7iDasilA#Ux-V2@Uk|zZ%3+#_5-2o|F6y zwU-p{lfVvnxTKJiL=I)R$`R1Otc7ms9cmkE<;!dH^*I~Bb*BT3cs{U*MAFNB26Cq1$nod%6L`dOwg+6E6`K%Kf?5lps zoYI&n2CH#IyO&%^rS`q@B6c+%q4kI&Ci1}@DPD3)#`fC7&z&FHSre+&RUKQF ztJEs>Y@;QKeQqjE(_-C*iz4pK@jux{`_d*2HJQ^o&7^G3&KQ#EH8oZ}sAty`$_KxU zwtw&yYL=4lw8fI;!6#u%6mF|zf1N0>!7j_@dr&F!1Y4ADQHM^M6||S0$V$nn<@X8{ zLT7qFv}<{Mh+Jc79x;7cQ%YGk=9ryjXHv00ue3lo^f+cU_=W4k8yq{S`)6s%j!z?; zoFL$z#$m;PH?8sRo_>SmlF-3=M66r&FeKj0ID@}w9}DJYuB7ni>I&-(FAM6{o<4f) zwC-fh($c~uMFa7zuqhn#fxcrno03CGxO*BiqSr<(?GGg znc`Kmb91lLB&+;zGD8-@PH$DdJmDZrMWV5jy+d%|ZDxeNM@0 z>G`91kMpr|jDF}Y!x8aulgm=}4!o01XssiUS9YWu?$N~0jB+{0N&U>|(9?rG@nuV& z)78~~#W#<|iP+=?naNOG09$szJWa!TEq$YJ>YLq)dG6d+U7p9fI@_7ZwSY0(Y)!V+ zfX8{zg6YRJ%eqvrqN+w`dw;3;=leCGF&e`3hL-II8)AZ zmM_k^1T)mAB88vP$H)j01+wYKq>q~hMDCBy2;Srm&sfcb3=$v!@&)xmvKQ<3|9Sa! 
zi?hekXY02M)C=qb`T_Cs|0rZ`Gxs?9Z3A|}dZB$#KFDAGABFYpnkdcD#=DyHim#~E z3*vAN*a=F`Wi?4jSt~-1C{pAvJ;g9Ke`g$iGDInuhQsw?en>Bcp%lR?=qieSoZ#%D z(%3ChgIQo(xQm!l1W)*v#k$m|OTtizDlPTax||Si*eAL*8PGyq-nAe2#X{X2`9;cm zlwbk$xt|rdV86E}=_RVgPm^4C@YflhAYEPa5{po*l0~Knv>@*3+<0|G4MN{l(#xhm#(Z7VDX&Qg7uQ-yJs2E0l{E6QGCtN_m&yMSB+Q%dI{{;_g)3zkM6Gu z!jA;}NPs^9e=O|B2MkL;b4*L0vx*#o3_tL}!?3`Ug8dZYPk4wtSXj5fj}I#bKCBp) zhgP%ie}0qxf7-RQ*w%I-PPPSm~-RmB#R z%eSkxII>m%y?3w*zU5l*W9sTrKUMQ`OkHK%sc}QfGjy^R*SH~tawo^uS4+>;mu%?f zn$zyN3+V?~<*#7u|EwoBk$9XvkULJpKp2ESn@0lX5hXk<3W#`vBLxD80x7W|R7eO4 zI*=$yMI^AOI0Ppk$B4G^5x5ew@h?~3{~x#Nms*6dKoBR8D0Pi)E*Z#+qB$yg}Ubm_>`WW|&j9=PU=y)tK( zHV1O7t4N_#I92KntJF-Xa<57qc1zcsQn%Sq<;awIe)!@=>4&C7YTV_dbm{d=GFFl? zG;hBLao;JmPLMg>Bz`)2#?&}8j*K-s-!rCqS>Y@AYaAK>j6>tQx_G|o*Mbi}$CG*4 QGqXAKIiCL_%0K=90Nq5|6#xJL literal 0 HcmV?d00001 diff --git a/docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff2 b/docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff2 new file mode 100644 index 0000000000000000000000000000000000000000..d0d7ded90791221663847f6258089ab9bb76c23a GIT binary patch literal 35536 zcmV)AK*YayPew8T0RR910E*B65dZ)H0lLTl0E%q@0ssI200000000000000000000 z0000QfdU(f3>=d>24Db`W(YtDf^!iN3Wdceg!xSYHUcCAnotXkDgXo^1(9tBxDO12 zG+P&qkphLs?jAx-S-K4YtY*4Qh5qW)Fb`|s*PA7S(!zC?3bsuv-rY)+4@7Eb|NsC0 z|BhrKhyGucj>8^<4Q*dBtT?t9@k#dwZ?k~T7 zE0eW!J#GGPkR-US_(ZZ8@Nv7lz80iEi$n=z%6x16_7<^tB$p%>@dU|UClng> zXQ=npSH#ZOWM=jwiAZRev}uU22x(9XhXSL<>K?gt4p-Gr0L6!zI(xccV@* z`(lBlkTxVNQrq#i{1*T~CR~zeA2@w*-j+%-Zic{*`?Y;Ld&mh4G3(}`&@N+=?1hh5 z3d3w=m{l8^G?MW>X8zl~SJS2mP(ZhU;?#dt)BaNn;gDp@=TT-gmR4l1W%BlZX0T1_)aKs%D|xn%=+vNgv+-maT8g0xMSR zB*(HQW5o){j-1t(oU9}}bL==nz?L}~3d1s2L{Jm9yIr?M$5d|7Ra9@$t^fa~R;IY` z;iG!iYnsCAClTibi7$W07NoWy~!IQ-tV%BDK` z!&%CqTzSVr-fOF<1~N6Z#C^b|2&@INYt;GwKdo7km%Ft)suK6j2!q98WSIQqFDJ>C z(rA|BC&awWnydqd??Q9n`TmD4o6BLK8-a2^-5N@J^p#@!(2cc$=i^k(`u7NuhjpKe zO9fo(yccV=Y!ga1y~XO@FHHsmW2VFx#b&XVmFh-t40vHgM+tZGn*Xlf_SO6MR3-a# zwq`IFF+(AR6k=S0y4;; zgCtk)eVyO0$nNX;rMyOR35jt53pbS+OQT}R<}AoaS6@|zF~D_80<1-*wJ1ZAhbWTp zH=hROE6tR@nfjZHf_Y<>NAhOVFwA4ZG@Hk6PRLYqaf+p<$b-`GT!rdK)xtwYKz4o$DimV9hHYzK|AcDt74t8(BPL-wBI9gW8~-6J1T@(ZLCs=G>q$0 zGL+y@HkEZNN7N_je#T;GM2u+=hE_Uyl^aMC8#eyzr~mSqYxOD(W&iC`TEEH%2unN&l*F{}SP##`6R;MZhNt4ScnKbh8ElNB zu^E1g=U`RSq%0ZPS!`NSQ8yfGf?0|G?Cva-4hjY~J_)7e)*4@Cb{XS-Q+9TVOg(zd zCd_9Pro6lgs@a^M440Vl%l5bZe%Evov*akU!#?Lf;*<+4U!$Jxc;u&hzJ}>7*a~~?n}~R0Dh`fjJ5h20)S953czC1xEgekie+x!@P#A<)da$uY(91HV$3ZPX^oH! 
z7F?mTwdMN8>K6h8uTUKT|51%OkpdjzC|v;BVn*~C!5E9o(?YcEbEYA#@Ft$)1>V9n z-o`t4m+h5=x*i#sLE3Z<9{KAP;KTm!Md3q3H2sx4 zvJE?SZ@uJB-3X%s*KNJ!E`gXa5l>ZSEo0fnT)bfG%YI)rHP+yvt$+20?@f|+;{*C} ztT$Qun!mvhy(!WazJ;HAQ>DlJ0`Gd$q(A7NpN9Oxm#zE|$YF;uYsLpZBynn_4ddH` zY>$PJw+Z5~QyZ_zw#qD^*=B+= z&+d#QdaGV)q>9@)ZLX(U@SC}fOf`0FCsbU{!KB9T{Wbtj0JwZ?7C}lNy+RbdNfOAY zqhP!JfVPH3ih|k_my$#NM<>}SqJScEdTw8BHFI{{_z4pyO`dJ`)op~Y1r;B=@HY*X z2d_hK!{|axGE)Hn0Kj-@Ia^M$TOl5T@suRn=q+a@LRv2&0T>I8QkR!1M=J7iW;Hd2 z0S6W#W=O>~ESwArs$?0%3o=o8noSe-U%hh@lgb}<4`9iT&u2gIF26G3R zt#+r|>ksx1bS_bZ5I*NbDNThUu|z79E0ij=1^^+g13&%pyN=**qoe}~;EKS(7&2^x z6qW*1^ZA1eKs8kWNCPT62rfJNFc|Z4S&fU)D(h_x*K!@9rT>?|%b2Mm1@ez`MEv=$Jjc4jPj)5)T|jmpZr>1LZgy~PBxPW@a)9#Nj_ z43-pTXui7sAS3@Oj8c_qnNJNI&t?vc<}kl&BSYADjsSFr;C0WSqKgdb1(k*de;h)6 z`VAO_pezu8ua(z;Au|?`<`Rwlv%%cHmMpfwiI%R1qP?KOHEv_kalBM)Lb@Ee46DIo zG)Zq_4jfxrxu(#K&|7Y=0b8q4eVYUDFC}dV#cQY`-3pyLJO(Y>EONVqdR{kh+DlQc zG&cf%wT2_$U;=m?8NTc64*LWEoO0{}I1T2%KcUPGj&FVFTAU@(CF474xw2(tdW;8KI z7rj`~cG@FEzR)PN3lq_ygb$gSnsHmC$dPMbKEsak$AAZH7AXq&KBmezD^z5+jC_HQ zlMQW8eOhxiA2OZMS~hH1v6iMFCHVmUcCqK+lFQ=c5wjDRn)FkZ0p_!TAvUpvV~h>* zKKE0W%3#6}O1MOh@QA{Q8zDq>gach^4ty>J*J@y$iIiMa>Q?hQ4a8!zzvZQRFZ0_(* z(kVK3MJ^&wqt_<>@QE>szp3q6+$@z2>#%rK-1s=+WF1={VlV$-jolyjT>TtCmWVzT zKsPq8+j`Lr-Ok%{M=q@Wq5pP8w{#!S>CQKFvnUf;hWA#FU{fA5f=GePc+!=85f{vh5C5NpPX4$Iemjc% zem){h^YZ~ZPep37U+`QuvoMGg2fZkfqf4I(^00-@ z-rfGDYm0Mrm0(qvpv>1?lDp5K7qYQI$KZE7q^wd!EiX0=xB1-gs% z{_8VbX{f?erhj%J-ILfqXEL;MfBJ-(<*o)<{j-|T77I0{A}zTLml*0Bt^EEyyQao2ShnMOK7a@l%D9k9 zTejnReh|LaPJ^wjW5PslSyy2}UHhtv>BbqRndZ63fNN3F;^HMG%Su<2uc_KtKh&_P zakzO)%gCg8eUAL8nki7Xem1;ka<9GLxxh2%Ug=w1xwd*;&HCESOaR2FR5c$UCZlUQP0vkw-Q)* zJ_8;jF;AK!kB3n#N zy`Bl23KU}|NUBRF{c`FWr2WF5UOijt=jqC{?|ALeG&e6UG_!CoJ-VesKBOS6b0cbI zg6hDOG!iP-Z)yDIh5WB@a?&16We>(~;yA?wd}t^@)RraRH%{V0q6Qn+OWt`uuZq8j zSICWp%GV&xq_j{BCiZ!ZuYvq7hX($D-p@=(=s^bhu5_U-l=Q8j({Z(Q3MIf~Cu)%? zKr)#Sq`jH5CP1VN6Yxjy$xwi~Ec~=E&Ic@C8`M|e@W~I3`3P+J2UvFuzn7PQZ}wT! ze=CB12T3FlVmuNBRTF~4qfm={YD%Gao0BbwB~ldZX^F3A{YNn_ZY7(j%HfdpVj*id z8VXvcX&?&@BFpaSLE5Ym-!{!J@=zbE>ps-91=Nb^Rw)r!36q!4Ye=#D`5g(BRHuxg ziar)^Y<*A|;{9!1W(aNEitubVa!b=Mw^<{7-@K_D0l|Su= ztULT8U#hwExkTsE8u)WrHp1T!*9ir_{d3tlS`U8zYGaI`i*fcR!6pCd3B9{4w-?st zftbTo`;zU`K=j4i&pFjzDL1nrb1eSrjLA()%?tAuiQaIgZ}2}!sns*Q$0@EngFHZ< zC_r192JZ*JU;jV;8j{{EBUSVlu}(45nhD!3#-SwscU9!4wKwE&lNKaqv$5+n+FcUo z^h#wXRph`F_`EMQ$B&GQyE{=k?VOV~m>Y-n{Pa$`wV$}!GP1WPqNQ!+EI!Kd_04^E zOlrq?rez#u`e_pg!*!swf$xaCjD?lJ{o8{<@v>~b`lhh1J3T!C#-~$W>w^43P2-;S`p*A8 zLPf11>m2w4rn&`MYZ@xD!jc5cQD8x7??)Aw4muz<5QNoe)rI)=-^52q@S0dPAI7LK zX|63G`idv`oo(4UV7cxe3=`EpfX0w^;wnEBFM?OQVD8jsBLF^p97<^`MhtVP13|1o zcxZ}aPT>ThYLk`^6Z#Ra!uGVwM69}rR&9FXtRKa#!rt3plZ*fW9Sf?f{Ih4 z!$Bsq@fb3u86nmTDq6)Qi$Y)_iw5UR+sFlKRA4HTftx{P2_d+K4h}NW4WKgCkLjqr zQ!hKc_-rH^ZrlYG8!@=+$;0nM-+X{!JozbUT}&v~T2a)8FOLi?z~yoxAVGb5XW7Jk z!HW-tlmO2iF=Ot<$yR)4rfcX){LXR@N~R&we(x znVK8B`#_$616)Or!K`MlsA`ZRfKvpn2H9oaVBP>l(TkJEg5y`pksdZDf7&sxu`~=j z+vZOc-<^MUSn*!tJ?Ogw@eAl{Y0v$U%J^#W^ymw9O(>r3o3zGOs`%r>0mf=Ct_gDJ zOd2!7)#%|))>{RER#PBoZ_FjrudL7zE#9j$weUi!6{W25_N~gyoDRxyD(&)I zNtWgu_Z)2@o=B9z_iF#lr1EATVpcM3=+$Z8wLn(hVsI7TDatiw@3(n*r^Q0) zzpo8nfWGXUX1O`3-q@2W{Br`K@oakM9>q`J=}@2LQ2m%AS=mv37n83UUbnOTqSjr~ zLe%#{OlH|seq^NW_4jRQWLrjzvaEpxa(ms|Vk%X_IX=^#MS_DXa}7O*63oZ2eYeQaD#Tc?E!M9xWZN5>3UO z+74FeEzRIyYwFr8X}o}ODMcM8xIvmYmu1;E?5HWF3ZHFuehq7)DE~myp!OoaA!Fnn zDD{6)de4d5dPe0lk$O_1sz@L0QZD#9ActnvIaKHX3L)e&V>a`EeC`EXD8m+~+zJ)Q z2VG7HRMV#gRCLO5@la~TMA%@51|ElxGsZcd-ewTr)wKDtAWs1;9>xb+H$Es#6q=<;6@4N@dea6n)SM^xS!*~? 
zls|Gta#>gN+uVtP&-wL)dc_xbHLOjQj63D_t=ux)^1_yPqwqq}J8$icMVNwiK5+PC zJm_;eZTece;<4n?!CC+QqcOc}K7*p`kwf@KG$5+sedu~(=rET`ZNXO9#yVS6G81`~ zfF-KQE)2^mh5}ZniO25Mw@t%I;52%LnJD1!V}%duV8d zS6wTC`}8Q(C$w9--&r!yh)_Zy{X`kTmx620n1~Y%z1d46ns%a;lR+9!-8Qk8T{_46 z5^ee5Ip%+$0Egb5G}w)ZaCwnJa!T1E86Ok&uc1U4_N;A{&F%{DD91tA?wkt@3>5T+ zzJ}_01?Sy=abY;7az{UM+v48(lc`{bMd%6A>KA?{J{0H=;xp=Qu=zNuWSC!}PRuY|uCa+IS zEy+N&jT%^fI(=R5Sg}VHj=hAsCt{aPtF1-s#bj0J72^qf7$pn)(N&>Kdu+r$C0lNFJ2F&CQ2y=StH*BTjbCgRRpUv( zsvcqg_#5E&X>6X8^so0f8K#8D_(FpU2)0i3Yzd@(0e1l{P(D321zgkjG~>{K1cxvi z$*7vBr2P-|-0#8u@u?Wd+9*it9jH?fP_L}894n4*%FA7g`o2PuqOls^sa2~&Tnk06 ze1bTK_SH)!MyO(~5R(D}8Tw&ioPNmf%QqNALkBOKn8IDnzN|ij6)eCN&TkLTa-XpK zq@F=DeOJ$i9@&=mEaUT(0pvN&g#hK@HRE&aPU2O8)qOI^C#nvD7Sl1J72`zM!jPV1 zT3TRE&(LGaen#|CAg(Qq52ojtD5+lB0~5reKc>+vlChh5IjmNINvcSV8I_M(vhWcW z4E~s4?kz0CBG!6hnNDehBj8l^?XqfgF1`iKxmUV8zWBg0A)(J4D0lfPW}!kQSdXH; zE8fG9&gorH8`A^Y<8C#dU(|b|l^ZIdf$9{SbyG}_D!ll5%7?nx)81%^KwffB_iG?~ zm7>^~5+(4J(urc~NHazi$+t5Qm>9&DG7nMnBIO)T9r`}X;fs0#wU#@WJv3(`E3VXM zu$a9Yj@l^?HJD-cse?n_Zgw$uJA4K>r`?RMw+J3}MeL|Z^JwiE69f8UEH}<<+s4c0 z1{uz&On%3cRiH=iAvrnK-A^P;!#-yB$rz{ucR)|<#O=@$O6{z(81QJD8ZrRTTvVk!PRSTQ5 zBd!S*T|br#D7a?~DnYSjm&i1Ct4m}DOYz84$`nbIo>%rK)E+Ka_P)u;0KYmd4$PT6 zLOOiOawp&MB+c8EO1)9@v)F4(tF9|}U-Sv4v}@=1rJjt+ZpA!9#Y@nGX4FNrH#!3e z+WV$}KR(cGC{SE`5csBy-|wC>w5+1tBqc=#?%smhank;|0qK?GWsmfO<<|^p`+rDQ$3!+SP>SxA-k8 z%Cj4z=82bTC3AR6%xA2m40E9vAy`JuG~Zhr*`{rgIBQ7xEnJ39!`YgS1~X^J5#vLV zCvB!|>A^hPVd9S_!#&uj^B-F{ogbw@z@1xKj?EaZW%4H=x_6Aor@5YsV1V&#Mt*}OcL)&+rel%nBifDrg8xGvL4 z1yR-qPaY81WCD9;un{}al!IO^50M7mJKZXc8~vEyK0z1JM-0U{LU_{w_NdIA zp|rx zpw@*uYw%6@NmWLi8TO3?wdpgUT|#N>O@=EnyvhoL0U~i1N5h-qH-;)?8H2C@7KnF8 zOc5Ac7b9SU2;IWf6Z;CyKj>B!C8Y=r*f3+xY?U4_s_z~d^y&1XFJI_!Eob5m57g|z zh+>SJz=oT?CdP`ySg($a($8kIu^2&MLySyPPtC&vsd2}jK5;|I-M5is11%63Wdajf zzfZ_=1~;J6ROw0!0ksJlp~%Sh@CkVXmPSYJ0@xe%^WDYS4S^9~IMaac*^e;NRJAj= z4ipssp~?qw-O?_2Eb=c~mDD-hXz@T}3Ph&L;)LYAIw;zxXwzm^_4Jj~T4gkIra-{d zQqf&82dA-8QUHTZ!(yh2i&EqnkUIhPHFYJa$TlCH98J(d`B9KyzK=ciE#0BO1F040 ztoAR&$S0uFBM_L)KS^F#CPgE1k3UC&ep8Jak|EhAVJi0&d_y~h1(2Nosc=MlWi3G9 zn3mQfMZ^~;W=TC14`l{56J{aFqFxhj-q0_ahn&>8Ez;5fk5VcSA)@V{AHq5- zEL)p}hpj`cO)7TkfXtyC4L*mhRaLzDLhB0YiGJ0JF$?u}N}yk{Jf=`+MpQG-wW`5_ zCrxpKF?zp4$4P;HP+qx*)b>&SS+I7@a{aPAkeIL$V24yjX_ax`8#J4sprRTjLc3Tu4DTm1<;vzX3`W}Jk2V4$t+=Hzt|X3||julX%`B9`M(5=1^7 zRezz0=52&VsD>NGO31mf#*Z!i25B^1;i-QhA9jiZ*`)OuX0?H>7F`CnhH$TbpSltL zD(UXkOUpXA$F|C&BzbrBFE=+kA-5z5BSbEpSE1gyk>aEvQJN#z?HCCW_GUKD%ck7O z58Z-=JI&<2XHL*>LW4~qo0C%tLapLiCf~{*&^L}omC(dU`Q+dxM$=tJP`EPc2Rw6B zp<1{IS|>0ZvNlI1rWE9Th4OoQQAoIkxruc6O$^bV9ClIdLGGitAGLeo*jMeoxA$}I z_wBw#yAlER1vBxha7VIE&MD)TxiPE7*1^U!QYEfDdYrclXP4+v^DkiIv0bc3%3j{B z6JOGVV|rybbC~6%mgT80j++pj+(l3x%`dlKd8H+=cSGZ`?2_5}NDJx{B^)E29hV+K zC@w0$7$mNAUWS*_)CqQ*zA$AErYU1HAiQFEVZZNf@`Y2cs?svPsLBhy72<-FF=u1J zQ-2QQKs9*tAq~~lblVYg}RHzM89dhD@jgEqz8dc6P<)e&vF|vQrNa=+eBox zHkQ^5Ap%M)+YqVNpb4s@JyEf2R+tGwYZ^BB&iqYq_0b)%>LW4ZmfH_;Iuc_TIVU_F zYlBj=`aA*KLr>DSM}w79rjMrg6BE0$No+dGI<A7w(BLg%nNm4qBoT_OKvoI=mXSiY!+^rw+mJ&+p0KYTS?44 zRK_u&L6sbdo-wfqU2V5YTw&iZK1nd98rRpbrFNfrkY~>i?-w5A(L6?f_+Vf11ospi zzsr|FZtsewd6uO{8Rxj*{n@<-&yS|3Cpn(;X%sw$G4gEDzAKrJkWdPgVX};PUo^-x zK+qm+qB?nz2w8oL@&q&?Sw(UHg2wI*Yny1TlE;I5GeLb~3G!K}ElxxOHKCqCGT217 z9oG0X7bT3}_GOiKU?rjTVwybvRggIH7Y?Mbq_BH=L(7IF|L05xFkyylh0W{G>uSss znsUS+^Y|)8cK*`%lwZ>e>k92DIt@)oWV_XI&L*8IogUAaJFwk>=6M^vb@OM;uw)?D zw4xCh4fja!pClUG7)~Skgx8o5jeEO_nI%tPKBwclq%lDw@L6Z33@dWc^Pp6pOh)wO zX`E@f#$pxn{}I5~ZO3D;nR(5pVO=scm=3o&7bGAO@DA)j{=kV9(TJ;ITTsP?a|V zUS4w5dX(bTbWo2H zR2k_hD!Cz<72djk|EiMf;feRt+A0bR1~oBGevr?jzzU1ToO!}7?PC+zy;AAKX}UVk 
zeN?EQFsV`Dq5uEc8X$$WWQ}WF!#M_84uCI}$(T5DX`G44#>Tk+Ze%!{eOcF?yBnCy z!^#pWn&8$ADl9V%b58ow@*~4j`Z=66q zkr0(|f*MX2nm0Ce3=BQJ2R!Dh?D+>ofezQyyx^S}s=U)PJ-_A&d>vhY>{?%b^ZR#V z-*;Fyd>r;!fkQ=yK=jZd@R(MJHb~JYN13b>0P32It;o!`R~ZV_n`xx3%qej$l{EM2 zY%52@h-EJR5v8ol$&P!q z@nZ~(9eyjPW~RTGXpCWLs+bz?z*psY6qz!EQOXDo$UOMV)S4$c>Y+TLif`Br&cK1V zKJ$^+;OnXOEGxD;CP=Rct_nuOwSON&+4Q%qFVF+Ow|NW&5>Q&n$&mb$Zb%{K=?_^@ zHMYIMj=eRLWv4J>pGwUQGABCEX=*4s@!xo0sjEw3LyK&VuFh*H_)Ekb_{JK73G5Qj zzx=nk>$d4E8nxU`v#CuDu6_mQgN{d%bAKW)|K@1(?<66mA!YEziu}>Hp2{-2LaP0Y z8ZnymnRtB~n99Ju*GW`4!VH0mFWK;Ep-S*IH=TP*u;^K%8ard^=-$0kM`v)MWPHMp zT2gOU#5l}X!NjArH67i3Xwv9(o;I18uuDtot%`WqjNVzLO@G9!)! zSKA{E)+iAD!$yKwu7pPZ(<{Q9z6lSJF{A-i*naVY=O1bRE-m+UjUlZ@z2IE&FTqQy zhVbBDO3Y&olXC)Tay4V-*3Fh6t7)wBtu9Ev_fp2hDhs)Jh4?uxC%(5Mm#So)y-rsM zFeQa9b5UUwB531BISUtM4z}Rrw+W($OuZ+sSC%F4)53^R@`KJ6{b? zSd8v$n#LsLc@n2t)IRpsmyOki=db*FU1`}Kgm&-DuXF_MXO-;8Xx^q#{_^T~Vumg0 z1S`j9d3Xww6;kA^(qheF43N4cg}u>t(~E!#No1szx5KaTUgzGmI>>M6|QbF1l9hqO;8r?Usd_-!F|8louI- zFovLAa@~3bJ;G6A%EE|i!G*g^3U>z^f~SjKI64U#3Py%H6<+&5;YxJL%KU+%C%#)w zFWl=IL^}r^y+yZ?&VZO-x0U)1BOB1J4J?)5?K#H7_-GtRo2g_3zM(8UR-6VN`+wx6{u2zN{j)&A+y+o&*8&_$`pMTBLP}C$U3uQiX(inUZ>0n z5@~Y9B9NQ^|8LwJbu5!*PFP1$Wl|t5{s{V8&ziw>7RB(?O%Sp*v1R>#{r|?*8Uop1 z0TjV<6JU1e$|m=_4XkIA-_$@g{Y4kUccHz1w1{FugO+?0u}vwRL`zN%5&1cvEXo37 zRg?bu|BZ3?Cz8%&hJqopS)W~2qX|ixJ~clJo10IQf+U~Z!k(*ZDRAvTh~dr5b}nB~6gpRYs#7h}XW}32MZ`2%%1_ zR_lP^j%Fl|TG(^-EpCT0*T88^gz8jFb`A;o`C)OsmDf8R`?-Dx%hnLk$q)l~^oCz_ zon~awFNUcS)5zuPzb#jhFNxAw8fI`*x8K(Ef+k%7rV8aF&IgR?1)ZyUCoS~4sxa4` za<+!4qH;#A{+y;`MhdlxBi}YpF;!*Dt8lApI#So=dEJaWDUv2oC3*NAS}|tzPgyc; zRdchpT9$xq|6sx-IW=W)fobl_nUctgOoJg)I>jJ_yci#FQ!nOu@8@WCD$f>kUjYApAXfSZNnAx$Df_0 zvnBC|QJqhtx{XZTuCn(a$9A9E?w$#2>L^krppA30x#Kj_tc%$36s*INu0{tIs`Ms{ zS!Y(`NEMG_qH$>^3a(^M8sE!XonMg^aoL<3MFEsyRABA~K~-R){!!=TKVB#EFt~q~ zW>$RgqH0Ywgxfw(OnVkU`(Ce)XR+PMtmo~w=;F^A^aj>@dx9R$;fnPLrkVyYn!C%q z$6R{lAu8~xkQ%o&Is8u!Ouk34^LTb4@4;lwE2}Qj6RG1fqZJJ-*vXz`HnAte&IXo( zbxt;!Lx8rP<+#%1YHC4_G-o3Rxuwi0EXCY1JzQ$D+q^pNk0{eNd`kM}3~}a?j8xg3 z)Bq)`ky$`j3()1ghPkoFQy9`hC}?Agg|rrmwJfkxZ#_AV_6Rj?abtVZ@rZ@?aH|CAl)RB@_viaUlo zf}bSSTl)T~yP|{751v(?eW-cJ1%sXu-dRIQe4mPGOK;CsFG`rxqh6SHTR(YJpS>of z6@%8lNIl_1Bk%szDV{e!b>$mk+q!kyWP6I{k#bZV^_Wh0n9I(@A<99J}Ws}zWwQNpe3az?*e4)<-&XCw>K+eu2$%q1#Z*y zyaQteK3%R28xjnUd~}q57pw<6X{k9=J)aw-e?j*$eJ z3+~BczS^91lE@qd>a$X^>VeP~kG@;9D7HTz&9E~XN)XJq_8aqfA9w+i_-WeND&=t2 z784!=Q&yC+s0xJ$cH{O4~$0*iyYmCKCr>$5gG|1y2beKw|W@=2u2UM#G z6zgAqnkcl9EMe9N_l413=c;v4L$@W1Prr_8{nQ(E=eRM6Ew2!2z4RW7#S%xZd^sCF zB1pfu!OY0X;g`A%7n_w>*j8dn`f2C|rud$cA&fILRgfXA)8?z+U0J{a&t+y+A6RlC z;OSxBF=B=0lS&Qt0MR*O??ZRP;X@4#hYpk7W7D&X(gFfY!8ojSK?;FsWDWOuE5JDD++*Bz*EXm6wdPQn8l`Xf3dU=sjJFNBQiF&F$YI8Y%-pgFT zX0&K5G9SQ8*PGB8Nu{gohkrby;GZ)YxfLz zFwwmBQ>0vo*w@P=lFnKj3O|LO7d?lb61k0`BRz`f>Vt@9h=V&M}mbj$n zMBY&>c!T)0F^?L*`zdq8Yph*F132O7_%g_{DGtbF6V2ycsY19$9`NRvAxo?_smvKM zs=ePsN2syH$6XiiU}nI+git%WQOz5dT6rMGCxU28QYp@c(vrX`c@>DR5ZT`hgUq-; zZ!jS0W+K(yP+J*PC7;~I>r;onIsA{LTX-JA{__k_Z8LzQW24ag@d?F8C!klU@UYK^ zN^P8(2(`#t_l@mqeJQ~wl3f4_O(bgN-2^I?e#e_6?SJWgxmNr&H-o!RoX7Ng4c01@ zIl?S~lCLtS0MG+~ryp1XQSeY1@1jm00D9 zpGZ?52XODJe()WQKdd^^{dPKL#;lAFb3bbSP|X>z>P-IVM)EJ$HJk=nkUZ>3ki6q8 zZ)|3fwbTQ$^5Oi;foJKo7IH+zwd@O&FY8Kqa#ddP=h8pFzZadnCJAdlBY5}jOFDTd zS!&YRBzD&1qNiEvjJ}))Y&QErPUfr4?hgu;EWjyIi2`351Ku$L-suBhid2#Sr%a^? 
z`Y=nmG|KF7mQ^p%n=yLaFBY>vfA%ch^c;zw@^2OyZT=%0uaAZjGlu%dHfMl|{&;eW^?FLYbMk~Wm(fm5dbZQsR8#A1a%OANo#?r%WvitoDI|j)nmU9@>pqaz|oR-&^v{Q6Qiahs0vBu_0m=5C!I&EgOBI?3;RQdQdnVU8#s|Tit!5Yc@c*gu28{{JK(dCSDxO;!Ey|EE?&*Re>J^CwRoWmMOHOW zsf!Wb645n;$I6kabRkodm~t{bVOmj{YFU<}P4AwAi4v7Unqi`>p)73XUcMcoCu?{1 zuE7fu6o5KpO#hD zmzGvlmn|%O0zH1ZNHENmbB6_srYN9Rd27X31$cU&&x+1ZS`Q?bXrfRT^=4gqBCur> zzg@oM-^GGG+;r}cU~%R~iIOkO5GeVQjqB@AX+RAb`x)?1=K(#22Ur|<%L_1QW{Q2fRyqP}a#(_^oD> z;oeRAF6C`49Qk6bq_zDaVZX2HtsBIO^+tomW64(tfGu?Tu4~Hnt!DpJ)@=+*qNJo- ztP>{)1Cq7e0#59^nn-xf#NEC$Vvar!Lj9x`h9_~S9*Rf!x#!uD8K1brY#ykioTlh_ z;6-U9ZT=K0FH{@+7g5sU2xX*r;*&`U8Lvj%16LAa7eqxYTYfMcC=9Qd-yD4RwY|X> zdId9P*wVr(Ho0DappSr2ll)6&Pr@^QNmi@uI=61SDTUjsxhG_6!k(`S`&P?WYhaB^ zt}3PGXXn5VXI;{F5_48Du0=ICrN3U3KPBdth=7jC-sAGfk$Ul*?rlpA2%*3IA@I7u?7GR~{zp!48zp`AGb$ zq$t3d%sL&vK5%+-Zb2Mj&HOZebEoEb73m|D832kNsiq~xioEO7)J)OLr1CI;Gg&-c?vN< zoC5b>Y5vyw1;U_9igs)(zj^f?&*#HhfkQv9;sL98vO^%c?v?xf;vHLCh6=X7%CPfE zan3Qp?EO9ej2|%4^K5;6W3~PdbhIUVgrxk4S=!`ta?^MM4a{4ae)+e}^<_VNm8w!K ze(!c8zP%#3hn`?()Pk8~hWc+bGtbkqT|)4Ox9#OHvNWzb*Ot%t88+oFijECtWes0R zbd;j(Ock>Rm5w+V%jue!a=9$VN7pQ8WGUIHCr;+aqX3^AKJAx-@5x@RbYqL%cofZK`CCU z2Gt&yLYoW}iK?Q&y-y-e&#ydX01X&E&S^A~xTTfdwK9(MPZn){UZ*BOtyGvj23xL$ z`hnFvn#THHX3E~dzGJ|&g=v}sz1ct5CN?Uju8*>A2sm*g^}lt$VU#o zoEz^O_b5r@e?jpig(DG)yK9Ca95N%U{6f`)XakE{Y4ABHV0bcgM-;y38TU*BvSfC6VW};o2K*kp@Rx4*J4=vK)X6Z(w8X-$UKEmR?vYale_zHu=e ziBQ5ca(4d)iL;q+EkV|)Ol)B@+j=msa(&WtTll)fGkxFkTPHtpD4wnlLup!S?4Wp! zdqu&L)YV4Xx47B#lzZvRYBQ%)lHxaCR$h6F_T!tCh~PHzj469ZMr_H9$RQ8=0CHoU z&It`8+0N8i%^$0`VwDYDr^=2^Tu*U(eJy(Nug`>8^3l8Y?bpyg|CSnGc}qi?xRCMh zq9!GYF8q>9%l(pDV%~k5+lsU}tuO+QC`TTxY81SDbbd6HdB%{%JIgBTii#@h%66`O zx>GRB<#UGxJAG>&4<^Rs$3xpC*t(SFsF)ei@_bKxL8q?lk3G;D0U_xub40s&VRODa zU)-Xt!p){fdsa<4FDg5K0hSGRCjFQW;y`=p_LPTQN;Mut2RhcxVj0dgHGkV={LjBE zWH<@dn@3NKA+2$(h#}_u4P$6KdflS&hJNxtMZy=I$%O)-KUA3}+tOmdb%<;QOu4*( zDYD@@3@x%P%Cyja@JdbBypxf7dc9X&16zR_Z#_Lye{x<|&6SeoQ$-*dESg#bN@AOT z4t{?cF3Q64ku<59Jkj2kx=~qO!GqB2L#$gvH-A4ju@D1dR7RngXQPVMq;f?b*(-yP zv>Z@nhb;;OcBo7`ow(8h!iOucw{m~5DPFi=8ule%;hLPty%Wp~vF;NsBAvmV z2h1$jhmmVQsV?0&f#S=xRafX$+{7Qyxg|Vfo2$441qTK7paTYs=%4@jEUrVrcbW!n zJG#$?EQ>>DAA0FnECD6BLQVilK(@d6uBI38tYkp>*cYJNV-}&b!jD}zQQwKMdqE?{c(|<`|1M1Y* z;05sO)39t@NEXmP(*@+1ldMSrx&S&ivg$)h7U>S z`(#+~IiDI+P~GeA52E4I?%34d!c2YA)ikz(?S{++vy3kV?7)qu~w#%5bLd z%yu;No5Gl%(I1~mr-fHE^+TstfX|LEu@vT;Ed@^NlUo3b%E)9;SbgIS0u+2a|6tDc zwY5#-##1mF9&F0>8_IeAG<5lR`Ngz{!AQEU;Gd)`zGA?Zd%X(1Y(QW;w3Gp5+r=u5 z@JmB4ri4GtpN*v-T9M%O*O$IBpbhuR0>FfVrAz6Ib8F*k?DqJYwdc}R3E}qLoD$A% zdw9azpFaZtAjv|G9L+{QwitAV0>t_P4WIAU%#NBt%5|O^WKkJJ29-5985teW3oq|3 zsi?24tf(*X`!vp8CgzKmW$%pg_Kf!Fb6OE`xK4VR_1)TwnpWon^NLj~%t=dwh6Xlf zo==NCNQgaO}==>zNqIS}8@hdenpga40LLChv`?7L2TaDIghlG{g< zu&xdW8P`|TK{k~vJ_k}7%JoHkZSh>SSS`_Kw7=ljxLX!~3(O|NW*h zr1jrcq}BMqJb70PW~T<5DR&ZN2}O!?$T@DfI=Gy{QeyutM#hk0&Zxb0X6rU+>|-Pe?_T@m2BA8X+ipTQ+7d6O+ZuvNuldMP&@?uTW0I zs#W(J<&DqVK+5pAeEb1CTit^^BC^UrWK?r2hG@XQ9*y_cC#1ZS7Y|OUps?p&8Ft0s zhrq+|tHV6`k!;&KK`%l04G*nXctvyOH-QqJ&->va0`xlagJ$U)FHF*pf!_kgOXc`5 zTbmjU#G%S7`dL3e7bXV@C(0)>X6IJzJQJ$lwbT^8X7!%|hV9R-@bVhbS$N0-z zYdybfSnw?GE^urteE|>pt9aKcB6LAMu>bRMiPhn>SR5|v>@y8|LuhNP{pc6beO%-! 
z6$it}U|=}xZ~Gi5_|E&Eu#0-ti(b61gJ%d(hP%Ue4QF2N_CTBkHF>i863ASos%N2dS$}Ec8v-A%1S(Huxz4ZZl<#%$s zQ{TbGC|RGpFNqJT2jKRvdH89>O-Yab z%|y;ycxUDB9BtpW$s#`2_?3M*1zi!S3m^u$6lD8BrB@K1rj)#{lq1WzEiLXj!6VU) z7bsOxG_0OOSsLjJ+&X7%Oj6WSPI*~qRMh16cG!vm!<>jJhp6adN(04F&Hu|Q7 z!iS_nkpTh)=aa|(S4gEhNEniXj(fCI7=H0-K1&VgO#mYqTEkv|&5Kw<(p3I&kEBpL3aMJH%% zy%f3!@k97f5DD(0H{!$ajdbX!{V<1-3=OasAPJ_p!?}F5tdL3csvpomf}$%$+r*J{Kf7vVSc+u%)HI!@vh7^X6Z-Kj3gVN?FTJRO^oVjEd|@KaGDIP9T!;D4Z-MaetRK z?8KFvq2Lb8*1t*Yo7p*l%8^we1+-FHGk>%=|IhUNwQhKLA(gd_n**x$66AO+UQXyO zwfx#SGnKg=e+s*klnQ2rTq(??bbde|6Aht3fqY=-Or5N^@`Nio>4)9`N z5cXmMlAX>k?_VYU^VhFV*J2W;^ey-nQ%YLwbp4t*T29fKRwvAto_cR3I9Z7a&?>Cf zFB_~1EeWHX45lTfD4!c+lq7|ABdsDpi8**le=8p*DH|1TWrZ6G~kiaETe|8dQq;sx-`X zIU|XRJ+LKCX&WgZh>g7*)Cam`!)zR)Fr{U)45==HnTrLnWKbAKNHayeCT-@;Nj&1) zOnMz5{De3mNbbY91hdPK%E}Hdr@$fXVO;$9m0MHWkNcEYL?jmFeZqm6`h*1}h|M8^ z3(jVl{SG@iOq@dVMj(exf)YT%X!Ng3&M4UaO+d?5h@nX?%!BA{8hZsxF7;*VVMdF# z%(F(H1*i|hMU^UpGBMpDG@9$_WOPJ!} z1I9EC+emIEc*G8HvMK}CXL;6?X&Ehg*q14lSFkj8dK;9NT*%NsTU!FYX$MKR4j<0; zVQOf2^!hhJ*RN1>ZO*K#PK>yfF)1Qy!8c)cA{B)+1Yf3AhS=;B6$@AN-&pKyTcco4a{Bc6$tjwdBbXCvn467`ynY=m~ zB6m#)gwu6%ODwluEo?7IJv#rnCrQ+trv`i8YRfy(ekN~Y!ZMk6+83_xXj`O{Zju-D zaCw4!)s9ZGtBpflZ7}$1d*i~B9TOL6y$j1_U1_e0p6Y`19y5EJ?o`&4J(J2d5eS=P z(r0BgN~bsqP}}^+Oo=m~9Mh(JvjB7b-K!5iD^3EF7#qE~cwBVP)aequZb{>ulO0Wq zwBB93`b0;>8g=n%5)Q`DK{cVM`tE{2%-INfafM;O$I&C@KmL6+PWEr5*n>;x>EfSk zqvsPx4*N4_N5=f`Rmw}ohhvw)eEIt3iyglASl0q2HCECKlwp8v^W<#G{QkVeA496& zpD#egAhe~sHRtc-;<3;@yZWuw9_7A-KEJbKq4z-nXL~x+$$hi_1VTX-(H8-Q47bs9 z6!2sUJvcD>5FrdsyRV*{a5Z{1y|=2X8{^IaY4f{mn+b`LzSWsX*nAPc7I5xf;j*-4 z&9+kC6i1!Wmr`Q`qM>&q$Pw>}<=NbKw#9aoXP0-xrqg0;>?V6nY_Hy46nj6m$lkj& zwm7!QLN$#mzn7H4+n=Pk?X6}zytz2IBDq1aYnFZ^P&z04Z>DcX&YD$C){)y;#= zi4Nr&M+W-@9xwVw*kWUt{6$-X)rKt-bsnf0vBR#F6M6i0@F#p8A8k$oUFd1Evgu~% zxn7_yl-8KPdbZdAaga1p`bC{XP0-KmLUxceQks9w<4_HBp{GNYL!FJD>jmoK4TMl= zo=i5`5KZ)DihwOP*p$eD=UU7*NOqi&cV(Cy+B=XWi>M4!-(b21iXp8Qy#kvsoLUAY z@8Rq(1mMAkz8Dm3nE&6{VgtlgX~&6{i8{CXW>wElD37n_K)tH;pW2$RxafLHVFk-YEWHuMtE1#U+i1eCZ!lmbD_G7Fh+QR? 
zXJ}=j2CZnbLdB5DHK&QObEpQ|3R^L(!35qQ_tV;oBDSNklChOCD_PDGhBrB-TrH#;jyHB$ak>sY@ws9&!-|kLL=7JO1BnDSLY_vP^?zE-J^dTd7N3s&c|_ zZh}1&u7SdrMN9extbp99CBtrYV|c43u=U0@*IetGYh81Vx{smm1HaO*_?3Reukszw zQkJtsWU|Ln$bDWfLQ+ZeC3q6y`aUwxia>mf{Tt-FD9LIyzRfNBe-C*>%0m`SQb{D2 zPTo(Gjrj5gJW8CLYhq4(=@9=g?lZMRgGw8@S0N{cLJ_L|~ z5Da7?2L&hv<+(&vAgbUm`!x3VWhMRNOg1I`a)%bY{y)}w{ylR>O8-X_{n&5hu}E*b ze_#EtLyTzK^HLsSc~9PMi?0ZBbDWWP-wFUSr8}J8m^T6dN9?*VTj%UgAbN;L#q32< z6%KD_xHrpixRlE|?^EWEdQDKPlpDK&hrJy)&CuHvjsPh9XZ+@#@JMaV44xeEDw0IK zu{6*lNi%_6eE?06SrRgI2cp~k#LQe||g6ro`yNVzGZL2P@6F5h_%E%`A5laI- z!sQ3Wre_7_bMU=3mHQ43B*qbjQJAOTiq|0JB`7#!HMFFcx+7T8fE@8E9&J!iqRj9- zc!cB4i7|!v;6%KGR64J+Qj3WwWrKneo&GnlWEiWVMa^DSAqK!%C1N<*yR3+o{@qkv z99m@g3>@()l0?l|9XNysN61Vb7H!cE@Rtq}ttyCI@V52?c-2eN^#+p$uY&v#GxqEQ zXCO8J&N`z%AzCU27QqZQSXOWv*ABSLNDB)EC(y9dTNw0Ye$GCo>oEVing| zV}UiW^>Hs^mKr9|UWI2TF*p{n1_m_a4PHrkmC+$KHtQ?X7O{lYHp{kJmyJa;^X#cS z#-bx;TGr3QY8>FzSOf!_@dn@0g%%7Q>$a0(U1?z8ah@Yf-CDI`BF83%UEJ_J z2SstHq9o<4YqO&#jqEdZInG!FBNS(DU-N2AxA9=duI?OITQb7P&E|puW^uCk%7kUY zD5xZ4OJ)y>R3@|5&?yBARbm`iBW7Ck{2CCKshI4as8Dc(TZ)!2VG)ebcs+q(TRVVD zYfGy(!fbBH0292#RdpO-FG&bqbCaPnJ`8Jn?r2B?v&vFAF$9*@+o+B(xzqF3`fj%KpYb1NZlz^_v( zJmdLH8hPRU5P7D@3tz}*ardmwxV}!-6h%xuK%R*Kn)%`C?g&OQXpUh;wQl0PzY z_iPEh*n~$vcDOV=q6vG0ti`+za_F|Zi_w1cAO*-sm3Y+ddY-eGaWbPW_8u{~{l*x- zi-!{K2{sj<~{WSf#&hEM~ zbJ(C{$Vk7g64qGhmIucFU>CMJR-an=4>bmk|CfH!LRN}w`h2;@rVa*FCYtN%JZCtS z%5JTdB>2q%|35|O-(O#*+pzXsaO>Z9q5K&D>+iSG7z#5ApwEdp@vHo#vWZjXsOxsP z{6JzLSOjhOm&DE*dFp7|R-bg4__RxPjGwvqV>HLCYeY8I0F1tfJk`}hnh|otj?}vP zfyjWcC}}76)Xr*SR|JA|EO!yX9iO{~OBj`BhR^^>hz&C`!-hgTfN_(VJ`ur_>s`u+)14oTIe0?Gi6~Ne<%|ko0>8sU2=< zZkPp3&TQs5^)+8$t$*v15l2D#8pZ1hCkp8F3s#(Nef>Kg&U|^`3%Y}?9g$96F&)Rx z&w4receMD=vwSkQz?5kk1_Msf#fmyuT{(%a1WBA;5p}T#F>Cgi&U%GROLBl`Jgd(f zPX9K8Dp%P;B`AT5XgTH^0AYL6>7vWs{5JsI_HX#D^BdphD>nR$ia!DF-`!LTuLlw& zhb7g+HQBaJ5*l_fSMyig#yP541d(<;7)+Y7ZL-pTLbd~-J)alUCw(x$%SKH3p5HDM zJ6Bpb)I0LUfJi^4=72_19Wr0!jAGs95W&`9Nl6VWa^yT;x(f3^OaR*dwu?|~S!*{Z zr5Q|=F7hJ&WUX&7l*aT|uwYSba27_Z;T7fWJc-f&9Fc*+ZV@k$agyur(V9f5)u1`k zxZ19fAcN56z%*;XuqWfQ*U`O(0>rZdUF`{#;Bf))05pw`gSBrm?EL!>Q*7lp!wj4? 
zrl@+RbQui#4?65AXAV`1SJ>{xmU=5}I01n`G;ekjT_6Cb0=IP?ns-kp8vc4d=qfrk z2sDCxl3>KQ;q`)p`B!nb857=$-K!|OB`PY{OGbQ{Q`2t1IqU-1sBe(K)H|k_QIXt> zj7;oLcN{K(17ql|0t0filwP3!2-hWe$SRPEul4Dk$0vM6+;ZCy{rWNp7}5!0yxv0; z)&?l*Dzi{I->6|}8^m=$`P)+iaclzUd%(BmH%s5hE`i55whE6?e{9TbUM46l(=NnrR4;a5{)8RJh0vdv zBY3~e63iB=I^Qf9bH}vFe^2lQ3}Q>wMv<5llL4g5RPlDn*s0SJAW$!n9U0t6LhF($ z@+qg7_`Pp4QE2Lx0ygN>KEY}&pMH}ne2vqg?*Zt`4npijk00G&8k}tqZpScxuh}en zG5o=iFF>UG6NfK+AMCn=vfJ)kkv0n#TP{1+of?Fi!S0R}zL*aNlv=bxjVD-72&8VM zY?e5iaWMLG5-LqsgehrLPPja?386;I6zrLiIt%S>%rO~nTG?DtpzWK+s6#>d9E6_K zp(<#(<+trcIMy4n+-eA6ovbfR-zk+$4hHItM$%7_Y(jJ)IJ`^{YqO)K?)!pXe#v$9 zljAI$Y1iUGq?LKX!pd=oQn!h6O9 z8vuLxffgSkF}#(C986WHHq{qifpV{^T^SLLN*WQ<+qLJ4Yd>3zvCPj>aCT1a34lKi zFjE$N+p`oZ#RGUg>rwe4ql51ZR(U31X#@j9|b9t@q3$%CnlE5nfSzS)3+ z2&6KztS>F&ioXC!NAZ!2CC0lH0P@aT26=Dafg2hw>dj;b*`p2!v%ir1s*!i}A)Q{p zZFj1_Xmv6;<=TqGw^XaPs?+D)X;EhR=N32xi@}Anottj;uD9|#6*vn5I5$?V!O+*s{TRJ7`>}9@ zMpFjNwHtNgLlyUWP3*I~eH!1(&Al@B_RMdX-8~>cy*p$VK&Jc(egI(~xBGmw?D96~ zdjV0KXd~(sCXWT92cYuy?#7~KZ5ngpJp7W&&Kh-tQTj6Fu=G~LBiRJGd4Zq<`-4MS z-@|Cgj6@hlAm++xE5Qj(m=yF-^q0_|5ZZHf286f4Goa|c?^pugSnR*6C8KS*Es=ZJ zHyXr~rau!qrt5?KAPGkO$9k4eSLlEAaa$}_0+T=66txK$XKUsIq|DbC`izL9(|LCA zhd8!IzXWj?$7%_o{0ran=FpMOH^Qu2OVCaa%ww0vEo`aAy|3+xzz5d&;P3Lxz7Db& zs>fP4g>`M7zgZ)F2#BExJ_sBTjkk$R8yh_OqE&m8bCS~k`7}-QgAhGdJ-Fa{j6ukn zcAjA2-6;HynMLbzU85Wzr$*4|ARgl&A5(tBb&ikWSiHT6V|wnslV?5mu5Yz=Z+O%0 z;D)XKW5MN^S#+IWoA_B(5;o8R<6XCxS~qR+>9J4GaEKv3pXYmr>e*I*@>HEYq^q}d z_L+VqfA#5GF&6TRBbuxy3M`55WuEZl7m(3!U={w)1%OC)Nb+enGp3yNszbP@AI|fr zv`T+oIDK(%5U-_1&P%6(NW}#B1$ZMD5Zj*uaLaAfANAvUyOS`s_pgV=(Pu{dfN}e7 z_!Vb-d$vzOJTB^!Pw}!J^~VYI=A~6h`g~}5#dN;DNL}M&E`uJ>T7KI+om)J#c!c!} z*%)Na@ib@us*gDv~CxG>ZRPz`I3;o@^6k5_{J~! zgJawGmhC65Ch1ItbSzkLAXc?B9h2Qzk-$x*$Qu9&I8JI^LZ!I_q5yz!G{5u|8S!fd zJ2!D2voW)=g!L9!`fB!fzc>yglb|X@#xCNDl+U^=5bmWp1lgeN~Ad z4f^;M047ZYh~$s9K=3T@W%8Q~(oUz>zhCLm8kzce(Hv#h4E9nhu*8l=00r`|o{XzF zOB4YJD`cge6B)i*wo5IFYoepQ-U#sB;V%kIbl} zs#c0w=ApHeR^@x+6HMg?(=ad(vJj+($CKuN3f^!OO%BrA9Cswc0X2HsNs)Sd0Da(O zhh(7kQaDh59Zi2XVE9|GU154TJU zjOKP3BKI8(u`H`zl))fze<&sHOtY8iOVNX_oII!pk zMWu+A0Mhml28!y}YE%vp%l5ktqvk&FlA6^QflQQh@Z)wTT@U|8diT2TZ4onhv zh}zFKMDvit2uf|p)u};Jo1m4@Q(tu6+Av$y!WOfp7I(5?_ z0J;!5n;u6?eTNp(KuS&-QqB6N_x5o~UBUiJiC$mC;vO!b~M z>20(h3*bflaFq`R3A%387l0GJNXXE1|Kr0eH*I+w>qOQ_2tFuy;ipO@Otv?YzvMB+ z&c-y((pvo>13{PQrcibCSrufr7ghq47;$4_TXV% zCRcF52HUJ--JZU5ZUPl$17b?HPCWF~m0eSzpjO>U=z+%?UNt6WC7Kl#7+P2zKEsF( zQ&JRc`Xc7>mAw$!GonX&Jnhdu)q6iDZ_hoKZrDwa!Yl7cZlH_n^Rq(7!YkKpGo*#x zPg$!i=s$wPZqdRG3Fh;-$>8r?3>l&o8!QjS1HuSUXlv{Dl7t zvfI$>o*`dbQ#1kZ#ZK4lb~eqWR-owK;-*xSC3Mri(R@Zw1`hn{86Kfp4Eux1K=S<+ zbY^&fy0EzX)%X-(;NdP{P|u?aXBE`Y2#ri|pABJ^{o(pR{ho$^5InGV#m&lE1zv!qQw z^W&#S_*&jhQnd!9>Q3Lohc}{sR%^|q(JdrPhZ6hz;I|QfKt4aZi{tbX`QQi1cQmK? 
zBF^vNv#bK7MdF=Pcps^o(uC%A(&Sd4kRd}ph`>gQl9s1uw2LQKw1jYlXCM=l1`O=E zDSHLh z2Q%m!uEe+Lrb*ldn*+r9ha3~P27SDKerBo4A))=#*%MVvxJp!#lzKEz$4^VDW?d3q zK>ct}Py&N)Tc0_lIrO(GR;XuC=nf#L19Nhp6@Hj|z?+|*@miKMAjo}iwP=Md9SxjP z&rN7O>$BO7Q}B#?q|nzbQV~anV#_^;X&##0evlhLV&)2(!$odDykI`s4lUmeskJQ@ zgqo2%P_!Vxi|(%c_~w)b4|fyk8uxA$0T%8Llmh>q) z=2`ML$Wsz0jv^N`cvQz!sqPo%qGiz56{k$DBXf#v*TjpA94a^c~R$zrC5} zW5+o`93&1-mwAAde;xMT_SbE()S?{Yclcp^8IM3Ql{pw19mD#tagX@2gf{PLBT2=w zW9wTE3+FS)l=_p*<_3l7mY!rKwY5rzB|_vgX?=@dJ^<((i_D88De!s-^T=r=Zm*|x z9_U=M5?;WC%a5y$k-?TaQ#3VCiK89~^fLN+p4`~+c)wfOFc9fExaroWFXPOh`)f=u zD4vk>*IAS~npp|#k1z>=5%IIQ`38ll+k~fEgZs%TThzP(q^>JVRcU%EL!+8l*4f~e zKf>;BR8ckQX1H+S#=0Ojxt2?ZA|3q20;yj#xc{$Z>Lvy6FF)BZ+d*%+=AtmG8_jus z(0{^lwn-6>Uhx%w9!Fj1_PH@Ofw_Zw_;B-XhBD1Ja@pSOEFxg*86djyQTXNv?U)Mc zI0f~r;SE8}A*GDKf!VxJxy3?VyqB;xDeGOvxa?GQu_xQfNF({r2#+im3{KAwom0h9 z*3hV+B#5O^WSO~ZVu#By%(y`|(^E!<%9&hTlK|7?saS&4#d@imMrMXB6AU5TF>@T; z93?jMsrnp;KfNFd@(w2i}5ulBV1P`MJv`6NUH2QCV&ef)lk|sG+<5%<_((U#!XLfB{2MO z<*CFOFTvJ;z5#-&ZFAaIGx9WK$GSfPjNOPu07)U(DWI0^vIS9r!mv<^SDqb(^+1+S zgO3Y+Lxo`pAKfW&4@JH>wyb#K>UfRfBaH%vOvP!8vXXHK5pswHJ04O-Rp&;VfN5~< zYeUD1&m$Ru<4izfS-p}e<^Z-AG3~mzUf0rei-{eN*(R--3s?rh6cbBKQQH+fr#9h~ z3k-%|*%B;SC=Q_tJ4dlbIm`#pHbrEXYOR%re8YPMOIq zrR`(TV?cU14!&b*g07UE^4^e{4|0&*>HP)irXtTA>~^b37oyK>OrNTTEU2OG07Z2i zN4mIGioy)h!Ky)FoFA#W+bmUhPUw(h z0gwo#l$4BCi^SAfSO!heHTab#94Y3*0{}^6^*YJZpu8_W2+XZ5nQ6(E0LsWoi*iTK zMVt|3GbY7o+IlLBB1O6Bbp49M@uIs1?k%Pm$uh+4b*gOCOuos}QkHMx!fd%KQTXx? zr;3Kxb3m~@B4QmTDneNgkU}I7e6>V6OJBkPz{p(CeUT-#dd@5}8{YFQ6U+rE0%S)s^r2#+hsqHz9m$T#fuJJNgGtdHR_2 z@N$0_!d1uKMlRwrP9Jak!IAm$fArjbPL&eCq(X)&5Qw`Q8Le2gSyD|-3U|)&%%A#} zv|KaCP;KuDAwre7$%qDu*{@|v8q>%a=APva5s>@X(@Y89gV{3Y$0>1!nB)g=l3X!ISCR|Gud_@Inmlt$Pb$EJ<)-@pSg-sSHqmdfil37l# zLXuA`hF9g*-m=lQg6xcYtRW8jIa=n;fX_Z-XjdfT`GeV#R4?US_RX~kdT2(AR-3)Z z=&>H8tmH#40N6`X$6(WZlZ{wdc|B!Ia|KD~aj^(O<}ep1XVE2YpHlJ@sT+-4s)lHx zFccK|2?mCcQ*d@+Q=`R-9^2?>?59WD$U@FHwFBt72?j)k*SVKA{;NU9ZW5hS+rCE!9F~RX1VHC3Goz;_v8ksH zX%z*aqg^rhTHrSn%)fG+Z!w2OSdW8d9%PZahZ_xB0ojcb#tiO$u~EXX9hr#&)t$6v zr{aq7OnK`F;xSV^GPZ)8Bm-kk&6vM|v3Xyay2>L7R$W!X4lPg?ELoN&AgNeem zX}u-w{1Jb#y-W}LpM8=%FByI_-*?uOIcI<>s=-9Af zuZoEFC@kf26S6?iUiiwLW#OoVeM5 zHx6owg)50bwqn=U5vfOkYBF})Y@ftq0Wf9{ccs_LW}G6BfGcL^m26yC&|lRUjT|#+ zhuEwA!GC~U;e#9QcRq!Zm#0CY=1#IvXgdS-8r4_K)k>@zyrvXh-(^l3g0K@Jsm`sDl?0#REYhh~!h#h3(Pw^D}rC!*0s6>$cHa@7|lQI6cKauHCQLK%qLjt!e_ z=I4aUR)|~zadWNgbxqZ|Q|4RVR}2f*q&~0;Et&&ZG%tM>YpTF&I6S|prz1$C$&;`A zOW8(0^%gpbD6So;zI8JeHFsNq< zI7Gyp@pQC=mdkvK~FEiw>0QtXR(n12<(NnfO>a?A<(?ZL&n#W}VM!i>CTgXJe_N$;Q80;p_(DfT^&+Cz5-^2(e zwe2+AfEsw1S=PILb^B}>h&Bs-jt{fasqc-uKxQNafmgDptv*(BLZOzV1v-SugEA25 z(#f}!VP?+m*M?I9qV2xcxw!cYF~i2Z_Vrhs|cs88?aWkxG+#AMHPP5~sRXazOPJAy~HBEkjG zzMo&3)Pz~aCQoo)_VP~IX`ta6E;s4Lsw%gemhxz+GM{Uuybmvma|TN7vbMeItU9ZX zfN;Wfc2BT9i2{;~Vf`>wgC7>4M@{}unCLUFo-&{=UF6mC4L%h`J?4`ADr;GS=ZpJH zXG4A#5RYTmd#l_xQO}2>SIk#OJ{3QikzuRbpop0$MvC1Iwr4M$xN7!CCjj&}eekG6mKL);_kSDfMvSXWCPU}^$2TFhv#7K7$n z-qLm0&l;)kh(*Ttgp;!?0-{cFj3AiwC@k_ivI_|r3z{z;Txkm~@?!kvG43{8!yI_H zP2K>o9sU7vk(Z9k8t)vF9}A<_X#N%04!3Th)aY2gE+dEEo(362v+3hSaNle|g8JX;(^-+2N8E?LGxgtYjIWU`7FSA+6@m z4zO)!GS_KXI;z@ilwGqjE8UzbkC#@@aJ91J+u9esh=Gk%I4j{r*A!3Y>j_n@^H&?I zBg3Cr%Z&_w#x1)A-v5kf6Xlk$Tg#(m(A%)~{RQ?EX#DFBRd0PZGR&+A16loFcC!L| zsz=QFWi4fo7a(4ftGq?@!b;0)@*r^o`ggU1>X3i%n^AZXm@#!s8&~*hZKW$eEp}_D zLDOdN7##%44pEp_;bmc<`ZyahP(^4PiT z*z^o8S8r#&NgMcA9Oc=r?G}n%dUp9fuCgmr6vqsm;PRy}zc|hxeHH*~f(64F$5)qvc&XqWLt@@tf!9+G6@NR?8uTl z`?-D1{J|N)?5i4Udgt&)8;a3oEZ~=%=wNrmTsjg-=~ONEbp?o*FqGG*nCtedK3dJc zdN%e@UeXW1k9{|HdnSJQ&@Ahwm4Q&B9z{HS%4ZK{A2?TrqA#&)s25%(DN^W8+pF4w 
zwYHzXA&!dR%UZYF1A&}&eduH<5)vHpSWkjC=!L?$43#VuGhLY~J+qrq>Q!}X@^Ol^ zla2$&|F1cydK(pVC2zvlmb@?&Wy$@Po-5%Ri#P3|lW>A*vcXpPo@8hqpBFS5U*36j zeJ0y(ulEw!gS|3e*6+HCFU-yiq30EJ=jB)N1OQZdz`8ox!vt^(UsTVm9Ig+z zYpLi^PXeE6zHTmF=M(wUbeS4leptCKY+W2~7c{gKarwHQ*Q=@c+g$H$-6`XmU6N+! z*NrB4K7?H@eZKj--|U=kz1Hy6vQ|~~bc)+~t$o&2q`n+N^VK!a9{_;PYq*?N0J$w+ zFq#3D(cbTjiur0|GHhbZO34PtCx~fUORR;b8JPP9Ups#VZvXg zWGT41d5x$#=U0DS%Si!F`v}d6l@#FWjksjpn5}E;s>>Ux^>_QU_A}tF0xDY4t&fwf zDV;3zVbGm+odcKdo34~%R#%!&3?i`fqdG70##G^HlelA4q^hzCUPN~&C) zVzuB1CF*!}%mC?%D5a7Sa3XR?lM>HP1ct;wx&lfO;oM;*Sw=&J6OG$NJ3&+u3VoE1 z$@P-SvzAHY=RB^g+Y8w958U_LO;ny?TNbBDfftJm(=6KPYV=y)U)^N_IO)ZYQ%!lR{1*3BliF?J()9_ZpIy{|4?};E3 zyeg!eW#BY%?82A_FDoj}U+X%9-b}0ecT6#ze9D^uV-|JM=#i@&n}vd~$IU&EdVLZ) zaAwC#>fxdTbuU{RXb6&(5`-e$gLRsNcpxGtx7i|i-V5mQTbNZK4jqkTS;6gK(&8r6 zDN3GyFMa+CO&`nBGy;%Hl~6M9=vB`rA2tDq5ThD0OHj&l+k0axPq~-PXT>4!DW{)~ ze`b>$?$HC-4%qwBckIW2%!276)n()n_buL$p?NEdD_R_B7xV^;BM2BOl$*#}YizR8 z(hP;RSoc-yHMZMnB-`X1pmXXSUjI#xclKCFOO|ESR9fhWUo z@F^R%`;;?v+*T>;81FS8fW0ScilSW%#?wj(h^CY2v6{lv>A}NP#sFkl@YhXufndLKsm$pm+sb&bU2d%7MYukb*J7Xs0PtTVU#PgQEc5CJed!n)ch z%XKWWUK=5}qeQy~Lpfkw+z74(w|AOFQE#7b>B5wD0U1D800%<$t`UE0BE;Mgml9ZO zOvG+!IfF6PtXTwLP)GN5lcMPn;HrV4-s%u0R(kI!lxlZuIrz@~R842^Pm=kYs}ZW_>pOp)=4TvbZ2oHIQhKvw{R5P;<%xFR(qnEd&%7lDuX(Uta zSt2=|JGy`eu`$CV(3)F9b7-`uP-I~90_1?1ZarJ%lL12^0l)!46Ohk-6Z33ONjL#EJ^ z5M9J*g-0s;&f4QZh)El^({Z^fUHiTvM=y_k&k`6_wvpw+pptD5o2Su^hg?b5lptJb zB_Rq&5=I8h;UI$A`G%{wHKG9$#(YXnTqsdEf$uqIb+Y8hm#4r%hoqC0rUcg_uH>}ZuFV~+Z8p7;}inT1h z1gqk#ZT#!#)J*qIOiE4xfgw;B9MM%I3XQ3{AqCiSad-leR7*+QODUC7@=kz8XE0f8 z4wuIl2u0$WMBS6hfi@V{Z*N2{7-qq(> zEbV2lW%AxAlq$7GOIt@*uRb3P42_I?Ytp;kH{e?z%*-t;t*mWq2~a!xDjXc0@~Y{h zOP}(t>x-MaM_YZn?C0&{t6hgqUApz??TWvC0|u*V(2!vxq*=g#gE2JwS_HoFDKva9 z4wzKpw*jvT3(zenW`|Netvfqzr`=mv@y?;of)#(jJlbPjKhGlxY197_Y-SRo% zDJ&{3@s^gA`zk7{s%vWN>KhuHnp;}i+F5VqPML1fAWE{L0!=qe%XVDP55g!;(u_A(uH;!ZEroPk1nDSd>Fe>nqZ3;?ZeTe@ zagt_vQC4-+cKt9;^8#p_>5o9`&z_ih7zs99)-CinubbHR-^<@m1v3__&9FP1F1N?) 
z^8+9RBN0`!zZI2MxCB?;A9X@Qkjct)G>gsQ^7sOwNGy@c#xu8Voui}l!LFDLA4LQgY(RNda+xVZ2SB7nQpJcJwmeQ|DO=uUFqzd(b})`m~yS& z&-VQz*M3(E+kuwfTp-d%RqnxlHNSS35N7thj^ep;foYj~9s7v9&Gt z(ECk^UOp2&!uq|0oI>RIIpISY?@7o029HWlP}ypyzTl%B;WUJiX3^2nrT+zp+qPPl zyi8njG2=bir|lh`J{P_X+gtt@A_=XY7EfN@pC>KH*6v2` zEJv-JgMe884Hx~OAL z>vY;)YVEkC&AeKh%?;NuAEbaqT1=Q7O#}o0Ldint5Y!u6%)##2-aUbO(toqZV)e`` z1`md4&~bpeXv;tk&=C*-NXqM2=XRt};Jl1{`^j$KJ;AV0e)HHlcqTdtY-)-)PxqpV1i71pq2iR!LDrhoXoN z#s}lf|GmXYGFl@!CfTr*Dx0!TrP3vh^kAw2rmvCmgZZ^Q@}nO%N_rq`O^?gtzEi(S zhJDy>cdlkI2+0a2RMi;Q4H2^yfJjy_p{mBXZitu-0U}w!gsK|jx*=jV0*GV<6RK*A z>xPKgM!t>v{Q{T5rSZ5uxL7|MI(ftjFj8ohFvjeJQmH0I)1QQ84an8C;nipwBHC3e z<6;VJng^9r%sJ5w3lXzC#&1tS`sSJtCKcI zpeh2Mgk=pvqc=e-v257f37&Y`u|AU z|0Mt&`MskfvE35=L!zT&`yr8Aq9f5iY-{<1oNo#yRMi;Zx*--^6>}BMYyc=DLba&P zVWHI#C(IC3&@Km`T{z0sR!k)bZPvoB`k%hN;$Lj8ru~1S;P>^C{tL+6E&Wl3tWcFh zfONv5H6$F~NM-VqM+M$+nyWW(P+zn6_T}Xvfu7Dijv{03;QJ*b?@^Ah{j11UcB81m z$l6#@-?R#nSLk18!=Pf1X!+?30egd`=Y3f|8^2}qcs^bKQ>Qv@rrx4OuyU(px(X9t zl);AoevP36YCviO>@kMQ&$y1{uGPCN3HUMV_{WpQZeJ~|%*y-;hGw6TeJafIHh#A2 z#~-e3PJ132^%=l)M&7!9YsXOdcc`4`vyN3Pgf153rX7Wp4}g4rot4^H!u#|fTtHs- zxC0YmimtF{-LPdKLJL3fK`$!rR02PBY~Jq81bAZR8P_fJE&N<**z9dG5GrgM=>C4l z6n^O`D`o2?bfrvma6b0bmG&k7d&PC5r|lb$J63hd_jx+)WB8X859<#+XAo&;az<+u omRR8g=jv#6iE5Qf3Ik_2$0bg}{nRF2Ps2#MUtnm%Xxz7F03s32@c;k- literal 0 HcmV?d00001 diff --git a/docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff b/docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff new file mode 100644 index 0000000000000000000000000000000000000000..1da7753cf283671f5d127c6949074d212a843c58 GIT binary patch literal 52936 zcmY&eQ

[... base85 data lines of the preceding GIT binary patch literal omitted; they encode zlib-compressed binary file content and carry no human-readable information ...]

literal 0
HcmV?d00001

diff --git a/docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2 b/docs/stable/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2
new file mode 100644
index 0000000000000000000000000000000000000000..79dffdb85f745ed98906f3f28dfe833acd98e287
GIT binary patch
literal 37592

[... base85-encoded payload for IBMPlexMono-SemiBold.woff2 (37592 bytes when decoded) omitted ...]
literal 0
HcmV?d00001

diff --git a/docs/stable/_static/images/arrow-down-orange.svg b/docs/stable/_static/images/arrow-down-orange.svg
new file mode 100644
index 000000000000..e9d8e9ecf248
--- /dev/null
+++ b/docs/stable/_static/images/arrow-down-orange.svg
@@ -0,0 +1,19 @@
[19 lines of SVG markup omitted; recoverable text: "Group 5", "Created with Sketch."]
\ No newline at end of file
diff --git a/docs/stable/_static/images/arrow-right-with-tail.svg b/docs/stable/_static/images/arrow-right-with-tail.svg
new file mode 100644
index 000000000000..5843588fca6f
--- /dev/null
+++ b/docs/stable/_static/images/arrow-right-with-tail.svg
@@ -0,0 +1,19 @@
[19 lines of SVG markup omitted; recoverable text: "Page 1", "Created with Sketch."]
\ No newline at end of file
diff --git a/docs/stable/_static/images/chevron-down-grey.svg b/docs/stable/_static/images/chevron-down-grey.svg
new file mode 100644
index 000000000000..82d6514f2506
--- /dev/null
+++ b/docs/stable/_static/images/chevron-down-grey.svg
@@ -0,0 +1,18 @@
[18 lines of SVG markup omitted; recoverable text: "Created with Sketch."]
diff --git a/docs/stable/_static/images/chevron-right-orange.svg b/docs/stable/_static/images/chevron-right-orange.svg
new file mode 100644
index 000000000000..7033fc93bf4f
--- /dev/null
+++ b/docs/stable/_static/images/chevron-right-orange.svg
@@ -0,0 +1,17 @@
[17 lines of SVG markup omitted; recoverable text: "Page 1", "Created with Sketch."]
diff --git a/docs/stable/_static/images/chevron-right-white.svg b/docs/stable/_static/images/chevron-right-white.svg
new file mode 100644
index 000000000000..dd9e77f26165
--- /dev/null
+++ b/docs/stable/_static/images/chevron-right-white.svg
@@ -0,0 +1,17 @@
[17 lines of SVG markup omitted; recoverable text: "Page 1", "Created with Sketch."]
\ No newline at end of file
diff --git a/docs/stable/_static/images/home-footer-background.jpg b/docs/stable/_static/images/home-footer-background.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b307bb57f48511ac8f1e76ac6bdf6cea97441b21
GIT binary patch
literal 38907
[base85-encoded binary patch data omitted]
literal 0
HcmV?d00001

diff --git a/docs/stable/_static/images/icon-close.svg b/docs/stable/_static/images/icon-close.svg
new file mode 100644
index 000000000000..348964e79f7f
--- /dev/null
+++ b/docs/stable/_static/images/icon-close.svg
@@ -0,0 +1,21 @@
[21 lines of SVG markup omitted; recoverable text: "Page 1", "Created with Sketch."]
\ No newline at end of file
diff --git a/docs/stable/_static/images/icon-menu-dots-dark.svg b/docs/stable/_static/images/icon-menu-dots-dark.svg
new file mode 100644
index 000000000000..fa2ad044b3f6
--- /dev/null
+++ b/docs/stable/_static/images/icon-menu-dots-dark.svg
@@ -0,0 +1,42 @@
[42 lines of SVG markup omitted; recoverable text: "Page 1", "Created with Sketch."]
\ No newline at end of file
diff --git a/docs/stable/_static/images/logo-dark.svg b/docs/stable/_static/images/logo-dark.svg
new file mode 100644
index 000000000000..9b4c1a56ac65
--- /dev/null
+++ b/docs/stable/_static/images/logo-dark.svg
@@ -0,0 +1,30 @@
[30 lines of SVG markup omitted]
diff --git a/docs/stable/_static/images/logo-facebook-dark.svg b/docs/stable/_static/images/logo-facebook-dark.svg
new file mode 100644
index 000000000000..cff17915c4f5
--- /dev/null
+++ b/docs/stable/_static/images/logo-facebook-dark.svg
@@ -0,0 +1,8 @@
[8 lines of SVG markup omitted]
diff --git a/docs/stable/_static/images/logo-icon.svg b/docs/stable/_static/images/logo-icon.svg
new file mode 100644
index 000000000000..575f6823e476
--- /dev/null
+++ b/docs/stable/_static/images/logo-icon.svg
@@ -0,0 +1,12 @@
[12 lines of SVG markup omitted]
diff --git a/docs/stable/_static/images/logo-twitter-dark.svg b/docs/stable/_static/images/logo-twitter-dark.svg
new file mode 100644
index 000000000000..1572570f88cc
--- /dev/null
+++ b/docs/stable/_static/images/logo-twitter-dark.svg
@@ -0,0 +1,16 @@
[16 lines of SVG markup omitted]
diff --git a/docs/stable/_static/images/logo.svg b/docs/stable/_static/images/logo.svg
new file mode 100644
index 000000000000..f8d44b98425f
--- /dev/null
+++ b/docs/stable/_static/images/logo.svg
@@ -0,0 +1,31 @@
[31 lines of SVG markup omitted]
diff --git a/docs/stable/_static/images/pytorch-colab.svg b/docs/stable/_static/images/pytorch-colab.svg
new file mode 100644
index 000000000000..2ab15e2f3071
--- /dev/null
+++ b/docs/stable/_static/images/pytorch-colab.svg
@@ -0,0 +1,24 @@
[24 lines of SVG markup omitted]
diff --git a/docs/stable/_static/images/pytorch-download.svg b/docs/stable/_static/images/pytorch-download.svg
new file mode 100644
index 000000000000..cc37d638e926
--- /dev/null
+++ b/docs/stable/_static/images/pytorch-download.svg
@@ -0,0 +1,10 @@
[10 lines of SVG markup omitted]
diff --git a/docs/stable/_static/images/pytorch-github.svg b/docs/stable/_static/images/pytorch-github.svg
new file mode 100644
index 000000000000..2c2570da1de9
--- /dev/null
+++ b/docs/stable/_static/images/pytorch-github.svg
@@ -0,0 +1,15 @@
[15 lines of SVG markup omitted]
diff --git a/docs/stable/_static/images/pytorch-x.svg b/docs/stable/_static/images/pytorch-x.svg
new file mode 100644
index 000000000000..74856ea9fdae
--- /dev/null
+++ b/docs/stable/_static/images/pytorch-x.svg
@@ -0,0 +1,10 @@
[10 lines of SVG markup omitted]
diff --git a/docs/stable/_static/images/search-icon.svg b/docs/stable/_static/images/search-icon.svg
new file mode 100644
index 000000000000..ebb0df867733
--- /dev/null
+++ b/docs/stable/_static/images/search-icon.svg
@@ -0,0 +1,19 @@
[19 lines of SVG markup omitted; recoverable text: "Created with Sketch."]
diff --git a/docs/stable/_static/images/view-page-source-icon.svg b/docs/stable/_static/images/view-page-source-icon.svg
new file mode 100644
index 000000000000..6f5bbe0748fc
--- /dev/null
+++ b/docs/stable/_static/images/view-page-source-icon.svg
@@ -0,0 +1,13 @@
[13 lines of SVG markup omitted]
diff --git a/docs/stable/_static/img/aliastracker_graph.png b/docs/stable/_static/img/aliastracker_graph.png
new file mode 100644
index 0000000000000000000000000000000000000000..11c66e64d81b80f9b93c3a48055efe91b7f331e5
GIT binary patch
literal 5572
[base85-encoded binary patch data omitted]
literal 0
HcmV?d00001

diff --git a/docs/stable/_static/img/dynamic_graph.gif b/docs/stable/_static/img/dynamic_graph.gif
new file mode 100644
index 0000000000000000000000000000000000000000..f6fde31580104a43797314899d59bfa47dda83fe
GIT binary patch
literal 334931
[base85-encoded binary patch data omitted]
z#3vjiesa?;WM+Wbvx*}KF)$Uo;8(Y9`kivzTs#FLxDhLKusOm-j(aR|x`+E%Zf;DA zc}=7f6N!C%*q(_aGqeA2kTK75^&j?b=%C!=Ak)ViN!dt&g2=Sz&V(SEnd~1KR(oRV2DnW_-S{tD3tw0V0LpF5*M)q+E`Vp>L#@|TV)_l0z5dX&d!#l~ zsoAH&+iB)1Yh0hJjM#Nv?$!LRSDvsCXY3n&eLkgLlR)P+k;NUpmp@J@Z?$A95ua?; z;W{9mC~6v+YaYC4ARf<{|j2geH}&LUfXnAq&eBwZx4dmjhO?RG{8 z!YY|iovhA3{xgvrz?>`njni2M{nP1lx991CZ=QH>|7cq8x*6hT$&}LJKAwmEg;{S! z2wdqeHaYg63B`nighbuM^A6x~6(?glNfXF@1 z9}Ka==%Ih~vh*<>VN4J;pt6w;{jq1aoi6N`{;agf2St2Lf(x)Z@C#$JHVy(S@3-3K zb9N4MWtLE@y?V5sr!cJF_0P<6zB7k0eqL_CvRSyNed8(b5fl-Ac>(596V7g(h44Y$ zyIwhoLWu73%4DXqydN%Lw(Ma0e#%%bBh*FZ`ASK4pN!wzpo_8u9K0yZ668 zz>Q|LF$*tx?RYEziTH2bDghAxwO2FnWr($9?=DHf`!FHrSUJ>cWiG4@!firg`ETvO zCcEUAJ^C;l7Ql^oD0RIJ-N-Z+ac}_uK}VrLWiEI4QM%uUGKV?4E+AZ|=g`JyN=^%I z435$W^kEOA+90dyQk8oQM~TmQbTd<#54FH{)rY5EK97S5 z!d_MO3@|UIAVL6(_tS=5&siY52i;Swo9}Ym(zoD8?f}1b;F&v)^Mcs{oZm_tvuRlF!cyTFD2hFGi-b5zQdPYX9otn)X z>*9Tz%Au0{cTEfM_0(=CcluAptH!8FtCd!Gp2KtqwWddW5L-Et`onH}*aoGEl1o;bG@8enDpxMDX&|S4~ z<-_oh5f~K*SAJpY_dUxw;+yorZ@Q)5+zu4^c|iM)B6jO=28N8ZM0v1iX$LO|mquJ0 zg=BlHewQW}pOjmr7)lUH?oLAWUtVRdwZ@67#Sx74X;PMJ&bE}qL6?PXn(BsSl>joJ z`~K^%=KE62&NV_!QT90_2tDiI$C|~LXWY}hPMbX2&Qx-cOe3`^GN!?LK#i@A_I6CK z^z`*~>UoXmWfC^ie2yJ?h}(ETeW`m)Yz#-U&%QdfX~z$CjBa+)cqVBbtUa0er?vKgL95CQP4sX#LT?L;wR30tV?*7iS5rEN5*t@uWq5;-nOuEI;&I~Idb9B4%6f|BKLL`NhH`qgC~Jczc5pZ5T#yY=oS6hR{YBW5#k1n&WlE2E2Yy)H5Fkvs zTc^8!!SjRHyTgbB=Zf7HEsJOyvkSd(h{T-=1KrYE#C1MpCZ* zrI#jWpl^B)ny==Mb9>qtlnANHN6C}Tpm~vTPOYgya-4d<*9tE?V4jbe6kb(U)$8dz zL{7!JR&(yAE{-@PKFW=UmeyFxdw!AhhK`k`3}z--xv`{GY4`0btU?~Qgf5$4gOv@T zeHlr*n1Ywd{azJSr{t6^w}(oVY+arCIzAS#o>2~oA{wIO?l(Pen9MY+I%JDBjNZBT zUF`g*^98t6KoEO3-{}}PD8{)|>;5j;#q>j0*vIK&W~gZD%J5b~)e0!WTAIjKI^!h$ z#7_5wm8P2n0FWrI^H_~O%+QH`N$naZOSLSyp!84`YBO9@an4_j;ih+^Rz0jP^ z$!~MDt24gl<}2I_P}6QY!5)B$da+Y*0HLF<&Z8hVhv|i_*4-&~vYr}K3hDKHB+S7>= zs?E1pAkw3B>|^@n=Y!hvjZ*5*DQC~pc;}NB>vZ9FeKVgcBx_E(+3m&V;geyxJzFS` z*Oy`PQ}CRPtvw^w^Cn|H&bpdgi5yAfh%zTvY8uOdO3!O%g0b7ziH4HL@lzQ2EOac7 z1R|tMQXEgp_KTqVr>7-3Cc4`~xr*gf8YE?Ls}YSiHbLIlR9mTDfZgvJ%0b1~=N1;a zV!^Iw)EEp2{EgqqJU(aHUN@MVlw-#y!G^0)HnX5{N<72QE>LCezC8Vs+;C#}H&?o9 zq1K=l<#__=6E#WKZ9F-1u;gCtMA!t1aw~&gs_;{O4px_;9OV97;vg%EGA9C_F?0y> z&mz=iu31n{)wZ3{cq=3R^{S4jIVvYD_GOo;j6ORhar;Ea`|70KfpoPlMLg>5nJu?0 zTQU%l?q;{}{SpfM(4AO)8UW!b+Wtn^eN0)<{2hF!H@*#%?x8Fi#GF`^XvVa zCccuTkM!xVTca$!ae1n3L^QjxAHp=wR(_p`Pvoe12LEDHlv!X^Jq@M1LJzC2;YA6H zdY}CL7cz`Xhc?u(ju_9JiSE=55UhD(vj-b$hmvJdG%P1>DV%Sv zg?%2(g^%P&u^0--^9~RhvCgs7QE%|8giz##z-+^)MLuIY{A`1H-9mF2>5w=X+_wC| z&?sy5Y!z`#pCJc?8pC~X9c+Vb=gnd?3hgZ-bnmi-ED>bvS*e6AwzFB-2br4WOPJP5 zG{)5`tWWY4MnFXtmT4#zKHKLc&bR4UOeQ~>Cq`j22;{*jtPqMj5MfsTl9}-Z{3z84+;4I^Icj6wV7+H z9Uu85DX`~-*Alzq9#b-2%01n^)N>oKB6LVdw zn~yAAEjAMTl5``zg{xPRC7{dJ`AZ#2A~ryQiBF6UywRI*+e9N{wOnOBpLw{=!LI1a zm87rN`zc>xT4_dhS@Zz~fRyoFJ0BlQ6P(eP4Od%)xLuAwbbyL00mu#9v<@ok~- zsM?RPkOiv3O|jnhO(~y3q!PBBRzIG`(o=cgVU}r0b55l+spQXX=n)KYho(-l6YB5p z_H(6_Eay1ZwiEXS_}P`r%o|i)Al^x&Z6eDx*Kcd&_`l0CIPDOo{wXZ}RXW_NahcW3 z(~!#?w>SCPE3djzKjF3e{_%|$qwTb*3X|=UByL7dkpV5O9m}mIj>Xt-(Zyvt4Vk3( zTE!8p<;VCd`MBglv+K{4YeWGil@hZ#j>@xyQ$i1O7(4aKJ)uYZG;n(sn4RKu3GvZ8UzlSlo%+Ho`-CyI zyHd=a;fr66tlW6>lLm$B=n>-*DT4;yZ*BbtVvlLi6fQ)phYGp8YlL9hmXv!(H>cG2 z{|=Sf6Jv*<3X`nS>I(fMmV;Xk)dVBhQmRk;F}Ai66df?(2fWyHr0Q$UO7DC5vM%b# zjD(jQbOD9qx@?>dN9<@u86CI%?itfo(@)Q14XjL-bsrW&u>J~b-f>Hu;iEUFWd>%D!CGxP_e5b3b4cQAj?{%?Hz6MY zCYFEtwuR{(%aKs26y>IGxBedadzk4SK!pg_BK~mpA(793`(OXIsTC(%g*m+W>C5-?-KNs=U_>iH;B z$19!Isk#Xd(gvyOt7uCG!EBJKmoTQU#U{0K>4d=SP5rnfkV0+sX7mGIfOO3QQ>1F7 zAh3Mdzai|`GSQ6v8guQ9Dx*(cNTu)TQ{wwkJ7K;F@(Mm1?Od}6qoYnuG`(>=A)!xI z+1_Ihi}1Toq1GWe6(Tl+2%~6%WtL$8jlhG?JeHPE2MB{QzLqdm2Kj22-!n8O(iul_ 
z%~Y@~^A2S>G5hu~Knm&1=g@oQ8##jU(44`1ir#=^0+`b1dtkGR^X}PSEN=*OqucOC z7y$e#4%w3dQeW+tUuA*3AcpHyL(Tc)uU=6SX=*VLv+B3<%2bOJ<38nzZhc_O)j+El z8A%x#h2FRFXtp`iPPLb1_mbls>!63GnW3dED_#6*ht!w@6;2{d?6v`YFH|~%;%zA* z-%Hq^sKTh#%xR9O+!boRK;C3Zw;H4n-_0!_tc@Ba6paxIF*GomY*m>*;5RI_yi(JGw&&92~$dhDZL&mAl2-yFd{rk-hq zfb8vmKe*0TR|czl4yfr+)th&i5W#B7e0AuZ+l|@;gDuUl95TIrml+q1@h8ds}I`8ee#*}>TzaAP2j*@#? z+awmeUW8^lElNH^4}BpFh9(7R*D|X?l3{0~!+TK3xge_rmJD^_(}&ZW{-F#(vLq1x z<>ngmK3Q6ytSLyA20G(FsS(BE$IsWiK3^Nuul_ry%6fd$97Qt#ET_|_NuxI2JKyBc zvMC}?lg`?tLpgg$_goR%xEZ`yJOJ+lJG#o*`PilRe#EMC2&;6n1+djZzx?RkSfP#_ ziOx4aV|M;c794pWGQ5QN`*vZOun0Tu-(|N@Ulduc2hL=-W91bQwGK3 z6z05BriA9Ju*%GHnzGbJs#>phO=ad<=h;<+yvzb~@E}Xs^y9}+h<3kEM2FDSzbq&?-|0@>D}Pnd1Gi~zpZuWn1}N6lKc$JOx)FET zoYqv$e9>QaY%xnpC;}v7JSe}>-PWyh%U~5^>l=|iA8a5nrkq{)JSfI3@w!8AKlB-2 z5+E<}btxX~-6g&dD?ZgwXT#`1KM@bsOPDsCsxjz&;Pg72f|1e@Y z$Ua4YsxO>1T~J^8=4ygwtBR*}_YZu&cgb%d%x~L(+Unl37sIrN+3pMKQ@#_H8zEW| z;I%O7Nw+MJJgv=bpO&lPYfXqIkuoDs!$)48`BrTh+&_EC@Rz!-Y(f8zL$YeLe%a2; zf51z!FN1%Vx0aGQD(S`dw?W{8??l5j#;c$HXeHw5eVXh2nersNZJC=)Szv8pjV(wV zS*Gb%$rrhf!o%xS+s;zgDdjfMC!sZ@MbHgu;=TEP%>WX5B~}yP59vsxZX3|t29o0o zXP+4M8&H*=w_&B1ZxIco|L&QUNx-}It4)z`m+jTYzu?{&%IWpz*&4}5FWU|Ft7|5~ zE*qY4?=O#QmsA1&Ib>AhZq)BUQGpt5xNW3dF<@EUujmg_3?a!)Cy8giz<)1DJ5pr% z`;@ggRzf!ZY0@mXMa9HGZZT1M%YcOR70q}fzNWMjkY zD_D)CK0go!+hO{5iyWd4=uAeh??6u{;Q&5_-Y3(#EK38c1*~8djBj5u!qdo@-YXzW zre3BA1RsYd@PXE^iaXo49x#Qzrz*_G%Ar9xBVhhIG%R==s5bm8r= zD-ctDxOA)BHUQwI$Pv?UBt9@_5+c#czm%dWyB}lxTG1wL)9hxsu1*k{DekI}E6nOe zE8@?~RhAt2kiP?xAY03J3}3me_>bO~^d?r006={DWauEvt$gwK3BsF&>}bW4K%7v< ze`D{ygPQu^e1V@-0)ddwyGiKMLXR{PItUn$rYMFgq9Pz5B49%A9i)i?K@dT~f`EXb zh=585sY+D@3(`ady!rn2-kqJ@nVs3W|J*-rGD$v@LFRzuMa`{vAG3`ZDvZw41b2dQEsNJ`+f!`T)Wq`)7 zNarJhZkKd${5<61kA}9kk6UvRddS1;F6#bc{Gw1Il^$7X%h8pqjO@`|H<4x)PQPHt z8DMs7dl>b|8E0|f=pcq6h@TRy&&vdWYQE9$=Df7iW*}!55HE!c2`4hu0(6Uvl!L0d zC0BW%K-}#{&>&4iq1~5_Njd?#@|u+T{T}K{(ARc3gh5Hv3Aj*|Nd|T-;myO^hLID5=I<)qHLJ zpxb~5&DStJ_7uZM!q}5_&ORj`Q|H&)EHX3X-F$S!_{YNUH9&~8I2^2+tl_ExIyvx| z)4sis?Qr?^5|tLZ1U5<{hV=spY}?v?2rxPn1G=9E!PhKS96vX(+D`215dG3F+9Gav z(P5zDkA?tLWu1OVZ?AKcobh*PIk@Yow~6K+c^PI$F0MES+&>r1}v%LGS z1-qoNW(#O~7wo%kdXEO`3u`X|I%il;C}&?}1^8+4WcBy<+l}`nYkxq#^*Wf> z*)Z!ufX(Azyb&1?V#+v?p(LhD^mSnGJinsO)x}1pa!16gS@oIcdaH&wD!HJmEZlHAYqzWeaA;4VmI3zVg1d^i+j*+ds@4Cj{bQfD!+EA2Qw zLtG6yyN-S79h%=HQVbCu$n2nWL@^)M@X$f{u8!Lp#@azPh9XFz)9iTj@Nl+|%l>16 zQc?GR4&tb^LtTc1lS@~^kDm7|ZVdb_Be$6031;Uc77BcGe{k-|T&z@fr~KBh=(8mK zL)Z6B=|&GVNR(13Xc7kCswUhU256abl>C&#+L`Vq$%3lhqm2id9%ku>$Z>qVLt=zu z))I!c!m$=Ku>AcuG}1i|eg%^xXL&Rek&_R2Iwx)z_hrkzF_m$c$~@h4j@5l_yj3x6 ze8vUk0EIiaJoSz3peUGqMLtZt3=a@vyF1wB>k_FnD<|V?V(ccran0z5uS?Qg*Iwy_ z)te`QFWP{je9IKGqs}~Mas(8{af(T=D?0EE#l@|o2G}xUPxOLshV1X(*xE7`t~O;q zod9d)mzGi)6swS^(XNX+1c#-~ijaWCSDCUedcO2_$~wjX32rYd=^%$P=WA%Pd8&vJ z*0`<=N`tIAiUH11t%-8W|0r?;7=GSdiiByR8Ur|xmUHx>GIYQD_^|qXLx#CY47qFW z(sFqP$2$@WCl?9=3$whFy=9VMoXPILz?_QaoR9~J2fOQsJBb{3lpj$? 
z@Y9H5ZTtmayut7MD*3Y=Rn=n79d!wX1VRy_I1zo{TUE7W$^2pR?jX%rS-EH>L#2Ag zLuIT>q*9F9oC8Nb?)w2tM#={&btmbNaUD}wTzh8HgCn@?#NT`BDg;@@(Lp2CYEwQo zhg0}ga1t#F4gmSMBsRIXId-^e8bAZZ(F_wHo_t>K9A{K}i};pY>wb0aba*Fs z`buF{QI}SdG)jLf!zfLE$vjpv@O>4sGmrzcIB~RVT{TZiVO56n>fbOPp=zCrHIhen zs^tW@!|MAW<7$%M%#QS(k8?{BXsZ)dbccR~^vi&|qpPZmd-(VqTdPEf78~sY$)US$ zcxyZJyJgyUO&mYnx|t-Mo>M004j1u8yR(aA+-*-NFL)4qzU%_63|@ZDUoOcUWl`{{ zwh7fM=74t#s=!y3xUu8nybI%`r7ETnr57?v6LQvH-|2l|0+!*#`~vRO)T4)@YmIoW zWr^vpfa7i&?NWcGD|OI9@7N!74BinAgtOn_&d3~DWOe@Bb1L-hNVs{^pl;Bs#%Kp| z2iL`GP&oxOxlgV3yUsEF~VKRv-?2kym6555>j!AVVaMb=3(_) zQAZ%jMuoBg+*J6lIj-^Db*74JlTaky3L7BIZl1=Pt&Gv%phG3ZShMz|9u*vcnGph_ zX7*sn)y`>_#FcCUC~-LWQ~AT~c)yIsaM*`pp`t!yP6NtOYi8j%TBrjOwTR9fJYFLr z*UslUNat>1M2h*q4m12QS9x(}@a|;iT@a=imm;S36Rt1BE>?hq2o2&Z67?3jPF@o{ zq*sabFJ9x)(l|izc2b(1^(;gp0He?Dqbb@H+k{t^F&~&r0e`?KAq2Snk@g=FnZ16Y!t%?= zYv->UHx=BXq@5j)c{Ov0#UhGSaAhnGtn#K+zPF~w*s*4ld7dm68PgITrOcE&BKh{- znrWJ50a0BF1{(UNYX?Ip7gl^jF0E?GG9&;TdErpMMm& zm)r+@Qh}62fmLDI(jp_(B-?T;`^c*nFN=vHb`MlHByVS@t9Z-X<@UjZFTP$mCfWzJ zZM;&UdB!rL95`{S3~i_o3nO=uDt2knVskWE;v2Gk(Kw&j`x$e!4<4v~rQB+fd!}+- z4I{S{>58DSO0>-XY>hJ~CO@v67Z{-em9lDn`sUtnyT z$QPG2R<@^kpPTh%Q-MurT*|Gl+nuCrs5_rk^Q}7)>~m$Emz_+^t(O#zSC-?Q8tf>0 z{Es4+JuN0zN@!Vs zG};?e0y8EkYJ4-MAnBVKPX7N=NsXWqK#rBXEcHe{!;y>$HC z@uDf->NvqV21g3*PPoK6*jr-Jl-;2->D9gN1i(?wpeG=e=@VB_Snqb;$rJ4yYNYor z>03bFbG%=V?U7yPtK{PPsXk>qeD`D~5vwOiU8lJr>^9XjDl}+pEBkumzHW{GVK+z64Fl?e{|e}u^pT_SogR*ATJFu6IcbKxPdp< zc8awFS5xi7buC2JihGFFOZ>_9Mk4u3ztkz=7-i*p%hMVs##DIN6g9je7ZqeRQKg{B z*R;EZBUQYhKVlV`{f)ZvYy9)SW_7cW+z6v@r=C4CK^+QF+&c5AO(I^P zq>`>!UR1(5roP&znxIG{LnJ~Wi}!!PhteZcg2k)Y>efMup`?iNAHdrEAjGI_2e-t; zxgN1J5TcAylrh`teIVA>s^uV`-YNq7jd`k1;s z!^DT-AQLfo6f3>-wV)cXDUv?XGmmtBDEJ$IFjj?Ilu9^o5GGd89RzV!maHl@9eruK zK;>@cW`(d{N0p-8J-JDBf;|3!!berY9K_BI-DV=Ppu$Qi<|fX$Y;Z&Nn1YAV)l09q z2~=+41ISN#0S$oD9fYKPL&|x(8q`_r&Fww{Ac6D+EqAy+8k!v?GOhyGCYaZJ=b9Mc znnP%rN(sc^`1NS%G#twDn^hYYiE96>=pM-mnUF(6h@O&R^M7M%&<_-{^+;2SmocwP zMISL~5|jy!9wh>)*G}?ag1!kghy;wB}fb^{kg42@T)vBUwHlhS<2qq>7tCka;$O1njBsE=XVuYY{=ks9$Oe60gZ3{`E@)rh*KyOt*46HVVsy3DkNN4ylO8NTovvSja{0 zOzGLB{sM7X?JWe=?Xuy1Q+r0=XpE` zB$a`3Y0|ww9{=;KvGurIvT;+d!}6 z6CSa>EXm?WLL>Kf_Rz98TTvN|LTv30(c3I2T_lJO(VIIxWg8smw%sO;aphJ^UW8n$ z+KGZ9ma&)}ETm_cdj@-;XVy!|K*Dis^?+6AW5MuEBx6{Oa7Xk#4RH<4*#kbvP|H@V zBZ+Kb58VPo4pP|;rHLOQO&11k@0uS`4a7eh;XXeEF(CbPedtzFDjvTHO@(srGt{=` zkbJxdeSp0ly4`bSUXc&<{c<9Ksyl@4`twa^o&&Nr_VIrU4_dcpDP(u;tzb@FBC)%4Ep(XyPNJi4# zLge^!R)m?fUmJ3$@!o6ZP)@kpt&H>|Dp96a+{~;tj@6TRRS!O1%lhFbWjiMq6{$5! 
z@(8j|hrAr$z$bAmb5sZt0iN_l;!&{dk+oYPGHX0_7VdP&mkp)+S@sc{FTp$pyVzN+ zD3QKOZg;oO_P&w*W>$m_HJfdx3jU`%%939aMd~zN?t?zsf0RsnoT+?5xgl4iThpcU zsn%-R_Xed?l~2!zbHdL)+|*#~tFi2ev8o9NsX1HM<=p)`05_{ig?$xvCkt}!P=j%T zfB-#6PB0MUb_H|-7UC}7;I5Q6khATs8RBuI!9#z?gRJFg65?sm;Ay?%X}fcuiNn}$ zSp`vCr-GcMImFy?{CrmoNwN_vsz;Fy3Fn6jThw%xiy$uzREuLYelOs8q1aessq<# zVl#czfbOXL@{$8Fw<~Czd~qj2g9ztjodAFnq4zltMv}-w#CM6b}cv^>{y(44)5v z&xJv?yQ)M9jt_8~U&U0~O8nTs+qEBn1)ojM9sj9e$Ua|=HtxTyr6w6e-zQLAKaD{` zoW8YS0gISznJ7W2%P>{u(cBY|HX2=@24Bl&=zVkfSh+tjD6Z-*VyKK1nV9AYonJ`b zZ=-Q8mV@NoxtVi6h*VU8mgI0IBw>+@X6JqsjQBGmrVd1_y6>AyA@v^0@(SB|R|yASTa2kZit*`Dw;P(4Zoi^uVXKQ5`$yqGw#S$C4GA}jcJ$!@B;s^GB=DU0ZmyM*+exv-u`CTb*PTd9_1CgNxZ?eQ~l z2h%;m!eQx?lXBM*OJ2oIp|Huql8PUgP@K_v3-2cwG^D!ny zlBfqh^bGFNRWx%>PTl>k`jD#G-Ao7a)M=ZfyEsg>8t7J%pq;(iTvk~kmx)Q*eiT7H z=ERMATG?u(V-vPZ8ckZG#v4;AYB?2)k=*JT;}8E@v)iWckE$(cC<{yufVOE601i=Q zA{D(;TN(SVm&1s2K?hS7kC-RsF-S)*G!gJo1vlIwR7b1%zQX+ol3L<%7InR>{5tQk zlDpV=_A@So?Bm}t`oQh+pWa_jP4tuo7r8YTl^#}X7A|+OWR?oYxT(jM3JW;OKOqYm zaAb$KAA}x@6REo1ta@I}jx6YG9A={^Xf8Esbx^?e$yWX3Z;ct3d-n`v=Go<%+Ed1?c z%iGz#xAS`a%i;Z>Tl&B5_5aWt*bX1~)iSWZHvrNfghdRpw+?dc4`TF(_#=jdTZhE= zhj0h_!_pDM@~y*4`@^dGBbpH-M_Nbp_eaS3qb3of7OkV!`=hq{V-68xCtJtP?2ozW zk9$RopKl$%v_Br8|1LP<-L=+t5&Q4x`V+Ac6N#-8srwU|`jfd4lLf7lxArGX^`|N# zrtY;))$UK#>rX$8m~LvFZrz`5*PnSAG1J>R^LBq`NPl)LVs^51c6NVuUjO}a#QV>! z@4xQ9|Iu2QN_N=1<$0`>ZMXGz-m?IMg zc3qGUaj-0f+0QK=1NaW(MJ?OCH4fYl9V}YUVNBatJP$6L-XAHyQT45)|RFFHlyX-J;a3^qsk&o}s2oJcF$;6}BQ)q7mR z2JaUgs4L_S-tv;QdNtA0bE`*PzZ@-Fw5EM}+s2d4Sl}=Ds^77Od=LF%@a+|Ew6l)yL)@^ZjmwLkp4@?m1{ z{eq;cyu^*o%E;Zs+LhE7KQtG9oNa^U-ToE(R><$~ZZ=*tY;H~W!|!+{Vc(L2&1H9C zW*b|~p}lD9-%1jjH#>NP-a|87_mhI6}3oE0Is(R7-;Do$gdI-LO4`T6s^V)UiUnM74^(Tt%nD_CxM4sWc16MrTZU zMgJ|m>%aHdHL3ejc{Zp`|!+^YBI2 z$3mKuZ}X45Y>q%`KbUC>TAO-`@P(b=SI#lY4@-NDuW%tMBY-tO>BX!gdx$vwT z+(+B?e#vE%R|8sq|Mv4;hh(x$2;M2_A6J<&g+>;JJ=wY=kwjn%H)cVamQnlVEc%+wi8I6!!27@{mZ zGfgEDJ+my8dp)yl4A{ML>})K&avfb0z4A`^^?KcOjby)@0|EiKUr>PMTmNsDnP_NX z&0?7ea{mmVP}cpd)4#^j{~Dr4#ZxBJfk)8KH1_;2yM~efEN5Y4WM^h*rb!#80D%8@6mW!~un<3nOXgn+xFSwS6o;3UmnZ&90T(`W$U;-v zN{7Hwz|S4ixay>LnL;!-Gh->>&W=teY;3)q9IiOq+Isn&`VSDejr)aj-sjwd!o5Rp zgkHUN^@6Vt)%BL-qNqk7EBmkJ^0}{kb`3wK8wu$-Vz7g8yo5VsYR@osFNnDmQxWe(h@dcMkk} zd*Rcz_U6V{PabqeH*6<2@5DE5XSMBgw2U-&{C6mLXW!eGJ-x5q4)zWV_jXRd86F!L zn;06O9GjY%nteYqIyO4c_n%Pkj}vcqmu9x+hlk&M9PM9W(clxqU;oj;-);8y%+JsK z`VUO_^3tc3kDpgpzkFR~G2x#-{{Hdp>-Y7AZ~LE?et%ovSl##s2>(Ai_#c+9zqh@) z{(b#ld)EKpilu;0|9=Ji|3(3)-?1iwGq0qmen3F`RbSC02eoI#uB4IPQ?9(x0S6nU zYJEv)97>RB*i6+r{K(v0M>WgYLzm!P?xqdCWmKkRAz{RIL^ccIrK=_Zk;*lks>+PR zHQbq3JrQYSt$HNx4p%DpOyYdlC$yot=7^%8fnpXJfcJ9u3Ims6_O|I(4tS~0L*2bzV1zbe*W!=le$G+6}2#8+d%l=Y& z&*V!DkK*Ik64Ow=Zkx4>d|BwgHzrKe>8|>jM@2P{=b9D1KI(G(^TuS-uc)Tw^{~RF zC(HQ7wQDaj@Fp)1vNI3sY-z~^J~m0Q06rUHeUixw`{9}nUWHT8)REipZi>{lyN8rQ#U4(v;C!sL&bj&k9Sk=hA^R?km+8W-Cv z5Jv2stdq)k=qGx3^qp8!uZ%v5m1xCSsXl$@BdZuI@y^uVO7c8}>!nnZRnU{kfKQh; z4kS9Qf~+RPuD5jI-IB$f=6%FR5~S&Ct!4yXt~+LnzQ?nASVz>NdxmqIlwL}-S=ql? 
zx}~z#B^871FH(N~<|UW%%3w~iGWP4RsmO(;;eM@0RjBa~CoZ1W87bo9%G4_2JQT~9 zO^9kT^1O0z<(;c`J^MH&;xYR+H{?>&_iME$`ZtO7(QsuJ|NugzNW zF|U;$t)v|jp1WRQEqQ_}{BvO(dcxkAzvINjvlcB_fYs8C;nyzU8;;B*4UZ^#5m*B( ziB#_?ll|fh`Hk&JR!^x@cus<7`wYXK!^^q+?GFxx=TQPpPUIvk<(lKlhckVdaavWl z5M8%;5t|is$QN)bsUEJPm1VeluUxE^wn&;Hyhvk6ay3gLl!vk159%aoJi1`01k+sD z90yNHVS(U+5H>w5*lbRMZ-x&t=HF zE17{grNM4RZn$SzY3KHM+^ksHah?=KnL!%BP3(A-UuS;J*N!{lAUk0x?~|%R7Zm`g zO#b}t7UPr=>~dErUkU(%ut0DtViYGI6%ZDpb39iBpefi`slpmeMs|X#`8z@%*^NDc zSatf;1WLOLjc0SXWy#HBsOA1za*kJW*L~Y z)XsHbD=K28@vLs(P@E{1BrA+2V=)+u_B{$@P3rBz~B?p^Ro!&!)Cw0xeGquY87;`$q zNN{2d_N%WHslBc$*oas)biMj~)-mMjB@&zN;Qct~#vSb1#7o}*GWGr8vDoRsTE%bE zsE~}jr*+2?G(9upoKpk^6M!~pz92IYed^_Ww`a~%;I=gLS z;EVkA80?J!l}3tD>Vf(5UkiSMEsd}Pkv@N2!LTcWeA<1rNmv@75qW%(TV|#x9EHO`-g1=p(xx>L1ez;VbSUT)r!{WSL{DuAFLIX%b(x zTaV3YYKY)wp*h?;77WEcbcRmxb*pCjcl|kMFBApZ`3jZM zBY`;mqxe{$I?j2%7j(M^b`Sxl^Qco1rgPO0Z&EByr+|QA# zA4Ny3);3O0(tykG?XO`jL(_W$Xw$X1tWuj@$UI=-H}!|Ln7Z@^{k}B zGm)Z>=pTYwKi;7x?a?Bp_RCBwnJT}p{xoO+(%1jAT|27AN&WOw3YrGvdTEPB+hWiV zh^G=a)KcCEW(WWrgMbPJfHJNdJchpF!)PEh?CYX+h?ekmu=C($$P+z7Im|4eZLkP+ z^oPRM!D{{>_ignP{>l+JYiOxy_Br%>S)+{j$X)HT4;>{XiD*v^!y3COwksg*!r^@0f6EsubYyX&l#LAv{Q^ofGP=wnS)vr!o_YL^VlYq zJ!Z37gc?$StINda4EqTUE}g7fyCkURjDz4qNF57wyijCzY*KYTx*6*YpU?OT#2dSt>S<=H|GEVF_F;-ax!e$JYd|^^nxHLr~fep8SM&eGxgi{7#P>>WLNvh2Qfy?*+L33b32AdD3Mo2JyQ384a$!sj~$b%>lxnphL0+;i_i{ zOLl~<0v<=VaVA>*&FG8Mm(K{?#n{Ld#KtWj*p<=Ds%<>>5})bSX!1d;B@mNQb2w00GF!5?idTyh@B=CZvOBBbRRm-YFV zUkWxw#>i7+J-}(*22C(AVgiO#C>N{T4T04OVh#{|GfW^S-F^;=&WOG#0y2;?`OXw{)J_s^j7&IZdcMvy zEGO#3VRRF-d|I>IoRv~3Xc*OKMxMG~JbAxuB5JHR`9VTfY9-pd`ab0_y5Q#hz$)}T zqFL;nTctkmI-OG^zY`6cA{z(cwZwP}_GJEDw4Jb)ta%3I>Ma*lRGQuhM6)2oi zDfSb5lL~FnRy7)h4#@g*bVA=bTRKr702X3Pu#ng^=Br0-2vpfn08nK_*fArnW4zlIt$j|pTw>_15Dao|!ZVzSeVV~- zB1O*zulj1#SUS`+@7A)#q2u#wz@p?Mpqxh>yNI*qu!Xrzn-F=bwS^F z5;T)^T8x-0Zc&qoNPgb;1cA$4JxMGRLarq7f595>XORk5>trNhYW`4zEkMJceRn)? zq*I$E3%mPxaH4_NfiP|W(B1(qynVg*FplGQ$I>mU&Fcow>QZ83)=f4Jo{6y z?u9w{b2MF37&*h_s?U0~)xe4osQp}38Xe_BXq8KZYl~tU z&r7_m=5MRivp1@1RJdMY1Z#YbD!#ARKEhrkp8Cd&qS-Nib^dwzue*Hhcjih&XbD{x%%?2)WG#0P7H9+6)K6KkT`skb{ejI_=KHlfHecFYSu6a~+xB@m6ZL8x z`L2xX#X8c@nX_#j`S%Rxca*|Q2BM}8&+3wDn?ou*LcJU;<;_HS4x_#qe5Z7v{vIF+ zStza#xEF)cf~DWu{FP?ck%B7d(e-cLSoFq&A7E9^&G!e=2M5R#cAT|^mV720Uip0ecnPdw_CUg=dzwLjp;;9tE_y|DnN+&+ecNQ2-2R z<>BE602dX$%X2^ZNj!M$ZU{H-GwmN2JYWEGiW|5L>_&3$NVX$@<<0o-*b`s3av%PL zzTo}rzZFB`d9}WM`}QwH*>>w&jNN6H2)-*hzug`En-h%6P~idUHcjTYD<5K5iun^3 z3QpTT)Q$+d%xCrifw%pgb7+tA04&L>$29h1_|Sm4DiF-gL;tYbpN@6>uqUnxEUyPY=#SQ6HFRUh@8E!uQxBPyhhGd}p?>*l1!w19A>Y2b%kH&dba%fkU*1s4e=wn981%lS3G=a4-1tQryNZt) zW4SAtTjGy2=I47ke9X@k?9xqle@P$ZO~P-M(v8rOHQYl#sz^6JWyw~PG0bk9K@GQL z%OGA%loQuUl20f&6uMHH9j!4*XR}fJUCt&EN$q9?WYK=`*7PTOWWGo$jpn{2#8YW; zu8{AQa0bt$zw8$+5DDYhEYuE0D%X+O#Mf8AiOMu)dmOT5YYyckGrFesdgO{AndQIZCy4$fna8{5j934Uko#AhSX?q-eYX?~-C zX;Zw(8#HDTI~$@~Y8p}cDp8rNH~PxImc#>uyl`KmstfRM1>xhM`6#|1F?*fBRrGO z(z9&j#4|2PPGU_auN5lfN!qN$OPrpW=QXDc?l9^OiJUvv*RpF=dDbr<%VxR5FP%dn zl|J`tHu|;1OIb1%4{$SI#s4QP0X^y6UcfTd7f3zDK@c2Rizt-wL7=sgf_fl1}7E>`W%+Ntui4v>+DI z1Vtr-5T%ZZS%!%L^@X@k#w9j%7zynymW(YUfq(Le0xVU<0}oWB034|2!w-M~={kCc!* zs|C5bE5rTGd}J&mS}1%7CE`s%1~J`K7tnEkN_}ETdbb6fmzu|$Y(l=Qw7rF#_5@X0@W zp4f3&C!d7)Ehu;?h6>hMF@CN$-`M*0QfA_5DaoI49QH9_Zr*j~54E2ehU;`Ok6o}T zaSZJkPUdW&bP3i}W%#j-Z})X+!u-J;E>;SU@}!9qSYe_{K!-r=Zq(~3TTXHUC@&F! 
zei|mjRpmMaM1oH0+?7MPAlx~Lo`}jPbv7PqYeH3!0SzU|8FHQi$U+hg##|C* z0u}fVcr9i;X9NiuupW!msxuA_yD$D>={=c>7$(T!Ii+B93tP(!kC_p{ICW|>kjnFYcf~d zQsshO{IMi=;n?ttJ|_2jnw5-%D>>U&98YE>YFcu}`$#Ljr1cy4`<)O{@6ldo(=*3- zicb_tDRj{KL)OU{C|QOabc{2ez@Nls@rvL(pG+v`5IaRwULj!%4Pr{w;&bm}nUsuO zaVgH*MhGd(LB}XPjf?&Ha;D=yqWKVs*vm>irkg~J%SEggW158T0ignE9Acjb!M~;g z5I(r$$VB}wQp@qU6Z3b4#L(+>vUH(aI0O9>FjM8JMjIr!#c=siVz~p;sJk74r%0IB zan6VXK5fqw#H0gHNH@Dgj1s&0ePJ}CE0L$zJs2y5w}`{h#fTfccer9b$)Kr0wX}ee zghy(|AP9$*oziR-6Q)}JT8F#h!aTD zPWjqkRNjy2ythkW+px@TIj`Om{_a8s7@sUGhsZPOYKQO%HDNV=qhy0ZFpfTSg^qd{ zqJ;@Uv67X9h2pdf=9D5Mnb$sqvLLGqC!+Y=TiE2aqLaSHa^<)n#msAzT7W3-0~%-q zfr){Fj+$n-8BgmxYhF5 zi@~0rx+RdM$YZEMUJ>f2<3-yXiK>X$$p38m6ikhZUZ!dau(>JHK3;kIgk; z>k_l$M5BLRyy5)BuOe}7HB(&i*|Bti@?UI@yiiWj^yrf5c}`usC)fp2d+LnNml>XH zT-aBT28xBY`*JmZ2yHg{7Qy-+rDCQSaaw7rDX8(2VhHqfZo@7~dx9KKBt(1;!hTHU z9!?a;8c!@{PQHh%ah@aG`2&3EW{Z>H(X|5jBG<}$q_vlu6C1Ma{feK940hgsQU)r< zUMY~(o!YAWn{8hr+8Y4;ojVe}4!_WWi<&H4`K#oFj#?F-nO#K)t{A|7OAD(z~ zh7n;jCcx+6_6GF#6kS4Cz?us8o{Kqzj^!OON+-pzJ0IOqm$K!k2$ZU&42%9nl8%#L z$DNPV2*v$55qG6j(UhYiV_Hf*`-p=McaHJM~)dqj6XumHT~6%!ABF#i;Q}7 zVQYvG1k@4(A+P5J(QuaRF(w}IN70avkMF{A@Rq{S{J(lUHeh(SwuB>9Fen>&RQ7O^ zz*%Q)bbd6^N5+4*f*=Gh(G!2L&J6DBB@lEDTZxhwh7N^yui+B{R_Ikv^2Ar@-<{*# z$RKfDM1{S0v_D*Eo-Q&8V%xYaQI)5zh#)ee#(u z8lSCRtN%04s>DT~Z@#TMD4SYzm#|`i-pIHzh&|qU%;pF9<|$k}B92gX3?}uaWGkvs zN<1SBe`_X#=cKF()j-9Wc@Q%P<;$?VeLmj13Zq+{S>N7wh+(vo1ogd_S8H2!nK#kH zhi0>Grm!g5i#sx`*58ObqJBbZF{ma83MVu0}F+yDlLfo;m zxP9OFH4#LrIosulfiVm*`U^ZGvGBo@<9k-zY`F<0ceL40*Rx*|LFXbkMAE@Z2@p-& z4`Erigjt%TQ3iu%*7IV=L&|<7iaj6bc9*iP1BvZ7ir%8>0n!q;0FF@6ztcskBlO-X z4$L4u%<7eR!~s<7CFlh^$-Ej6ehWEVYt~GLi7|)FNJkHgJ~kt8ZX1u>CMt5rCb$z+ zv+7`5?uqUcXsV<}NrJNdXfIo8@;7`U3;&h7^{7|pg&2_rsiPSO-h{HGiCv;7aTFtT z!iH*ZHhVPND3+btR4Ek4UNN3LehE>N9Ox;9B6l648zt1{WT@b~s&&n=6%*0e_{)QF?k^L~qHlXu z7^A zBf-V74)%EaQIgm6ZD%$|Mpk30iz9kGnhX`isvj7|ius%@$Q8380h&i>>FT4(>n>N> zXE1f6gYHM|>uA>diB!bsL+Mu(e=|KaO^n5769tb?oHDAIf;EEfew{g}ZKXfhH&YN* z7ws8q2%dcxJ9|GA5;o={h6eN!PFdCsK|-Mz#-tb)V3tlQ&z*T0A=e!74sKc3))%<`M+F?zfrssUJTg0ALF6uQ%UkROkkTSbno5)c+F?lBL zhS=F$ewrR52GR)f$_IG-XFfQLt-S>8OM~*C+V}REdG&zIGB^zD=yt=?dMtN*1qu}c zd99zJG}2*m&TqbgnZc(%Y(vd&Iehh$+B7-4V=~X@=$4m1yO*u@{MYbr6IcQ4bj$K& z;r@H!5`g)ZexPrc?NVN&3B6|y{iM$dZ5)GppR&P%kaY)h-yPyu8)k3zv}LfnIse(8 z>ht^`7nFD0Q_P_)sbUXbPWgQWS6Q>FGfrNO^Brg4wGO9MmnS#E73~{=o^AGC2Y6=o zdoc<;IF_DgLUl-8_`2`GQF5wkdG=F1SU_ujni#~O*-(z!a#3x304SDjJ2CzJ4BYYUS(&@bRvqs4z`$VO z%vSM|yO#IFQ$X0n>^Ye#d=SHqrYUELJ;=2c#R50EU_KA*^2JYo)O&R_&FLNjgr~rr zyl2Y!X>b`Y5DjGZi2hq?Ny7CUCPPegaPIhlzL`BEMqU47+H49S^J#V>)f1+ z&7C)1pwIJ#XZXAWfo;=oo+xx`vwsZPolxV4sv5zwlz5-OgK6oRyrzoO%Wq zE5}nGkMSsr$>H8$=b6Xc1_E!gvLVoYQ@wmfT;KF^%+!f8ZbkX4tQV%U=2`Ygf-b&UN&+l-t{h%fSz#ga+DBsMQL%@P$GMA*XBwe)LCSx>tLFy^f_B zCpa|UmNMLsq`E5QUXk&MYfINNku4crHf?o9ayhe0Y8`E>?ilO8ipg2%DCnzM-?$60 zCLS=~Q{}2PxLN0)*}Cp4d7jRBMca_~_gXuUwk{cKcgqe``9*I1I-f&}M?c`k$wHptKY0pv1<_gmP!RACzx=GQJAEs8Q9#}vdqlVp_F@zsQDmz^ z&WQ+yvREJfrZi{F<+)8>h!N(xhC)Nk;M+#?Fjb+W-aPje__3Xc-HW-=DV6S%4~DnW z0_5AV&zx{x@@Th32^;QdSFpj0QyaHj6nYe!TFC?FJ9NTO{`uSfg`B`O9_B@@z=)1; zjRQT-x^PMNo`CfOk3kuz?fIAp=*2@wmOV6NXwSy;hT8;RKLj3im-+^t{}Q^Jx;rJ! 
zXs}v$@s##2p7L%)+#FOtCeI6_x??CaGc-SV4tgsb!DK`InIyx{SIgw`#hg7|Vr2#Q z+AX`fw1?O#A8b9MH2K15+A5m^y8y+Mr4bPC0U2UJ*kegJtM-Wd z01Gu2_2iPSXQZvpdRNDLz-hr z`(ZJc)AtF2&Z_Bo1H^gvk^M~6y%E&OPL7Wzon*YB73ZGP@vB?69ERs}ED(;c(w=tM zMdS`Ij_12gTAD|_E+R8&`yZySPVc*%!Psw}aGq0Blktn~vcg3*n%;A>HX_ZZWlomXk`cgQ6S_y{4jUWdO3F3@9F0YJUC4OXaL>@pj|2O(95t8 z{ziC`SF~~SRQ@`~-oF>0thNI3_W=2m{A1ca%soN&3HCo2!VT)6f~c)Y7ZNX;`&DNy z%=C{v*%ewDt!NZfHRoHb?ml1SkTkIn-m`0~s@5{&UAU8b7}Ib@S{&KK(Dsszfk#M4 zcl^M}r3T^C(p59KLqV2pXV>CJvaOPfaPF5g=W&b~%VxEr+JH{OpEAMQx6~AA8(>no z3r)z4me|i5&L&=uPI(?VU%5s{V2OWf0G=M0aBU8gRbJ4KyL=60@!b4pYkTBG)PiUU z6qOh~b!|RxU&S-!h@NQOHJuRs`qqw4)#Bz|ktJbHsjlR&@tyady7?PtW(Dn4-1|7A zgY#SX(zPMk%PGZl{u}4<>p7R}`v5?|eUvGtykXX(a9N8rH2Hnq56(ZAKTPCoHdMUL zD;^Ph$qy zqf)nQ&a^urjK6#@!o!gQC#hX{?S6b=89V64;@2@9qwGThO^ivNIK>g0+DEzG?lyrG zC*%Y=B+Nfv2`TiC9Y&-t(+71^W0@s(y1!TW)fMgv-0THhd5C-VhfA3AhrlKcvC>`` zT&I}-g+Sb*)dnhCkUCsqNP+R#iqXQ%28+VXoSDMaLBYp2oNW-|+)vL_OBvpeP6Vsa z2)|qvPWz>os^>=xJ+cIab`~-=UQ&}lvnYhEoS8r0&G!qkTFx80`7PdVeeV)J2z8v- zc-8qf)l~7u^bw*^{MQ5r^jKi<>AT;5=3}_U(ZT2I^76c}2e3qSJ zd0V?_xl(UUkoAa$vH$V=gu1g|U;_(CJ+F=Q`kw>U0h-=3ZpZP_!+N3hm))s~$`G&* z%l`TH6NfV$(obdw_ZKSCMlLafdiR%aI+L{3J;SLhEWTL1j7vjI?pa>Sx4lo+G__oy zJr$Ih1_T!nTv8WoCRu-%^4;dyYyI>NW??<)0fm;c4eK^OvvGglbiBa)V5CQh`mVvR z99t>tw-~OGLvl!VNo~p5aP$5tx70*gN>8(Qy49GS1Ae5SO_ho-K|JNQpiEwrBX;dS z$CC%(&u2{(qVWYOe&Rd67mpwpUX{vxjNy6VtQ%j_ii-|p`O(A{F6)p0>~8UTS@xwP zpm5?g{%o-MHe`7KNnyQhVv(WGC!A??)x!Ws5w5kAumYg9ulU41Gv4Cp;(o~jnxnIE z`pl7jDK%%bK<{0_so*tMC3{dQR=SlWx6mi0G4UL0jWQ&uZBaiYo8+5b&^L5@(I9t` zjOq*$tHi!W%{J5`)`#ZXX92vT^w%owd^F}%b51}3B((V{UG-dzSP`Tn)m8j;RyB9l^!~$T?)`pLMcWh|SD1y_Ji(F7qnHgMiro!%4kPP_xPC=v~L`x2UInJhhz( zo+jB{=*&fFz9JT}w#^E45;Lzc@gDy9^k=1{r&4TJ5 zTPcL1YyJ9joM&8MS-un%+dK!-yy3jpHiCqjry62`%G) zenp^~RD5&+v8UZ)?fPtix5jHg*ZD4oMNNc6vAu~dKpvG%Q!@SoLAZ=otWRP=ysRF> z(^jK#^Kco=gb7la?MLC~S%`~uLwZ*H;Xzsr)WhZj247>kO29^Bj;kUG-N&O-fI8A+ z1|o6#_^Cs|dRgKE^%X>YFAj3;^=FvBBKHyvGlT4r1r)vis2OtsJ!RSYhn`L8r;YUDS0;NSz2)RR&Gt@3W`( zJw_z>RYDOgeX-!JZD;Q+KG(>&8nTilQn60F8!WTgv@pJx)gNDL(SP^YZd43GraI>!o5I$5c(fUS*e~ zH^Sx_R>fc7cwIbf2hX!G_K>Rnnz`|A6z+)X6T0(isZbT%`S_M( z(U+Jad&u1pR-RF%erW)$Tdgt#SAPCD8ib;akG+TMNm!gM+kkFI8Hur~M*xX@E(L1{ z*@#!d0(1Zi_}Fyqv#5*>8Y;l39%)TCXBAd~Ta?-|2&lwMvYL@5*G6So;${ zpYgkfpm}bH+N8!kuznZ~kzej>spyLS&BbJ9xoUJ_^0a%a(L8~>WtwyKF4W1HHHRHW z2G9O;MF_zh@fjcKOe^s!f6zDP=UI^ObK}Xq`qfd3ja{Iv)R1G}w&0l;N1g}jAlI#( z_46Kl2;u`7tIxWK0rq^8{Ck|4M7_!W`qKx8b~Lx%SlG2gcDGtBu;5Ihoz4;8em&J{ z$&r zAg}btSjEld58%Oai8S}IQLs`}O8fiTA0(%daOnH~UGFukzbg9Oya{WD0EVj$q&o3+ z8qH*j71w^h-MwAsz5UHCl@9`>N`GJcx()*3H|!6;d-e0{a|bgo$SdF#w|>tW-T2h^ z{qqMeo*w~#w)2niJmuR06&VQwpAI1kq^aj8b7|icR{c#ri;3RqC>TBDVmNEJr*2dG^0WfeSPZI0P~Jm>3Z4?u;bXM>#Qd z2NXQ@0YE!?6&b2);G$GoaP;L7&Y>gP6D3g_nIWZZkW@g7&OUz79)eq<;a>VE_K&NC z4!9+~GKR+(hfK|p$qV~*TQsBIZ<&D~g2)na&_w9?!2)+;p*!vw+>%cx`$bufys%r5 z)CTfwg?aQzM(vwZB`-Bga1iPw19o$WD!NS-a-Z|y_mBBFu1wfXa?N`;yR4B|^7(F6 zgu~{NwLkn{7jgp|hTmsRqx}LCGpa54KGcAkPf?!p%~QP!8fvFsX(q5p@nVffV2=Tg zx(Uy(;Dma~38c+{J0B#Ot2j5wWw_%=$U%B8htU6k;eFddUK}p~NeVcws6GPmZP=rN zjp;e+*=zzq)x*ADHDXtZTAJ)_RE_nm}fx{<0*v`&yniVMQjx0v%3V^r?E^&n)KVDU8&CX zG2@Z$@eoa-s144BaMb7QkaLXQ8U`Rq{8>hT2AKB`wUmgl8M89BAEF&;71V#( z>zUTQV+k_jem$V(#BzUQXg`AV;Mt1bt^hY`!K1L~d#%n!7Z-p?m$46i{klAH-qwph z@HwmI#fA;s)$`(2bM(~>jMejW{ds!14_C)}XZf0){D4bkpXh6aeU_xTE>8F!kB16r zJG+(p^r#YMIz-=CqBsA*(wIRe(e&FOR#b-GZ!U!Fsz;Id3SFrrh90SJWwj(wk;_(R zL`ROmOVp>W2ORkv?vqWv9j@jAy$f?N_w2qTE0|?AJ#YU=3{(-@eeTs`H&6L*7{i=I z<)}BEjTga>5lSz=QVAJ@!HW|+9=)knM^xA(MUt#iWrW>->4Y{H`W*{O=eHjh<5p_+ae`(B$xS!z9YwqKz zAA9(~x@ga06$6O~vF~$y^v+EvAurFl0oWrh{;FAI)At!0b7b2ZW@v7ep 
zd*?jM9v%DioYiI@TeKS6wkF~*Yi3Lmr#=2v0d94q&#YR5GZU{Gl`LLxbzd5}lLZ=~ zJRrZ=sGKV<0wfy!bmb#n5tK(7r0odt5MdFd+dFXZAct`lGEuclPA>Q#ne>e6NA%W5 z%++l^oG;=)oE|aESGZ##1r|7Y z(wk9=&v|i*TA5Xu|Ck#T058-5uY2aDFvYW*WEA`J^o--}7WJ_w6>V@}k}+z1c^~GN z&~q`yQ;vcFT>9r2J9e#kw&HrUj*>rqZ3@GPXK94fj07fPN?eWDsagjl57Wa`mlK@o zMx#3{zTo=|A6cw=XE_C<^(P9Bk1M?mZ%`fP$B`WsyJ*QVLVLi8VS z$!3en%wh(CYjpFaBZW(030LJ?wQOy~d1>kxu_~?SViv`yrHydG+tn4iP7RXBMJ?ug z7bvKl%JSz^wp`>3wmvI|vN^{Z*DABr_+m|I)z?^SOW9*%oMzJ{(Jo=Hil}>?;c7_w zWWKvqt?Kmhg4GXp>*qIT?48`@`4$rL%Ekr=seyPf{blf*7naLng~7CWf2#5B)>Rb? z2?^kL^ZlhS+&}NO`omx+?y`@ zBQe@$?Xg)xKGvG)?{5EsjYQ^9w5=JpmKmG-=)O6A8z8pUdu&G-8hkY)H3=oY)$3!k znzdyA{5PeVzjtlOc-`kWT=jl0yKHNN zCt3%cw+SoW=4kC|?sOHOEi-3cZ^tvmlQ(UQA7<#bo?Vp%_?j+n`kTSn{MGkiy3Gq3 zwoS+Bw}diefeWQa_=@qtLU~jk|HcyM+_&*!d))4C-OXQNb|7UWef!`wyc=D4l#O+> zl}CLrZ}zAiuWOP8^hPt_K{e*>!nF>!X+`wE5%0R5tP6+7h!`KNkXo7TT|+9No7a?GE8NE=C(Acw5| zx7p|?+W6VEB%6M*fqt`&?b9K4WSh99Q+!E6V$^shC#FZ8@eSn#LPDKcyU`SNg##^3_6a%%BB+^ees}fJSifEIKZt z3hM<@$BB+V+#SCA%9gzjjh*{HOV_ppi7N+12KHi}=SS=iEnh7Wz|iD>%PJ>8rp^>& z?ddz!OrK`H9=<10w*R^5i>(F;05jRGulHK{(!kbxtV~;KEC7w*>sCr%bmr(Ioc8i< zRP|%sxFL3qCrB-VzEAr*zOn}&!D(J~VB%^|<3CQfHY1&~I`WTpqlN$wlnu7GH&y{4 z_L)`d*%%xRAh5Bc^2$7Y>#Yb40=TQs+Llj)Wr;vt5*y?<8)S|tvd;$BLk)Gcy3RHA zlm95{OmvbD1hWEmHoC7n##C9pojBS)5w~FAEN_0S;0Ypt0D|yYsQLZ5F`2hE*Ew!0 zSbrw%(R#S(?|x;RJrpU1$;15YpOYXKIQC`5m?s~d7{3nyI9Q>j{zWp@4W~b zWQ2{u0D#}`9qUu5=OCm-x^l6KSHS~n$`G+tt$$Sf^e4|^ame%_nDS<{)jr4UGe9v@ zR*}v;B)SsoW*(oMPor;h)gaPf_FR200Q~!pb+8DO!ZDq{F|s?`N@>rr+uwQ}Txf1suiU%x zkUrF~*NP&-@;2iDd+ytE6tK35f@_eOL^}Oj1>B)L*1N(eus(RFRiwb8DdaXvbMVB- z@qxMQA8{2Iy%SPha2rnrD4jjJE_j|szIxF!)hR2U8w?I|%2)_E?p34UzUk;o7a+m$ zMADYl9m&t0xw_vKk%4GZ34_JPT~F3fwI4rm7Ym@9teeMa(H_RfRj;@ve%pL9akAe1 zgmZUTBNTUa`$s*|So=;@)sddectAha+wJ2?k1DUBqE3>a)M2#%GgPFnNagkz|4xv~bD8`Gb7MpA z{_CnSdT*5<#GXN&N$g?X!+Q3FvY_K@&cM$Y(9hgf0sYRJ*V+t~&NNf>8y8A5^C&?V zGSJihwZ_k^hB}R}rjX!fI4ctHPEhgbc83<3>Hi|HFh|9Gg`i&==JHd&lvad|MY&{# zd6wDVCOy!1`yq|d37?tMNALFW860(cO_-&yWI+F>&3_v%)kV*ca!{_XLxnmiV61D& zdL)j5rs@5$>1VCT7V&&qmv&sq)&a~L5nogYLDBNCuEpyeTVN=QHo*09W)Ubyh1R5O;X;Jl0 zK!yr)NvAEan6bh^MFhV_6pJlwrURwkx5=sM*VM&Z?FnIK5(Oj(1QPIS9to?mU z+mDzM3-HogjQOKhgE4A+O-^MdW&8F@*PARtrM<+S7vZOJ>YKic1b`Cg5@Z=AL{#94 zvX@DPw%Cfc)ov&kRfjhi;6YgxzWWyQxK8)rkVf#P#2*ZP80`vyZA?T#>au8FAL&qL3DmrzRti9 z6jv|6uD0eRBv)6UNGXM;%xP5*sw7Rz%2vzEGbx}T4|QNhC%G3&j{;;&r|c!z+&&(` z!eP*ED7CZ( z`l!h$m}U?dX>P`#JQL=cx9#vU=M|u{e#4Q3*)kP{Tw?L^5S2En%tysQUV{~k72sZ)t!&zPsKE|0U9Yw~O^XqtP`l)wTu^X02{Qb7Na0PZR}J*Zj$ zTg|GL*EUBWd!C}TM{2aj=iyhw&!BeuCt40a41-G&nYN0RRHw6=xv z_x3Eo60-W$V~~aBH-)z5*<{oLr^cjxKUh*~{ zJ$9_#$w1NC=ftI~x2*H_l}0p){pDT9uAw1_)dJSZH_JnSXMsf3B03iVu$zy-*q8A5RDH28a$5QS+V3o>kTv!7>r^0sTtW z?{@00&79J)9n2i#N31(*CzbfTW5eks?2d8LmGTHq*wZBbnf9+aruEJApAo?A@dXQS4FxJHPhcMKIQN4o&J;I!JbhGug4Ek>R&)FsI zf`k}7^)amcbZ+SYkf-;PWqY!Oe$42SzHWn9E`!lArLIc4L2qp>(;>v7mVucz{*&F5 z8kLyEh=z4(NDB&>&==aj?}50BAZBwKVo5^jUfmOW;`JT?z zV6PM~%=yVt^*4z-j}*_jx$qw1Y3G{x+Ful*&>wf!2hez-ASD>D3}?tGI8JZIlG&QD z3ZCps)$12k4;$e354&h7$|7ukylB-e5>2F2xpoeefFbu+pDS`ANajWYTDw)^p?3L9QwroV$30ym4o5v{M^4-~0qm=dh^VV!V3F9{Is6b%&F`8q(X+?p*wVyB4n)(dS#3aM+;96_%#QoWS|F+p0;bxhp zOR{Q^(}ve5IAafy87?@HCW`RtU;jQN51YR!UQA#q)-ue zVjskb!F=-d9(vc-6l5Wle9jp5kdA$2dND|-uqE$nu-+z*SOQm~aD{1jdirlsijo>w z(We?b;0GPqfVhJ}+$*s*lWG>spYIYXPr3bo3)Sc1Q1w1;;~cqlsq<+OeNCwFkPd8! 
z{;i&)lXOKzd1*b6h}<%&eArcWYf^2JK&jv=YIUGh?krq|oH=gx>nqvlEMpOK_oAsM<$gsYy@_(l>!6BXC^XdI4FI!lW=A;18E>Swq* ztaoLTOJxL@&a~yKw@YS+I9T5Bn(Rvo*{eNH(-K)w#h+s*_rH$?7wdj)HgA$@9*P3n zDQYH>pm7|VqFrrO&1X(Gb4QwNgwtpbQl2%MD9F=3=$-#*x9-;+x?586q0zb*SZb?M zS4CMFZcj7x?RN@@Qu#(x^%3JtKt`rwoKLmUcd3y-T~(Hnrp6vO&bL~f+5$B{f^{Q7 z!+iD5TIx4i>6irAmo1zr<2}q^j}u zv)Ltq`BYFXq1^CCi^ZSM7JDxeis+kf(y192vU3^#4)m!bwbbzLK&PE7b#KIAnG zcN3tcW^1p4w62Y79=vK38lyfh($WTLw|>+>PGk5So!e1ZEe@uQqtnm9s&X(Aj*ftg z%~I2vtky=7u>?AC@f}8$qX1g#aD0!$sV^3N)mZ8m_-_)HCq6S$Xwy<%bqK9Z0QtCy z;LB3PHNJ`xPbVbLd7*FW?pl9ltHaALvUSsIT`p`O9-^y!B027sD)&g0KM9n2r1A{` zsG9&e&&J&kbirEF@#%EUrzk z(~3Z{P0){EZAoyD`PZO7GF+f##zuz>|EachPBy{rcrS4q*5hk%;Bjvon@wBVHzl@d zn|uw7%m~?uU-guOThGN^E6|Z}KtBp_1Fwnu@y?TEO(@BCFN`Q5Z%%!zpMHFc5k-gE z7ILXQEVU4etj5-Ww8}Y{7Q9y1 zjI9IHnxDREetsOTO(sfgu+XtEg$|ZBT+1*@!4Tw8Ly9^H)M{la)C+V^3Zm6&qCaj( zJRwr0gpjD;qMe?^OsK}LwN*SpQ++hbyzrQ{#r9{>w1(OAn%F06>REAzp|xp4@11uU zKz#Do6lsf7lVpoys{K)6!#H9aOmtI!$^}3PUCUO0-~(V{(U=)ueV5j(iphf|=tKQg zt%OxniXvTUmSkr4DdD)f!ISEQ^rETh9L+oi#_nPH4T1e~1 zowls0kZ6LH07H+)+HPK=0zdOWkXI+Ri#hMXjk}+x?9jw|A?XR1f|b@>)PAO~(?JUv zpN2fDeKAf3d&VX`jM5P;s`Q1ZoZW;IbF%0R`F4xVL%L4yv`HJt8GJ3W+E8Rn>(qI*oia=M z{=xQySrj&7FE<~fXg00E-t6<3uJj@o(m~69b(LTx^-_vb>SsY*NN8JO*o+`IG7Bv< z)saXc9q;zO`RM9GG85@z96G6mFdk@iO*8%kV2#+3YQMKkEE!QA?S31wM^<2ZmVNfjl8u4 zGpJPBx0w_=sycg2Cys#OR;$~eTho)2MRESih(SPd1W{LLaE2qDEKuU`G*o<=r>5AZKgK zi|9*yF62?XO9u%n=~ypeLHsn2yS7pI?o(M`u!lo&?kfflqLi$zvIfXl!|ERA)QVr< zDlT{&dxv}N%q`aaQE~dmfWcj7$+u#aa>)?cZ5YQ=Sg)k9KVtu8D^mTfQpgZl z$6k7&4ZuE%f>ec>%?c{l>0#~J!B?L>@1Q9;x}ySSTtzWxd7pE74Wxeu)?V;9cHbv< z)QO|jga$gso>eEJ?^{>X;9t`q=0NzTDMe9`l1%i?%i6~25pCC^C>-?mwwM9``L$Vyp>EwHLL_Kd9hK>Gukeq z^0D|f*XY)-Cm~5r>8)BsThbNYNuuNP=g;UxWCzHr`x-7RrJn9?(+8Bh_gcfcjNB?| z4dk=e_A8k%uh`f@tbu}~%lWVEAZ`0)*IpJBA2>i^PeV)`Dadfj&u8b>%|OFXG;4&k zOhGRc^LNAaTEa(>>UWXBS{{bg7^_g9?oOS8@J)}%<*ttVwK{cXfX=tSHc^Ss`VOEz<}Fi@|$RJM`w>{suX z*ZF<6*Vk#)>2_)Btu#k6Z*7u)vQBh-_2zB%=Jp%Q`Rycv--Uu(VhuUHvRsUokggn} zD^YrM)XDiqC(UB_%^#XvWBCVD zbUnhXUr^$<8&duvvWKHm`cuD+40)w)hWp-SNlK$ZB+7vp|c@Sdq(LwkR6o|7w{ zM*Na{Yav)&ZdLmEnD~2C!va_f^8QIvopAJ1IZ9GlO%8C$inn2asLp|DNB~AY%pDm8U z;$-^0@0@0a^?g(h$&#znv~H#`HM$fug*JZkq0>cebd*Wkrpy|>E)cW~)jf2K>=uJtogH@&Hns*r)Gd-SI;(1)lk!iwWLAW7V-$VG)F8nSW- zwk7Q4mgse{U=`T$?OYIKYx$}iqCv~A-WWac7(=Faik1X|JaM{Ce#Ke}Kg_(!7SfVX zib__uJ_OufLkl>~*9V|;igRaQP4hlYG2mfWzx=TH=j*CfrV(y3_JsS4_GkGn-MXK# zSw|&4lsDfw0~}0^2RvE|Ln|dBory6bkRA z!$@JA44E~dN}w;rYM`aQ|Y*AMx~`8`YB!W)9cxT?gnz zUg!L)0$xC^mgJ(e9siFCI9K0VLGhR|8UV5u*&ZCgqruC8!RBC)K)l;NCLm(#uZ_K2 z5(Use2$edwAMy(LaWX*Q&3nD}kspMHhGZ++Tz+&ZxH0TT5kB!eO4lW6@#iK;ZEUCiUIBN(?NFGzKmbUrZVHrEVj4mKy3*dO=755(daV{^$^F|>i!p%o zrW}8s?i8+&L{n5>?cQu=u>9oehsRCf*ALxeo-yCnw)5}juOGFOWQC%**Y%MfHw$0( z7i+IyH5OEHe)Cq=E8+@RUEeW(6f-{Ng)@1iNhQ=+-^FVJd0zWIL*w^9;B2cbRb|sx zjQ!1Y!tm&nwokWi{P!rwJC zYJ7jyk(E=dX&6(K{Z;v26>wSpRI$7QUVB($dmC2$z5*GB=lsA|LQY4f!qh{4^W0G& zD2az3jto>&4jp3g)U^M+so1aqwS8v8l8$CBjv1;Nb0qFuVchA2t9K03G`6H|=K=&e zA;YrL7KlMGcJlC{$UwjcN=v02dmA}J3(_`UdMG;{4mX6TYYlwgii@$D2|jar*WH~L z&mG!dZ(kU@T!B|ur61o$SmKiOJrz6EY2mOlb9ixSwY`u>k72vrIQJ!$G9NTnM_-g|JA z4>IM56d0bTd1rSSb|}$8`L&j)wz&dV9@e2~F_O}UZoKqjgQu67X}+f)aiGk4RFr^I z8{RJ%Q2%Dk!^l@3g6^)qwbCywyqm`xRO~jZ9>W;*p zzLwA*GhZie#UI(5XqS0=@6@5cI=HeBRDgYf_ut5E_51YSDZ{e~vA*yyUbUDA`O4v_ z7diEpx^;}Es@sRLm|l_ZvVk6p=zeV8i#<(fWaQZmZpy?r#%{{+LQMZ4{2lQa5}sxe z&>OLP4I=CF-MzC71s@Gm1}G(7;lESgHEfhO{MfGos9L~UCOK>*V+fIQp?2U?I=-A^ zyMX#aCmhUr7(kAW703?t#qt;Fsqq1rz^&v8snF}5ql-{hu((sKeh|~2qAn?oYM>It1P^N7MNEi@u9R=@M*i>wos5e{ zvs0#MMS-r#%vu$;V2XB#T;M>N(QtXh(8XEiGv++V@4ejgLdA$}lhNpy<^i&NAAmJl 
zjoF@n__@jsV;}lS;{pVZF!92qgr+9l6h_}Ov?@Ub3)ei_@3u)@G|e^(!zehnb{F?pz7bMhedf~kW@ zZr)LJq9(cOyMAyc+aUz)Ny%R;>WNGSolz%P`F6#7K#Ot^9*v0=46+y}FC0dfFB-$tw&&sQcaoz|Uv>F{DlaXR{q zHiuZCo-|P!{bp^A!qaV*L(QC*7c6wNk=5#}zB}f|i3+|dmigxYj)s)_?Q}+1js3>nJj?(3& z;5?O1!Oh(bX=Kq5;Bdd0GaXN_Sd*Nu?qio9JDUzsXdT!%Fq+?WJDnUoD%!T%DqOj; zg4|n`NS_rw<*@ayAObxJK4+e;sMcxo8v4`Bp7Lr6?}Yx&WvB_Hyn8DXp}*RXHJ@g) zYIoKJYW>(uKhdsPrtS6NM4dQ zi#PM_y}pw~n|8xV`}u#$AdPqZdEbtOJB}B%S1H9Gl}(()PR>q9Ij2Z&E7m))^vrthFK+T8HbYRd}^hti-d{8^HIE^VgnTD4DhH zb9hi5y7v5Zl7ii}?tA48ZiXSvg$!Zx$PRB$%LZ}&6TGY4qfu`1r9jlSIYXVobc%gH zDbH-j?m;IPghR@+X_fghEnfOn^{YP0maJ}_mtARO>cX+1j74HvTf_wz8N{q|D8S9qo~r(%U4Q%(ifGtV}kULhx1=78h;0EjFt{B0R49lyU>^}`S%(pp4X9)jj#8X-gll|8*n6ME{`~9pVx4KQCg_t zn%akqslQqMW5o2TcEXoj?5)6(7-XHctlMJ9ZdOO+-TIpFNv+1Vbb9FZHOTj(m~F0? zQawFiTkBL$+_U4K7sh0Z@==@{pY{}FPeG63;(G zXf|*TiV&c9jxLf*uC&X?zy)z!L0pJl1J`tjYd*)dK;~E)=dAV0v5C*IE68zZ$Z;CV zSvQxn9?7E{^IW}n?(sa&0-kq#sF832H z=0zFjy<3Y2Xu!Ob`n}G;%YN<2fOB?trhCl;Z~`V;HFXnaNm+-P4@un)I}S?BcL<{e z#dsCuuHfFVLH6~SnlHOUEH@N2D4TCyuS;A19zZ;YVZ%h}=xODm3iJ>KQwE74#1&ky z=k@WCAuEVNYC+&88#rDdeVoxe6m+#vlK&lpC!brA0gH z6xBb8e#HzfUPN?Iouo`;fMsBxTI5XV^Vc;fo8`0XE9hkRC+B)-*)muHnCb1T9edCQ3USfQwZCc$p4KYA6dZdbRem;F6du1B+|2w_fzH3U zA$>97o_&&HDmoQNcm_p8NsEz7i1*vewM6OptO^Kd?7M)x!W(B90qN|ms_>s>9%HZlK(z5b1D(Vsl876f(2SYT>o;6TB+-99| zp~C0+1bQWeDXfEiT+GxIg*$%d_v$;G?+>W|x+nM~DC^y+z-r}fWamC$&!d42-WSt! z0GIsD4)@(RBotuavF8=AsFgQ91QK%11Sqn(&c<`^>)Jhfx@eKESoD z(`^7q8=#^}A1No9_%1L#6VJRP9 zJNzO3uJOg)Ahby^s=yJo3_+)T@R}NJ*kN+neJ-pv;qt`QkUZ_ugW*^tx=u<9p`0|`xoMNM}uHx1uyy7!^!f#Q`hlPeE> zt~^e-GEsEp>E$cW?_PQN;mVZa)oGKfuYIn*O}P55=<55+S3lmpy71xZKZ@6uOs;+L zx%M^T+V`SsKQ3SUb@$qz57+)G3PGkqh_4WqC`8ChfsI0?VWILzp^B2M8Ee|C=G%-< zY}P1l)@*Fn9&Xn8*sQD6LN;yD_iZsqY%wZsS>4!TI^1IZvBg5E)zY+et#7MMVyj(o zt3zX}({Ss$kFD#K+UTZjuD)&ViEW<6ZQhM-zQb+)AKNx4wFjEEZ}gR~7`JaOZr{?_ z9zNV2`LTW5b**7RBdf8sUJ7aU>2NGoG)n2%`K$Gk5NtNuv1_=sK?vUK)R}&!HFFre zCI!5P-Ld~y%h?>oH6pON5Nt?Qgju2+q$sX-YXiG$?*VX`sLLezI||&i+I(?D>sXPG}}aEK?GZ=Y=!j zJ;V|Du!6igBQY5fCVuSP=ZmFggAZQ6ZapF#UhGU$k|q*pO4^;&RC;)aOI}k3dQ0) zryHu((1WI(yXOZCfdNw?YC$T2MRjVLDP}bZ2~9m~gg2GP9PVRon282>Rl*&9*G;96 zT`w^AgoCm(L;MoqT;q*=C&i^JQl|l@qw+av3 zZKx9Zu!d#B;KLswJ{W~E0n|m>`TlxechhiL#C4g{fDv$SG~%wpJmg^S2>K)Hsp-8n zqI)m=x(_!hxMtsjengeAM#fP04r)V%&iC<3$_-S-C1=H9%L z`u~%jMbMDzSrnqWoiXvh>siLecD8oTYxMtTJuAd{7t?+{!}UJ^t)Rf5e+RUJ8#7A9_~ac|qB^y8i@d)m=K(*;(Fj z`9B+49Zl^$*SdO}|J~5K+Hkh2;Xe$m!TNt0T4PP8{$0=dAT0Y|&{?DX-T#4RE#A5O zFEs0hxTpK}|Ac0pyCwb~-K?gu@iYD3Pl~@c-CL=>^S$%YkCw-aH{QOx`R@I#xeuMv zng6w+_2~cJ(7ONQ?Te-9NB@bErCd0zWslG$@Q$)|69-c|DtCd|8I=lS5#Bm+XniTN@9Smqg=#8nSiJ{1`oUOc;4)n}BpH1NUl#1duDauhe)e@><3=KG4| z-m3#v%wJh`+NZp+Pjq_e&xgxnA6`9jA#X`sZ&19cYIJGX!Rezg!>)72WpkePi&1~s zZkJ(2iR&Dnz-an0q~J##rDA*G%Q#+&_i31&Axpk7mNl?8trHhzcNzjCj8?>54n zF5hZ5iwBh|Lq@*E+giFuy|rPAKGcc?MVkACmR{_hbknWMpztj03vZ2+{zUfPvDPvFos{n{d~(_0 z)LC3>_w%4bQ&fY)u5D5J)`qdklQDb#bQ|$q6|X;!&HPL?FyCQ%aC18^zTw*|AK+oq z)WXu}F>3!J%WB4dw+~Vmn;hFR$hOqFrFcrw(#PU#P0WdYZ~xi;vXgx!w}UH=`2PH` z^U!DNd#uuBsZYByANC6{Yo|><2XZd1X^A|GHwlT7(IcW}4CJOo+ zbIay%$&KW0zhJDV55|vsJzBTByzO9#y&Km&V&QSsizLRw#05d@rWeV@rE+xVTQBWM zW>k;r!BK2l1BiuApqr_c!jng8|D)r;`EKy7wX zh>nM-DYEh{+Pd%RIprUIbt5D|7O}FTKS%%i7~CdizNnn)0dw|1`<vB+iN*tvY!OuNxe$QXQxxjSkHJ(tawM0n#nZn35;J_To8zRf;A8WU zPSo1Z&GLlJQjNfeuOkzBjIrRo?DRsRjPbTWb#|T=)E^&snjic!6rPUiR+cq={KMfx z*ET&#b4EueHhTb&D*YE3Bm`lg``sq36SaG38ZXM}89D$Hp8NUYqWcH*l(C%j@dyC& zCk?5Fqqq1N89?iq!_A-@S0}PJ!yo&)XIxFRF@HyZK%TxrC%*}kJ4$ypdsroNfFRIR z$(1tG`Ix=f0#S(t;u7}_P#0T6??OICJ2Z>4r=9j_F9!+tT|T<&08>f535ZkY*$V=L z#!kjCAe2ig9yXH!lD2>w+uq*`v3aATay#e>d9onq*ZXP?bmfS8QbNvFGwwsdeOU0^ 
zjGRp~L<@F~ZYa<82&|uZbh{%%sioZISk1&EY`&C(Q0C3QI#Bh-2pCge<7~!5gvvpE zgw4UVz1PysW%^FBAvgeLMAJq}lNyyw)L31&KF2y_yW76G+P1A<65ZZE_tQygXO{bL z>YffT)O)`t5sPo$Q*W{<*Fg8ovZY{s3cna-2boC@ zBNcw5Bh9+&duxBbC1FxlTe7|QVp@D#v!%OH`~TQXNnKt4Zu8(H z?G~lCcr&FLat&oeR_mMml;9vZ^+>Y&MRMnors`r2ok(YUnG zZ>C?@H}Tp4xy{~-&CsMlY@{^bpI~LGS^Uj5H;Z^y_nW6yuHu*sy)8HQwkZeJDQ(SY zUH&okJ?N#F2dQjg|1forv;QbJrJy$sxs!2CsxzO~dHGUIyiNPQ*zpJQ7w`u^%#tTX z-Rx%FSLF?-YL9=)UTIhO_qC&Q{!C~zd!<}Xo#&=^_}2jH7DcXcM9+rcC0NYg|0&#> zZ@2Q&g;(3U#yNX0V59)nobmEKE7{3h<~1~1B{aY1TiIAWx;wj~8rc|7h6Sbp14%u< zcA&bh@g^Jf%Y%iiPVQ7{GlqbK;!9mEvLWcx=3miHg$v%=rV@ zr0;B%V<~RvCgxGdk=W9@u-Dd1y-4((k(iKncuQT_mUHMk8GAaW_o52++wMjGBXtZq zdH|}&Lb3wO=MSNb;$Q^_&A+7XJ+fC=jK1KCX4k^8dg#qSv@IXqAqZ?t<-lNENB|ls zsb5!4b-~FuK6mseRca|dt)m<41_uS9K@rLzFD4X2hV(p-EIlrZ9hLJ}w$P9Qq^Bj# zTK?`Mq}@!t=iRQ_;jr*qLD`3oTQOlSWC(dYNdG%pKGS+H6AA-BHsp*z2y8Q8MDEOt zbhZksBNc5=Ld#ftoPEwh5K|!u$6AlTfBSpDtFq3Mm{-q5$+XZykD%o~q~S^Eh7mrz zfK^m;02WGvQTB`$Mc@75QAa|*jfbn(#zhjsfGf;LDe%$>&J`=x8jabom`y?)MO2}# zEuFC6d_L_HY=L4xrG) z-iZC_ELyu^Ja{w*yEhp^JazZ{fk}bPjrf!5@NBEJ0~p3;mRS^9r8G6NnT{XGhV#)z zLFnYvV-4tHN)!iHg_2#DF0hL0+*>TrD|SMLNK56tRFc2j&xuS$14igi>w87K6E^Ev zgZ=yvB@b`a;&FJP!9MivjAszyl7Y&dndmsG<V-KN9o%l>1Xa7>b^oH57DyvUx z-QXxM=Gd)AM>QWpvMRLPbL`+zcLW!^Hf>_qKjaQiz;7@qfmg$JpGYm{$-J_S>0q+N z4D|hEbWs|5Y|`UiO14Oud7uq0g$ab#r(M+xf0b4FsSG_YsFFF0HcJK-d?vuJ%ABam^U4^oeW^;Fy*TN75z^a*ZT7hbShcyEtq>Bp?fjzd8bD~cU-&Es;7FOZzV%3AqY_gmFt(lQ0|<3e|R$*b(f zAnV#Sd|pOmZT#V;JE>VxT=XqTSGRJVOB7sk*3ht8t4a#&(S_F8}n6SUrN_|&*w^!_TK9)NqW3XNnL*2 z7LCo38Ae_WJt>^>OMUa5AB@?3I>lkfVN09o-Tte<_IRW<8pO8VZ(><`=PRPU2xc6( zBXAV}$O--ztn#}E*frFtNqdB<_k!O(20S=z5jdrI8Vk9HUZZ>{f>QAhsMZgLH>uXU zn@vv^>^kRur(kypR6TFM8)q>24kYt}DYE7CQb7FclPT)*^z1!L%)NIqZb3$+uTQ3i zf4sAC7~X)Uy(4&xUh#Mr?^tdX>p1}bwwrctclL~RrJ(+VRZvRh2l~Xvk{zX4p)dK> zb#R9DBdA^8p_eCPGcN`Cpk6PwjO!4|sXH%vw8}D}f$)=52XQo%4fcem`^+EhX zWK^6t!Wi+*8&}}5&6Pb@WZuQwn_j-?`;;N0{+zEsjhi_~xqaeJ@_EO}Uihc+5HKs|HW1zs7#le)y|^z%bTzfp zB}S~oL9${xd+FoK%yagk`&6@K;0LO{^-Jix)Z6f0o59^w;Sph7+-~wcgZ{uZNAT57 z56klywCQkkf+QdE^Ca3n-Fjzgkk!a;Q&~G#lY*1Y-O*T*Lj(*%8%E=MF5H#6VvHHU z@7rTjHtLJyp=*w_!lgm-x8n=_Pr%=tgU=|(Y;>b;Fnb|Cnib}cPU1u2^+O73LfO|( zWvb+jT@AZCa$wYTm#ojINIzum+*4)7$V7(RzVin!Dm#9o(i)+g9;k}Y(a3{#jM3lU z<;ok-7LLbm#b>{rZe0B&&HQz=&X85q?XdEawQcX z+7TbROujX_8jW;+`1{1JyS>XYHHNpl8JJT=W9%WnLHQ!H=>vBMT?!%QZ6f-!==kTpTCz(aqEw3Y3ep z#5*A~H~;nDzWm|U+G@Iw9JP$_3NiEgpE(;mHsRbHv;zO8W|N+H{)x@&AGpF;G`!}i zeAVk|O4EVS(~qWtY&9?ymT#+W4l97}KlZZyp-W8kBBJluo)!4-v4K5izF~u6)?e?k z{fK`1SMx7ax%eV-UGw$wq`%Y|+b|Q%6}iiUskYqXU6`o1H*Zdtmmq$b;(821RccYE z8sP>Nule^iyLqu^KgAz}cqR4bS@=hso#wUJzx5#_O_!#5Xu+e?L27elzf@r5)Aul; zsC?iR;_czHEn(xmu_yn$lh@umN7*Bwj_e;rl+&jEhCAFlZkyWmj|2@Y6WF|%viBlO zTWDeO;w?;{wc6jjx;31r))oAx6n66g`d{F^_1xSg$+E|rEd8r!{@tqFm_M6d} z`F$;@|5}M%iB_u57TvH`u2@qV!NjGPsC&xFHGD@gT24hJp!f2!t#a}*TLh3 zTBf^4uE~$0aKs$Lj*dLV{9M=kc`6S1Zdj6gC|x}AxjbI_eg4W)lT;>t zUJaE;je2R>md;1wGzcShM`9qUbz3}t7_?7aJ z)$e92bwVjNz*F`?W#J8%cC2Z>`}|}0-@564d4$a;&sQ{efo1=W$%zOu4|8NiXxVI> zd@uTI978UD1mR;LD*|No!ICS7LC0jZ3uQY;zIuY?9nEF8j-htH)9#FauT=2muMSEK z$bKr8CGPx6_~$#=N7iwtmxH{lPok{TF-+SuFMOx2qr9xwyLB&x?}qIZy*1ic0cw`hMP`~uLjI-wwd{L5(#e(&stt`GR0 z`R|+m;XgC~M|1hU!<5JashfDk)aKt8b}Hl(ew{e<2gOD$3pbw216SkM9Y;}|{}6fw zvgLc8PM*b=9Q&d+y5Xt~bjvF^7yjH|v?=F{0#@NVp+8*o>!|#83R!@1rr@c+; zJ)?@Y05vcZe=grsz+C=+^(^)6+raI34k%vlHhs`v=h53y^`KRHXNFwSX^2pxUjg(= z4o-8b(FFQY|EFg~4x{own*^vY6V|B=64po&(PT|+`kKey|Y9ndsMdNWeZ{jk!>nbG}7k}VS+Lwg^yS&V}xuJ5- zmQNtzknVk;@Y038ONaj36hzv)UcAo38Rh!v8 zLRG5tD?%ko^~+fb#!YYZe#+8o#&)ngL?DzmyhhOawyUPV7>T*-b6G>~Rf{Xs%d%cG 
zf=%S+Z|^2l%jm8My9D&YtOER?z%JL3TZoLr=)S|Tx!d0|3Y8-&b(fm%N9!40Oh!a! zRwPzC@RfHeM{Qoc*y9xKCZO;@*DjK0nds`|#pYc65$38`$~&z>Z`8|TNzVsgf%MJN zN?)zHo;=HlAVe%qUsmT0YX_(?h6`NPNY=Z!!J+4J_Hv6OpAfQ$(Bz%`-1R zoc&&D&Lw^KyK?U(YTSW9rQ=_e1UlSz@66F;D=9$a#2%ZHUHzvy2K*)V#hjrSwugzOu4D1FNX#1I}_}5opP{#dZC=S3D9#1Rf;&*>f>CePV zf1x1R!cgsJX(vn5b?+;+s$YJJjW$dg(SCOUpXPMPZ?zTc^o^$FP{7SRj=rm@lavzk zL57^BPz#k#;_DSh)}j`$3+Ini{=9l(Q?F3f2+i5mS-958R@M0kU7Wulb?E1ZGNitO zrkN4tu7Z@RUZu+hzYb^Nc{Vea^6CKedm_qK@?s4^QguAf-yjvUo5(xwdWF zXfhR$Ej)1(Y1LTk6Pl{g3-Fy4IZlUhP<*dQO5R2f6E5TsQEM5*AXkkfNyTC}V8I4- zP?`KLm06%UEs29VR2qug=rIEZX)IV27m-$ajSUq`@xnlT;9Fp0-+6!7N7lk@3-Yyo zu9PF2zr}_xOO6Re&EfkeZ;^T4O56j7n4EL70={jrn3IX?(g3&dQ{;4Zg+ZaJPux)- z2yDssbtyce8F54$AWP}hh)6@&jhJdNsk|+>$Wm51x+tPz7i3OsMtl=5v#_+~zh5fk zGSY48iF6rIf;2z$XSPlv4QojYUVkj_qj_ZwYbx--@Pu$^@^(;+XUS=XDb2UYT{BpwS#QT_oU#^{avcL+v;3fnRyGYf}tJ<1} zH#&)v;{yNmD%q>_ZWU@4Iw4nEHk@GOG!t)_asQYs+ntIxndF;7*~;7WLLB<}W<(Pm z^yi~)s=XpaaB)HW=Z$$hgNU#|7izD)a`IFm_ND3RqH+MZt{uAGIfepdy1U@I$;ELE zEf~kCK!;pyI3`zH=~pYAqE;{PXa+(ngD8(@S++T$&~W9W{uELRINY0y315wJ>j84p ziiwz;2&xogl8;)YczMH_~tP*R(=-t7u?bx@M9tVtXM0r12 z&EV->zPd+KtQ@@fkWiBx69aSAz=3i*l(R8ftIk^&nG+GkkSe5H7iFR#%gv)&YJULx zq*@O|`J2$EGM*=YviaUC$Alac$)nDoh25e(OeF+f7@tVk#>&6ckD1H~J zR@snw;JoyJ7-UbV{QB_qfs4t3_m-&#^^C~iUke{%X4IT@mIj^@-yqSSAp;Ul9z`@l z#(e_R$5 zk%SJdA5oFK+HMj>^4djY${d3I;*Bi_cAybq#}wRQPwsUJH~mCqjIthKYH*rC-FAbN z8>YpVHTMfc9Tc>|j~l{eVqPpF9+vS5_=o{k+Az^U#7`DLIJ%{SMe+%`iTLnJ=^5uT zWNSnr28#QAa>6bfBuk&%rZtZf>&lB4GDQm7m?CPv4e?Em{)&cz?hCVh^lUaFu2H7z=nO=8A?6aQv@@mLX>3lnnahb~T?J20Q&%}!LF@BxAH`0B|KH*8OyVW%LD8?1CIwG6& ze(K)4{mG|J=PfN*J3oZ%5m=8{=OhY0@sYOsDe%#Gu7HC&4xC>63B+X(HmhUdAA41* zvvbXh(OMCVRJi+Ew6bb6PAPY#8r)Bg82pmrm3*=`XRAMli7w-z^`~ zrt|@#1=wnR2oiT0to3j@K57ql6n88?ylso)SPH3Ge&Oz&! zj%u~|PaLY~<ygQn@ z?wMUATBmNkzYcav9V_!5R^=P$|Y)sA}eM z?HY?+ce%~HUF^Fl7`r?YA(+HVt16NZUXq54%9}0w+@Tf-Nn9B2=<3B#0xy&-^wHta zSeaBb$BAx4N0iJTg_0-FySPK0zF6!`bG9342xVkC<3Ky`dVRro19do^$*EQ2g`y!n zCvOv{n@|2!PU4gBrCbjImu?|ep5%JUtEB zI8taz;!^3{IUI-9dh-XS1QnxK`wA?-bkWv`r_|Y+hRbv=HCFv3bUx*JJO&!w)*CA7 zm3qc>G2(@d)BrvR<;#$?Z^ke__ z2f6gP5l*YV$3uYLLuYwSL-9c@Euo^m$mPf?*9!nYQPq!BM7m5;?*G<06s}b_YQ3h~ z`6&!8aVfr!N=xJ^uTrP);4Rlc8Q$?~y3Y+hc-%LxVZI^@l1n*{kmo0eH!b*_cOKlu z$F2EHDGK^L5yvf!lG@>6UII_@0a=ma{CMWT{Pj(ds=SjptDegV*kX(PO?aDHWL7v50)1F%d=p-6j zDUHId_k4b*;>kQ~Q)?RZSX(-ZPwJ)oAJyf~Ge@7tEt#`}q`U?j%vY)GML0#sp$0ut z*lMLZXh{;0iKKd5HFo^yu%sm1eqW|V_Epo3Cuu*y_tiMo4haA5Z59nzP5Hxb{O=Bd z#kY>_V6n-Q@vdWWt?iLhCN4bz=gZEe9W>r zb>1dR{ef}jw;Tj7k)MK=Qa_DIoF?%35Zqir2EK%g(C7E7^8`FS_1mV+TBE8`n%4Zw zH#aMj(Ngz}ZR8Ww6O6?86TVrDi^baz55zwAqB0Ze%{x9~2s(T{FGQ02n8pbSzr&Ya z9J!C(;?N98_j9f&+DJ2<%!PnskA3JpzzdP_;}2BR!WG=nRz5?}^ zp5)L(e!F_^#Z@zB?>I#*Rl281D_aNTrJ4Q9MMN+~0=#r^Gw~c5?#j4l`B`Eq^o0@J zjb7v^>Sj?z%sb%@y3bh<5>tj@o%)IkmFQf2Zluv(Xa~?e$AyxBBaY8ao~uev4_w2+ zr5V=ncBk&I?4wtJ^>QFw)4_od=)b!%$q{fUwbxb*h!g>J8U@lUsp`$i3BN{uT=Rv!1u_v*J+RA#NUyTi&P-VJ`Mb!| z{6<^ZrNt=O3teR!$!5rqK+^GhJ01XcV%upV=Doi!wU?pNc*k01=J5wk+x}1-_^;8D zK284XWXEx6dr6%BW+)8C-w-)aKZg41a!mL%;z6Gt)naE77wi zyfT>jXI)BJ4>bcnjDjGm?0Y4(USy^!0t9V_G8_SOcjkHMTQmU_(@r~Oy}i~5F3a?< zYxlSq8mK8`Vo8JW4rW-%8_nj|>vkZl(YQlLd%*SDvpo|plHdKvIF#GLsqnI{`#TAf zoR|OgJJKV`2;iPo0>0WZy%@a2JSdtB2-`*?OF_3~q+Lr{OD~+>hr%7EryTebks`W7 zKTEb30-9tcmQ1+w16N&=v&gc?V}?f2G|m5k6vK-3Ot zw~nc8+}e;Xy5?edfg>ow@tvswU_Ke@_7IqJIb>1WsR+-tUZ3AH{K}aWq5k&3F8Ds^ zxqCwaFXuAGp&}sWdpPxlf$WC8WEe*%`N+R`4(VrsU70i13$p8A$Vg~W>{|y04^0BT zJu&+?>=2Fjo?%uHQM%Wn^5t25=8-&TWFv6#((EZjvuzk(MYv{|0Ni=T;>B4%Bj3=v#g%pmHE(`--3WBzKxIhRJ^9FpQ*uC`(ma53Fh?_euI~rM zheRS+9(x&4o%XwpUU1%jtklX<9?o_V9yE!0@7~YK%dgY~c6ogV!!*kaOfQ*LmPh%} 
zVh5X6s+fycq{Hd2Z!I)=t+L%W4qVNT7PfJlE6cq`SOu^Q_rqhA`67P9NPKrDE)fIuj9ex9bwEG*b zXRsy2SKq)+i zBGf9F%LobL2tv=2;8Jf9iFk{aNJ zt_5ciIvNAQ)0gjEMF`C#&u!*Q?uBflxD2x9x*3AL$Sk z)iGJLv*DzPPlboqEe1!%%VMtCFWcRJl479m5PvZ()3BJnKD(&!xAJmZZ7J9M^DC!4 zKnA=X4mIY9mmogIT#Z+Ql?*OK3Av!PQo+b13gUPSm1bv=x(!PB{!73HvaT`8|anaM&}a9Vzx5ax=%qhP{uA;4X6JmnRheAlQ; znlM6`L1jSGHQM3;9+m%gE)JTwdchfsZHa;!U0JDl+@oV@Hss-|*M(T#-K+UK5o^_y zO@h;>i{>ip68n>bCw)I`B-3%3J`PEAd`$$t?bzD)utm!cvnhF-)qUPFKN5~*9y-SD zJHrokhd$SIaXRTNuW&)3aQU*r5B$LQF!b?4RVj~4NxRH@2NJCIbfg@ahI$wEd$M@B zd*uD5xlK+|drBqUNjrDYS_+z((oB1;nn3K%*w~DpD;9>fN8tL)#j@|R{5J-crkuV< z(`q8>R(%|NOkuPRrgZ(nUj%d~Un;%)JCUF{-Zh*iS^4+|5T7>Gs)O!dOG=|Xjt*JL zUE*07T@i29&gBdp{|N?U>Ql7$+p(l};5j!xK%RYr$WPV`eE|d~ES~%V8ziF&Y#VB> zX$$XppV`M1sDo%rqbE|jiII;`A_K= zrZkrxkz0(of!?j9H-^3o30RtDO?o&}XxlkI7yJMEE{;zcW8 z1vRFrZBJZYT+{6r=s3fVqCN0Edny&YJqHqX9&;@YDgWt?@q;+80YQxIUkpctPMHdF zcll?D77>w}nlBqYHNw+-@#O%?HG>w*3C|KU~H>US}=O8*yph$y?3q+obuU4_aSgJup#7*DJ%Wf z1#IsJ9C3e7-b=8_L5ojnFx|KbGjv)0Di>Rqll~b0KB=-#y*Bh*um2g>U^8WN%}W?n zYw6o@e_}#%w4aI3gys^x87~4dhKTz@4|_R$aL1n2+tJNIS*d$NUKKcHz*`Y9L#4yn`=;K7OG0vjdnpfLI=t6cdXEsRYP=8n5Es% z2!`?y-Ea3Lt_9XJsJCY1+@)gQY?NV$%2Ra!KaD7kx^~Ba)>ce(-*RSE+=t)Uh|R<$ zR4f!*Pi=gxBkjCm;p*0(J{n;g##DZ47a`~^z$e99$#-5iGT7d^p_n~y&s~t$TVKCW z)K));G1VCX^|vK3m_sC;p({Q_9siOyrG5O72#2g(ZRIu{a)(dbvo9ci`+5mB6A?Bt zirBA{=W}GgCPVG)%};Uu-kbQan9%0j3h?+rser#Py)K4R;QkN<4qTxnCN0pV0(Tw=B^%%*>|35=Kt)8v%-my-IWBtL6qW~DvME_2bV`R z6_=+kJCWY~@udblum0jGApsWPdRgBtpB>2{%r;^C9}Yy zvH6$wRo4sWd)?eFw4u4JYLqT;>{ZaA53)G2NUt4wgl4aB9?F)IE+yl%?sVmCT@|a2HJgUxovmjvQWndG zdN=W1iBN6c?hLAiJO}s3$)Y~F`?6*+yAQWPL%AXgj!jK#JA8pPo8uq{y2!63u=uSJ zqio$zL6eP8KiTFWZ)o^)^+&BYw}}1q(fa|vw(ka!<0SOG7~Ndz-<^8^?mSEmFcPYYSXMWQ>Z) z;;_K@yLs{KmIv$u4X&c@0{&4MUD79Irmf5a9z{0(`d}&?GHl9dvrMvXL_8#`JW8Bt z(_{qP9?lM?yB>u`#5dlWB_iZBhT55A758belpHRGFk$N|V$oqQv}&+_JK8xWxgc&+9Z`4xK~h5t%cF7S8qR zhnv0(&=>6Xpvy-(ULvdvh{%Bzml0i-YZcP*UZ__XRv4RkfH~W3ArHaD`n8s`62e`R zAzHII5PiBJ+ZpImtBljBvv2%hQ2ui0#^zf@y*fx1TD5?SI!Jv@t#%C73x`g;Y-{}Q zMZT+sj?xtF=3IYEba2q+X94ScD?6XX@haW1N>baB;%C_-Nu4eqrn!0sqggNY`$lQe znE*Wg^CdwSI;;bDNEC83m!}bFt6;4SM~Eza8jMeK)@4SqK>fvi)?Ok2{+_?Rc(VKV z%66^%Y1o-h%;^nfj;_F=Fhz1JC@G{tzosuGDtQ>Sk%ZX(HCVxQ&tukY@SuHWh5Hou zeuy5EeR8DD8ll-}^O@voBC9QBexC)kP3ux$iRaVx=1!>-d-L3F3|?7>RPCG9sm!Dq>Wqq;8td05>4NU@mH+%w*O zk5svp_ilxh?6TYCWK{&K^h=P5jm5Kyf3*rzJhcEMt!OTj{T&|taHWp{HT4Dke15%h z;>tgc%b`z~T@ckA%hwI|y8%D>oJ-V+J>)&K z6|6l#{`C1B(Shp=pr-#*5`wgZ?g+dc*Vmq_31-Xv_w&-=Pbd-q{$#x)ThqReKL2e! 
z3OR#E@Pvr`>5a_;ht_{qVsIzUVw~qUz~q zq13qM{ecV0wN+|O+UjTIl+J0-KAD=jB7Fa|Tj7?r*83{0l`1#c(B;F^QvuVkhg##O zwYDf9vfs#pu!HP_AQn(F5H3*Yh^ecAJsYZ};4cpsJEjbhbBfe z{~JBDT!uGlLY_OFw;~YNe=fB+kwe>H;iI6203{6hIJd$%qnV6O>egvzhCLZ3q>saUJx4#t8Th9%8zvdLv8DLKb4`uIzPVt9bIJ`&K2<5gRRuNq^l;mxV!_oIuhE~Y~vNF;`D8l0N9a^*B z_lf=+ts0yP{rzD2oAwOzw3v_-@WZ{vmC;Ktod{84yCZt#Y+ooh$e89n!KH74_U6GV zb6IHB+|xB(_;j&cmRKTg`3fjWCc7{#FL3V9=rJ%ngXx#W+!%3@0!m_3pOd(h2MtP= zd6c+V%E5&E0i#T}=fl3rE`Etzs|KbaquX|s%$&-OI;g)zQi4HZf(o_IYCf@(1<=aE zEe11be2}gzL{*lJ8!N1PQC?JXt+9AHR(ZzGL7kxAMQkW=CAbs$E?_7?*u+EZBZ`PgK3G*%yRifL zRd2?dYo7kS=qfVuJm5C^cgkMGu*%+IYLtOzD%k!p5}5VwDF$JpsQ8LrS6S&}L!DKn zqrU0Vx`cwa^SieegMt&O;*kBO?dRAGsP5u9*9~!0eD*ALcYu-n?cB#TNr2B7>SoOV zTc`YJ=X(w>;D8qcvz4R0EH*0bG+qV?*FRobVrf;djs4l04xL)KCF4bqu#kPpZb{3b z%U%zX`|FLWS)J5c|)MDMy#&q)YJGPIA)G5b1VvEKJzCL!G0zE*%p#;Y~eZ z=$Zu+uWQtUFTU)i!G=%nNjX72%%kpAiaLTuZC)(6>^j8eMni3(`ED9rDu7=J7abuf)Zi8Z}iGGBWfjdQVr9YCQd_P9tgklf^da@V1mFhg7x5YuDQ)S3$dnxbJ zX{xOifTL zeIgad%zxNBZA)-6KrD;)HZ2^C>$X{5h+7fah!t>7qe~TgWNX}uyDS{onr|qmhLxpr zqNTSi&1H&7X_q??@v%fQu)(5TERpZBcn3=iHy2x&h*0JWb|59_4XlLY>hJuo-B}fQ zL1=~?JWa`V$})d4;0MdbC&_O~db$snjZ0Gs0~bvS#1;N-PIgMROT+tu^G{Tj?Q2{( zX@LrrR{1jG)GY10wO{TPdT(X(i>N!XZ~99RxvUo$B{zUYOhm{~533s=b?@he87n(n z9X|b2LQ;%uY%YJE8=RI%Tp#iY$nylT4am5-()bM3K9{}3W2#Jo^hljsyPZrJ9JxOB z)&{WNBuj-$M&VexBXMrDxlgbzE$T92+nUIt1sNWvgqBP>`+|Tj6UEGl=lgVq*vbQ2 zudlh<3slRVu*1fD%k`Z+-}WhSdP&6*?}<*5b%^)y+M}Q@Z7BHMg5Jt|Jw|_v`>h@w zXeaIcJSkq!wAsZQ?7x-I%(v|_hzIK(V@Du)wVeir4qx<7A*#LD3U7`&O#0~mv-+QA zmywkidnplVDK@@ei8)F&!GX}uPA2|c)-hc+ArP}U3)8t)(__vOf{Tb}t8_FO7|Cqv z&~=xqx^(>6*vqY!fld0^t+KO&CpV#6a;pcEIB-F&RVLm+;Dq%QTb}(YigY4|Y@Gtl z>vovw&^V@toULij)3~oDW9s$5o$hh{wI-z>~TI6ZLtCBH)~r5IV0J_sA7Qlrq!muEpWopY$C~P?u+BQuhtRHCV!e0Cqi}n!3$2C-?2M8znfVf zm`DbVb!z3dAs@9b6n^u=HrNKX5B7Az@Zi9Itizsu4Z84uk<;(2f@QvaV(rq;Z8km> zhZR`a?iOnewIBG$hSBwQd-#gdpX7x|O4QL5MR&1;0zzC*bU@e)eqC50J=TP6_%ij8g49?)_PXtuu8o)dy65?1$=|c7U-?wkdH9f- z99pbo)uM!ErCG-G7>MPHt^WO0oxGF8y%}Q9{3_lB!WaR-=7@-S$V}-C`SBYBln|pp zBF7yBVe4<}%W`7=oz&$54$oRW-+eXvkYYA>%ke46(SF^Adf?;F9}5RTl8|svh*&bW zDd0qT027R-bUXc@%~fjC<=U0x)=_fEb`r`l@L$1(h~)sX9NHEO`3XSNtS6iEPPNPZYZ3Q4N$%-w$79Zjh~@DIU4)3ve~#Ni-;lf1 zL=apWNaBEXq$o>5?pD9QPrP*`+_oH(VX-j>s#?ml?dmSr)@T+ zWVd6*MgcY-B3VjFwsK^Vz69nXfFyO23KBX^Q3Rmdiyybw-M)A4I+767c{t}J?~s!s z!M4kSQmSjxd9Fe+2&6D?pzKa5-Vf^X1}R2#66a6K^*E`JPldg{rJ@0VC3WbZj+-6o zg6c7Y{!O%L=#=E<=1$y5{%`~*mc>~Q*M z7cvMy;6+=>_7PZTxg(I)vzyCm`R>`;7rtLTQC#Lk;oAL4uzc{ zNm&L!J@Ld41S!^#tGV5{$GPOc`@U|+x#S%*&#sXlS3r}qMCVfdCr@zFi#Xz8 zbZUf1lTSJWC6HEZOWARH3>w)P{f#V7Zg!0UlfO-CBM&%m#%CKWI|QN5ney7M#iW8T zmmC#hWO~PDzJHOCPi2TP7~gqin=c$K__8)vv|C;}NM%U)Bfs-7Pk5kHMoG#!_aX?MGkU*Jx%gK7~RJ`93$Bg^#zIBcVx@L4RZG*w>p5 z*@nf81KITstrb}o2D^>5bD-`{`VpYYAhQ<$jcTylX!O;~5{I(HsTB6ZnrkLRRO1u5 zf*$Q>pTCYoBvA1;C*x!;X6y)R2uc`UvDI?4F%H#^sxnsGWzaYsORM0MuM`*tKCB3B zc0B0<7A7p{vOvDg|2=TxlAo4YYlcJ>UIFddw!6kZxI3LeFn2F%J#f-Xi7o%~)(H&o z@zrKK5*apNa3Sd7wa5|(dGdYvpRwTGJEEssIS;Bvs;($32W2}+8^8&oY{TH-tAKj_ zHWQ{{WpgJSh!%mnAJtDfwb@TLo3(Sk{OB zob)7%!3=(Sy+`7$(ouX`trZ`GakuPOW!RNHG4meEJyK+K8t4>y1v4Y7oT>QCUi-D7 zo3^`!>PWW94vXo#B~D@0OudL-C;-0WekZ7MZy~$TWpT{1+UIRN?O}Aq;8v*n=4zOv z5Z6=0KH4T}J%={^bY|{s9erOOpq6{X7@=Lz?7KTNWWH0@I$eiZO8(RDqt?7h|CkL1 z)19)n?QPKZkqdub7&wq?%#nBfC2_j;DT5{1_ySd~?2cuwWMi=(=iKiyJIy(p!pQ16 zOX|HtJh-Euf4MY^2htO;q~P5*`T1Fwb4jy#xp~(3=GW5qkhpu!4eO+3cIWzvryqIcuFi z?pf!qb?+Z{ScDIgnG8(YWS-}Fzf_)wP1ENa?KZE|@Vw*>rs1d}28*}<$P)UJ`!y+d zB*(h_==0iu=;aBQpS|b0)7bhUabu(!6f9q?lHQo{`mlb(XjKA;hfwY- ziZ<%;`2`Tr2>_l@tSWZ(Y>`jOYfQb<2;C2<%XUk=ojs(-A1FqbC>2)*1JU@1gk2{Z>P~{I$Q+&?A)l8M1kPD(^Q<10fSs0pV 
zp{`UaKEOPkJhPp#j%z_S?>MN8iezOV*ZA(9Komi{fY37B$<9UTzh@}uhItz9CC8)^ zjndo?_wkFR$m>mLw=6D3^7*eVBfj<(`NmSuY59Xuay>b(mB+G3`P};HQ^4_Tav4nP zxn?yfLy_cj1XM)c?{3GomY!Gvqs$G_#P>s1bOray1Yw~!OPq1@4%}BMYt2`S&#PkD zN4cU2T*~xv`{^z&Ze2#ZGL3$GY*l~<3*y;VyfW=Y;C>6pdG8D2@0?iWaT_4N%%wr_ z%e7$h0ca0%%?Y&37R--L)kJ!rLsM|p+t_t2$z{N!HtKnqf8o?~*Gh9ySdPpHe9t2BgG|kUMK9r7rIPQF{J>Oi<|k zQ^H53Ugr6|$)EHo?y`*v!&Qzi8@lV6a_%vqD{=N*yqPg>*ArJm4{bn=QYxP^QLcLH z*bJflT2O`}am5}+G8EPybUwsIA9@S}p&S1Zx zmb@eOAi=VWL-w@Dfh_53SUe}$qzD`k9AK_y>0S_3)FAm%H_TuH2lu$QqI=sZgS>>x zWVOYizScve{}8z&jzMKEC7v=6RD(EzUd3n{Y*~XUrKw2cek= z^{L~j5@Q0_yG$!@AmvYgKHRrz)rep{1{|5JTl`AgVT9BIZ0vp9F+?hPWy1lIzeSf@rHTErh|@<8NzqP|PH?a!)@NB8W1T63 zb~ic#755K%f~96_L@ST@<+vFW3~Mw!uzr{vMPqk0qfDZetcd?cnie|>p@tl}U%rxd zH(duFRn~B_7xY?`9yMXIM^4TG-5?9j-}4b}Kr^A~7!>2^{2Wu7%gaWRMM z*GnF{=>s~%XY*OfC%x5Q z0ycLE1XS3MLzRe4wb8xW2GjgHFF$kRBZh>|)1U@qEC^a6>?Z4xGMpmuW7IHTn@epE zjPlim6KxL^UdSLbKOx5E#;~gI?t?QY-Ll3uXpQ^iFtV8?n<21uU@k+}s9)HgYd^V` zd&_RFJCi%Lwtv9-S$hO{-c$%P9a&H1+2Jx#WOCUA zBL~^D2fu%Y3S>n;ODz(W6;&k-7-z-ORg9D=5EZ~!y@*SWXjDTIS_%hS@B|RKWM;^4 zbemMOzjXE-ex^>!yB+@w$E)Nk?GULo1?IukOB&O6Wx2c$u1ST=T8xoqF+hbD3n4;-de zA$i-(yF}N4_6Ib~Oc8R1#P`XUmr-oqye0Gykog#bH(+X}7GW;TKQQ$XE}ao&z=jov ztL`DBqidT?w*>6xM7{yX_dyW2T00vjv_umYu|I4Y#jCl(+wqyFkf`Tm>buc}l$FO- zs~y;lIf;#$5sN!n*~LKH@*46xnt3J8klJl^*!2E5t=BeDK9c z4NIM==c#7LmRKq7+nMp!kQ93$y!nDhN}?5e2omXa50IAPhAci`GMN(4su}Ib4kh=l z@Efv=cvYlP!-BQlxH_u*DPI-Wud%8`C7gv(NvKL`UTz~v$d{q;A3f{Bnsj=q@OP?! z@`|wSupsX@qCHti^cx477TloI1YP9r`v@vK#Eo{@`L@1{3p_ zbRsM7(hufXNj+K-KIo&3Rz4c!$gA$79q7p0OwvIs>%^Dp1ZoI1f@KT^iN*iwSx<&B zxj5^oVN5>GdW@{E(=1sujM?B$E+UK5#v}@cgE2JW=vwG4stDGYWNQhtqkVC3+`qez zg+{Z`1wFOt!+;=J{QV?a3Wsi9k-j&KM*5%)r=>)P(V}FDH+7QmUUdFJ@mu@~l`)1& zy@o2WhWA&{GaB6cEcD3g!G4^vNvx)QfQVreltejxv`hM+oO(gJN!yr3S&-|i72(4i zJxkLpu2=9O4zt7Nk0u@Kndf`O=D)?_i^g)B*Fs;B1n4xgI!()e^sLyeF&}YLGRBgH zsvrwh;KW~aS&sj!XWe%q*xGY_W|=?ewRyQ?v(jt%91K#%{;Owc+Er?B-mSpz9qqtc z6j&UkR8jpEeJGtl;pkc7efFJl;MP2-RGfoSpMy%AqSQVPBunj7?Q=Z3>nI%uvIIF< z^f}pR*+D>1+qi%9thQK?(_`lVj-I8Z=hNpBrR5r@bs#d%^~|m-qkI5Lat_yYJHP8z zTyC1A<$j^h{nD;R#ID_yIFGhn_sTwxE-lZVIJdq&&)bjPBg;SCi90b}?gGO?Y_FYo z9Ovb^>+y8gYh~BLLrAC+I*@`;bQ%rGLq z7HUa{W>OK#EC~LzMHUP3kt9?`mO=IV9Thba>xX96qTbeto@Gd~Z~@Mu0cYw&L+XGL zD#DVDGG(Z~-}Q^q4m=bkETN5hJ1piQRs@{^2RMEgLy%8>p-MgB zB(ggNcJzxMAp5^#1n0N}Ww`_oYljt91Si%3mMnzRqWkPhKqm4XhzvERA{*}qz0Ei} z!}u=m7oNjy$F=hF*$Sor{Av|!P+0d0vkLT6QMu&`6VRFSKPQ? 
[GIT binary patch payload omitted: base85-encoded binary data for the archived v1.1.0 documentation assets (e.g. the moved docs/1.1.0/_images/*.png files); not human-readable and reproduced here only as encoded patch content.]
z6B^VM)@%LjZxj0U1xyE5RdkVheC)gUtU*#8@ZI5)KpIRVd|Sl$w2W`7E5w z11anR^Exb6%`9lIwAJYUGO3`zVS%-8b8mIRo8pDTjPydqwSi%uSR6uglAU-yJ*IH5zv$ z0IR2KtX&b9Q!U<#m&IvNrRmwnDq#(~b1&kBRkU=7eqoiz~)X9*SbzIP}vHtcU#5wp-;`0-Kf=T?SYxn;H96@`XGU1j<{3OEgbJ@EiM74sSc>iP6C3GC&j#I0w%)3GJx2VJNv~F);cb(i*g_$ zNJZ|fcmnE~gNO=pB_bCIzkCO=d>2@2VwG@!VBYR@Iac(8j?j*`bTJwGRcGMaeyxyj z<~+^rdX2DxBz{CqC0sz;S3@^2n}|{)uJ&RR=ynje{jX=9PcnX{322MbTto@-WvU@g z5_*;ZgY(FMBMi zfsahiwvs(6*Jp2;u&>ue^bV4`ijA^e1jT%{rv z2r5HiVr}RP)45KYc!h2B1>Q}y<9=dW+tSx~erqLq5cdQNxNi@LK+1!e=9d z3+ZwXd1_w>V#@+j*$gE3nZ{cuG4m{B@-VssZ8F~Ha1*Q?*diVb_i*P_OiPD85ULDs z!6kRCUeGZgY&4G%8@2aiq&Luq(1LhMhD>^4=H zJgIr=7_yCwd<7gnt*hMvLbY*3;r1wa7k-L{EPEJzAJD~jMc?NkF+bIkGsJx7ypdUf zZL=s>0qcu&WoVdi-~v*#TG*v0&c9ksAV*2BSlGQEGFpnlkZ*sZ3e_E>21eX2*NDcr z+;Wi&X{?d(zVE*48ZR1NwsHw)=_s2Z3EB@hpP4lOy8M5vy?0m>Py98y=^=q6Gzl#x zGyxR>kq(BUbOh;02}M8<3`Lr#NeB>%7ACHV z-M>}H5;anix5xnRcF=6}#;yF%V=*JGUw~IlSZ5qsl^$9XAZD3ivif8j7++uT1GXs+ zCc&p&i`_mP+);%O|y+3xNwao#s&s=FsU4 za?QMRuNYe5J)uhCCo$$8Sx;TM$c$^VXpMzKJLWx0&9Qa7xKA~TT&1)IAmuIR@DSba z*fZ_XDwOsRi4P~m%q#f8EFYMQTaytIX{YzX{OXmX151o6&AK-0J{*2LvlR_EIiLtM z={JuML;$?BZ)wayjPgOx#}E*{0;ryzwD)^1*wM8Yy`z3S5o$%L#-P z15$3@e(~K2K1L^)SY;3#H6)!-6dEWQc$30|$&iss+h(Rkv^)^s1iQT^Y*>(WaS&3s<|A z_oFrOmck174xVQhU~ZD=u0hj_57K%zVEU04S;uj{(WmX9=tB$ep_QHzW3X;ki(1pl z?)Cc4xEW!pBS>E}uUE?%ck3P;L{e@6Op4H-&3#p5i5t0VE1K`&g4-MK1Hb{N$4W+N zIW7?~V6T+(0njcn4+Oy0Etio z(k@z)x18buAn%&2FfAlrtOK=1lVb%GMBTu*5+l}tRK^b7k2Ji`R}mE0DjjVpXYH(0 zB^twnvUs=67#dND47H8eLvzv#rAuACcn4uzugg^`=VM*<~azP`!Q%p_-#4=*-VE} z!gkD~;hSTZKg=r8C(l@SHvPI-13KUiYMiTOzT-p^PK4v`Tw8DqLt?S`_hZnP-&gfn6-( z+ekj#adS?X;p;#~2n4jIWyc-_N~s21-LePo7I~^3IV z%^tB{aR=C`@wYD>W^`CGezUTKSH(Q!SJB0bEHS9@rS1EkWncn>nLYv{zcgFgE@H?z zj;Otz?Js1=uVoz#-&X4yo+=1D+6$IHyeWT=b{}cFr)}?5che`@;m;*@5zP5mLOk}( z)@Kb<#0}w%RVoZEuT_*;%a4R@ijH$)5dQ@p1$u=ul0Mx zS{E;NeQ3^`Bi#%BdFe?*&T{?ONLM3x$vHke=E3CfhqCN<2s_F6QFOVN@mmtX)c+9q z{Km2U1eI+S$Af$3W*2{VsgDGR1MfC}J`n}CwuJ(JNR^WilRn8{OiD3$;zIacrvChQ8ChYZXwoV@oQkE2crYQ)y%5s}&ydYVi)-%AO(9}pc>5$aq$TFNA3g;i&1jI3-2d~(- za(gg#jTh~2fB>LVV)W#l$EN6;6d@(FdCPOZ?8tjBRK$45?l5#z>s)U8n!wK?JF0E% zR(l8_&qbf=*nGTu2KY3YeX8r%N4xyaKig<3@6WH@)Nc6BJ-2{Q+ga3J%)y-9-MAz6 z(fxJ5+N=6yPK|x85K9n_8B%y(w=tx0B`2oyQ^=*5QH_U|-B^4WEpCY}mU8 zfaA0}V#m!Da~utp@Ug;xgmF-gez`XE>-fkZ-n7#Ax%Zw^ z<)b$h8343cv*hU^Nsh6=>WPpz-mi&Tbv`7&=jtJ0?{4gQPeKaoWu?x>(A5iPML|c? 
zWM5wu>ewK4U+u7rkZX1#BpAn8OVzZ;3BPQdiF~mB>&UHt+uVQu4pL_rgpM=bBX2=0 zG;WR*<-}KSp5cItY5|ZR4 zxuZ^`xU&~U7cD^4A&?rpAh~LzQ9?aX`+4tz{2U+qOxrzITcbv9F7d78gC8BNlPl?7 zu0!d4O`V2@OcA%~w??hJblql)!!j&COU*4z%uuw_#xzd$%T{iVNH}SI9B$NA({F6L zkbAa~rD5zOtYxqu@8(Ie@E#`Xpgt=tBI)u_YlG=Wj+u5o)$1JOp~2ZX$hB}U<9c42 zdKeG&tRO=!Gn($J3dOmvfZ@hG__nh)Q|{zSt=>z%KzwF_O{nWZ)bl|md!V>MyU+oo z^J9yZZivhZQHxjy(ufO4+#uI#-@sY2SdcP59!<85O4neevVF(NQZqEVy8cnpJfCmg zrgqUVc%$%@50X@$WUR~DuXKH(M}B;jEb3g``#vcv7&NfG)e&2G|DY6Y->#b+$HuH0IfYh# z4Sy?xQK8b3-@g~d|Dk!qbO?qV<|Vmlmy71J>dT81iFNFz1AB^;yN|PU)<&P9_Y@bM zELHmAxs;AE^&~B}@>~S2VCb58dAz5)BvceD%x9t38dHN!vgp56nK&Q7|H~TjU?z8a ze^Au)#-Y{(IxTA>y}^lS5kET}C~P52m8cM;VRb=GTUgjWpLB|?Y%w;| zy^nPCAt}=e*_uUZpN)LN*H;iPK_2Qf)5;9pyEJS(ZWevXi{$pP=SxrG#OuSBa*~a4 z+U?ws4BC+k;)S0Z7iQ_id<%%j1szM=P28c9U!-qJb(mt(4Dz17g1*}ZDs~@^pKSJ zWQ{NyAzjEh?v7QtiR?bs<(ZlI`xW!@a?0Z8fZys&`K?3u50y`V4nF-D-)c<*pi}^g zrvVAO`aI4^Q$Vpn5HJVP{!g#{BsEoXS^K4N13x=~OS&>Y4MQ~Y(+xZ5=*ed=!>J_X zZcdx>gDD7UDaqtX7af-{M63D6O;34F7ZJ~m*a4U+RCdjO-sZ85;jPqI;o6JxCtGQR zK6*#A>jp-~DOtPU86nksU<7T}h4|PDN9z+E{%&vk<7uZ zIIY_FD1>M@EfjGWd6tazp#j8ah_h_FGePK5Me;=#^b8LIrye}>#qjsMzA;xw@e3w_ z8qrQnLQI)p@4|h#@Cs#Y11I2<1@es`GI0Im!Ct!0bDp73zPTF}<67wXPDO2ll z+uc<(`dlX$l?)39!S0hY&n0py}G#us-^v?;BPof)m}m^i$^Bn}(S01|ED`X+Vd7)a*ZD9XBIC9N_AF22EDsh>IhC3L zoa=@Cfm8l|$rdTjQm%54{;{7jiEn3vB-I{Y6mYeY}xaX zs%QllmXSa~#UtQ+(7HQXmIeq3EM_%9tpG-f2JECZbuULtf<*GX262AZ$U9bB2;$^$6jD#K+>^n#JaURDg=S%LE@hxAY9Nr0OL;t z;&GUrTtJ+My3YlnsdmO(K#p3l6IZbQ{9x7~-e!SUXzzu|w-hadi?;Dx zhLnm_KNsdd6C|G7oo5r#cLMEfWAzfp7DXf$hqgg6yKca4Kh=v*Mf(u}e;kIu1=10i zvt-~H4kJqjm{h&9T)>}r@pr6ymql^Qr03wBb28j>(wccHc{Xo6#VF59FRX$@PZwq0 zkO@`0$Xn27l)>`9WQ%Kp71;oiTq4h7s6POFa2OI1&`UxyY)UED7_8y4#%hs3JB~c0@9>?8Fn&;ME6gfM0U2u+ZFQpwClHiTCqrCFh_2KW#QZ3|1%i#clL zoM;{*?dPSlnqYsfVJiiMqycgi#O8xjVmwrq6&6Fa^V0;kB%!ptOHn((gPdx+aL&>= zXd7aG_lw~S4c*GoyGyRfFR;Al#8BQk=u^&ahG4!DA@&eMhWqL) zPnPG{DuEZ0Y7+zse46>|FKc6DYR|*%e?c%L4l*&kF8BdQdgc1bHapr5`v<)c)nx5Y zR3vtKX9>(d(lhrU7~4Qaido}%+#6BNDDk`K;#WJLC)TA+g0$7HWv{sf^jE44i4<;X z&r?9B$8m1y_ELP?1PUnXyC|});S_C$8wIqef?wx@2HMc9A=pH$(@8Eh)vOUi5{Vcu z0)frZ&IlP=mcdt-25(hgG2Qsy>*v%PZf+UQ@=yv+b(+1Hbn~DAb!;cjVN@}LK|&IU z_(q*fMjrVSNVrqQkVbO?MGy|Olrj6XUjf^GA`?d(58n!{LLN&yxUbYQH<KG2(R;EobM}}J=k3+EmDp-;Gs01Ej_=Yl%e5Z?#=JC-cP*Ki!+(#HPU>Al)&Ny{Q zmd5sr}8M1*W=rx1OAzrsZx*m-)&+lPHy)L9tM0ZDN{HOIU>+HFnJ9Isr* zMMc0I5P{E%%Y4{J2pd5Y+ZonMHI5l^4io(_X+4Z-DC}N?7al-Gu|eghjWv(^9Q+!X zz=t&kgZcne&l;{zAT>cTd$}t>u?ew*0y543&7d#noOZOG!et01Ol&S-k<#-Uu`D}$ z2j@Tv<=R;l_x1WFaBbq*0hGdS#PPVB^1X=ib^#&UpTPl%55T_5h2YraNqBWP-gfZ>LteqO*%dFB|hMIOY=vwEooCJR@S0=Uo9s`f<^Q zg0V4yGB(?u!_Gd97Hi-On$_*85%b>&(OpGWl9Jp;rL3>V@B=tRa4({fZ6GI8mvvol z{VvTaFWGZ#2$=}>3dcC5PmS=Vy*F~|2=xe6vW}b5D{aA%_ zA2$>I9*CX2w@RVv9`-X-iP*mYKd9>WXVue{Wo((!f5sk9z z>r3a|u5J8GjdDGScNa$p^}tS(v2pX9vriF$V@7e*SfkhhYLWfCOdL*HbaLGBz%`I7 z`O@b2fa$_5AiliV#{ga?okuz!yv(aa-ia zvdJXPF=O1Mox1aYC(Pe{xX%KO=Xoi9ymphEly@h_)^_*lI6qG#exZSpH0K7>NU& z;n}{fXf9IjK{+Gj$R^RJK|bWM3&9Q_;*RX#qf>Cx5eu2Iu8y(-S8O?4#JkuRer#t> z&b`3z17Yj2--{R&{b2USB+x`WRbsziVdM20xldbh}%y&_lo zFv5F9X7(C>B8v1=cdj}KPqN39@-XcHs&m;Qa8!GN|K`nK<2f#h!$s>^V)~tg&sz$& z+hg>R9(`oYXD8uL^p;oLw=)H{3uNJoDsE_!NNV%AZPMExpWm%_;`(_4vxIjb%&eFv zzghy@#d}w_#GTq@(I7iBbrYWn06!Gwb9yi`8?X0PA&k24{nVG$H$jUDNIA`0dR=(Z z_-^aBsyy~wC%$3aqMdjLr6pWHvXK5617`2pe!yL8&h5y1tQPzaDFiJIzyPYkn-r$@M9dqA-Jw^eQ{Socs-o(-y9)FA^ zuwhcfES@(^=<@sOm^bN_oK=p6q%#JZuwyx+{uKrEv)@rnDGB#Sba(rGQ7w#APP^>U z<42}O-#k{Ye5}i(BE2dROtNP$fD|j+)>;Kne*`A>+>Ia3!s|7N%KAn0XG(+S-Qi1^ zUQMF>LE*kKFD4b4!bQRIcSy(5R=*6j^AHTDk-ngh0!i)+euW4K`2zk7;t6Y~Mspv3 
zs+{{2;er_5?IEMtiE2VfIl-R0!o(zkcJN-m%~lj*9$l=N2v0M9_+nVN3Q>3#R8s*m zeT%a_>kw|-FKlK#=ZdJQuon#$-D4+up$OFb9VfJ4&E%I3f9*WDPu=XHu4Eyun;p|e z1wC`YU~_g#wxIl|6FVwzf8u@?z#B zQ>Bdl^xsz$Fx79_?eEOG_F)GK5XnwU;vR?ZOu4*9;N~@e0*`UN8Pg3ZxhJDr%A9#- z4~1hrVJ^=?3?~s)pOmD>z%t!#C}65&_27~rV=EvDyW-?l;-33yisoi+!fgSNr^`n; zW@#8@?`JE{H8w>Wf_cpwjnL7LtBNh27Jaoe0<2P3m zw-%9)%^^B|D-+j_T=#qP>vUYkAJydekJCF3W?nM*y=~QT$Km|;Ntq);IF8S}_|MO= zV=wPmu3dO@{onPnJ0UAyu9n4?|AgQ^c0iQ!0&m4`bic3X@xbJTS%D}_vBelfXn!4; zK-O;+yeQAK@)$NK5_@%=Wf}QE2mFQ_i93=e26E-+;-U`lrFt|(#F#-w!YRQ^aBVN6!B5ix ze^iyK`zx&GY49#lBW&@{6Q##*Nr%^C&q3Ld1<%Xd5`c;M2$S#A^AYCiuYa9vdFpQD z5T|g%kZN|E5CI${!esS0ZS=* zWcU3)F=P9mgsocD=E}s37tmfWDXP5q6Zg!;%p!AI!x#09qS8;!j4__o5JiGn-$G`LM3sa+$Y|TL>PQ2eY#*kYU zM6f{-R85lun~fmws>f45se$O~5v8lZmKiSHiD@tW_E8}b_FB0+-kI9-*&PWxUo{^8 ztNF4-A>|>kMHX|me)+VTpZe(oF3pBq3OCX3xg#l(si`IFQM_f@(dy`;cE$|t&Sv6` z0kByZ>7h5DhS(ya4pY0)fg50w5*8k|ZUzE2Xxf{*L)82Dn?xLFr#^{p%j#4AjyR;# zeK^r)3wWaOJvSFPj#sj&aT?OfxwfYnbzy9XbdQ3gxK{bn78Z^^p3Qiy){Wf806bYm zAQcQHBUQw~ApAH)z$k*-CDK5ECP3=o7mKWzp-qYC5kL^*qA9HMbLkfE-ZHh?e>(uprgsj9)hN!(0K#4AH>AsIT8wUb&@$;7&bm9M3NB3 zcIaFM`FB`cHv?HpGJaJ@Tc=V#X%{XWp$URXJs-ccL=Cu$qgzSU^Wf5M zq|1an9K4>J#gJoINP0tTis#{&qUVr^txVlUinz$i_g$a(<}o*^^w%prC>j38$gf9X z3xPCrXUr`rW89lJ^$Q|MQR_6B-)}G9!ZIWQGWHSx6Lu$yecca02{`&clv(PRGgp;z zDee!m@`O~rkl@FNX%INE2`Jc>OS#Z&ja?7GC%2Ms_64LJ7)NP^CLuC9H!!-vG%P6( zPBi9ZxP$KR&!5uF=pD)T?U=c!Y*$2q|FnICXMv ztpKS@K$-=EC79Ny4x9SG6lBX%+Ib&H_r|6J05Ja5^rXdtsJBKV81F;OGww=Z1@ONc z^wWgR%Yy>QB?6BkBIh@t3Eky`yOPi2K$JkR*EZE@xRc$$TD!gEBo>}e1$_})Y2Nw$ zDHo#3)5h+|JP7tyzJCH4rUhhqfqL%+d8NIpiNnSF0QqK_h!8yZaa&F5{!JhqUYz>c zcdJh7{sHT4$6rfJk1C&!%CJ9M*>8%QuRs<0a7=>F9(-Uh$K&O8mVVt$Xf+qho(j?fLG|-G=+P?AoN-0h>cNLu!=kdi_dCu674JdUV zwN4E8+Id-7;qB(*AzS+_$0XzD5b0JR-Ih^=b^{%Qi6XdqxolNIoD&|o%MRHjlh0);&ygRKd^#GCmVWSB*)UrVPRe3_PW8UVcN<(QM%JeYfd**Eez2 z3fFGtpR9#Ix;KP0JN{PwSg9ICir%Zpr~>)i@Ri+295fn;{t)0v7rFbfXCz@3l-)W1 z?bHLICKJuS44o+=NqPVlY%GjT`3H6ZrQK^FO;kz_b`o)v#{6eoQQh4?znwq*&XcFDFy?a2PMzAtGFc zZ(_#i9C#o+E5;M@Sskv>0pxfZzEpsk@)?ChrjQrwz!0rT&{b0)=Hrmvi3eG#85-k& z1{)s92aDk4Bxk|5^D}&hXvRcl|9EON56~0n7jnVxUZv^y^v1R3pmET6RdXFz3*Ejf zbsj5K6(K^sEtYfLdZ<7F$8Zp3JUqsE_@zXZ4BojxSLR-Ud4m7#kN|jYh)10MMX;o<_s^*=`U=;zj5;AfkDux!t3k z#-`Dp21zz*?J3m>iS4PYr0x50b`SbGdi%03@I^n?+rQf!&9>1VZ5$KE^$z{9ZzCgi zu?or_l%H3q5kC)00eimnT zmuy%*p*bJ1j2@;NFpk$b{=9UAEQTOcnFcSVl%|iId-=q4SX_x$aOv|CkV)aCCJD%! 
zC(H>Bi_a#IcAWDRqh#zzA% zC*-b3GSq1W8K@7nq%`_@rn!0MCh}pbow~k2nww@(BLAopOT^L><}{6ur5!ERR_n1m zYIxJdsME#xk&DS27t;?_O0|r1wQ|^L3D9tfuC_Oy zt$;v)HDLF~)zM8Js(P*by{oglTd!ya6c8}hphtb(+}&iO0h(o)o7d&3t7>py15(d% z^L^u%+vMh>Z0jfD9*~Cfe>4?f;vTI0{Mdo#!IAEvX}N*0=b@L~!{r5flI|yNx<|fo zXR_R*w*R_Qj|-g=@rV(55xv(VZt8iggGc=7=cgk*66K!}(#B%zb4dQAJrl-v1 z{0C`EBsP?c2;y9{)JMeWr~e>B($tzXWtL6r z`X~0fR27ZMBK%e1@#K+kZRG1TRy1C4ZaZpD7AwjLM|1IAIu&vHGcjAjoFUzKJUnp7 zH~On+?p{_LE;oL1$MfA8R@!Q2hAb*XyK(Np&gcU=rTafN*Pp2tF#U zkSwc2gIb{M%E-vk9yWR?wMpMEi-)Ux3K8J}!qFq)%{TXW^3{hJ>YA>sl{EC0EM$qX zawc`R=uPj;jI~X`RqbXaRSnaPL$~@fsais1lox5p*-1dyrY(-M%`i*ZpZ{BOqPkfl zEKeg5le@0+Wb)PLLj^|^Dxc8H@0|qSK56&;(7XPeQ7f8#``A0$H(7tme&tcq3{^xR z9~_@$;iQUCJ?2|Vgw80=|KOJ9urgxnAt!)?U2Mcm){aqI!z6q<`g58a7l@S;_IeNY zpHB1TnVJvLj8x$p;{N-3Z!Mf|5=>6*uEWK6!q^aV-D^01z#x^?ojQ%T!Mr7Q;eSHp zMZ)KvHF8SqORc7%tA7ruX&{}f^->cT7umtO0MtMhePV;AL2IJYvwBRRyEQBHC>kZ+ zhqYni(q}SES{Q=Y9Vi?{<}fakXCAG}ngVI+?m|DdlQUvR?-?%D&RE|*#1sf*(wzPV zCuL^jr%1bWmAPmH#_JxhIKi0wIp%(De`G_B{L2IWb*XF3U}>Z*#Jj8O-+`lJn za^H8MQyR#~6tpn^L7umottqp?l+}B458DL!VK?%^Vb<7}w1i)x{H*-fZ2ps{nE`y2 zLd;Zm8q#S+vn)#9sa*c@j?#+fC)z=C1Ku^;U7u-P!?i}^L1i0V!8gF@=|w|cFM|rf z@EOJ%dkoKIRmq4Zv4e&6v)kV;8m%zU<7sgmZN4I0c@}(96sjy6iKaYEC5s5JWM)4H zDiz@(l)hd*O?^d6gUjH8dnUTXG*lm=o%UlW^DE`(zC;VX@p~P)Qg;iN=kvh3kd2=T zW#4~)pOF;UP1qed1Th6FjHYk8u^NpupZOA@deprqYAk-7acJY3n0T^8o6+@L1p!9wN7Y?93b8$s=YkzQ2*Hm>+SN_t1A#88>ULEc#9EHn=JqJO%^aK&1p zq~vsbd1*A5#JQ4P?tf{?N9C4`cyk5eh^!-z-ecn4WmBWU{y@SOITcr2?t6HAtqLAf zraf3TIREkTVOd92(C+)=R)#y4FUJP1ZYmv@>8h1c!~+nD&5rwI#;j$drG!Ej*gCc9 zQm*t_Ni{yWmL)qIWAsm6ZS7;KK_i#~K1&Ca#t$M_=&ub*r9A07Xdsfl zjba$cBZIzPB_o~;ePbXi$nN7o-)%J-qYC%m2R{LP52O{@T_@Zf`{Eq3tcZ(Mu2Thj zf}{@ejH4OwM1Z%Arn%XJg+`R!Vvf|EDBb0EJnz9b2`7-=yKjL}n@}_j z1uOaMD6^rzMt0ptfo<4~9W0Oy3j{ck)TR|qotJs=r|#-YyrUC6CUnb2A1Pofhj1QW zFiSY?FTd^__h*7=`jFHzNo;RZJ_e(xFar=r5{bo)Al6@tRh*1 zbTMm?eQRQ6$@S*sigbfLkk);$q5HWEu%5y<&z|c_8*wUOv}57)%fXxXjTNqv5+pT$ z9451DRYB>4^h)fR*(CWk`VUhNFuZP`$x%|>50Ki{K|4hN)LU|=^Vz;f{ftq%voB}I5{E(_T(((YZv&;63{Yh0)sZp+Z63O~Eaz!<1>8b_tlT+Ex z#(nggPgsWqtPv`_=iu4$#LJNVF^Vd8L-wg;1V;C*wIdpK!SH}hBB}BspqkI=x6@o@khQIl>Y$XE~~O=zh(u_gFgwp_yRSWK$O;}`3k6% zc)0s=c@p7@7d$<5Yi?(jJr9(#i1$_f^s+ox}MP*JFX?R$*22T_{)(c$71Q1 z#$qzwN?WufoVpcQaeT>^$t9}#a!k&QDI>ovKfmRFUc%~5=8M|~1sgeF*mW^A+hizx zu4eZ{rA@V}O*1r!JwFn8UjE~bdl3q1&x^wrqFWtg&JJa?Fm`+}wh4D{Sx~PT3!?1JD>=bBu9#z|E71!nm z701o2j)_?ltHi|}_e@MV8jEBU#3-2#Ryo>O&hgXsc3)#8SYEQU6wF za0N=HeVR3Pc~L7Md$Ycd?n6?^dzhk{GPD?kR21GCrg=7P8f6u&tK8)q6La+c1rBG*SIXI!Z{<_F z+ODn1?3BNkN8TSEpuJkVy~%oNw{J10AX{S*XKBBWvk-XH?9=qCZLd1$A#{#F$xsPX z_n_1QXSig0)lQ>V&OMj=02gu6>>guoRC7~QY1aCnm=D)V+ElDMyW>DWV5FY%eWsYh z%y+kJ^m}HXVy0#Y)lPrC6pXp-$ISKCOa9}`7Fo>$$tL(x%8lD7P6%QYl?N{)T=E)( zxdlUW5J|A4ZVl+0YltehA;cIg?fGm8eKnCgsxIf$Jz&&Mp@LKw3n5520jl^!CKS9$ zf4s?ywUQ)w&UVWa_B`7hb*J!jEHW=V#c4`wZsqIO5u5ia3;c47tCwH z2c>a}DRmc1w0VT2Ryrz2^tA@}ZWiwwL-Q=;a1Bw=w@K7sT-iqqSR@fA3Rwm^+arT& zMvDw_<=j-oWUY%bG={FC$O1JTly+`_M5;58N|NRFms&OLlJ}`I2Q7Y1R~HVGlObB# zYb#=1bh`FABupSXSYRq5*{&1{yAGbD6q5#d1RiDwa4? 
zvA!IU;b#VKCw9KSFd;DDUQ8Po%>>@|SxDV^r12Xe|8Mc=`HKJ)dLk+BaO0t1jZ6fF zdsQgNJoq>a1RX~yhzR3XOYjtL{?PfdK#eGB)p*0; z!2X*xBGDCl?#>=4EyPNeZYmyDn_xF1vz5y8(nI#7V° zeaiz$Bzx^`%=M3jq(Wk-S&S$ulH(z_(vP$mtvhC zg2F61K_NB-qr-0)JjFUpmA%o3)&Tc#4Oc8mM2pz{&@cI#kWx54GpbrM%b;7u(fa+d z61F?*KvlrKRVwRU7IddWE9rzYasPIf*vGo#!D$YI>V>XpZjA~V1U_S*Um{2kSA+5a z2J~CH)5D*eLE7+x`d_K^EtT(##Zn2WxT*`j_TO<*8~yr)3m~%Vh)7()K*l2ocuB$R$qrjvX4758<*eG7~fKy|j6htjUKZA536(d8a`Dth29u3;Q zRc$HN38*LSGtW(B$GiPgQ`*PV%^~9z2|eJ~OncjXewL@)mxAmKJ`UxMrQ9MGl3_+8 zRU&@p6Y%RRceRLrd@N}Uxdr|qh4wZVN|=Srrq(;7$9ZLLIW_olXY9y59k3cMtvWfj+{(^jog?^)>nKDAt~!^-m=d^Vo~}<{koXH^%(o z^Fr^gIMgwqfA`ML+FN6yCAz3tpRjrdghqR1qYBFP=equ>Aah@A4i%3j?(Ns9-KW6; zF_L-UpLrQNtu&Abg^!VM=rw7-7tBC{_wjS?oYmptZ*GsFw>rq;i`^LAZ6E%UOCRmT z$mjGr>`nAME+de*U#(U#QVX}Y+PI}V4K3_CzMLd6cD}eP;Oh*;&q+9`KN^&&-6!Ii zW1~T%zsU|{wjDhL4IeNw<{J#lU3#B3=Nz3oQq+5$r!Uz#67v%3|mXFQ!H%{jg-}S2fKnCVM76*SR+C8dyZt%g_r@7DRfZvkk<6Ji%!k14&LCfDn zIxgnB*8e!`tE%w?{obwSo#K^qR*4hY$1QtAa}zC<+HRlyXf-GL?vtrf3pVp(_W9o{ zFV6kweRA$o_QSug5}1Ep{u8oY0X-FWJUkTFF&0vPiP_o9z1GV;z|`5+lzw;uq8aCH znFG-r(|z9N4Ze;Js?4d13$a2c%s}?4g=j8x91h zr|s_u*!7t8CR)^NjYQ_|d?$+-w&Yjv>oUX@hv zEKm8IrAquD_W7LVWWT15gjUY*khH>b%iFY*Pe@R(i|D-0(Vzp9*!^{ffHvm-jp}{g z^Lh-$!!;0tLI}J~!5}|Szkc4ZImiyU>aEGP;~nx@CmX$*H<=4EU7I(p9mA{-Q97Bq zb!qV%L1r=wW(vXPDoQH)Y|SXkk#8(>+Xc&`!H2vT9zL2T-(!+%$tHdaR*YcltOe_j zFbSqTN-6b1wB-dKE93kHyPjbCks9le1#fDgdNE8lFvIF`@R45&N5IF8w1ai$fSeP3 z+QIYQ&pe&9kGmKyx@=eED!HJ-z{7pLN|)@x0YAa6F~>cU7d;-c4yqLRg!NzzQ&qyM zy|*Min~xuBTRc|Hbln`v64ryB`t|$lQFL|%?l=K%u>zBftYfTLy zyC^+M_Fxqyi|~6(k#iwYYfDj^A*X&VodSnaQOi`Z&}fz9+8L(jA=pXmur8Cg!5 z3_bg5`RrWixwYkUo1y2QA0h()=_eEj1_axGXa_4heRCTJO~Io2AAkx57XMrR2P^*% z#66SGd3h0NhOW|tO4ql{&;6Ir#=_#LmARFcrk3_U001imP^l;X%NiLK7bDm~^z`y3 zB4z=E;3ER;0EhuWu~aiBC;R_O{C~FpQ~Lj2eSZ!Jf}#N6>E8d!8j&etD4)kU=$DC4 zO{_^W{`X=3-vAYSJT^$M>yO}jJct@2P-+GJcZP}yan%1XOMoR}1b<3^Fa3u@{}<-| zhXel??)#r(T#s4^j+quf2D{1zp$X*fB#kd--5P05lcN8kVJIa z|GyIc|6Io%PZXR809c%&CdHfx4T~e12`-aB(J(4VV-HbVQ%es3{=4Y^tIjXZ|8p@x zJI4M`TIxFhFnRdz-@o?%lV8C&Aq~%1Pha%F5H-!^6(*q=$=ZNThdUyzi+*x3Cz$=p-L% zf8SAI0e%O~%qLCNKbR9H?2N__>pn5l{9vp4>8KXn&y4Qx zn;Cq#@T6Ti)#-Y&8~40ddgAGe)Tr7siO<}vro8N?Lp(Rb9cJRgXA&Yl#rwt6&)iS* zxlQxFmmZm&nmkev^88ZdXja&AM$+4B2{W8iOIIUTuSG4^#;#tES*|<1cH{J7{h1Fp zljg1`e!iXjwJGK2-E-^Q^Px$!h!kdMA}#3*GvjPp6rB~z%udR@7@wYx=-}&<3?$6E#?{C#U>3y)>)A^;p^V86+Pu=BrJGPyMn&BTUlRw*E z{A_x**}1sfwY>UpW$iwHuBW%ZcVMJ<1EQj$fPfT15fM?5?=S292j01utC_WC<~+~EoU`|5 z5A|=o9o%~Ndf_ei_rlco*;nHupC_I#z5h5fIlk62^=EMU&xbcZo^xhrr+#;RS^F?M zH#+;b_v`nGg}?7tb|x2hIGbB9|NHOL{KEX=(&y!sFRN=Si}Tydi+?xP*1oNO`nt0? 
z|L5!a_b)&GeO~|fWAo>aUw^lL@9;L-&empSCvS)SKL>IbSm0N5W>c(-o27!x^E?sBhww9F!LWoq!= zjThH(Gyx^o)`qbf=K^hB?!nEMHwZPZ*Ie)4des^aqIHS2A{^T(Z~0{ykSQUS9m;mu=c(!@yo~2Cl9|(chUs+ zdbYQHpW_th9Dd&Z=*J?r#_hW2)5kx*Ox`;-{ru^ZU*BeWcC+>#>}db}W9en%;TIL8 zC*JSh4F@!TExT}LdoE>1I@*6KFs9Xbn(Un;-Y-j3LpsXUpYItnClVxzDI(%A;XZYh zEu-o0M?MY7MkU=7ql;Ha9n_T)uk|#QdC)eY86!I(ELW3_OuuyWCM;c7e{0)U-f*a9 z8rPIP>uLMF)i?_8Ds5kFzb+)Hj<@T-r)KM-(K>&H&-A8xu`^ZcpmiiWhE;+Wa9+Uq z|5>Vc#q0rxW#jsd-L#@vlyt2X*$EMHR7t+0ZXBLjb8$MmTNS@v2^mg}DKQkth25m3 z)0+!z=L$D`jWg?0w_H%VkRfp``DUgS*SxvX)=_RZmSDv-t6_=7=uU9N1+&&U8?lwm z4O)4+b=;Utt-6xfTWqM8j}F^OT+%IPy3pHEuBP|b=Lf2Nu_zX_uK*!S8;Zzmx;6B} zqoRH(SU|P~xj8CZc(G>RJ^1~a8?`NftfLnA-RZd8zapbro$y-aEJl>QGfU@O(X4=O zUAAHRgP!zoM;$gx-mx7vxM5)Wstt;ei)BIFut&&-t^?l?7SUQ-Hw*=`)jRP0SL*0h znmx+mh2gqUg;2p>B9pT=z9(fkn&)d<%^ZiGfTioe?nffzwT5aut3*;g=J0>2ZSP}S z&SSaRG1=Yfy5YLoDD3@uS;2P56M4N|Q?#gWW2sfdew3-qi)-4p_vnz}TjAw;tmow_ zA=S9AZQ8mMHTF7Yo$kra`SK`z;;w5Q{Ny?e?O%>r&#hw#HD7DBMIGc1pC12p;>21s>FYm-zG|=Z z?FaRSmiU?meZqSI+qRqzQzQG&pWuEkueTGq2U#1T5-$CP_Hz@s&NS9Zy6vGmnlxZx z)@0)QbMn6er+UEo2F*gxE<{DXqhp{ey;IZGpE_V3CWi145|yrRxbkPp5V88GIW0E6 zf)(r~5T)dX`(ezq3T8h0!4F34MwG&3wLyDYD^+O3dq6janch7JlO|e>UoK_T#`RW9 z-Zu(=mF~PBE}&+Iq*Rx>9%TV%*Rw@pXK>=?kAmiJO17!Yi2O`OrRHSi;;Vz?w=4t{ zb0K+=DzupjUle9ft$c{Rg7|Qt?HxZM;ieL}y}HEU7W&;nfDWmjy{H zLYJ}LZ)_OKBklsbgWcR45@eh4=c?XGBk=v9uUwBg(8KNjv#@ zl(S_=DUXzCr`Shg;6|!)BHUdR=UM%%p_crXalbwD+;o%4@YfNwe&|muN5fmSVojNL z=x^)Cf_OjFcej4t*+A+>^;HR<7&)W5%ykT0$bTE5Yu4O5b1VvOt#~^ z_}6*rQsnk-!)p%r@k!s9umS}K%^xo5#{_ti{*tLL^S;;=2iL$B*`Bx2n_h!{+QNM2 zrw*RsM@)ZQqYyh6apJO;w*=^q+*E3`ll|q+ZqJHl2KF0i@)FVs_@}F9?wpe{WLO@L z==k~co6!<$`iN$aD0A|*!_euEznXuECSoub)~pYM(*bgHS%l-q(;qkWBPD*`$D~RK z9p;-W?iwb5m-enQ?Bai_ENs)xzd3XmHt?qJ-w8*RbEktbCA0m}TuE71W!;6Us>7ph zWjgVh>2zKUG`H@sc`xp!W4YbA)*fOct z%Ava@zZ)+Jqc#-JFb_!1sSK0o^#^j|LWlQuW%6 zbHC+Y&gvn%zoNYjIlAqNRt7Irs1uz&6_+NRw7McB50y=@-pyt$t3@o(0EeN91ew75yi96EPU6K$UiWcl=1BVa)BE_1r7inS6Y?Kw*LFoI>&L zt$byd6G4{|V!xvjTKLs z1AoSjounW2wU^Oz!jG2CY?My7FZ#dO8fmmiY&v{_-{R-+I69Wg)shRNh<3zwcu%SR=OHcXillPI+=K1$8O4f?%KxnA&e-tryVc4j z#^YCY@<3kz)z~$WuX6&-2cm0@dhLMzx2Cj?fM8g z*jN9axn!wy-cXDiLoD^7w-@`r_K$Rkx=eetRP>kQ4RmMCbMT^ zQ7jb^E39l}@JdtJL9uU!ud1K)ox;$v^kE_ElL^^6BA2x8ab zW1q`h%6bFtizcm`(_JuWzJY0Y*+j9x6vsfZVfOJr#zl`Ap%XVSsu01ZBVz09qmnoF zk?j?~$BT?SO!AxoLl;4>&cs9{+>Uhikh7SJb4pTRqE{zaxd=*iiR^yogL|==UWD`6 zB!dd$nxaS*Js6dupZvp79Cpyl+z;yr* zoOMA#M9^TZf8s%M*7M}^s}`uw4|7}tK(84n@ApuM%e==&7XO|r+LpwFX(2iR3=zc7 zKm~|^J_>Y2fqDc0#f8nckz6r+^ZUg9B=6?imVl{kBl1<3Z6IA1LSSFkwd6BF6a>T;8!)m=A0_3n3al*AN96cTZYH_R@HEuN z)o%ju%F7-EP>g;2qUD+H8()-(GM<4-! 
z*gnef0$`yx=~tZiIoqfOHh)-5k~<4@B!Z7M%6|laM#)M49Yyk4mdq%NWME2pLv1fE z^!6Y~Sci&{FQ3_c3)4fxy%||oi_RvvoN;m%tB40vIUpPZIHTe2go5RUtEh01^@k*P zE)3*C31rZ59dNpO;+9--(a{W4)n1qFGI|5(vR;AW0#3^?+xS9WfT}f_#QYF72@KE%T+)nAVPq^aX|NSvPY$`q+#Z6_KkW3 z3cSqwkmTzhmSOss!Xl?L32~In_}tGHX|5N#{9>y_A$MjG z1_$VOf}(_D0-eCEW!U*a(p>TRL{#Fl!J>nm=e=fNaCXr>M!~R)>>3-VQe_#`rTax* z`YWq>A4C)pD1lur*q?lz`xt5)Un@|ViNP>MxiIfaMw@e^uMEW^P z08CP@0T^~)a4G8->d4I+$9*8=3+&Gz81oLs-mY-RFbRa+Dpx1I6gja!GR%XxM%VSi zV7^*7?y?#Gg-v-`%lY{dR)qoZ$kBq$Gl5%ya^zb3J z-WM36GBXDW$IrmVWWkHc;1zC*=-7jD|HPy=b}&zcP+8I!mt^B6*Za~+CH#VwvY`6| zvAPL6Vt&Z46>)v%`)0}_OyKU@evr)%b0_oG^KCKqU7Jc^KdQfXC z|Jh%IS8_@~SS8>^Ji&Ym4ho3Pk&&Y-rI=kJFYNcRsz5PaGsk9zR~g`4LlP)2hH*V} zG3Z`05~7FZFO!F`oati#c;hN5uKndNm=PD8t7RuUf6c z1@5rL61#hK49S*|`s@L4?^}}l4D^94TyzF@nF~$&T`zd+7+v#Rv3TDGc&PZZ0enirriVlFzuZJ@AaqdGZmZrZk`Yl`zFJxm5%d#)R1)x#Iv9D zEG0XjPh^D~_ehvJhYxkMb@^evT{QDVn z&Aj>oR>Ku0`ko-H1Q%W0u`yCXgC4@6k;$*vw#%#g-jkmzYznJM*jBG%=2!Qv-4Kzx zp#JlFsK_;g-`TB5k6n-LjiVbvZc3%1Vy=QqD z>%FvH+t*s7zohsQ(^ZhaUcB`d_OKCLeGe1Rt}uDaP7J=1gs#y;m-amb_0ZMfnIqy= z=bIH$!%{P~mZ4crd;{^L0lzsOTIc(hlarVQ*$PtQNmfjI(~0;aQAEQ!H11@Qkzj(& z2O=VUKVnVVv$R^v!N)Y`o^Z-#ba=YDRivT5MCL5sL>d;IFRl)~Z+le*-BgLQY(RV0 z=`TVN$b^ZF@x9{{7#`Q*Tli!YT4(E})xL2|$7m!zLK{s**Kkc0^pySY9r=LrLFQ{V z=b(?UHCuK~qUNB890{zHlm4v@VW(R3vsSA&8}GV21VyAh zrSn|i880+Hx)ID{3k)(!|CtZ(Cxbl=cdj62#g%7sZ;yJeop%5P0;;g*FML=rLevpy z7BQHszW8ab$Oj_YRl>UOgUIxZPN1Lg32B`qf0g%SqXW@<$6LuYY*jO7;d9j!#z=z) z_l2F*jNhXkgJYi1F#&3Xjj?dZpE+$dkokci5P>!~R?%GC60RqNp70T#W(&_`7GYCK{Ucm~<;X*KD>b}C z1P{+mM`P5r3OgGoQ z<55+-^u#pFto_H)bKXZkpao0urhlMncepPu?e{je6)7V;eNx=S(Q21FUgi|=hQoiw zMkdaaH6q+nmOn^O%;|Wvo$!A0ZWg0U^mNA?M8#xBR^QRD1Z6?CC+dRUX@5UlIHvEo zisn80HNkL1XOwB@wK?A&E1$Gepy6U}qP7bD|wWHBar3#LKlAZKF;5@XFBMnyBsWrH#sUMEHTER_l<@ zb`6VGnLieyW`hV^RZx!=xy z-+8>JK_pw3`o4Q4?w7~4p;IxgN7Rsl*?n2fL!GKiCxvcPRb0f6AJN$37#KO#MSqi9 zNT5pG6jM*!j41Vj-=r)%vVu!)W5WA?Z2Np{Fo9o_*6k~t(KHDgcyK0_?bm-&Rm#YB zQ9Rb#2gTp5+Vi8@%Bb1f=y<@b5pGb3fry^Gn1=2%8Mi7qZLtxbhZ~oNcDr2(Cmr(@ z7w*5$Fs!)x=vwHrbm4-jdS|!e(;ZFVuah62rl?*u)-`jCNGvq@zV?J)eCqKPtT_E7 zsVkwZe95zUetT=qqg3(W$JE?A%kZqxppW#7t#6Y(b78xjgP%8ku1$Z_J(g{i{Yp`y zJWU8Q&T=qHK^oJ9jD`YPT_#2~jyMxy{!k+c`URQI3)GF2rf#~iyNtw5^lB=Xbe67^ zal3WvhvqHCv|$U&{Z~x5IGT`&n)R1z4yJnO7I!{B5fo70j(Dj6Q9 zBEOD*Ev%eF{T-?*%=p%ASDo>tF!0oqk38h{bL6<0Lw@&(65a{{M6+-iIk+=Fy;^h+%ay0c&jUiK|U6|H{FUK6%dXECEbE`Q8W^E)*C zBR3=X=^-e;8dh$xgZ|i&m7-WWU>x}mw%M(ry-tmEslXGjm=BM!D2Jn7 zZ^jrEp4{1|Y9*F^b?UQpr=ozyt+}gM?A?7-X0NNt>B!>`y4xjh_BnhfrO9E$+13=w z0dkl8=;|v>^`xjXw_pG;opXG6;m$r)EzZ`CNy(@?m@MOR*#9-DN-x}gUy4a_IVQqo zVm%*i@mO|;kYu7SHzHZbg#RR;z z4c)_a(Ep>67p?}CTt##AO|pVWLa=*7=LRrC%6So+5i*aR2fmp+5iD=FyZ03BY;{gK z_XvLx8x&*cCsh$*mtMDQpdF(IRYi&AMT+3}zE~f(w=cAhNwr>aO7K$1BbA($`L;e1 z$KRd%7W(>0PpnfZ3XykWiYXLINeh?u6EI>nONKMymp;vk1h9vtR%BhRgDF-IY?e%< zZ@RgC2~^*?5-z=7dD7ff*-GDQUct_b(LE6*hbf#5!fa^eYx@YyKdK3fW@Y}Yh`#G2 zT`NDKI&wcADhhXs(y1?btIOpE?0Ss*=|pzc9-H5XT0sn%G-}mpNhA?hF482WXE8@N ztyRXV!X@n}d>*%c-kZK@7m0VZ3z+iSXN-|8+#96&tHM!jI};(z|H=Ismu;Bt=N4@J z_gW3%_lIvAD&X{}1K-lfZ4{dN{9Q?B1$?v<=~rTVw=!L}w8+LlEK>;DFDKhi)^PdH zUV~^+Kn=;rU#^Q4+G{-ca-!vRy>FIKv?ffJTj}O0$PtPLdi35Vi{LYvlHT%-Tx%1h4~Z)MFguq(3#6mH=4>j0+r7v zyVIQSv_5~^(BJCtZtTn9-GOe8C^j-?DBjAsyGQc%x%;&ajyb-IPb7bBCD=~oBa~)2 zgUvBoEd#oFzv?rDY6wp9o(ZLjIpO1MN7(XI_z7A=0{7!69JmY*1hZ%6SeF6lk~;)a zKD999&CreTLT7NglfRZY=tfY)OM0iIT8Q@`H>(mWp42O}#H8u!;={^zpNA&780bC_ z`a~g<1@`lwOIq)@bLUogsXjGFaNZ6*cO~UUbAdM?bE5X3AlQQ-^C~R ziS6U>TQ*PYmF%ut(w>N|oArgRXbL=eKInNQo!9ujyS`x_^*u4JeSe4*-{70EU$6Y9 zE$H)#zxVDP01^(Qu6^14gXuc?jc@%>4o_`C z*}eGZl|kyOoY;Sd-FN?XS?uqAQe($@T)G3lCb9-Y%M8>4z@*JTRir5jCotK7Lg6-5 
zI}u5^X#Q(_JEgG8IE0E}TPgnztX^QXJQ)W7F~HIvl_?lt8jpi*KFz^{YcGvOlDcF} z>X;2#&!}B*n2d=?gHw%N8yJ3{5*hOr(@?VMM}SwN_Hm%_GUu&_Z_3T~E9Js0aiCJiZV( z(eP2B$!wzMi_VkU)F=Dt*9M2xajki$;^7ZU%maw`|B}qjT6qcUi7}e!oHwd2Pz2$$nJY_l?)+u zL5Mhn5l-xOU$|O-gh@YN?bGQWX8h# z&u?4hopIN3lYvD0fuvqh*htSEehR>`=eK77FrWV(%Pk;#}N2SU%x0_rqZ(t5D0gU#3_8e74G+6b8_C zk2Ez{7d2ELg^t-QCF56QMLIXVu$8cE<%9I zSHguzSWx5}AQFU^i9E=88=3IiB8f@kZ9?fvg2xO!3YTv4^W^~tgl7X?pa~ElbY{2W zVM3I@wGc-x-5r>~Ezl1vfNtmsfpvoQaZUmeW`jhmsvz())NCq0p=7LeM}@EieMt7A z0SB^|pr?=fy&<^rZ3xJoz?&_3^=@V+tOGbrq)G#2aEzjWW+pc9o*ua>>GrSpKe3kV(1d_KG`J! z3if>6=NY;jn(gkFG`UJZnhd(n&?$YOdmEkRFiE=)()A`O4(V=0P`U&_aYkfx({;83 z9NBc6QM!8@nCCIcdP=9J`9^hujwKX+B;|vzv&{@Fg~ty->l>f~2;0>C@m_d~L7oQ+ z$E8)gg6&7qgVOk>cc@kkFtg6Fk#jH^xM#{FoCg3n9OGpstABQqX0SxX0w8hwn!yYW zyH2%Q4}OE9>&L^00I0tNO%-QY%}{kY&}-^UED?qy(sYTijhE@>9H>5uuD3KME&NV< zhFbTOX}1n+B&MHxMNvc}l^48I2ZlIc5=tmG?uNJ5`t}x!zebB~uU0(~jF7=K?@=qc1N(4&A zzl?YVIf8nvXu;<}@=vnhJL~69Wj>c@Qyd!tN`8_DH}oTy0D(@ZJJOe?^p=`7gER}| zKTC8cPq_xB5abT#|yfhbmwIW?Ro1y1G;ZR1eVCN-p;rNTGBRE z8=3C64SfTo>25;_7Ia-;Ivd5%pQP$zBGkfY`6Ea?dzYpU1ZQFC+FY6+z`MeK<`YbE z0gC6+iZAdr?Wg;3VA{azgRU;Tp}K-Zq}`ou$BO+pEiUUC{p^FY56W`Np#P$C`?w;!UDCaHRV+>uS8Hh>bc zGs_lqecZgB^M_Ct!$=zBSto=6ANF^$71Kkwl4?$X+i(H@A}?*0k5TlY?8y(>l~Db_ z1^+E_z{jy5eZ+zOIo$^68`5X}_$bi^s2Tf)kK?@lXQ=36mP`VB;gRyJABP&E|O533{hPf|xp4rR|kuZ}WFYlnZSFK|9YIomQ6*{`X@ z=Yw(!-W9oiTws5tD3J@`of)s@=MqQ^ZB9fA3KV7UYq}T{QAzzSG?g6)-_z6`-bS{I z8I^B>zLQ7n3IpJnO8VV1Syq!&YG9fm=7eA8@oXaFlXseSr&mHH^%n>Zq~~n)F*Q$s zU8_@r%23XJh6I9+;>Jq>zeL6=iU7Ox2fqVCp{N``swSU4F=$txse$#H*^GU8Hmms~k%*hOsE?q^GPK#w!Z(`q5gq!IudL!> zW0TZA4W#~Lz`_VblJuO)n&RA_`fI_bK=_)4{HkUO>CVpCzD;E(g^F^hf^EDB$1d>t zH9{15#dZt0qt4JKErkQ0vjF-;KawE3k;R+caG`3y0PQw1BP>mKop%FNkioB+Ykc^~ zLi1{Z!7t!=ZschYp}kaA#axLXGOV}1{pa`&%l0_ekMv-EdA52w1r3e#Lzp^HoDWdc z^^j-V)^avz2qIlt|J0YQBtf&;%{_0Uh;+pEfqyU45G+_h-O0>7Z}a|bisure$aDnH zYFd~s>6ez^?A7~Iz()^hGwF8+%`lIrMLKO+0dKN3q zAJYKvJS*$hum#i>G0v+erK~UOUgXo(e4oCjNuT)TbinFkkJHrMN$6rU?RYUGv5g`> zGaqS6kvj!)^^gg~h&$Vh3Y_^Yl()_(B`r5TA@J;-z_i#7`vuQ8q{p1yND+afU=NFUYUi;Yi-kZlTIf0vAAD47yV8fY#>e7yDf((`v#OadNw zrHvYBD4HFXW{^rkqYiAD$2i$7ewXJkU4Rb*O-Cir&}agpY5ETp#mIWoU{Oh4&i#4t zSVXlJL=%+b%1 zE$G-8FFcAqH@;$xPWL0xw3n!1OvY5jiqet3(CuSHmX|2!0&#|VN<)82CV77Y^#z`x zz#4aC?LN#6@Zi!VNXMwCRpoU4=s+J8)<2 z)41~-j_*wWP=#++87kaJk8OCFMUWB|QhJ=B$wWCs>;Z&fef=o37k)zgn;8t<$`LPSTocOYlO(v9w;+ zxsiTEchCR2bNFe>@2#icHYUQo*do-GdrW_xb9o=^SY<|mdT)x;1# zklv;lm6R=neH6+{9vCh5`!+f8TU@BFLNHIK=m{y`?aIR^cS-9r=^j%17aoLwMY#tQ zB%7dzrG6c$V(bZtryQb&Y+oM7?73~^Ct^NDgr%Fu_~nael;C>3`THg0jLkvZYf?!O97)q8Nc#m@7N=2N@eM7O~8cjjO?JZilxb@@7j63@%s5L(Rl@$C+0^Ob{pKGg z#cR6C8pU2ZOf^mznj10*6rT?_pTdMbZHI=C9wf&@lF?F%9$#K%9x>{H@cE*GHA>aU zIB2#S?Keav{!_r>byc=W^Hloo4aNmg4GKDx!#+&YrPbpCM;!m#<1+? 
z>pembk!Hzf`HhUp6`kYf!YMcy-rR~C9b)wO4otb+ImlW4c4d*olBOhk-J#h64sr(d4iwNDHfr3i{3fn#Yyk^?kjzVT-6ri+SaM)6RLQ z-Mjg@=cB+`hyWg?B39V0rpZMPuKb4NY4w)wvblf&x+W1>LXlylS?a3IWR!4wwXE3= zX1i$;QtWWS3tkYNPb*YD+!2^=KEK<#SmK$T1(3R2Eaux;A~lh{N2+Bo*85)R>vj#Z zdvaGTxa8tr!`FfN$^&En^2BbAOhaX>4mgw7fzOi4djBp9NrNg3#=2T_pgLBU*p+wu zM~z{=GFE=`2Q49xYyt0N9zE9|8^?_nIfxj`@GdMBD~NGes82uX{k34I41$+C4d}*G zq5Q}iq?62e{^OJ2)eGTvz{TG3rRYv4l0Q1>qr1E8rF=V{51{xe>Z8!*?+2s_hL?7+4MG1c_Gzbg zUb}H(NyuIG?X392w*LJm9Wq-UMPKkXFmXO=B>Vl5sb~JR#;Es6-@HRLtG|j%ap!-0 zG^Dt0CK$3E(lyR6jt{30DybekYC!mVQAg7NioO2DAC1%hg7012&44!p&YArDV2MPy z`#y_C62Abrt$$%t?)3%~k6b~42e$Qy+Os&wNM{$6t+OwX%$+$cEB7D-hjg`sL0L`4 zSIPAE+d6Y{FWdT`tM(o|jVKwz;1Tho^1__I!MZUeVkTU$apFqWLo<%O)QXX&wr#7>2Di>2;NHgPqz(wV3PFg9Wq_~=00H}=CaZ3E$4mh z)kcM}c!|`oQA?}Y>^e-3j(+4P%u4oXvJgymW}yU&St*OySwCS)`}E2voY)g1@1Ois zaK&}7*y9@sb2qJ2#L2|A;nPY|YlJEPzixh5O|ArC_vvtqxKFq2Lre8ufR5h6h@@zO zE~tX{m^=v(^a*gv0TSeg`H}6-R^O=DKw@N*26LjF|nmn%&9Y%a?Rr|ZpOTh>63l(vL!mI z?@7jiFL5Z*?1)K6%7JN?Qg^reh5Wjb7qV`jlE1I~lVAT9b_0|RS)wYXkMix9JtOu~ zJ33!OO!u&>(iU1Y7K@M#V}*r?EDBO%Yh*gxj6*oexuc04U)+@Y_jC|{hY3AVS)1%r zK70CT0#GBfN*vJcrHP-oU9I`9a#98UNQ|TTjdSkwbSy_2^@rw`Q;Jem`8he_C0sGzz;{ z(_;ERuMBN$|G$(Gj3V^1^NjT_dM^eAn-l{VV`-nS*^A&ZeK@dyz)U0G?%$?qz8IQt zwH-DIukNd46^K`jfwtAzE5&zpH}zW+I>8-V7>m$?ahv&&*Ep5E5rc=dhKl=(Y6c=& zh*Of!KiqB=(;$$M^WC#I>3gOq*hyQdki#;SV>#s+*mc{Y`BCU=KB)<(3++`rZo-*% zCusoh5Yr1l!PeAUf*8jbr%&4I<}LhM1rU!bSXIp?+L+4wRe?= zbmEfsUXSAMYVVOIkqhD}2q1J1382g~u!>Yon~W<-*Q&51OiT}B<}JTuRh!?3xWdCX zW72j`Ff~fZyMJs)q(g9(ZPmXFU9Pr9oR_RFXNq1Rbt*-5!5g}rHp@xU-& z1Xa*Hi7H(p7uDMPQqo_!F_5Zm9{|HDs4d`O=LyYeRa0y5))`SX6LKsMzH2@o`U_!(r^ zPWpB?(-{`(-+Ph-hV{>To480R;;|?@%X6ZlM{G~U*j1MIDsL+*$8_rV_E-Z{1ghH7 zzbi6^#@8C^nq-@!>tO8e+A)Iu?WJ@JsQK$>FI0Wq_)ov`h|3YEoh34R524q(jbar{ zmm$$DH0Q7RQmRe(RiD~bZAQDIpeH&;PMiZyDh?@BAtzkR_( z@uOIvhuufBh|lum2wf5?GfT1+NLN394(wn#xT+|PJ#|(mDz&9$`X4_e;!95p6lf^H zi4D;#NMu5Amc{=WP~pyH1_Z#g8m^A)3Ynt_@}D3Y`;1bqTI?@{?Ti2)3hMZiXOkmv$0bynheL?Iv%oT!!}3Xq0rynDynN}G|e+9RB4s2ay8a2KH5QT#RcQ! zdc5mCT$^`pab&p0bn|1CTuo?ACmo#rATaW(ahLmcS{yGo*dLncQg9yNl|(d!tY2y0 z_#8a#PxrJ~c~;*$s@-&9f}+sYr8G|=^x9bhH1xc!G{_0L-PIjOLy%)jeQo z=+bzba7=e`byY9#NiSDp0;(<5y%^^cQGwCwaXlyednwJaue|O1{E1Q88Q4OYmkGV9 z-N91QRi@MJ4%#Vyx8ANKo5s5bIt)X{oG1;!UGIh`+BjPJuF}v)A=elB<2ui&Wk^@> zJJi}K5g>LW`fL866**IQN6?k*DqJhnS*hEmMHAk+fi*eZQBE@g+%eqv`!Nm%77Z4) z@%M~e#@XZ-XgjT(snNpkM=idpm-xVmwwrMhh7Axnxg}$~RPK#oBiha=iq|Fpv}<|N z@6L~7fv5|VL?l&;5bJ7R&*ISbL_wrVPTfb|;c7*__fwZSt|Eg?*AUNFIceZJsXTJQ zR%+E2iw31`L#<;d(ztksc?x`8(K4x~qj^fV+v!3`CyM})ni00{1!V$1A6(@(eQm4x zjyc$|?+{c&iAwIgULldva5E=p9ggi=G@27GjTdcpOpE zx>C#;tdhT3XqTj7N=i7dT(5OD6X1gTtmL$ zJj5TuOTs>_o@w6%mC4GgUfZh6Uyumj9A&=OkNF{!Qc4;{L8Zc$TW(sxd@#F#W5}wM7`0n8hhX`fLaF%`ZF# zi3Y1@KXT4QokF{^X=WvhRgUkJfo^?ZuU)$x?CV+JT(338Eb<%v6s`MIahRnLEGi4+ z(EV3F#q|Oa(iy)19A87%)~+827s8&ediI1_CQk& zVhx=qAc4*nH!O$I&hg3eCqIVidbJ!#ubD$xqeyoLMr&(kIWMrIZ?I95zxxfmu&lq0 zizpB6lpU6J-h1qr%>G_c5-{H&cny}5nhMsn_bj5ut)GqsJbm>`{?UK$A6tt)ml*Fe z_kMHuDQ_$^+_$58q@^JsuO6w;dQ4VcS9;tR<3FlwI&Q7Rh$Y@^ppqY5uEq$)CH=r=|9i-H! 
zggoS#mPK-Y^}8F6QJFn)nJP0$T2;#+`rkyTHHfqm zg|RhJYS$c_kz-yTEaP2cs$;)IURmSAs!=-MJ=?io`PB5OO+vmM21QGZKl>s8Y%d%> zQ3P}BvcsH*pq+a#sGc@fk3KuuHO6jd7`CSM?z`#X3w&VKwVjt|IaJE?~NYyn@KQhhz+b+M*1#PD#lagx+1v*k|xRiq12r ziLC*{yXmCULkkHVR1AnT1B4_Skkdagxgcyq2xW>*sp3-}(|2su%wo~V%?;-ub_>V5!Cr;k_ z$*q|hc^==|r7ddGQoeeg0*}UYcfo+GujlDT4A$6Yv?efo;^Kc>&yurl0t2!;HnlXh z<5EG9DkTlPEc>kJ*%HR;nOvA+I`0vOm^kxcuXXy2Nxz0f)o&W4otN>)urwJNs5 z9NsILPkA?;r_+Uyia5{0tBS`)Qjk&vidxA1q>w)kt%t z;u5phU@OfdmPl@e9$PD={p%XVsAKu*W20Hcb4pPxhapN72h3E$el0o11yZS@HQu972C77N^`JV&VKHzB<3TeG+0Sermnhk}zIDLyT#%%S-MJmQ*Mjr_LPx!4 z5dG*Qz8{^It*FzHz(f0=nqVB}ZvR!ZP4CfIbmtmzBpvg|w&m5#Nxk#wct`pMH~);$ zhoA3E=pXtEjkT!l0jH#Y*aVoCWnZcA`h3cc1>SDXnY%olP6#OXbnMrNMVTxRD|1Zx z39v2WY@$_MWn_g7Hrs^EL~qPbpSxn(?{2<4AXb2(uQSr?c6#e;CQlXY_V3C060|?X z{g@;*E}=`Cv?H%2_A0&0++MLCW}ypHsqImSMG~?Lwc8f!EeyMe2N4k@jax%MOyQTIL#*t(ISrmOZv-f3Mhx^_8@2 z)hbt5;lKcO^zmOo&|iw9Vys%xTjkh=Li_VPn$xOUw={-`*?z9@DsZf46;V=Wy^!GDFO^#{ay* z@>B@!z#WsecY3>1NMdQ*pOw8?RXcb0k*bmqoOrE!Q=VzE0PT{ssNL&tWoC$Ga!(3$7Pb1-Xg}|(8(3ylMsSu=r}~snyTsF9 z1WFdH^DNfU0S^EctSl}+P)h|LUa`f-G5`*7n3Wq|(WjVPYjmCk0I@uO)8SdM(aQX| zC?_k0{Xb3ADs-G4LcnGdI_Fp}<)+B`7+XuJ4765S{Zfck zA9VK{KTy)eAJ%oJl8Q^-=*;yZ_Lf2oza82;kvtDx7q3T`Yag0m-9Xo=Tz~@V#CJ>1K zNG`EU{j%|>Ig^RXC+f!F1s7wErq-=?8-NcrN*U6aqonT`1PPgp0gE%@uI^kbsazgZ z=58aR@V=U;T*ltQp0Gr!ll0~C7uUgX&-4=5?+G}S7n#n_gWjnR(=&2LAetER* zouaic_d-SrES<>m*5)}LI^Z$=iK3XIZIgN_$FCt&@!HhD6a1_}IUd2bHA0YsTK zb4W_sc5Y{N2$x$SR&OX=viCFbxYat<#1;E?ynch-BWt;;mk@Zqp8)!kZ^wd5!m@Ue z%0++6u*xN8(C~tN1&n4BzEi)-O`Hl?QfQq%ML&hKbLo?>mfm~pJ#9aF zyg;#Vm>M6P3sZY?Pi$1Hb+s;nTo#TEnKj`>g{eQklHiz%(jHX$aWVjYPNy!%>`HpJ zx7(D8xWwt<&kY*IWq2kiJ=K<(`~ZVn&#&lLpV3Wz^an%7Qh@R~<@p@(Y_)IeXV;HJ zOD?y)gnzCWGW!vGE*=!qn%aza*wo@aeFRf;-|UU8$}{=)3lqso>U_T5TDspiXvc9A z`R4+J4zc6na?7&#uC-dpF{h9dDwg88zc#986~j!vMoQ-xhgD5ha^+M#pFYmu?yTBU z)O2^J5U1L{@P-tEba%>6%={aNT%s+#wDNwMJTtwt9d}s$cFL0rKfYbD+(AxG4$3l3 zu#zfB`=Dh5KDKqYlFIxmOd32G8^0{rZN5FoCcdmQEA(64Py|j6uuNPY&&Ye45}qhy zBIe5cereYbS8Kbcm^kM9of{XTqUJ;78lRt+%p<0go&KCZ;LRa0i7@wrx?<+*V37Dp z?tWS{p1Qeq{Uk1O$4S`%>dczy3rhO(souB8T&ysI5AGlQ2_URpAyr1QFp@p#f_=>3 zUA>yvvwgpmE+o$@oa6(*Z3a5o#Y)PT1^~^95CVY%1%xcEkW{f^z~HIY#1j{Hb`*rE&BC^{NtTP2ERGbK#zNJ{e{cKw z-+y2MJpq&`FA$U%B$15U2RnS0xN9%v$$|ad;{>=<~_W9 z4G4^$uA}@{MnUHRaWl(e-CYN=K<;06tIr$E1?XUt{ohrbYxq5#eL7s{ftN|=BY zORR*sNfsc{6adXfmO0NPBQQ49zs+Oso3Qe>EQl!p+RV-j=dQc<$ZIeVop~DwHz}A9 zaev=vD;1W!6slr*qe_Tk3PAZSU`{J5t4;`t{@Pl~f0-=Mdknt5Hv2{^s5J%?sHfvE zt_h4C4A+x+*YLJrc_B!&1PMS=`S76Vr3Cwgl_zlTs7;g*OclV-yTq3OVoNQbH5Qe; z`2@?=4TvT1QXt+#7-(a{#f4~zi4E642vG>wQWU9rqm5YkW)PEa8H>uVd*e#F*!na< z3yo!gmv^9To`Al5BnkjlkJp(xis?qn+yk+4D}og?r>zZ;1O$a&F`dm+;$Et}1QRCM0JQjgH2|~~%GL!!$Zk@-g#g8E zV`f5rjv;>ZlRrwArxFHm2u~xA1004}9#hQD4c#Dw144Lceq0ShEI&}?@Mk4}${0(> zd0NtWJ-99)P1r!1;vcT+H3=V}L<=h}*Xy+RAq?24vJaH)AUuIDZX1707%Iwf*TvY3xkmX2;JAV z5n|W7iZ<|kC>E#CCKQh)j@n?!xRu~M9IE`9*z$E?y)~Ri%WGFc}d1&6><<<(_Ky5JI9)+V!hf=i{)5AQIqwn!BWzN(YgQ<=?wkD#dJeZVj2ZfnQuy10777Bhnz+; zQK9)plYK_Dz!I0gsc1>eJ1F`Z#&P_;AilFb9;8u3qzDX(0Ye;z9U=}$#LBNTfTqv# zbx*fjVCm_2GPEaeeG#dI-MLsmvIzTWAFVuJ@Uf2aY3A$M{JiIgZKNq79@T@-JjPi7 zBY&_+o0MX62pQi)gC*#rW(!vUvgd5!)E4FB@8SawShyOrW-68Njp=T;n;^eSQ~wL5 zn}u6t;pDBzBY}ht%IWDm+iwi{P0DG(6_qd#!Vji*7sIoYMS!hpjIeQc83a~f@fU`M zXCNjq7*oI@-4&>Ov?46SrE=17kWsZawgkg$T*4Toi?VBCz~%}>$@{jrFUr}8Up+1D ze+PX&=r%o}XZ5s};iDGAV}!>f0a8>3Q*<>GWfmI*_8Y_ZYZm;IEfJ2cU4pOjt)SAC z{fd%+^l2)1sBJ`z0?z4~wt%l9T}@8n$Qyi-CzRm3@5cH8-R$)R6``$aq!<*~e__J> zt-Y95pznY73NpL(6D^5bO@d>dwRn8G-aEnRFMJFGB%v(zPW9m184dx@FfcZQU={wI zFT2UYFkh%gAHj7naW4U%;YW1cZZPZ2sbU_D8T|T-t#zoEylht(z8|to~x)+jBy%-J}D#WGMm;|=!PRK@8G5? 
zwWGTBSM4SJTIwVY+;ZA-jKK$0td!D?_sYgfg4+PQ6E6%uE#uUCUaQq2Wn%2BcNk}| zeykSA%MK`7QT}G;cQ*I@(O^h5$V#^|0KxU(+1T~gqK&b*k@l~^n;Qf-B1VfH)(|LR zlV$ed=@u})y5J9NF`dyI3Tpj&Xmy+UzO}j%p&Tkg|27rl;z71iV?TKxsRv`dW-C6k zB&r-Fw;{}@1Tn>Pn9OU6HwCBPaP%a5!_*`r@m5|r&kuGpIG1KIx(j<{=DjPd2#tb- z&kd?=Y8lsnv>CqbFx&s|3;oerwEi$vh@i&JtN6oh?psOPGmuglCw9R7o77{2%IP(h z-Cr6%**=-xj0Ies(PWm~KaY_V0J5}mY0O!Ixn}hB=!|(N!qrkfVTa@Fjnoan_%J5b z;x4p7R>_Wu&}RL11ql*BTFdoUKU@)GY_geM7THBt+7-=YfvLbhX#%)B1EscQX1(^kr&3T6M|8eE2`-I8x+1V)6}%WtVbch1e)T(&Kdgph!C#eTNp zZA1oz(O=Br6N2wjKmV`7(f56QM@iT&qOLpnFAbnUZ26mLt&2V$?!APhm zbqNwA4IDFnV>~PFxoG<)u&v(?WiR$zs~v%kuawx=voG)7C;tMtzK0W z8ATSpQ=*wp3znwd?&JlTnt@>HW!4XUk>ede_yfrS0K)Yx%3k4)&mM##@s6JWSq30% z|G-=EzqFqE&sf4j3r5Dj$=l4{9Q-KDslH_c5^QGWQTugjdmI&F{|sW_)-2g%gc1Wp zy*_x`=PfEtc-v>soiK&F8BCPzTR4ZY^~gL+Oe!#}Kbxd;?3i00TdWq}A&AHDt0N((T0XAk+7;(QY%D$<2pjOfJgmn8|bO%=v73c=9ja-Em zqNtWYi1d`DKR_infcm8?N;Z=M06f%?b5svSlITLOuoQWL#nD$u?UaZWAe|#)xW*#+ ze#4<5c9|F(MSC}cTJbmmj3>CBI;US@g>)5a{?0*}+n-RVwb{|xq{O@RRz-GMIM%?N zR2dp`i<*4l`}qCy*OxzSZA!>1Q@I=J;tW2l@8gJT3}L`)gYnU6;CfeL{;N6(`IjIe ze=T?zJ5wcmG2JDWv=#UGQOnqcPqS+5XD96+-1vR20o?5gHqTw=zT-!ckA)L&6-@y| zeOrYo0jpSr@FrAtst5PwkH=Haq;rPe5i7;GJ0fq0)zJ$$$A2mEKlN5ls%U*zN}R>a zFpf2wR%DuB`ehES2`P|mc56&rJCl?+s~^7LgY;7Q#xyE{`B<`@?_N*}{f^^kw80uP zqA4?#-qGf}HF=boC?Ue-%N&;D=gdHg09sQDWA}q(wF2umogmwA&vl|^<@{%lMM@ao z^beurJJS^E-@+WWOY})P!;g>8ymp>8;K}>XDv;ll2~gj}7tiq|VD$4__q;11WDYlT z1Z2FnT-qw-D7uVj&&&>#aFkYZ4uo%McR!je4m!{WQL_D_beDAxWxl;*N4=-{Q~mIl zQkrBdA%UDo_-AuN*Bp5jyZm=Y$Tn8k=e-Il)ccJ!OLMtr#B!C=E+4C}2`js#O+P`O z34Q*tTj#R>W8SJ{nfpU4!5sB&$glHHujRdO8XM_uhAg=yhR5B1_UKb(?mOf@>BLb? zl@EP}N;VJJ%Q&-sco$h?OT)!t``ql}pKhIzKuO@;mtRlCA?+Puz~2h5jemE{51bxN zeJsB&T&+ivUy|3OnnlmH<-V(m6&4G%bKYUQe?Me;2HyPM^6vTAEP+h@p4>PQ`+O`~ zf|ohGrFpLL#hXU4EvC>y18*fDVjnA=vRHS*-Z~A!LIl6&`)?`h^2#FW3_iUDg0cnb z&Sg@dnoCV@23El|4W(DtpDnfRP~MgB(!sOx9zK6j`VGT{YWu3_+4`48O1B^O{FZUF zNL<~qOwGiqcuD-cV}aJ|%C8zOnigz*H^?QQoe8?t^3@=uba%vazGZIacb%6lzMY8~ zhPb!ajit4An;pgz)zr+7X?%E{nEkfOp^Q?vr(lOO+n|t9dwZo>*u%vrnCBVam>bA* z8_d;wDyd{zV4lJ;kSn?}T4@w~dG&+jrANh9`)lG`Z#=T0GI;2S9aWRB1CAWO6aH4- za4Vwr1-GKl{kwhaqN$mwe}8uIe7=fFcEaEGVvVs24DBz&6ZIOdb=Gk|yxTW77DlTr z&YAz~nAn$j?4W(?zb8}X-V`5f#Cf+RptIp`uNN5f6QPxB-IsZ)$2C3BT6+04jj_bvQ<}+?~mZ zXvKem0FcZTbYxpEeqZxBr>h_UJdhfVy!FH!)4~)}!&u#Z>7N^Q_mzf(0M&C86Wu9v|-`iq(5X#$IfzMkx zF@1Q8ylp+i{Z6q1jlVl<2jq{BZB$DnVdI8XJ~ploYh28Wd+<5*eB7w+!{%p0w5Lw- zk0l;O#y>Ib+YTUzdR_73R;qa}CQGDvEFeV>&NHskhkYAAupf7vJ~Sr2g(OmM4?+ow z-T;h1;R>Mq5^g)O-2M9FyLYX_AEzswYU7OV5Kf;mGD>aCxO?15gYmLi>ao2gv$ zQRC1p%l$i&!-^m8xbs5g*Fy2Tx}>G{Z{o>{k8l6$V!}JudQBzdd7rcKw7WlM;a=D2 zeTFS|y1#bZb**{)PjwamE2EV^-z&|hi&RhK%;YDPlD2d+NJvA{1}Bl9T8O_OcC~3E0=o(^2#OU|14sr zIpnUBhel>V9)>tajdm64bO_n8!@;t?LaRWOTgEL45sEPwqit{75?K2R&@w_H!zEnbXlM51&B!HRWzM#)XU<&7N+N^?TO3w_UgeccAdx#St? 
z`#-yQk;|Dr?!%e=Hy)Upa&aD0Gke+vnT9v5ZRL6XHndHA+;FVg`V>*&>t=o)E}XhL zj@Z-PFhDn7$UoK0)1|v&^-LC&JiMvahaNEu&>L!tD7F&J-el^rOZGnBx>Y`E@6hZV z^isGFy-ARvb5wwSQJkfi9n1F9f)PEJAqctvvE`=ERg7G|Z1h?vg3l}zkQ;lEO+wIY zJB&EAFKiIy_QZO*2P(fz(W5kix(R{F>x|3#SBW+}9<p?%IYWWD5#FN)ffLO0~?QoFLyt2F+BEjI!s_Z;riX(vv&-<-#POSnQ` zs&q;ua?rkQy*5gab7-CCXz;AP(ue~*p(BpkJuJ9Mvf^PPyBLpNhvWAHp1F0%nMM8 zw9L^p=DpHwu0wkyUsWL8%)vUj6VP{h;R)=XY@0;c2So31am^KPw;79VL_ekE`*YM7P73Jnk{^n z=9~samG%JL06UXWv5N9Pe-~50_1JXJPU?Y&)NDIcdk(X6HtMO+SVgiNwfBLgUUt}yrAPE}%h>ZiRFAdZuf53= zZ*6T9rJED0X!RU+0XRnr=K)$91 zWjFj%S~YHev=|F69_f74{($jlQ4cHPL`#2Y*U|26%ZFgK}7L)%5){x8?J@D# z%jc9L+gap(c2}(XI$qv2Re!(@DciSu1Y_BaT5m|uw;g}8&W89@z(uHr-|E_n%>uZX3HY3bMsz{I1J)UF zDmRkBmA~wD-P2t>_Vj}`bSi*8qK5MmW)M9518olb0jNcFydUwj_>GK?g7k=SYmz}= zd#RI04ANpC@pB*X_cvk+6K)sA#>e8RuAcuD@3}|BpP~RhM0{dA=86#ROW|C%CY%!B zP6^LhopeN?axUT0IDni zl-mRI#86{&mPtCo1euh!Q{t9e_(5yl&IQ#hsW8J$V!=WI9+ZnOK(5+*qC=4w0W|SB z650md1@HkBAd!eyZUZC*=zDD-CT1VK4N#0JRwfp&zO=|0g8uzkSR+9gCpjz#N}PP~ zMK^dG~xIoQ1>7KkI0%fdQfc+IQu>(7l6(=q-O zAb^M`w*i?*{3!--n21+k0Nfa(Q*A&1<=mfm&u;6o+t0j*Zk?8IJ1s{m)F^cL$6JE= zvf}IqQ2az`_Eq^X?Q?SsQ?`+g@%uS=YC-ZbxhhB-cxn)e4o1=o03;1GPAQ*W&xH!gWogwq+JG;m z#IdA2vy87@#g7#rGk%>vMS}#inYJ@Q6bn#fBEQ@}E+IhY*b(qC`}}E;+bL+hLltNl z$PmA7UpRkh9NdCB-T7)d&BCnM3s;$ z(r+b}_`8!InX*ie27dSO3P&o@kfC4n(6C`&YSRtK<>XU?Xq9cqBMB`^eaNa#Q6M^i!v`e?;XmZXy=3hq zohpT2>1t(qDJ(GE3cq}taY2gf30ICva`F=>>t(9C9klK%z<&>k`^*PdpVO_fBEAR? zRl`vJZ5UJ0+L(xpUEsto$UT9PxF!_cR@Wy-to|UyHB0TJrQ4d-a=slV4HA(deaPkl z6Gi#PoGV7FwuBg0!>t@v&aU}B+y>sy^(+=Nm)(0Kz{)pvwfX_8(DDkgn`s@rl;X!l5xrzBFW7k+|4NSNoJdX5Yl+mjFzzbAe$rqpR(K7lAF z{%qW3_a)GsasJErp!vcPj9&N)XkW&5!a{U?XinV@djB(svgF(@pEKudP1ANr7 zU-5=S>;pf_utpC)n&srnzqK!|)Mc8O^CJtb*Vo)kT51zJ1Hb~I5Vq=gkSdf_hZ=Gn z-2D~bPuar#d~f!Q<&rgS))}L`+Z!I>`KaIL#Hs-AX%s94?)Oo{v52JG1HDk&EVe=UwVfr#@+4lylbHc?06NUe+SnM69wx3vlsIQJ&CwqI9wVv5;S)-L zY8Jla*se|{sIBIw$amp0LDO=OBmj58P;z`w?ggWJMFN@un#%SaMRs(1z(4fCM{_}0 zz~)zy^GE`CQsDTu`evzmFWL>M$S{jN0s1n=&W1RDO4u(W#H10YA{Mga-CYz!R%}IL z#Jl)c{snIQyt_d^29K`Be=o(0nwv9@ZzK+)$Rp89 zNv_BZ#ib_)M0_GZ3|NA9Ek&uIe!D+PB4^c`zVElf0|lsyci*#XDh2@HG_cIcUtYFf z&eVOMy|x^6Yx$h+2N}|b;%HGL_d~*|57phfN}qhVi2sn$`n0Zxik{K~bW4~XCRXyyu553knHyv@zJ_8+3#-ou_Ugu_g*@~?P* zTML1G?a5jcxd1Lp$q^iai(U9w7xzzQ4gUk*TG|Z{OOkn?)%1o5{u*$RP)i~H72no* zPh1Z#o7bsu;K=^T(SM%k)I2d1#Grg?kX(j$AApjm+|v61p#O>__HFAw-LR_-$eN}_ z%m(us?arg~_&yp%$pYJ7>BEgdrM02qMKW^ntPfv@I|WFN>qvj_nh43AC9KAQs8@&= zP;Yo=Ol;ee&oy(OBMu@*w|U9a9-wa^WnJMf-Qf~a!7_rkGr6i_+yjdZ6X6;3hp!%C zYmp_Vz=mqje1_zqcry#B)27GZ4Psmf2zK!;enUJZu{<-Zt?#4GUH|wb`+cy;a9P8H`S4YT+euX$x z(O>m}d)ZN!>BCpr&L>2yjKM9uH-2RaMz@MV(8J4*yP2k}b9(=75B`Br4QB(7M?80U z_>x`mWB6C;z_R~{^>+B&yIuyEe+{-RB9`NRHi6Z!{=CR=zurF*8~C(wFApI+Nr*^& z@*RaG949V3SIZj6{BuzSUpG_G`OdO?Ct1BIs^59z}ab6cF_8J#jay?Me z+p~IPt+*$*Gx_!tR!ThNqZEqP)AGzXc(%t?6@0CN5kfk2Y#Ef=SC-mN*?uoW)ymQX z+YtzT{(e8-G6S!E=*6;IgF~sVXkYW{c=DpvNN$)*cu$h&Yw5i3H9yNl&xZ%hG=#*y zYuFZfwQ|pz7gyIWn_oKoYR#{HYrbh+{OastD`w~7gKaObQED;TpM9rxmH9GLR?|QZ z@}RG@8JvCpo7{0llkwW2o6~8Y*sE(&eeS6?>2PBqyzoFWuxAN zhfO%IeRntWxQ$!88EW9aJaK9N75{&JpGe61tCgC#Hl=Klect5HmR;8^=QCT8^83Yz zeBXJ=UmNjbuW#9`oc-s@e^)ARg)V=suZ*wy1tqR^LDdR_TI1JyJ~jyi5XQo+2nthX zJq8uq)d(RojN3&M<>__-XAici&zfMHw#H9t8W%vU$j>S1el^b!>2hpvkRvlX0UcJ* z6AEaCy2F%9I&?l!E4gWlxO&BZdplnRqYup(#^yB>3v^HDZ*XcT249^Fp1a3CPJfcJ zP^ePI2+GzV`-p;&Wvp8$wTh#4PgL@a>#|kQd#%JYQKh@GEtJJeCQKLI=ftgw8Jr0f z6^A*Ltm~LgdO~{dkRl1 zv_iIACDb!TSm`ktds=D9hv^h0L-M^-IqA;t*EXzAs!Ib}P#+qK z-+SVxU-GRHDf6bN$UzVIJ^Yg3=JXIJIVpMq2 zKRoziF{J8oZMxy_qYckPG{yH2%6=WIIMON?-b6SJD~Kw7S=EsQOw31^{g|4Mu+n+^ zJM#8(&pplwDpyT~Chy8lu4SoyQ2%)6t7nsGizc 
zuO0C{)hA|7{`@ydm>&1w>r}3nPd0_gqN1k3?%(Hy_2s8iS3G1jCFp03X2ehsK$xM` z(4G2oWz^`uw5@NJiHA>C)_a4Q>sRw3nV;*4{~mdCj%smg>(9^s&U`FMk4a6fA+X7iaI=5?yiS<@`4ch2fl+}lGw!`5jmxI#DR`A_+H|$M9MnE( ziSaDplIQ@$@+kG;At4L7NkQAj^k9P4AyhR2;^?X+2&}X8zibQD=@)*X5J6>QD%+9Q zuk!=B*PzEX*>@9os{129A2>o%b7*iK*2}xJ{U-YC*f8}jlgMAJlcb*WoN`-NhFG*6E^|uBW4iE-i{`lF>J*YNvO8YuGahW&QA7cDPb zi~4o|z7WL1Jcw(Rr4#@8r)dF)61UHg|1)!;mB5h(7=-fx9P7!D__hmxk%{d8(3a_6 zPu8pDGd&;X6pCqlr6LYfGN1^`4WM{SG41S4d%|iUDYc!^(jS2nwB_KiHY>zc*y>9!`X&@Sci2hmv+ghJ!GG}BiodWA-0iK*a? z_>GDl?Etk~6qY4R|_#xe92+5aHNbgSL z1qy$d46v|PRl$Lba*?Bm!v6!%V4A+fi$wxHR1Ji`b*RfAM)m+Jolerv2#BH>@T}(Wy<@1OdO(g3*mpPBC*xg10x{7SD6-5(hLRvpIvUbY*;!}@ko zx+^qnrVHTeO{O({)shkoB3ld$eD=4QB~?IUfzvJ-JIC*-rMa>^H4M_+#uqOu@yCW8 z!1SFljS|GSZtvAqX1+Wbc67>dNuj@l%C!TTj+|1A2Y47Rj%+i^FVGV8oJc6meW)Aq zMFm{+y}(Dt21J*D5PPy8rKL!h0oTpjtFS|-*fL=jJn>)ubo5LHqV7r2`L3h(r?0i< z6;JENy_LHP|B!Km&>J1zJ!`F~{L!QW(Wb(f1#f~_DmMrZp<@YY|3MCd8Fz22EIlUXKV|6J%dRx16zJl`oEZ=( ziUvS#1CXCdm&Ex(4hU!hHn#oH2p{VaVupYy5{ zifiNeC1;J!K?cKf;=G|7ItZ06AkW+MwF=B!$SI+4#e8_Xhgmm7Lp2j>H4fdDY{ApY z(j5nM3lLF42#%yEJqx*Bl;t1tPI?( z4f}I+1iW-DBrfK)N1tXx4uwn zDdW(n=^cK{3cBOzlM2>xA?ZTikZDCOJ;s_xf%Smgf~k!3vGmk2QlSPp{`Yt;U=fWs?9i7JMUHruP$!?Z9`gTlU!?V zKa9>1Q3zrj=`H9F6nfBXjPFok_` zX_MPtX>pQWakZp+?yuy@K&MY89EZkzv*%q_78(LsFjEJq=aD20dzbgVa!rw(weyu0 z8+{U+CbeaqF9$8~6Qlejb?ega1HbCWv@+aE&^_PeT@J^P}KbhU?j zOp#&%xGPFCiUD(7Mg++94BU|N@|KDxUpdGv{A!2nTW4R*lK!_z|{7GT*N_3*hcS+9)% z)}c*&55Ipr@^5(fsyq7QJOeX`0gopG%{)WYUmo85G9=0~EF(V%{xa->XSkAxC+T^t z#WU(3PcF|hdh1`$m?L7xab9t_SFt<15++~9J9{ObczGhqD_QAf&{?lkvzJLNUTIO2 z5d)KHpSfhDg%$110>lWVX0NS{N3ZziEjpeL6Guz0PIY>B*fc@xu|9>CNc&{T zrZmZC&636&-)NN3mU_2eSiRBL) zCXx_A!+x>f#PfIX5{UVU&tzV1%d*qgwlw9%K=qsF?hk(=S;cUxu8vd$j#;FTY)r_l z3lzu!Ih9leH5SYoy|0pi8tpB>45!~P_RkRzYo0@K0stF35`ME~ySGqhn4?2;=PhSo zHWi>toaK}0+r(QAWoNB?0o=7)YGSnUJw!}<05?WYtdjXE12y{$z&dm!@V7XYX}gO4 zNKe$=)ID0Ln~cw2)p+{s&Bos1{VFw2*;RKVA=e}K{n-0%AaB%;<2&@xl@JR6zs#_z1q4cS9_!M?9(Ob$a{3iF9L}31Jg$HQwAe%zC(& z=Rw=GJ>%KVA7aOKQBi3atnhx}A#F!U(5CaGt{STsI#`bv9B3Pzr}?)MQ;m) zPoHN83N=dblRX(I*JWB|w32I;(gm4{>X*kl!MVXh4O`tCtnNpb_lyTuu6KuAg5mf;GG zLNrm$pGy=z{`i=c64XQ97Bvht2dei>wRrHF_t0MWQD8BR6?n1S@nhf zheuXw5pk9JLzP4GYZq)4T(rP#_r~o^W!_(i5Bl&$?TCE$WqDN+0A)JJ++%QNZJKSQ zRB|Bp>9yzc<(_*~yV!$9567jog^TW z&6Z(-RAUF#qo7w7d$m?xG8OiZiGt{KUoZAHyU{o;M8HnOG2llKt90KL@}04-ZlUj0 ziSg=O(} zuQhI)zOPbwN8`8b-hnri(~d*boqx*K?!SE>s@&~((kW7lTCpeJDurtQ2eqt~vQ%%4 z`s`Zyt9d88MqR#0sJN+2Y-V z=0&iAV%u|}DusN}2ay@JIj5ZCBmnakNq$`;}gT@+TRfqD)dibyE`L%k%%KQCG20to}l<5Za=4tPG zCa%73D|&7$Wmk4Fg&;+1q}Vu=$RJc~juQLDB&j(0W)@&6Rzk*-n|VTrqeQjoLmf+r ztKRg=(ET-{9CK8*J=x@v7JGiXUtVg{QQ-ljd0m3+?E$Hw0nXmaBYF~Fgls?S07ZuM zSUmguSX|bOob~Ob-EcqqHV-H$p}GW!ce%*w-hdUtfJSIS)^>WRkRo(!Ahb z9`z}ErI3c(a%VUT^yNZrUGa{>*8QDC(PK}A3 zv#^n|E~e|{P~oe=wERGmls_H}q18f&Y|>$3h2=vcffs!;whMkq;Yz?Dk54$5%bU!T z+>HU>UFWU#VeyXgq`#aiOIsx|N3`S!-2)bnh_Mijr4kqdt^g`OlZk?Erj2iQB{)hb z_|J9AE9gvZj`$M0jBK;=o+Gi+*qtKN2J}5$rNMA<5v>{_Dnc=HL@ZFR3r%5JtfGrp zce2nu0!oWJ@rdPqg`OPCMM4qFx+icKbI`23CjJ;^`bi7F(i6gaa|cm{gi%GhxbTUk zDwQ4qjrtZUf8&#+pThP1YVr6os-p_f4>)RwuomW z$b}khW;!mxGcXipuiWDA0xtY5)A6V>Dx`#NhVf$HWEylz|6Q88@xhXj^8mx^+gQ2_n!Cy6t4uD8UiM;JQyr3OxA43gGZ29tk}rJ zh2CP6PpniQ+rz|3EG8kGKaKIH9`m-zUSz~aTO+Z}FXlRonK@fMi98{W0f~x+@$yOv z!Qz7voZUD?*CMVKd=29G18(kv9zQ2El&+x>k~s{y>($S8wMne@Ohoq_;<2IAL{a_*=w@|-8ijcLr~}EG-m{r(bNKl4Ea zpQW9tbhc>JweyO$llqk!m z5Hw;)E;6*;A(&9yWu3M;sG_2)!LIO) z&n1c_=qQ_b6#t1TKe?5Dgwsfar$c=+x<_=-qP7-!c z@Q%`@+#p=-YB8)oz;vi%ik=kGM|VFBPPxvfa#SkoWDPdktXs_;|M5^6!#C5oI;4Fy zHgh%`-0Rk%7~_F9+|3sK+z=6-?l!1Z;+^huTZyi~q38vZASTE<{vcq$w6#0q=t~>e 
zL#aX2t=P03wQb5$x%i&M>b%3Q+sHi|{iY?05Nq#Yp~T{WvRBF?1h$((&<0muUR*@4 zPf%RCq1tkd$g_t+5%47oTAV={6K;p@foalB>56$&VHt(GpDwn(agg1n8oNE6fL5{| zE}zOcvrXs{YaWZ?YN6l@NmA=8 zZTjdxL3U({+#<(b>CrPa!mC#aB_~ z+fwNGemA5`yTH>2ro3iW-}3`+tjTwu%+v{6|57HtAW~q8^W*wdwu%;)UbHpTuT=5iv358|jrjSzKW{|Ky(4Qo5i&3+YB3 zWQY)vBP`$8yZ=#pI0XUKCI}|?$3e0T`h@&*oiOCIcR8W7 zho1J}(E%Gvj@gjhm5=H39&!1@7kh7XnM!mHhfUd4)gxITXSw}xmNjczZY>Wt<6 zv2+gUvP*kxi@q(Vm^Nnn0!`UpBp_;cI|!UEN!KalAPvCj4pGOM%a$8GjxYRZ`hA@f z`QM{M5TYQZs@IxTBpE+Kac+oB)d0uC7{+J+M3yRlOqeADpRL|R~of#B) zIgg#}#|DY(=MJ%?m6luIryqZ&2tvJ33r3z{#*-q34feo8n&C!`Uf_ZWMcJVaW>x}& zKU6X+3N^j8V16&m1*i?gGF-asgVwDr-Yo2&53^cZu&N&wUS|w4?`!HbTRaD4V6{xgp_&6Qt;nipr6 z7vyMJwCK_k?mAqDk6a9l4b>{;*%wN891lPEYw;j7!onbY-#m~T(`OLA5IE&eFo^In zU-H_$oUh`=9}OMq>s7hp3Jv)Q^^TA5OIh+8&pe=392DInVop_yt_j?c@V^&v=;6|# z8k+YeE1v_PjBv_mh%m`9DQGaTXXywulK4c`R}ERvXU~U@Ok(l~*VPftKZN1bybP$P zRqFHn>!F`mDx|RR_{gY~NP7cYgE8d2;Q;$<4@9zdoFTM#b_k$BIV9$t=eyMa63^#~VZ?m@g;bq7t2#6Fs9&2QHru zjXHC5`AmFNQp$1?C5n{2Oe%^>u2@d4iAuS(oN_NJ_2F`APt@7r<+GDf=iV%zn~yrb zwtRjw>cUHVYXDIE2?F5&H~uH!=ICOIb8^FSgT{XX$X!_cFZ_?J`adFaN0dNmo zCG(Z9@8Hh=SI)`S*3%K^sE^e*_y+(`m5|uj+7Jk?|JC?^#s5?L|9#kg32=>~0N}aqe{~JZT%@R8K)RV;5_>VRCPDeXv;Dsd z{NWJ^Vcb)HxZjAd*my3f74qL5Dkmkz{wHU0W$}3KMRDaT|H)DROJ@EjhyE|Q?|<@m zd)jjIOmk&)Sab-t4#*By)(QQ8q(}WfWNy3v{%iQZAYd1h5PLi%8BH+!UkU$z9up%- z+#3M^+f%X0@i9@+iD(;cH*txEC&Ms0XalUi2>|@p>Hn+EZyx{CnGng*|D8si1%Ule z|NZ;-=zpg8xkM#KlQ=5M`&au0Bqg>0HFW?5My)8 z97>2ZGynju4?X}Qh~sVtzyJ5|8}}D)F#!B!{QLL2=HI`+wEzH{0f4TP|L1ITB6!>W zH`@T6|7sqGmO$v-SdbsFM{17*N=QXc(nLc}S5Zn^7o#L!%aa6CyrZMPub-cb%W*$1@5mE@C(ax`Me>P`4~|O?icNA0Jr*4m6`FW9 zGU+Udlyv&!(TI@X12(uxE3HpBg$Wl6w!^+>HrP+j8ecs1(}Hc%jvUSkcPKgTQV~nI zk>bO=5RgGST@ZV+K9w}(>-ZwTWjfM-^Qha*nWHmF$G)5iK9iPuKRxI!Ij}wBL=H7& zxH$6Vl@lY`(aZGY_t%qVs!n~VJ+XTIW%p2hSO^|PcPj{{d6aJ{s!sm-IT57 zvp?I;uQM-1C6kYxrA3j*$*DB@x%881nF+L<ANxxKHR#L*u&1$Tx z?5Ho#zg(Sht*WrHuHxeLlIr@3>y1}$HkH=hEM+z~-oAa~Mt$v_dyS2)_04U!?zP`; zdw93wackRydk>#HeAL<6+ScCGaP>;V(4~tXN^@8@a;9%(Ox(V-+CtxbNLjmkalG~B z$ClflJ6bmGw|slh_OtWB$ENycy$^PKI=}UIeql9z>8@z&?0)pPueGU{+V(y7(Kfx~ zd*PGq%BS5ukDov4>3`Bc^q^(AJ+zU8pn3(hJLh8{(SW6XY-`3uZ+`l{flnfJ4aP45u^=67`2ak3% zFAOr2T`FB4+*=wcH$3)!^g+w=vntyh1-FN-tIr#}8iU6kwtalX3>&F*d({5vZAZe# z_hXOlf0^x~3hZ@%-0^jRRjhqv{PBZt%j{a0Ywn#7zki%=IW{}q`RKOQ?(3F(L87o z#*Ai^YcF);E2@G0YewfNJd(Q=9<)$Fgd)u&_U-YF0=OhY;KP-Zlek^ zk{VsE&z}#wNzI@&mDntlYD?HDIeT9%0~M_^ z#S#_B*sGaW_6UGip2JRPKK`+j_ASMHBNGNAp% za4G6=X!f03!{6L0>sJE#rJ502V^SsOYt>tL+G=mqH3L!(n&4N*J)S6 z;dXe2c1r0Szs~hs{fzeBj1UKHCPT*IF>Gj4&-g_L6fPaZfVfyjkoBDhzral+H8pSO z^XIBQ!3|uzPOH}FRTM1=(Fres3iJ^(S)V2ql1C!Bc5Um~lh6~e3~g9j7+gkkxYoT| zIMrEqXa$dy7HCIpr3j9C4(dc_X7mif zB#EYzMHTeAqkT2vZ3ZDPGVuF(_*HBXX*CtjQ4HYBMy_xS$6D0nLBQfo@eU=9@b@HS zYF2mCM$J|lOy<>3mli%V9y{Tsa2G3Tb33E? zRF%!yh2qN(H~kn=O>u%ZY#U6f%RS(?PF>jyx@!8|Y4Ws!-pNfYx}bE?sN*M=xbnzbx0P8$;?15^a7mfAa{VN_^=NfosH>CQB4dCt++5f^>7%g7&M=w|c^O*g zgPvzN=-;YC-O08^f7v(|zy>^!gh8WUb5H^PkKiO7t7jfd~?b;#?MvhFQ56i=xv#7@9f_! 
z)%!Yf%VWq#ONjUE%)!%q@Y(mD(uiHl)}m5oxAx5#EKe?hyibVedNKHuk&H(hl|Pt3yX%gRlDko^Vh~i3`M*&kaPf z#ig7Tb(X5DkBqro){f22pmE(mv#%d9>9fA+P^d@=(C(EQ1|0p$Lj*++$Lsms(Dw_9 z6AUUY?VsdCz^>c)2R34M`Nz;Xcd{~NFgf;v;fgsYbu>@JknMH$!Hs|Q_xi*;^ww0V zS#xTR1`-*UW?4jr9~sh^AK-whVA7Q3LipUOTCW!~R-N~zJ$J)Xu3u!tc01yh-ZrY&%%iy^y_qZzrZkjsG7pF^D2>;Y-lnPC7f6bIP?qA zc(Tnt_%Nt$RQ=M0)z;jSw|>a>w@6<@p3Y-AbG>Iul&LPS$_vwun%$w|hb~Vs-ppk; zO&qE*H@4TAWVWR&kIW)oJFKdx=J4QhLkqR~OaF=Agb#(uQY7(jjz=`=N8ird$iHaG zDa2jjFDZ6jkS*K2RjBB6BA^J4`WYVI%%`f;y;O4k!AQa#u>2_(-_m3@db;w{ab*bK zHD)FTokpva-Tzkqz^{p8CuvdMc6)RkabuS`n-#NWoh4*sYS=5c@ec{h&Wi5pmD zh`^l))CMz3{DwN&PVQ^0@aX+ScMf=X8T9B%NI>w|lFl5$qnz-`iT(*5U0~HRDA6sv z`<0i+gPG|;IGanN>kVectt2_K>0(@@V0jv$i=vW_GMN@i_LKX{q3T6}T&f*v1HiCB z%m&bafH+ivx}0nj8g#WjZC;k(5OrY^t%3@n6l>2%p$j)rxC5wK|7wJgk z!nELB0G0^iI8c5ffI))JNKlsmAlb0R4w5sLcTpYnD`~gTrW{P|AmKR(?dg0;Pr(b; z*{{#!fdIsV0J?U8TvGm}3{WU0&+8R<5Xo>J1P7OMkotlNOu8e$V+8=ZNS@_}M3YMh zc`4KzcJW9DNM!-SRiM2l4~7W5>;P#}5CXGE$BfqqUhEfBrbo{)evnweC^k1kM@aJ- z-Ar`iaCNAUtKI~7s*2nQAc}eXyxHlV8{lPhASy0xD=N{GtI^7aePEIPIU>4V73c8s zXj=nX4?qF{v3Zc^0l*I5q+N96W9=Z9n0&#}iLMOLfe1cSyYkHsG)PMP7lq(8E9WQ* zXJIS2o7x_1=Oz1L~yGOYo0+Neab0moICO>7C*fhvD8 z_aZKj4k@f43v8r#b~H;|00^$C)R9wah0XLfVVDnIaPg}YYZkomAqkHJgoz+mLBgS{ zdOt6ZM02%H3@FG30-W-6fie$bc06ASo>L<_k*+9p^vW6Tcmg~`kkAF_qCq3gvzN=O zVWJ}D!!h{S5?4l{ClV6N$Axtkid0=YBva|(#M|yzT4H^axLOTV0m1|bk2TPwMQj+b(QxF_iF|8B z7$0h#DCnA5inRtXU7!fz7=IUVYZZ2Oh_q04HUXLNbg1-T*I5q^jE7m;LN6Y1lKRB7 zR;o4&=+^ltBl(%pqz(~*`-@qw7Vl5G#(oI3iLK+W%En?dMc6RUDtZUL(R)vtE0&j~ zj`ICk8MTK;5CDvluL4+Re_#dU2QuPjt%EuU`3UL-PNd;uY z4X5NwD-}l$F?Bz};8odq2p${wK|GQ98XV$BEszmoYZcgCBG=jPLuLLlnns=#hdVOB zn}$SC28DG#eLkQi2?5bX^IevKFz~bqS?SBiM2GssTDn?metYyzNtlry>Q?LBIwrP4 zXg_aq8BWYu&;a3B=5szs5fVK1vy$&v{(b)^$ggzgAD60E!8QRvj{zdFv{H8yMZWog2SQrMD-at!sT9PODH(Cj=BSiUB`ZOS2 zNd1RmFIp`^`0p}N2|az<`eZXmeeNK*_ch6t18tY$5#hj!*wDnE^#Zq!(KIfK$f72z z5WJ%R1e+c&0Jvjwg89my?n4DyrryoEvwMIKruC=TbEDIhQ@N#n^MoMk%N|CZdS7=!pEm^5Sb=PC8?c1V**5BD*dMB6G&?pZ_+}K8U>3l2mH32HVfRdFSY;%W{!e zcd>zAqxp@@l$DC4j?6P7=Y0!R&r?+1Sqr@*`#8F|9ZMa52ei#XvAzZ>zF}&gM^#>s z$3+xzZ#n4NMb#xsHD{Q}TY`{0kI3>LD+47o=q7YHEa?T)W_4ZtEqPpaOGrh`re+Nj``qS}dTT9aMeaGaySQ+pZ2L9rej~X47RIktY4n2V=+lezM#&&kn36C1w`q%1TTonrGS>PsK)r6ZNm7 ztxqNz2*g{xBf>NG!#_#7SJY_Qdl~1o2qhPxLo!s&!}Kv?*>gA}Nmyi|s4BG0=8_Wn zPL;J;1KRUCW*G`c#7}Kb?wy>%a=8v)LZ-vf+S|{~)hDq}#=>x+T4)NomTfGntLWPj z@eb*QDAZ`mLq{+*ns<%D7ohMwF-r+Y%&ko!$2#=WR`XYz(}nT|Y45Nyo({TU@8@C9 zCPTG#VNM&8dVeOK35mfU6lY}Ld0o$W-f1u7=`gdM5y7P|ygUKNX^Z`=NORruz~tlQ zn~}X@X~K__yHfm5d!YHyjbH(je~4c3*JNZr8SHJidl5b-syLT_d(8dQS$lxruiEnL zxp!*@@asgXX*BkdH*S_K{Emor7PIJoCp^p1_V*DwA*r3{tMry^a3FH;WGlIrsk{#_ zlu|Qgh|p_q6LM5Be2aVtPPjuS_$i@QhC)HV7POc^_B#T9DB8qONn?3isGe~6gqP4P zQ)r<`Db~Sw_bYPgd%f^GYprudrW+o64~~2OkqyGd6TmpT=$dnpSb^`&LdN2l2sZy} z*r9t>Y97J@M;2xyv8tNIAIRe$wrqu%#P~!mf~tLil<%=dTS^kvk3?pxs0jFO3K^~N zTi%f5uwMP!wRgBhTr^ z`TAIgVEd0NX|&jV5(t5Jg&TKWUGVNT_ewtn>M!Rf*m?C>l^S{FXE=(q?Hk>U`Xc}b z4_KS@?0TI`a1yz@@=Te{12OSVm}x1&C)mQ33seq?J^#efGT5>UsXE+O8@|)CvRSnO z4>^$7Y7z9owqe;k``c1D=R-TiQTg3UI8yg3T0`lMI&4ljWzx7$`K!K1m!?#z8V0qT z{LJ<`b{hLjIbd}7pvpG|dzE)yNMsl})Jo-zdaIZ>vPciPU2BB@hcvtR zv-z-6Gj%_X)+!C_chAr(IMN5}_RjNd)q%ODh9j@n?rmN;^eb(c7qY-JXM_|zAke%# zw;KxgI;$ERrWQ^)^-EOm+!vJ9!+HKEBPU&XRZbR1I>tC{AOa3oow^H-Y+v;O8Itr=5{}`YsX|jv#N{w+B4dE?ZJ~7A|VOaDUfx>Qhi*jQSVZau1k5 zaGyqj>+RgPUB`PHgmZN$Z+k|M{&2fGd@9;GLIoj^+n>`k+@-v7Qt&oK$w~Bhgqnhb zf7nbn?NxpWfg*MjrJAr6TH(QSGi}v@5mT&ktDP@`V=H;aaM zdz=X;9SY@^+TLdAS6+H>_3+aSq2if(yvy;~CwIUfC*OA_D_=6y+2;_NP-67;(<467 znTHoGMQJBV-SL;NthhHV?rbl(RmdHDpPGMnl_zH`;5{vC`^$9iLhvr@;D^ocpJu=4 
[... base85-encoded binary patch data elided (PNG image payload) ...]

literal 0
HcmV?d00001

diff --git a/docs/stable/_static/img/pytorch-logo-dark.png b/docs/stable/_static/img/pytorch-logo-dark.png
new file mode 100644
index 0000000000000000000000000000000000000000..b7a1ceb964af782b8a453b3eb6f8eb82b7ddbd49
GIT binary patch
literal 15625

[... base85-encoded binary patch data for pytorch-logo-dark.png elided ...]
zmp+4F41_^qkgG%9^$`|Dux|TYIln}Bf*@*)AGJG06m%atiAVp{EyypEPZHL&D>E%n zN=*>@Ul-hm_A(Zov z+r-b_BZB_7Bh`Fh1Yibs>w=ECvhne8`yPe=zC5-2B_v8UE;KFZRsZjcq7xkyBWyjy8LHg?7&5tH?EK%M`NZ0&svAqoS589+bPWK5wG9%YV0FZ~0JE!Ci+CTW)K z$|?We>9hAC3vkrv3*mrhs9H9qLa;9Sul$@p__Y^j#BfTwxUP8%j+7z6bb)QxD}9IM zlG-g~Ka!5B5k{?@OvtMtwfA`aM3p#p`;mbhggyL%`isxsX3^5dre}QWc8imxT<>|b zplwdJj{ZzGPLP?!nU{*G6N(K!&VJ`d-@C!){~)=xx4rHw13EyCulgiYWAbW>w&Z!H zMl)(aK8XiQ`5q>vK(`MB-C~6%mYyUpA~0I>C6ysM3B@a&-N6-?Cn)}@Xc1Fk4=?#G zRt)l$A5|fu`6k%lMu{%SL7F0X>6tg^&a-YbUIw)c8Qpw~}rRaRcq^n*?-gs&Y2WpU8}30eb<(l=+< zO3i*2SV`g6ti=!Ip;R3)aOF6)i!HYPH8WDfjwj$Cuqy!=K<6N&csep$rOJ)zT78<8 zD*CHa-C`6xnka#;d^U>u+mM7Sd!dXee{GR~L{IeBN6}f}>|E|?uvNiXrt+f(v*5t5 z*>v-OVH&1D4J(R^JKgT4U=t-T&AMEaDbu?6y7xF5A>^^-ai_HpqUpu_JJj1qLeoM% zr&8!Wt-Z=mrNRZk;cKuv#>tsn1lQ}ZA>|__&dXQFJv&t zpU!XBIt~>eM6<5`yKY{HhRJd;TCu3csV5br{n0XwpC_ZH=39&i_aV8AH}m7ikV$3} z{;MwkE}j%vL)A?E!L;>#OTn4cv;UYu6}S(Aa>`#^@i(mrlhbDO-3V-ng0#S~=9`6h zZ$IEU?P)pk3M(1<@SHa0c|j^*;HI$4*9*MXx0 zUXoCQf;=`Jorvx)LGo@vHfN$mDS?&j+y({$sAj(&pYll^LMpwtBrLc=s`Lkr{EPoi zTyn5EE)<3<@<>1M35we3S78zr4sePU0uC8dFU?xoog~Q zeGKY)RC66nDTXQ1l7aQf!OiW;ui!<~zJnS+55rXzm(Ix?V`*G6yMU=t#Sv}rx%s34 z9C=tj)bcgVs-&H1MVy)S^`@*=n2;^l{EH(VKWeVIn0+n`)9_vo?@jrhmt1RvBZ~6# zNR_gBI|jXkxdr&o64wQk)d}p!-1=BAO*ottfvPZ2*-JOYBBA47Qo6@iWz`5w!Y;WA zMW}1@{-aU>9B4$9o1L9q)#BEZ&aAB~ILp`xubAAb{^X(aaFvF}#YXWU(?}gfJ=_#b zdI1MKgm(;aP23fnq!@`E{o#-6)WqZ=IXEB;F3MC@a53x~F(Iqrl!VatUWA;fOm-=T z_ZN_cU`n`DQ@<;z8eVz7${?v=xA6Ua7az*+im}Ge%=Wo-n8J*suAsJlWe>Q(6hRzS zhAp`%SH{q$jYG3bZ({m_DdfcmZEP_Tg!}P7{0c%Kr5Frv_HdcS{;R;zs4XJ-R0l>= zuO|&%NJnfoTBh(S{U|rps1x8X=019K5I*V6-01##af?|};jJV?{S_R~_fL^0Ra&R1 z=dR)s6_st8$A+`utNA1b|D-YHbo0VBXUV{Lc;{_Y{7%RFoo{BhOA@F2ja!c)O-?_z z_qvzVl}A#tgvre5hqaV5Nvd}r^HgfxQjA^@gr?1)rRnnBz|*^j0nx*FkJp`ghxOi= z;6aD5b-A5%5#7OCxiE=~rM${pVqe*PC^^(>UVwi}>2uspOOPP92o{!wq!sdG_Q zeg3s@NaH$nYULE`>ia{nPPMVr*tKwMKsPw|#ov&ork*~0ah`%>9cM=$O(rMmUsi4ZsR2)vs-K>fW^#S+? 
zYyBP8L$TKDEgCb+1H~G4p=t!-sjZzTg?b4C{PDsCGT!CMMcaqTT)!x_aq(HRLTvte z6(0%Q;gGQj*@o@CsJ(YcbNlFw|ES5ud{rLpl2W3W~n!Q;Cf@(fT((Ux~3Hop+|<-0;Ntfykc8BHdsj@i}IUCS;l3 zl-WOJBN6Byv$a|$m*)JM5ujU)*kse2fWt@-l5tea&e`o?wxPSRr@!t`IG_Dq8>{q* zk$^=(q4oV@G2zxaT_;eN&Yj%JpM+++(c|~jkU^wdp4EUYJ?Xr zUi>g2j|7_#6*}I0aX8$(X?H#{+vytOX(}{ULPeOKIoao-e;*oRd1RS5Bj#~*JMH&I zh3IMR>lQPHsnvfZHu+J00RWsAkTMzNjT}>lVuFaolE-|iIah|HS-Y;Q+*s)!cOB?$ zw;KMO{%ggnu)~q~-1PyJo<&9Bg0om>12f@;4-to(^Ox~dr8iKsCG=H)B2THGNkxlQ z5ao_N6xy~tuo=>8I~-lM#$yaC_eqcZl*|a5gG-4 z9-t(-?*rqm#nrhFF3((+9Z1MBP@$x+f2er~r5`*QGBS0Z+g1~Ls;WDSK|NU1ce`&|`qj=_UQKZE}<)bKaCWjCZig^XH9ZikCuxfJ7+^LgVLo zm|}B*6jYGx($##pITqkG|Az&TsB>m}bQTEzwuSyT*tN7TerWATJEDpn6F<3=Gnq)P z=Aw$-^WME-L&|fPe3yU~!Z`OMRT5B&sxeb$L+@`4ivcJ!< z+}_@@8pPf_xMFp~YGEcw%d!wo^p>{Twwem1RjqJW7w<2g)aBfFDqkI9TaNN#Z;TIe z#kNf!PO_WtcK>}1`y40Z%#`!2szi)Q;A#yuSPTNCtnFpCs5o-tsc6O@ zX z`R7E;aXuV>6M4T4Z%PiPeNL}sR34e>b&^PVRwpmUM~Ilqhu85IwEgXK95AItpK9fX z-@o_55<7AIc-^83vHf@IP=N^<#J3g@;(K#;mKwf({p$MI=C1ZLscc(GM#xmuvaH&u zJGYR`ne1G7G@Pb+9&AD_do8Dd3*91?w4%kW*jXbx`iVcB&n(~M^xpZXuPIC898fk1-S$-@1aWe6hv4n+B@F!}ws$U)f5!+22=sQ$c>^5wCBANe17NneKTK;p zDMM;qx)owqKJ=`ToeWJud{fXlu9jI)#aFIlANY7jxSA8cZasPc%mM$Y`EO zYd=n!3!;5t>bh&~f)|^c!Zfr;`_`-zy=F%{oYyIg9X6gJdKO5u24PI{Wff?m(XySLRK$*on5 z#M%MJ@_@{>nXC3vY_%Z!&f;0{cW`j1=zMrDhW43S^k&PERgG|q|ISTr>F9Z9`INPg zo%q-@U)3i)yv+8+e{IWumHMkAKmd?Tr$)y5rR@vZk&0whyRAdj*F%M}2bL5EZsl09 zhM3L1%}<~FAtax-V-hVKnR5UkpZe*RhsID`$aHUdJZnh&2MOZj)+rPwHk9Q2wQ}e} z(`)JNwgO)CJ_~~<{vW+vv%8bJ!`D^551pz%%Rl8kUR`mWuVEifslvSQ_wB>ehj6vx zzk+|v=zVnDm-XxmON38{4YEqw%%s>@&sSSSQHJdJ`1AMyGy;vtxG5gw#Vx&YrO$~= zMkq)-bG>bEbp4xob@bB$!~*R#cC3gkR)FFhS5^*(h|DkitQ}qYdLCui$5TKu5PqAf zJ{rB4X7i+HZKjX^jvqp8I2%W{rkp@k)*dfDU(gj5>HKNq5HWBo016P_eB9{!DkJFK zN6&mac@frQqvs;CaWlg&cRc9)PHtY?;Uep1Gv(}D*v)5KaQQ$m_eYB{cVcMsrqc7% zs7su$%5mQ55v#uhJn82dM}GNiop!Lesh|(5P6!uTB3g}+mx~kl?`RC$(t?PgoC1{@ z524K2ezqvi(G^|)6VP-NhL;lDim{k`3uaJ@7td!iX^P zHs~y!k;7U3k`OltRQq6;Z(2}lxnQO-s)l}=y8drd@h+*0{l^&@5yIS*97>R$lcfGCJu=pLiD;2RnBITDXBYtRg{4| zJL!aqhv9Lu=Bci@i@|(=Pir{1?_=iWb4weSIjp)kxP&oUYa-PQhciR zGRso&MB2%Z_S!jD{4b{9G+~NXad>^2?>SUu06mRWrde$`v>MK0F1jckKr9&dg)Wrt zl>4R_BR@Wk|3WCzG4I}Q?1mmZHX5#j9~AQNy3r~vmx&=@#*EUro?gw}kWj8X;374# zVO~V){jm0&Wj){q&-bgR)IjjWD=f*FQn>iwm zzQ>0PKB391Z(G)1iW&O?!unt?G5BiEwCz@RAYnmY*6p&~c+I2Jc&9h#YYbYY%}jqA zBZ28C>^sgWlI_*y-*GrMukV>z#nQZmeU88XoP3zvA&hG72o4eW960@uf9K^b{8m2c z*uana!17ot4mCnKh?w;dG2U9=|EEB8#kvq`!xn?u6CAYIlv}wj(;G+-eIII6>B+8j z{EDl_y3pyN=kC7~Oy+yA(ewi#pSt3FX<4x5Rn)xK%Sk8s>@Lb=e)N|3&z+4*iThC9 zqD;?R%EDI24V5SyS?QEeT=@F*`5pT&1<=f(NFoO2F)a3b=dxGAJ9wvEWI6kx5({GC%x2PDx1I*VOAr+?JCHkq z5m$^m?Q{Osdp^yr?I)P58a@J?@!aRD2!A0rQ}hw}x_41quee0rE9-T)@-h$0qb`+x z|IyBJrrUmDii_BkxtIvzp1x?aZ{vt@+P=RO1SHj<@UHMi?AG@^tK3-!OhvzYhL>$2 z&R8GaGjG$P+L9W>m8@FSs3KA)2O7Ic%IgYtHl5j z_cS7US~L}XEpaiT8zK82upru<*$N<`Zyd@~*NW9wxRFUiFYjHxXichORK=~^m|R4U z-SqR@NG>{VbmW#biuY<9-pjelzH^qt@bdyGZS?7c$CGK7`_L~m0wm`-Fb95hZgxH3 zyOpPjcY5P&jq+6dOOdFMHJ@03nm35rBDGaw1w(&!2^QApfSp@XXDr=YW7y z2i-n|=>8Zm)|1842ImI@f1J?cX7LH?$ZWsOqu4iY74khizE15SIIQ)upIQEl1Pb25 z7vHf^D&fuLQ@wF-RsY)Dm9Sesm~NvHI;HGm;nA7QE7KQLzkHcodM(A*K#lsC4ti80 z^ct7-S~DOXaqR93yw5Yh&5mylxJXvpy`EGg!cH=M4qkO8a z3$xBmh}p!p?O;Lv>v^Or;lt_ajhL)U1(2eoLPrUU{W*5P8jb~TFsnhl`-ii$dx6(a zzj>#ngiA@HpE2fgxBl!%a9PVyzJ}fckl6Z0pQ%=8YoTRVTN|v2CBc*%O&2~Ec=pqd z96Hf{(i82jbQ~!^uwW5BYzg`qi{I;%NR(Zhp&pH7L)v|%i2@Gv4jsg$lZ*(JJJ;dC z#T6ZV~_ zfYg4zK0bv;5U|&dEY|5(VR);aonHI$=<|zL@_aVqOIS6+4kH3&?UBvnFy%Zh_%Gdq zG_7JBT;&_Rk5R=#uCBEmmP=7Bx^r}T4zMk$T`^QHqHf~y6Y`*kO^oa}w?$F1Q6dJU zaj67qscq)YF{Knh@OAaA>~>n^Cp+fXu=8{+sUasSd=JP59zwhD|IKSy>}sE{cSdq` 
zMO*w%lQnCBqoloyZ_;F3U%aQppz`pIrz^9{85W1mq5O(%d4fcU*J8*~ZGY~-_jJ*` z{9>oC1=e(w7}Up(UPeDP;!#Y*K+SjQc<+Z<1}?lvh>rfNZ;sU1XBn*%?idh^^KUK!9==uXFEcFl z03S3kS!7MAt9Qy6DK;(W<64}LivPZ-@9o!-IlFS#@`W0qQ|B2o%bvd3jn|ZL`GvW! zVhcj3W~-@aS04a0l`bO^=hGFl4TFKgrf<;|6>lIyi;iB4Nruqv8#&*h5}5^FDOSVE<7C-r;iR z^8%=i@JX6JL!-NCJ*cgMbeW6@58UZH!R#*T?pNq0($G+32)F|5)P_~y9?-g8P^WKF zP!~W$+dITD*-iO-R(~7gEGoj6DEKP_O1$!U^jeARGm{usY-Q8C`%q4`bkfjOP+-gH z+OMDK^Pa|wei*nZ59-d&;ZFpvZq?Nb@To2=Of^QQUg=z4`eYu`aHAmO{hWg<^fnVAz$LSsnI8)C;Twn2KopppQAI{sY=|ZDcz_xsh zk6*gA$fe_l&!RTmn#N9zjhv2O|J)7OYa&g{l2W#~#rChQ*QTe-UL7vfKKDL|0Gz}% z5k(12g|laEJ9LgB-x&4@(u*XA-T9Xl5+}KjgZd43-4_wAeS>1am;SP$9uQsp0QRKh zuI0nH_HEPj(N8zm$8#+xUO+tXisK+CfkS(xxWi>~%$VJY6Ar>4oM~Io+#mT+_VTH| z2qn|*Mqs;Ry0#5$#+KZo+e%sIKF<~a-0A-6dpcNNkxN^LTm&05qxbZ=Y`hzGn3#Oq zGEt32NYvBByT5aUlQ zAa)#=F+y^`--0JF+{Bc&>loO0zai|dPiZ76c!Ygo4!O|7-?@6pO^6uWbDDfB6Z zcg3f2FvoyH&O!(U+_{cR4QEF3!hOOth{<6-I;A(WaVn9Rta@Q8-scG{$`;!VY~jK= z(z~4arku|4=qFkagAir=(*6aE0p*}E&UZdn(4@ggW1 z{=L#j9rJOI8R`8@^%{q(Mdp_YAh*TvsS+)0VTv)1dOreN0Mir|6}K;s?#-a{>Re8g zHPQ9by)5xji$|Pou9jiO9K*NFnyS5QL3mhL+L4V1Q`w@OD!5LE+mIsOtW;U$7X!i< zSn8OQX7T0Ti-7Iq<7@b7flmOn5;&kB*;0W2YSNG|jvQ)2-h*EgVv_@y2ya}Xm+}&p zQ7CK7tWwN--*7{9g;9<0u@L1DenU3CV&>w(%qh8Ve-5b;+81eCbDkIIE^zFe2*<*E zb%6eRE=weW0{iJ9C9nnjQVwE=1uZ+LK#_51VYTY}yBY21JrTiG%KY!X;`<@i(amO3Qoz?h zKE*w-p&z*)oc6tvfFX^Ci#!|#JgcR$JmmIJAHOU=kh^D_O8T&KkSe=u_S3LU<{;88 zgvL?1Is=a9KU>O3IJt28;3*!44Q||A^sNpD*VcFbfo4uxXv53V2g?rAnLY3Ea*Gz5 z*M5A2rP*H;yt2E-ugcbFMNg~ooZZqP7$_+BzuW)MY0E2-4ZyWV{vO^sqNQNe zB8rmgnBH@H7;h%>JqkFgESo*~n$ckq1EB5?!gD_9AwyksCWlngsbL+0Czp!C?-iCG zc?oVld6R(q1!s9Enosqz*zV?JJUzd#l74Hd$+)EmB_&hM!+i9tTTHaR0NA-Flk(w! zYW3=!lao8%usSV32^4AWLp(m%JnqgB&usIIjPMrUA%If-Gi@`KqyKSg*Vwf}bvRMm z_du>_hE*23l^RsgP8L9&T(u!lSzM|wXlyfuMJNbSE~y#C1fDSm5xPl^#e~>Y?U}t! 
zx{6f}^99bC?kP8d{f_-(0xvWsBf;l~s)06rY|lL5tHX6I{~4&IP0tot!^R zqvlcn!*iYE`ueqZd=&ve(nP}LNY3hop_48Hk7i00x8y~t4Svtii zV1;eOUV<$SjtaBM;(R`@dOTd@XPVZX~|KI`5kF$v0m%%gH`tsYz3%S(6Xt>@UN zmXvtp$NKNpq360jEto^Sk6Cmr*G>IIp93?SDb&B#ng_{E^6+(~u2ePj*U;TcTEB;9 z!@Gz%1KHdr*>CeV)d&j=Ko?L1LI$Z|czj%CP!9d2;2*G-h8ZjIdEZiE zkht@S?{9PLCM~9`Q)liGP88LrJaTKE4FY+zen(d4tlaavn5;MVc#ryE2f1tHXdxhtFN6#Jye&({WZ8$U zZAMe!v;5)D$<#b}(I;<+r)oO|WS48VaY}dHj_=N3Y&d(o{Y12D`$=@=Ln0~cy}3B=_qLgJqaq);g?EqW44n(GKZGo6npg@4r70m?uto6oaVu?XVS84cNTO)c z7WOn?CjKD$jacSu&2-mr&ar`k0< z-FC{f^33m%hr9Un?y@W;!1RJDyVg{NcB~Q*AAvL{>_l_|-gsb%3zSB(=63l2R@t)6 z-nvkFd2}u>SSQ)h`X>ey+x5@gpy;=ZJ_KqC`Ec_eJz+j4P#g6v(|o7y6%zd)Pts#F zVM5n@+=gANK!ORWFASmg#o;#+ACoq#*P5HXKq(YbG{C7$idR<7%yM>FgMCO#JrI^~T1`o5m^ z)yA*YTq}kUx%tay+`RI-TK`T@y0%tlPEl4f+kybZqaTN3^A~NuT$7}DCtPUo!v<5u z1fJVJI}JJXgOu3I&1dV(H-i_x>`0sJ3ttwo`;KM?JW?)ectOLep$*?)c+vL|ILk@- z+Y+S$x=a3BR!twMO@ZIE7!W%pwLv(`On&r9Dy+Y|zYcw2K~D*KtQvxju36LvIlpi= zegtJ5t~(K#eK@(Vh?81Hm(bbM`|=5mYAcQfx}ktLMBCnj+rA07)4v#H+Q5-mSav!k zbh%B&eot!%uFw<*Sh&2leYkX8;0S{$`@!7gL76aUau<9EijO`XO`EYrczn~JppZ3{ ziFNb-#&NPiT0M{Y?XFcU|2iO$2}BEoC-g#V_2o-I$7zr>*lyZ~1D$rBw(S62qAo1m zJ9O~%J2<3QWXu+kZhI(n{91ez;C4X;Xeau+%B{>gqgPm!pSxY=B!24N9NLfQZUpvI z>vr7LeVy>wNPp~9exrty>$1TKR6{R|l&HJI7LUp}Q8uMc9ldmat46BhRZIJ*Swmz; zay0qcNC6Hwe1k0|fB${srl;2Z_;*q6uU$3UW7lal%vWwVO>4^%-XK#p6210w!eh1K zjT3l9G!UbAaR3NqM0MkXiHS|jrbTIEG#bnvE}DZzXq+(V^S+&67}dP9*Ks{j0h>A0 zTMNXE(nGoH+MLKT&7Z+RvwxfK>(Ka4ohu5VjTNNNCeX{vT%{jxRl&fgUDw<0x0@8(*rSs2?tA#Faoo-2xV>KB}HYbd>^Q#UXMAS zO#{-&BGLZwbxr@v_b^cHm3$K)AG!XH_9H-gWQIL=32j&d=9v^~JS_MP4zR|c8iV~7 z1M^954{h;pe*_iC_^)86WYj4yc4is4VPX$P8L|@Bvqfus|Mn(F-5wU}PB;W}2sgx7 zU8S?5zi-KH)2Z+{(^Y^6zElShWr`FX{Imj&@j+>nhR-a)W76S>R%JkMG9VPYQg7mk z9X35>J?$)W**?mHAf$w>MXzYu8PZGPn~7^95|pl z^q^vMkWQY06d(YDDvOt`assK;`!|Od`ewH*s$>TPY7W;5MP}1FHk#{&i74@CSF5VZ zN_*Xd$gKT)qGh8l%nKE%7*6#(fHD@hs$tR}CQ4TTn_NrV`tn_4g_wai2L8oOmv8b2 zq6Sw_c&{^YAMh*l8Cd_cMj5zi7Mc$Z)8?lb?g90p~Tkb^M(+GQXI2 zi^Q9*7SV$bhk@##24qMR3s$b6`e5+ zuwM*&&iQ?St{x{Ql^;bZWKzrUBg)? z8<}kvcaI=k4P?vIg--ac6Oc-`_+c%9DgdV~QY9LheLZl=f~HXvwRrpdyO+YiM90%_ zjm%aZLRwsU(8h)I3$;ESs{MsZ@{v?<1OuViT}IL=H`mr_tA*v@(18%9J-Ww`exASE zhpym?7d}q?a+AN&@M>;eN>;Wq9j?2eAxdl)unAS$-9bpY94o!WJJo5Wcv*J4xW+l? 
[base85 binary patch data omitted]
literal 0
HcmV?d00001

diff --git a/docs/stable/_static/img/pytorch-logo-dark.svg b/docs/stable/_static/img/pytorch-logo-dark.svg
new file mode 100644
index 000000000000..5e5300038589
--- /dev/null
+++ b/docs/stable/_static/img/pytorch-logo-dark.svg
@@ -0,0 +1,33 @@
[SVG markup omitted]

diff --git a/docs/stable/_static/img/pytorch-logo-flame.png b/docs/stable/_static/img/pytorch-logo-flame.png
new file mode 100644
index 0000000000000000000000000000000000000000..370633f2ec2b7cf35a37a283095229de337f46e4
GIT binary patch
literal 1010
[base85 binary patch data omitted]
literal 0
HcmV?d00001

[truncated text diff for an SVG file; only the added line "+image/svg+xml" survives]

diff --git a/docs/stable/_static/img/tensor_illustration.png b/docs/stable/_static/img/tensor_illustration.png
new file mode 100644
index 0000000000000000000000000000000000000000..0d63f0e9461e3750b8aa206a840d5c5cbdb811f2
GIT binary patch
literal 11230
[base85 binary patch data omitted]
literal 0
HcmV?d00001

diff --git a/docs/stable/_static/img/tensorboard/add_histogram_raw.png b/docs/stable/_static/img/tensorboard/add_histogram_raw.png
new file mode 100644
index 0000000000000000000000000000000000000000..96ebe5c48038127d2155110658c970087547ffa3
GIT binary patch
literal 27930
[base85 binary patch data omitted]
zFI;F7(-e-D+_GRrX%1EIF!w{LReZTmX|@zE$zxex`N0#ICx&Egj-D)aV*&`O^k;O| z?)z4+Lp+DkISuhG08l)*IN)huGwALe}@GSLMu5en0hP^We8ONr;_!R-nA8lEvk&?;hklredJ zRSsd!6f}8{<|`X`LeTY*ctoYuLRQ3hE&y=7AyjGGMN{tQFQ;Iijs(Yx3j}Q;VStyv zIKt!uRt>`PX;(FQ#i2PZJ=(!kS>+Ei*UwRBcGU^pRc@_0leB(;O-^&q%gGS-iUGJ}Qfq)Gw zhQ?-UD0gNl63B`Adw<>Yu=_{HTTqnq9iD&zEjb3!$B<+cgaqWgj@b3PR1_`eAj=ds zZ?aqR&wKj#O(e+NM%_guuybpyKhk?$>mc6|%$+gpBUjHVyvDi|K1uR)&4v9IsWT>kzJe!TU7^xgw!dZHb||6Trw)~ z6V#(v7;*p`9XVhFl{P_*^_+YK1Y&heOEpKC#pX)0?Wl(hzCx>kM zVLrp$HPqGCZvGD{6%+Nb>!_smY;iCzfNj7EPdz%z(W-niUk2Dk8bD$opp!S*FXCKE zED>_C=VjybQcvuf4dF*pEy1GK36mM}W16Tz$B=WNf3n54a7~m;IYpWZg zR@JO*ZUiMxv6CLdpgj_LL zMr5j=DNkRI2lDKLbg}I+_$XN_BL>G=WSJDS1K z{QKlvYs45;*Sm@vGnoKdEJjN~QZjTYb_78AdzJ`w&*Evf*S$*q#ooS9&^DAD9 z?$n1$(TPS$qyS^2+E5Z^ay@-SvEDkPR@WrqFCW{ox#1l5_VT>RIz*LafWMM&zG-(Y~8w{p7?bS|&R`yv4X1Xyb z1J>R%str$==+qZr>$!Ui4l5c`0}lSkd)i9O?MtRcP!Z-CMQFkhjdNvRPui~zI!M5I4l95) zf$`~?%EHiZa`1zLn3s^5pSfwktvzOlub-62Nr0pTn%v%x8tjHWHw zD$7m32*G4)lL*gFp`4%Ix5b7hsgg$QP2&?keB+GG%npUJkDFlu*y8 zo9^?DX6uot?ATh)YJZV;LU-8g^>Qr*;Z5vVz@I!&iapmi1TNlpw{oViGMwlKXRlel zYlO{Hhxhg{h%driw~t`P-~mBQ%tQKqHIJHV{?x~HiE)|zKDRw}DQQ6D^}K&pP?otO z_($+P8u|<`gIX_>GHB*R#8Zp&M9Ru--vd5d^bPJj@gi?WWvd=l0ND}SumI99maN;8 zcG6MR4G)_{W5>}t?CpN0`wQ#Srja5_sAgM;bAtZYYn%}Z zW$WZ7#%R{*5;L&?m~QPjW*z4EHSNZ~??;;SPMWJ0!t$E$GXk$i^HH zCKhid)qhHoS1h%A0NGAbIC~5lzLkS~K;p^L;PVS61okGq!TiY?k`nBQyAvL4~}g!yoC(GBB7t-oA| z!VM=!N8CS`5O~mF(XwQV!ZzY!HW9~Dv$10ux;{nZb|LLa2{_uaa|@jEds}z#x&*VR zwQ~G(r$HRf@BMI7+}_%E``8wRoO^_DLLgzruCCIW{po>u5c9v0*EghaTMW%{g$4rF z;luuzB7S*@YS+){y4TmNA)@^wu>Z0;K#dgGMtF_8c54`55>SaCgcAUnG&swEe~Lcu zCj1S!k|e^E2}fyjt7KMuM~F!m?!MCZqG+j4yH7IQ_$MA4j6mhu?p*I=GFP-Alfr}g zhE6?geK7m9r9nS7h<%BHzn1o*bJG#Zl3y;L9(6 z&s6;7_nN3MUPo(6UFqLI?%$gn;xex}EM5Ka|0S9d_w{GQsOtg#H_ogn{W?uFp1{=q zkInz9#s-OYqRqnZFhxY_`DaVHAoofLrl@1(KCiGW{?EE11%6Rk?*wAd|+P z%2~)<#VwV1TKA_-vCeV6aFav7kCS|kI`j>ytNuSy`6?!9f#P;{*`2AgyYIM{bo(Pe z&A+YrC_RRkY?*mbEwa*HNgBG4yKed2-Tdu;}_-`SO@!$)bdog%A{Y*~8K+CGGJfr*1)g&svuUPxXM+2y2Rdbz- zM3{ZVXWf5B+&)X(e(`W;X~#oU>qSZZ#~}P8?^OuZ_B>;+dcrwoeb()Vk32^jN_Ka6 zJb;(3J9)+g4wvl>{qNz89jyH{{v`llT#z&ZAK6>r0L-?cC89)_9B-|#rw&aBjEMWM z-k@Q3dJ3ex^Z@|=aKQp*EIsx@R3e^dvg)ui4Lyo>(GyEk` zDxiP4z1K_pl?D$er3c?C`j0llpccf6H;lMVhoSyI?R@z=)nD{4Co&61BxFuS$UIY` zj7K3Pk#I84^VFdzV?O6&IEF(hq!JF9ImkRGmHC)wj>weZ?l*)k=t7<^} z_ePZ4iA+mMO6!1%zgPR7_BVQkKc-CUD#wL$8_!qplOrk?qM!#0(8Z&RWCX{LHL`4& zk+N#_MCTSW*8-;>y@n-u52>;D<83NVLQEfq8ttMVQ&OQEeG_C1`09N|o}TaVrNdDe$L`aG z<(bHCRHtudkk>8tRLe4!AxC8WWLUp-BJhtr~?Z+#cRoeDj7phh9oQU0a`M7{O=2bEN4LCRp`|dATinTQSs)8zEwF{Owg879T#?Kqx&sO(up2%pqu zHZn0oj(Z%Q#(&_lzpJBg=aa4%7a~APTiSiJa-4wjZjxxrs?KpMV+0FX}AS^eS=saCCH{36}FmCz+}!-d3Iqi zaG4#=1MyGjbt?)SXX(p^wTWyb9@YfUB)puhuRezEGIvjjWqwATET7eB3AR#%0V-_O zX`@l|I_D3eC}zb!yq$|s0wbX^)FFv-xbe%{dR+J89dcBbwlqF8OwQxnZSGj$UXHUX ze8-Sc!m2qjEr+L-WrNHOiS4ak2IY=lcz(C(NJ@jos41UoCKigT@Ux_=PlOZCzhF0U zl(&ZAH(SmKmWmGbOMOf(DCp+_7ezQ@b)Fwbpbji}{`6Vdf(c)hv>&%OHiRb^CT!kJ z+kv=>L)FC{8&U#R(v$18&R1OO7+?2YZkNUvHE15%TTnEtQ8Ozx6#g)^g*=tZhk7_4 z`a8;{jfpOJP1nSgH!4<^t6ksyG*hk=Q1Cw7cJ~NrS}J_xBnRpchpFKq- zsQ^H?{ro5~j_kJOFGk|}_zV0zGb+r6U7XkuuP(cv@q*(t)45o%l)@i>!gTaoAam$n`B4FbGuK8^1%-uMFk~OGHA<-L5gPp zZ&M}v!%v}aLzA4TY+ z=Hd=Tj0|}5s)M$vU9vgA>n>QG zCx!T~dZx!{+TN4AVMW<;=e#P>Se?C}Xw$^J-(om0&ooG^moqrKLEHI63^t<8qOR7c zVEYocR$QZbC{|A$>nGYQ0<@)+OdnV1NHQ*taJk18ZXCSg1X zvi1UOU^ES17btsxU>S2SD5U6gw`1P1Fcb?K_>6=uHp|^e7Qp)`+rJ#DhcU3|P_>$#B5< za9R}N0?1)>c<#rGdUR9OnAKQFudCNY&@tUrZhq^sQiZ^IEg^R|F+pp9Tv2 z%r;3)%X3WlL4zx<1s9Q)- zofA%l3F&Ef74hB3ap1XS@UB1$W5e|1J`kvDRw8_k$}mGUuETGVm0%b@S($fpDzj4S 
zCL$4iV8kVF0Q{N@ZSM0dt$F!-(#VRclLt(YQi)DO%(co!w&5mTH>W(i()-iMk?%o# zr9gGD&*(DEO;`fs(-*ztfUnqlC5T0RX-lSm?+nW$StTJz`{4xI(?( ztkfK)-%vHObOuHplmHEKXs7(N*+7#!hRpJHuXA44fK{E{e68mxq_p1SPvX$yk>?-~ zn0dcW^1pJ2CBrFT6e9+|tPG8^=_J^!oQhR-K}J}7siWb*5V~nN3MaP7<QE5H6U4AU{9y5uB0fST!1S#oZKD83LAR2n$(f{t;NpnlfCxWkT)3LkHR zlnkG?HEpjfzXL8N7Ccky{?;f}C67Kz_5agL(P`t)oQ)vArJ+}MCWo^|HW;Ql(CnNS z{z=9*8j{)uwR-eYShNK4AfP0S`VHm%<`^U$4{3+vN1^D)AJm$> zg08&aK8B2xsQ{oMOc(5Y8%p(+dLI!|0pYCjS6qu1Bei8DtxK*MKpyDP2cMnKlTT)2 zm@_SE@p@6V7UgbzPm$iwnEsk`GF* zsX_Cp>e;iFA2#xOQ>`# zj|`#tjnXNIH?2?K2_GU^AWJ&O!%aeUmg>4;Jp&_4`FcUz-!E~0OP3~VmM4`NxG!af zk}*UQ=*;h2U-76VZ6J{t@j!iYi5)QOGdB;~z37*x!gkG|KOv_G5fq$YIwtQok5;~* z6Jy6qrmXD$7Bg7>2{T$=f&tf_<+gt~G_TIi*44&k)9Zd++~B2T;8xAlL(v>AtEAAJ zK2&}=&ySi#TUwNl`Ay*uW{CMtPCc3}avL)roPKHIF{33x)4}lqjNGbENl-S_l#1!nz1!$pk7PA+-dc91cjAhkpruz{3GFTn;}UUe#cCW)zk8XRxPcQ9 z8vM|(y5!)4uwm`r7#%s3H4kiGdx``5$|)rZ_Rp8fo}Xj0-AE`Cq2a*7*}g-ZvG;uPBsKEIeSa>g_#>tNy&vI4fDJy$>PI)NZK2qdIC>Zf5!Gpmqwz`$HsoW{7i}3Fn=9R>`UODeM%jchU zlBj_5p5hlMDHT6VR$S5ECrlqY#noxz?QXf9=96t!>$=CfZ+6y==ExD6j10b4@}_GJ=cc%Jx6mh_=I^8K2_s!)2ZN)H~;84zwkdl$2JrElQn7s@63+f0emS z=0?maezAph*M)T@rx*mCfku#^uvANfC1tg&<$mikZ;T6UVTkx}jUxk|;svj`67VSX z{R297Y334E-^gT1UN8nIY0tvoYoCv;fkQG#Vd^j#f)RZ(OtlmI*4f|ZbArQdfP>Yy z7e|r9kZY<+cwg?nt9K-Zl9`H9QC;W!QTUU>q-ZD=rHm0{M{XcN@sgr)UTE^2h_MB~ z%`D6-96`jb&(BR{zGipxSj=%iDg_B1YZnQEi1%X55hf(46cGO@A97ZYjZ%#VQSkLG zZk2g=+Y;~q7HU$Icz&G;FAFzfYQb~@qE-wb(rk~ddY^!}A%E5@XqY?AZfr$H*iu<`? zc!?Syz&GPz=UFg<9vX356cTX*$l@f!u0aC+ZrJC85$@0kHGfD13y`(IO^p5w1dQew zdyr#;|5bxVe1j$}fo2C92SNgrt5bx)2o7k(tA~&XXm&{BMIc~nzW)IjF%6A~*nlRz z3e65Q{(ow+!$o2=QY`7XnMt+=&HO(f7E4aLt4~kylQG8)5~WVnQ@q5Qz$Wat)gnbT z2OM`&49UJ=)P(jDIn!}#2Sap^oiZzRc2Wjpaee`QU2?OMv*|vgRqLH5SUey!{IxVz zTU$Pp;*ioEI3wY~;yu^%dZtrmENy+8cq+-<+jRXsBFKK5XN!h>b7f>`X|%es)vlW> zXyc9tVA7DZ*qdNY6%8Dx@kq2Z$EX!qote5ztKhTZxVbdY$D!n3EC;9rht4|=7Ta~L z7U*^Gn3hl!J=_~V6Y``~ckE!ZbaoG$^w9tP&h}*7tm3GWE1>I<%=kMzTfrrGaE~zW z_mazh?)40DykUJ)oc)m9x$M#Cj;*jcp(UPAcMn&$J-Su%GY!z0d9Wld;`acHUIwH% zAiG53oJT6gBZfs8wwGN(6kL)O5BJtnU4j4Dr-dvqepoM?aI40ZuYWN}tX)!)K3d+o*u6QHQ@4h}T5Ef+%crJqv^(U) zjEc9)7!7i4bdwVBMyIA=x}>|Oe781XXKz*|qtd~O$=CnEj3a@|Zz_J1E?_KZ>rYt@ zy02sw8<0Ox;_$PhsVcx`ogC|&<Zq2*E)Yc{6$W7 z_DoeKKr$;%c`y)AdUOIV#ZKGrYqTCLQz&wBal=DG*qJZO?hophTrvUC*!_Uv8|MMb z>eb~n{vQeCrb*wqEy|cK;)eLLNE`ZqcKv%ZGPQPG6@OPMks8-N!5g%MLR6YPAvPGN zTk^pD!_uCy)QSUin2~r^i$FIIz?6|6cq{z=Bg zwXcols|;5erbuCN0GZ*m+Rf^F0gz12{3?9x#N8?KNpbeO0m4bm{_oAhtM|Dnnr(fo zvd7PSbG|NjTd{e|x^1N%?A;`PB@wH_qI2ov(Jdc9a@e7` z_?5OXa;$Y_n5-?3Ws2sW{cjM(c2gpsK-?Dj{ar2{8mSrBPd(KwVH0Hf0S&ZmLvCqx zDQcFr>IeHQ-Ka|d9Kvn7gNoZ2n*xtE@*7_MwNXs_hes!q) z1?Xkp(%sytlP|pcEh`BP=Pyza61=_#=mQHSG-X{6ILi20v z@LZoXogPAu*e2+xLy!DjgS6 zA%;11%>cck{nX25{>!`Wj@-fS;7uI7#&x>9p{`HGBFM%Z2mi*17%PsjW(X=*g@1%<^d`i#+AiY~hVWNDe@x{-u zB4je*sL6&%RxJB*FTW16%Z{6{9ggoT`5)*U(t(i{gMMoZ9lR^vxzq zd)7Qa`mLhKcj%om-_TnQw+fK%dVCQ8xxg{R zeNRtA{g(M#oB~>;RthQaZNbsB_2vK+^mkGn?BxpRM#}mHX1CXPT#w6gtPmz>aOFm~ z#9^7LvOZv8XVp&}S zCD`R}xhkB6PD5X2e%2m%RhA98)CToh@M`V$HcP`0Tpz^e`iQ4hC=xqszWpY?FCTGr zM_6c;jNHFO^lXlqlE0R95UcC6RwdD`h^zK}Si+E@+FW@`1nymQ{lcF|LtgC_-!wli zEfYywvb)RN?sdXHBWhuJ?d zTBU`pV^WLRD{m}zb^v{`!|z+WIySHduHy2I`lZEMT2TQCtNX^`>i1THLv_4OZ4}V) zp-a0je6kf2Ppz-d(ujMZTI74(_xcVNMd~q(>EpHUqymY)2fl71alN|b-!)-(vO(=D zP*SQ+m6<ImVfRK?yY40QdFo@=Ih*X?JV)F*xDyj=6G4z zb!i9v2t)~%w0?Q7@JoeHNv=t-l51VmU?I7;Z$R*?aEYjtA_6oU3GTao-~3r!D^_N7 zQwk&&M16gnhD8wENXmT>u`aCQW6lj}a`8dq4Xm6?!HbPy+7+|hLxK9qZRFV9=CLPN z{4~0P7h~@feGQ!se;cYB_h~ELmB6^kZ&e6 zoVsM&J=>i7_)Nr*?qCoqCp6Nw*mA5UX!%{;I-O8uSw&1B`G^JFyyJao6*Th4T_KO4+c?U;!k 
zXW_d)^(RMjda8tZ{^65O6E4I})N!<{7CSlcgERd9o^p@tyIcQr#`r{8@ShGOn(BHt J^KV#&{x1hx6(;}y literal 0 HcmV?d00001 diff --git a/docs/stable/_static/img/tensorboard/add_image.png b/docs/stable/_static/img/tensorboard/add_image.png new file mode 100644 index 0000000000000000000000000000000000000000..0b675524b45925ead44be07ee882914aaa632aaf GIT binary patch literal 47119 zcmbrlbzGD0_dkwUgo=_X64H_iNR5#0P!yzeVyx^=Xst(sDYmPRjS)mWMpJlH8oV8kdcvN$jHu1 zP+S5^TG_Y|WMo$~9F>&~G?kUv4cwvjjxKg&WE!Dy#+RQPwlSueJ<)V{L!m)g7y9I@ zHf4tD^^g3c6q@WAe?DGUSr_i$uY}z==cQ__YVIlYI-jAYgktZ+fH5x4>*ovQ1zoEA zHN-~UM(Reg-)V~Uk^;BKM(VjoIl;Vfi<#AA^O{t)TQ~0J-^+Crd?NXX?2_j%hhNiM z3r}F?=HjnK{ZDIZtIicyGM^R?drJ0{TUvf+5;^bkp4tPHmdXeAn{o@}RHxj>A zsOuiOUt7QRLTR0UI!H~?h03jxOOnSltEZJd#N%$Tzy%>%CRe(L_1Zz(#TTx=if}RV zU%%h|$!+xZ)TJ~X24hF0X>{zL;ms*p%Bn_EQMhX8z15TlP8m|tm#o3>6Sq?W`O2@_ z8w$?;xia2Lztv6xv+vPqaVQOvMnCA7c-J;t#`CfjGMVwydf13p{&@81X70>b<5KRn z0C^}%KGkCd0Z_l@_L?p&vO{^*?grOy{j-c;5V<uuNT|vsy`}!=*PMHsHH{BP}el;3%Q|$`38f{}} zWF;l*d*WQ*=ItvXApASxwO7OhxtRwUv77H*Q+rh_{OFT)GT1dxsKu|y7rL*5SXz>P zO00jUjmpoQdYb1TWm?Ohey5f5L|rzGC%YupeZ+UfawKZCiu`5t_A!N2=~oZiS;5EW z!mmz|lo*X0*g(Mx_|-+~*V~{>gl?Vnk9ryk!mL#_iH__bUeRvYN+#_gSqR(tU*3d- z?jvr0S)+5VW$%hUGW_tW+*TrcdJrH=*8JvDR3Zb?zY z-v~(2>NgyE%h!`xMH-yLQzWjEs(0=TEpFj zTLV01sO}y1XpLsBS5L_9#sA@C_^74H ztm&uGqG_xl_Z4m{RB~}F^^Tq%r*^A$E5kf?-ez84o_Ag`Ot>k2Rz36^1)m3>z`gN% zLZ3Q5v9_~6i*nMx`i197X>NFK+~B3vTdQ)b0zyJU0zy^=ZKl=*s-|(K#IkN@xj|q3 zfgI^RsWM0Li{Oi3Ufczo1dbt950!O!U|{*;4#SSj(Y>SdM<8(U-g9ta#UsBASUD_f zj{&zNL(gKyQqg@S)jE|b^}I}i*OBMFYFPChi!?kr)(P8KE$MZ6=cX^j_ouIqZ^tfd z&7#-uhkie5fM$ScIbj((pzv*~>(|ZUE7#uj90dPNbqnsZc_dyeIepuf`4cnAwinV1 z{SK9yygydlT8!EJ{*sB#2J@h*(|l2D5wUo6G32p*)@W8>R(yohO)tAav#dU~-)c%( zqFHrnBN6g9OFuMz@c3ZNEO~26^0#h^4o`BY#F&eF^H*ruZ->)9QlDMcepJ0pSdvU) zb8`1XtUbnF=7l|k@g>j0aR*p?eou-ct4kWVzMJ&fr|4oMN3W90&+ggErVT-5#^kt; zAGWFXyJktIYNq^xDANkFYo=pn+$KUlG0t97NlrMMRL8pR>Y?hX<_7ziF4<(?WKadY z8Jk%&?$WqN1r)cAd$L|%>RjUI1hwrc7pW*hp_d-PDcoD(-@PgqH9U9RI^8DbM`r?M zd}KmXexwS6vbrt1t(SDWQ*9`0{981d2P3OilssP6&{q%E$lmF=!z>4D8_oXN(=zr! zn;GRhQRS9yA!p%XfvTDG8h|G+SNthx!hNfW1x?jqj*~89IVWr~)Jt2!x4P$o<|L!g z3vHVz^M|Rg@|Ji-pdyLJ?~IF35%6kw#J(=u`E;rD1^J}_%KE$Y)B#Zk_4|Zvfz9!a z{Ty^SGV&C1nnSnFOTp`s-plfW;TQGj?OujfDjgcq-C}`O(f3SGnRoa`Wu9NJpz7ZCr78oB?5wa4(9rP?njsiv1Lsdkx5w-c@GqW2< z%=KFgRWjQ=BOf}JhTVv~zWON&Tb}en{C9uwuRbm_EEBA@qm^--{aPN2ZyIgZuuig8 z6qJO4*N>%$m84259y;C@-NH1I)T9u?l2mm;t)VjzO9`b_rZwi7)OFW*!d1?*#-*=a zsEtUbO)BB}=GKV|^-^{UR*5=_={f+ZVrO6pQXZs#H7R5@Yw! 
zT^z`NonLNJm&yJ(=(~RB4dYNNTXlPA8zPx5xm4^GpCmUN$|@})w^6KO6~iel*Wqu} zm#>#wTJ{tZw~>IBM_Vy+kKa|FA6V$Rc*(VKGTYU_KhOL5&F9Z84j{1Y_4BH}sxg@r z(s~~8T|Q6lSd!+<{e_W`HQ5fI@FlsFo;0-Axe|68#Wkh1AZ}_uMpZ#f`R8h>S6fBe z<_Y;G+egWtpr%@#T8wd?NaihYKrSpde`gVXG6gmX6JZbmTP^-{nJekBds`B|P`JRb zz?MKrup&+3i0vC8Mz)0xRx9Q11pZn6wja!S?~FD~vryUIy89SSXXcTg!*KiwDZ_p3>LCCYe)GX zD3Tfr3T~J&JL8-LDsG?{Abz!SKEx`v07U-y%aSbrmYmd>REwHfcxoY`FmD`O?Pn)v zTJ(JbD+bnY(a%tLC|N1)g6)_vtEX8vTqhnc?W9B3qcBsxzm^^~DvrN<7;-bD{dRP- zbi}c4W_ID?;9_?p9Sy4D0CnfcZe0Wv^_(?V-pSi@;lb+97#ro5@F<#;k7>)GM6Z$> zFZ(sDp`o4yNbb;+nD||6sV6vH&MPtie{lGkpe}r1A?4q<=EVL{Dx`Mo#6NOcHdCT#Ac!XzCQ4(m=8b2d_5M!N-P2ewMA4SL)19+7E`hN9L?h z*Zy3U^^V=Q-vp0==HNAx#9D>@y9XV-O<(45ei{H>-VKK)XP(j zgX3(V|Ni`Io_5}j{~HP7@$Y2;3luqfBl1A_zQ}+32AayAmC6`6dfT}?S8)W}K|Fvl zK0DC9XO3Ujw}2OC9+8n>e@%9Qz1NtW?m8x5 zLDTl1um67Yo`Sv1k>c-?n`Gw#_sG7`a{t}>I;QM@JpoOxye3ofBQpr4{a4@r>Wl)6 z0mk{?ke42jWyq0zSE2m7X`lr7-xK;*#q;d1ds)ezW(NM#m|2G z8$DidIfC`N*GytIya&Bk?YG`!JZIzF4ax)U+vGK)(WG?d320HBV>(%q7BCw_SJ%nJJ)8d zdU<*qUd!eM67K(W-f_e&47J>QoNm?ACD!O^l0)=0_p09MTWWHfEW=?Sp(c~8r^4HI zgxDh2R};t23%^)d!W8#Aye4BiplI3D{a>Tg@!>IMLH-1awJmtrTZ+%hT2+uJb zWFs6Jo@we{acg2X-6YSZ%#cJXi$q|29yy@08l@>7eR*=h_XdW|!XL7~)D;iCx8ZF0 zqMjgm>T*MWyI0Qvy>rA2d3(xxV<eKrNcWWB^2{pE?1zMW(!^z@(zw77V=(ZM3AcUVP}j>jY2e?%5f%oCnr zO!V;u4Gt}_LHvIO;L}H%UqZ$pVAN{-k1huOyO=ToXM6gIfJE?%f|)F z>ies<8hzkQ2}X081C%T~KZ z@={}LNo;`?(bIImb$@h@yp96Qy+5BI;bfvKJ6`k%vTn=~D}1kv&G%$MgrC%C~lADu9V20eDUM{S&?VvhhM56mc0)%DIq!h8L^CPDHy0WCH zpQi!}<=JMCNVp1`Entg5vL_?fn6~I(@U~taHe=vN#HF~e0vu(?MPupxD@M{*2kkst zDzi|O!n!=;r!`Bo`}dL#O$W66UN&)gmQeYvR)e_gXxudB$lGkK53VN>>szqF$QqZF zsIR^8*Aus2^QT|eaA=WeHBH8vSo8TG`Z-ydg)q~Wc9nEL{I#? z@Xi&zqv^+_(g$63K6V3N45{n&zSgIAVk z``KkPqBo3y(-+wHv54Vey$EXNCNL;P}L|I>?sd*^-6dsbTe^IdHZ->`(o$ zaR}1dNOs=FCJJU0fbLdbg7~;^TYRvQuqfnqN%{RJ!BrJRk6Fa&BN`5GDQt=F_t2nw z3M5b0A#jD?Wfl8P`@++B?<3V5kexexqqr~`*^H{<5ENt~u5*<=JbcXBvj>kGWNKfD z95#+OyYG~v*;df@OTVzP@fcU3KHFA}DJzVC9{FAgDyzgJ7gFn4Pe=OgD>g^cG5JRq zJSf1Nzj*<3@`n6E7zH@=y4gd$usa2&PEC*XQfn_6R~a~19ZbjK&!1EjQl_*jZ>C?0 zB`DPYNS-!^HVkd`2ui1eoU-=S(h#@rjcj2bF1VnWHSq4828C8a4m97!KP$L4;bR0J_1 zEZ2;OA3DZT7o9WflRz&K*W6RlmevZXl=>{aVXH?iR`D{Ly*}j`;R;p0)MBBhQR(tN znS#g+VpaOh)2VH0)et~Osnp_sX}BZ}SX?bnmlbvaL=Kcs(**xToWj*tH#9)j#0uSz zRS#3qJWXPLV&5PpUl&lRy~BU3<0QaOM>e+Do=@s-4O5};j4s|vu;cgN7+ELfcR%Tc z-!c7qe=Dss`6%xcLO>u3^sBS4V>FdW!Dk5N;u{Vnvv<*dGR>u4a~mtc_h3+Yph|;iHXRUqT{JjJh?(|a%@|OjzYtb-eCYKoEb2UpQp=aQ8<%W-5a(x z{#i^N8DxF|s4na1EN^a8-2=8-{1d!3qFhUlhj-(&qv>a{{1|m&V?@fGRL zB6oe<8b=hhu62y{y{eLf;3Cx5*s2b($-r52=!owuE|!O4+eTno^Av|bHiRn_*mh{% z?2zBjSh8}wkyqn;?m$fB0-@5t!MWTdFQ~#PppVxGp4TboxBx?UY`9MCEmFB#nYq2( z9}wWRP(A`HnT`Cyycr$ZEiaZFxxtzIc~z#D>&g58o?Y`Pc5je6Ou{{Be&t%EIbWIG zwVemZB^90biji)&=e6jw7Aa~g*5OeS<4sI#olp_J$H6ca zc2XFt^CSPx_A7X5f8d@~-RNr|EZ6VRZWMTaQMPiLi;L5avQTj_uZJn2OiOUBI;WRJpW zE$DDQ7JEgkb6R5DMND#c@X)0-INk6GfF4UI=x<nCT z0-UxBj(ekAKG_-bgIws4x7QyoCwFj`Ey?e!GxmKcPWnoMH101l65}mLuPG6(6DcV}`KszJWG^O`wS@ZCcNS;&SKWaXf6{gJmUabZPY;** z`+j^gdd^MOl}OpRimc~LV_&V7>ZY~`A|6NV*Wl{d#rMA>dm*sXFBW`dIP+~orQ^}WeV^*sGJ?>L#l z?b*zCuMECTw@q^hq8QMZi%V`QvKKgA+%CS4ZtD@uNX-X{c??{@pXarjEYZk#rEA-$ zFn%{yH&@(k@F5TJF!jW%4%CjiC|zUrHFWVAqJcA~Et=FG1XuHHQO(peEQHI8 zw@iP=7*JkbK~Uo6wPNHBn3%Y>xTAX04o6-!WpZCZkS-g!nh8l5i)Wb3^)R&vef&8`vv-P)+f3&KNn4_9uHnleHmiPYWayTmNE=K+=@dr*JNEb zAy9x16H*Ix#JPr*qKiT?fcv0OoyUdLB!o}-4HSY&RE|=P!|#59gvHRc>JSi;x1F{6 zsXq-dj_|ct0(~@iO?b)2P&8R;^|_SvFw|ofNvLpES7>w|?S2&yOt)d+5CUJPub>Fe z7cUKj@p;G6r0}v`82);APo%8za#17JdJuthUHMu=?mKOqx3^d}>b74hQhhmq+3?Qk z_6WRkE?Vd*ds)ONg*})^#gmyF0I-BlqB+k(C`fnokS0<*U@VY;D@nht1Q7;;r1O-OdnNz*WPW4dMj~JB0VWy2jdwi#IQ?#mzi_s6$ReO; 
zZs)@<(>Fe9H`N64iA9HaZ?hUmPz9y>SmpS6uU7|}IVy2mMnsDd1y9shs|Cqd0+=Hq zkB=MXo@TPrq6#xPq@KBrn?bQEV;UheH-k@bet_{{D*(~DiyD8ls4PINj;7c04ZMrY zxEIaZN*Yl&>RTBNf-{$ex8WxRn|W?;f0|(6xyNXCE&G*l1MYaX^*F(nd9VsW3DzCn z+wFn792d6H3 zjXgg`3DTg)4TT+YS#crCo;8pXNYnNLdZU)02k%w9teVY9J675yF;3yG7z12lku#WR zrg+Wxo^lPi6na(Fg)il_r>Jub=eA(cU|vyR?0dtG1-Wd+p)FCrSk`V!6evRfTE`0_ zlg)%3?PL6JikctG{Z_alJ@sLrTvpa`>s3H|yy+qpw)(5%pJwX%fz)N(FAz8)Ufsbt zjEP^QDHPmqa+QF3d01d#aG#8wT# z^ac2rUacP(o1Bs$0ZjY53tHZA)maPp`Fya#PS7rAva8vQ(M>vxCNnrSRyu5t618-@ z6RlI)@NBE2!jjW8Zsbsqi3&I0_|cSTDSLlPG zTe;EQi%@;O@t4xr_7Q5g&Y;Z6$Q8r90@`=(nqt@Gq|rO;u`n}@YX%7iLke;~*(Tj1 zI2;L9H!@Nu?kdw!6`U-Y-0I;h8@H8|VW7&>L+Cmn2a-e9Plw;dRwy~YEKjQKN<1x3 z?`zC0tu5Eq^1r{J&1azVD-l&>z+&05u*l@(v3Xi8lepIuut^Vj9Zcw)^*@zyHequZ^6qO#8Vba<21Q5R-Ss`1ZoJ1I*3x8^Nes@ zYj-3cxFPqa#;2G$ltuk3%($0rx9 zxg~Qifgd<(*0XDkU&q7nx{q%U*T-aYCi$-|2<>$SlWv9cy@)(j%19MI=g3t1&w1?X zI2V$-6*WVyN{TAL>N}8GFc1?RZhQ!@gMo)%10(nPxjHv#IgasAi zcQ6x@<(Xja-uB&6?7S;EIX8!aio-Zr>K)Rvd{OIJ>G zOX<~Z9*uc*;fbFw`=d2a*D6XWUFE8}iC-UERZ4aUO$M}62x0Oh7b-2u!=;uJ2!!Tvwe``Z9Lns-KoFF_8Ux~vCY2jVmQ}pL- zP9j57bacz8L!`y5DEFz2L64dHukrlYh&v8XS^xbUi|r%aX_FaJ{sP^nd*{lLQd?u~ zXdiG*$${x*2FbBTKGe3LeLPt|7CZrCq10WVAJ)n8Y?<+R6HwD#w~b7$eq|ZZ)wPwZ z*>hp}vrg&WCiUH3M^TW?=`XxXL$1D1r zcdH2infpNqpT&0kojAvKy3F-?_t#7h@`UjGOLB{@#MmDe_tv{Vr|U`E9a!R|fqB!cME^V&Up$42^vtJ|M^{CPkRB!ux%QQ(Tn zNpkwJTnXbGvx$2_#kpJ+Fco0GP>n2toi7ZYDqTfnhHSlGDjxPodiApA9r zGzt@SK<81j+9z-#+<|qtu5Z7Tk0rTJGPokN|5{I%)Z7Pr zAf28o8n}^IP2N=c)Z2b*Z(@4Wc?A9NRXM~iD1wePvOcetN})p#EB94QnB&@`7r)MU z*ml!O^6aUMV{lTh9u=8fYj%Q)hDIRGG&fi=wjxxk14hvY_n2WFlHu9PsIzGd2yIMd z7c}1%yYn?%_KHv5l%-JF?`I_6RRvt!FmvZ=_wqGGQJUlHm^LXKobMWVp}*U)pYwqe zD!RP-JCjMPc}pkT{J!7Go2re2gpzqTQ-)7Kn>1 zv%M|XjP1@X=$xPW_}RapqTo`8J}c8?(n#1$q_)U(Gf4sbl4sGq?b$wTD_C%$=YrM~ zjg^$Tfct=7wP@uBrMq=#Y!>&TU(0E^6E7|8_RL}h^I(82h_XvgDMJWNS9ka+WvoYC zVe+U47T0$HwmZk8V30SE^l=6EZYJK9r_;;Q>#K8X3g#q<(9JlOKF(j>JtnNAnGq0+ z{PMWy1i@lPtsG_-?N85K6Gz9oBkp?OiUz zVzavEs3kS}gw7}HK}s{{KgsY;diaVZ^-gHm^OYQ4>uR%A$_g>w*rHJl4){9z@%@O= zk2o=ws?7#!Y?5aI4QWT%! 
z9u+Bp4qptUju$aQspEOdxMzpuPGT4zr#AE4JDKM-=N{N_eW-?B;9A}hjS-*uLb&~0 zcstMe$YrTPvHE>){PP%x_PGY8t-up??y|Vcy6-(h?$TGkoV10Lh!OD-@N|kEy8wNB zK-x~iL=d-QNAgb5)Xbj0=cv%k*)u3~{dQg|gcNF>rl$PTlMM*}H=O%wtxq&7MQKsf93Ro})h+f>T1}Ix;0#l7hL9gU~ot zgE%G(tqq0;T0UeHZj^qyoq(=nJ3VN5X_B=;+(0ACtOa9(0R<&Oo6Qq{x*4MI=dBn$ zx&~KaRz}OJmR;;zR0s2P05pTwnHg;yun3?TXgBSvpg2=W>MQLRlw+?hbZtWsT$ON) zV@|8Mn!UL1uYRLS6c%O%AAGEyXv|ATmzejNuCYsUN|v@fCmm5uTo6ItN}OG;H7bab zKXM@44p<$1&~k?^AmOIM=CAYEc7dXGR{u>!c>BGa4`$!=sBo?=anFoQnvU0BDTH^L z+fHVcSop4d9dAk}PApB_P`}d`BcZ@_gUgf+uh(6( z(BB9V;JYnlYxM)G(K#k$NMymlR!2)g&^rZ2<37Ysq3@PxX#%P!@N!hwipMv*fCl3`Q zoA)whc*I`Q2+8CSE2gX;=UHH7LaA}}w0eP`>W!8<@7(=hC?a*Zr;7T+qXE;yErjHW zuqN&-e>T!A_fX?SeV?*1vDWnq-&g@Ro-SBHS!A%>V}3(TneM?Y;{h}#?db&$Yc-hC zMBcVL)4_8@QVu~T zP^&@9#YGGyIFV6sA>DDb4}DWD`-x`GEB3N?x(*$}3Q3+U%llLbx?cT`9Kx3}$eso` zhpSseE!vuHu|kz>@r+PE7vf9aF8c<`@I!s#m7?J z!A;SN2{Mx4zxlANz$k4{4YpgWOea!~6{&;&cHkc0qeqd}gg+k1VeN`3qj?_r>c2?~ zN*tTI8WU?#r?b_NaPHn@fDzI-yb@*cssXG?ND9~8A`A$c1w8e(pXah@ z8~>6=1i*Gsag(1OM^||1XE&F#VkNWDhl|On>)*TD>Sag;b4fS-H~gl=M1=F*6Js47 znn4a15~5VR9Mn&?J49;sYnSAX$r|Fn`s?-= zT=^WXpK=QF`5{^G%+O84CJ<4m*p3K^!Ol|{cN2lhcj)7h7sS7*h4_oj8<{!SP) zOmlY@{hG?NlbPj;oX;<&y-a&@JD}W5$32IF@)6i>w2d3i*C&tLKlEQVBQ>SMMr; z8jT$++1}(B-l%kCTR#0pys?OG|E}bKxS@LNpG|WNyqv{CAnB5R$`;tsGSdd8>-h4^ z084$RhN#{rH3#QsPX&rJ;oZ_pQUwwnj^)p2Zgk5MI)x^c?pFQ2#9RsBq4X`Eg3i@9 zN>#o7D%Owfuh^SrKAW+umB(P`VH@z-|ZOTUB)OT z)_umISXtwwL(MVo;xxsQc+>qwD-z=GpAMty8G?qWe3K%|^_+IGtmapPDpV@QAbG6) z>n+IrnkiMD2VprQlo7MP6kixzP`g(~h+l!AO>@m~>05qg$=Z-x_AQqW$DU@$X=ERV z)qL;88z22Se~vb^`(bWK(do`n&dP>{t_Maw~GVScgftYCdK( zoYj>C=-oTV*9Ll5t6<{Mv57%CT)u7qdutR{Zk@9D2`R=#8RW7onIgVCqm1j1S>&47 z-OPcV^y=MpuNGscqnE}tixSsM)u-SSZY|5_w2xb9+hru(X8(9Up`24i7ysm{xu#3x1Etb<_|pN$TVxzSX9`N&*# z-hBzE`NhlTWJ?L;r79g_Z-t<%BdP&MODkr-|NEYLE z*K)~wWp+F-X-mY^4fUeJn#WoJ3Whuy zNH5XzxI@m#ZSXKdsPuy~Dc*&;uAF;ZVWN^K7oZdJ>V(q0F z`E(to{5I+!zb`V0i&V*%H)XIgHYtAjaA9&{PWi;~$4JNLaz1^Gs)}FDD)_Z?rEwE%B7L_)$0l!1PbS(6yz~Arc-g0B{l7qIg-?>4K_D zS=YSkild!{neZ&T!83B3B~n*Js_)ZhrTX=E&yr}qs9fN2sZUKV?w+?;2(vAqRWiFc zjv(S%3M$*pRcJO>hL-E9A5-BB)`RngjCgPcnU@s?Sc3;wQFRf9AIg+9`s;wb?`w>8 zq;ZDSZZ$`PD;ru7marX1y;v4Ewf8xFGtUz#iho#SW7tK`HkV)P3r*e_zmHvD1?$1U z56E)aTysVU>upwkQotuAkiW*0qMQ-_a_PGA$M)PUtbX|{VU{XlfGDY;xjW0-p?Xm0 zAoaUW$B-L15mpQ`q$ZMtgCafH4qMeZHp0{eRyAvIH5>Kgnjf7Nz=;XAV{goo{FPpO zy^!Ypr_N^w@%iJ2GFpv0XUPablp40kMdt1<3FDri?;m>Orw7-Dz#rwZQU8iQHtozRUQ}Kc5@ZkoSQ?i;SRjF z!?!sZEx+^VH=&qV_)ym#G5Vi;KA*;4~>?q*8%JP4xT|Pzv$}I$N zJexApF>9uNmb?}R#aR{j0Y-$QUS6Jz2rq#u(IimM4`SuB5I2(%Ikiih6vSxCXWCRA zP*q+nU`Q#AgUT&@GE|$|_4TRa`AM6b1v#Eb$m;=1$ z@fEN2jDhx&$-VEcX9Rwhq3Q48ymjS$0Dnr=wV%)+50pu>cT$ZH&fH{hNgK}=mTZc# z;Y%MJ`#=f+s#z#M%(#S?(n6ob`LQ=e=#!(l2DiVG&Dc{P=_`Tc+JCcLoubV}sjn_b zWKiQG8s~8JY_7k~MLz;Bz52YgW7JxMukKhm*Q&}0gtg0YKHC|gZ!A}YX9_kk1*#$~ z(J$hi_hH)MRw5;RMsLJA@4qwio(Zxz*wY|1o+T{&JKOD%A9>sj4DH7f?Rs3r_=G>9 zi)l=R_u?1H++_^nAV=XicRXMpJ8sB-Xo9^Vwfa44f~kHpXwdlknYRW@C(ZL_Jz4(U zzWKW~hACoQ@jvT$(@qPB`3XQh%)qci=noAwXZ`~|d5W|swT!uJ%5w`Vl?Yw#3V-I6 z^tzQxU1WF6AuA#it7)I=HC!nrx-sV3c-6*(w&3W@2ifwe2W0g>>3>&(ALRyBz!;*t zwoG27GK@2ITu`cUD{h+jdMQJ$ggXVtuq(qKdmG%8GlcZJ5g7@sT8P)nJCXWtjv?8b z-*kNCy~FLhD1eY)qn2hg2wIxOAPQP}IU4q5%8LxTkuGE#kJ8rw6 z#!*(jUpgT^pd%*D%Y6h}&CRLS4_Gza(za~Kcchl96MO}DQdejQEPqc@=t8h?S-Dni z1C@YEV(*b_+xr^#0su0_%pC(t*~ZR76%ZnknBWz_mDg88XZ*oEbxgLMiblUQ8ugp zN-VI8*J+oNds&&8Jpf1WyWh#-5*Co#rEGxYy`OsEC#gg%AF&L#M9DW^5vx2SOCz%e zn8uze?DQ%}%k4fj)E#Q`^FP@@p4Je1L*(gChm+~id*wx{Xpx%rlK6y$^@?!_eo5if zXFcH`L-L=Y9qvwFbl1mMEu}FNicVRR-I{N;`2X3O;?K($g#pFQZX2j6#8y>rdi@44 zgP#fkTQnSzK7>`zo*Fb{u^BGN3yLNd674E#Lo=-e= 
z+?+15UH)67o|2nT$@Xl~=}FzSoWTNwVrJYN6F_LM15z1jTv^_K46rlJeHMTocDo5x zqh%XmIvlr824@nea=dFlM;ZOB`x4p6;xixZCMD84H)p!LmxxurJ1%U^E_ zR23nC;oIBjvTF930-(bjfpzh-B>2`+$oaoS?fUg|Bhe68{ToZZ`fL~@fH^csa?j0=#Nt$D`dCC8qxp32o+f<giHbu9wkp(v_%O$&PNk^6coC&{jX%OWKC25ufs=C7ko?l5VN7Y6Ha*r zi;xE~e&ICh_R&t^uLr%-bz2-ZtPTb2P(nv)BJ@&GAxyB_^ zs9+q5#tKz0XLh@k#qE{$$b*DCfndVInPy$COut#F55VI&(&!tnVH4hpt0Ud__NS0& z07#$@nnbFnPWDh+573#o>{J-$K=S`wyB8OHX)$5*XR(jTPyg5kyL89YGy8xA++${i z%I`L~z`fhf`7^@R%N#yy%VpR}z$#8Wb|8knPqq%bmwABLD^0H|LhR-xx*<2;v3dFm zd^N{3Mgmz5sl?+ABwOkTGK~5ENR>Fb95p!nY0|6K`V%@GSY{*)GPrRIInCw!b z>Dfstr^8?xP5n$n>JMh|y~;G@?78W_D-nAm__EA`XZvHX(&fVDmb0Q@Coek&4?GPx zNWae0eCR=MH8cOz7*WI(y8}X^wp3SFSpyW=p#9(cS5X7dP2&r1MgZVc$Uu=Sbvn(R zV)Rv(;)GoN0DjtuqkyS^{i;wp-c{#u1{@SYNbVkgsh-#7JJb2eQ14cdl+T35)z& z(tbCetTcQ9Sq9zl#^|GGqEhI8Q2M{k z>~*?j$B`S*cP!!h=-aaP<4ahdrPSzy_~%1be6G7ZnxI15xpKo$oK|M4mXWFBw*wNu zr`_1Fb>GLrqrdOsEujSiVUTBdR z8?k_=j=y+!bpp^f_LH23F`FnI3ES$sud+FRzB=Vjyev2RJ%pFZ-1T>_*(yA-F&{u% zKO=mSc*njV=Q-IlK}`?-wyZiFAHV$~u#QlbTh3>qM2~LHDID*a&8R|QI?W5O>4IK_fgc^;k5z3%d)47u-hp_*kh zyCg!-24E4BHa&N+BT)pALO0**ZW%$0S^-l1MrGw;V4{Ox27c%Mx4kjF@eV(8rCXN; zkD09e`-Cb+t%b(v?JvyS|9eOd@&E=*){7*bGWBSh{=fg9;gJSe221}m=q*eTX?r|t@`;GLz&Kx6&IQ6iGAZx=zJ`kA6u7t36qo*<2MhpAxMkP>V;3({ zvX@c6<-Ga#A}}}pliL435J}JVx>uMY^2Pt}3BKvS?hQm>v1d@_%$4b%5B>1yOYQ|r z-xcnCAYL~B{@KK+tNJl-f&yp$dGo?Q3&`Sf1x!NqRmq9of|7RrzG?aP+K+nlxv4BQoS$cVf27o{y zm;_o2Z{Gya5Jr>N$gPUZ{)5&4SSIW=p)FMY^p79F!gZ#W=cM_zry@@d zqfhPPO`6YQ(^ZY_a{$g4(9Xm&Fu7n&5+(A@7e;0K?fKRJ+*#0kEkTFc%M}lrwvH zc3ZPN#JZ)j%zzN5;W0POB!H#x03L|AmNz+wXZ2fu0B_??;Hra>3aX*-0M8H8LNuy{ z$f3p{Cd->c@1A!moRaj8n^*ISCMCu=99J+zV2_zCmR8x$5G`K;0G+#ar58Sd2ed)C z&(;?btR`1(9|4>WzW!*a=vv$s1A&HWy&xqV6Q&64&}~t zQz9>K&va-Aq`ZN*Iz1Y}Yg--lv-hDQh_BCIb3Kcz8X~vXOMC2(hQ3Y%9xS~2QjMDv zfsR+tv)b~EuU3ogtUWf8T%q3;sLOFS9a(^*i5B~S8Adxi-MzLXP|v$0GXJw~52Ibc zyW2xu`4eC}YI4K{i=p{8MYY@QqX3L_3Xwrf0#HooQ0C-d4ip?60)WMQ?uo1@`6Pjf z`?806C~oLoVyvFbJvgI05ZZ3pBCQ&u+gbE_+rDjn)^Zb(ot6p! 
z3`&Vuvh&39?bIY6)oiy5v;dj6rHni#yPx6`Id*wrh!^iWSSuJ5-hc3!NscpL+yaie z^#2j}=HXO-U;i+tlaBeAhr}_@LqdjdjF~C(Oi|`2v&hMmsbn6)QKB+M#}JB-Qb=_& z=17K;xg=zWe(Titdw=iedampF>$!jTUtI6K&)V<3*IKVN?6vp%E|2@I2R~f**YfWB zFWkFx-KRInHNDS7@I+bDIq$7x_h6&tWxM00Gw-S-FYfJ92KAexPybl%{+(ZHQhu(= zz`h^Pz)f=iw*Jn_T*;( z^$)U6Y&%+>bqsjyX>7OcFG>(DpFMeHkwQ^9>5Spay_5iB2YbzwT-z?axQjh~YeS4> z6Q+B6o6Q~R5q7*oPh~rPtyQ17q2^M)|6JylUG(}Lz9VF(^5ridhg*E#Bf#B#c{eY_ ztu%Tw`Pl=h7)2$Y-@Qtie#R-X;siEZ(aNQd`3q1K(h2{1{*4*S zr+Y_o#OCF;tD+xhSXY3@Qd?C0v-`r$bs#y_!jOHz`TXLctkLA>;qxhJ_3m5l(aoFI zY-Q3xe$m)1?-iq2>Ko%>8$T|RcYdr&7u6KGB#JDr_(9O3jy;v1M|$Tt8_1sfEv;DUQ8+H^)(}aueOkw>N+CyULYU>Kb>|SogJ3=5LWw z=*!opE~vb8XT6X=7pm=e#$dl&(5Z1~>Q+vNvQDzB-mAm4ZjLTeD=rD4$2Tg6osDgs zU|Z$4Zdd|y_wv>wgS}`-i>D5nne7YNNxANC`o3JBoz8U(NPc#2Zz*j4SIu{?hYjI- z(mz($mCK~Rm-RZS{TwXaS1_~uUPydHbJpLSr8nT#am&=TywBNbcX~GX8us6WJsoTy z@tHXhB9F8Rct=Ma^E}hQtY>Xb#P59UschEv@3^Dn0ChR_%7x3H4!f^?dYaVi zy?ix5l+eg5^!=;<^_ShlZx6WJKUZ8(K-ko5eTYNg%CwKKOPRmqp1rnhRc>Eg zyv4kU&=P~eK(Ek0Ye#B13y2>>$NgUl>3TSvY3_J5V0*;Q>B{c-WTs(y#NmdmcGX(5 zTCWIU0-KGbt^>UWgr}c^$Ep1r?}myf^j-M4Gg#g&(%Sk1Z)nOpS1QCeS7Wd@<}6)M zEmXXACc@aPFEeK4C%I%N-`&QQxwbvka=zD8Ce`Hj%R!|yEPtiQ1KPJazfVU=#xDT}1Ds#@!a?39gBJBw65@E_>^I^PY(eLy) z^e%UsZJS)07%HVNeDH{{naSRJtxr5hFto8H{8@I^_^{A=T%s93U z3sn>k`ZV-O9bP3C5Jyx@k!+>NfZV~)Zw3fY-LhdJH)Mav=jhCuZEBWnk#j3&SoH=Q zG?Y|&>#wbt-KhO^;*E$e_wyeu?PT`dQ4lVyITF7&3b{5WzC3Yn9%4a(PedrrP-= zv3KmW1_lZDS0PKJKjdE-uA2-JVt{(@AJ<Xvz8l{M}=FMjpyvDw}Od&TZ)yzHsOT4Kx9E%9pBzm#2L`Ph(t zU>5bd5Y*CB4WnCwrdLebu63D*D2v#bIwj1lv@N5?;XGZ^_G=%vB;H7`%S^u^^_tot zny?9VoZO|4+T%;7@t!k!bAzfHhj|q8n=gs5vYKCPG?)&KdbgG_60y)uP3LH@^xMAc zv2)D8n=zV)aPffqk@6hZeUqMGhlPfwjKWB_Ay|+veGPBq;wttq@K4 zd5F?a1D4({lD-kq>ai6kuHEo+dGs~hYh!FHU-{elJ$rkhkNy^e@%Sv-Z;?F#9oJ~< zN>>Z}9gh5dE~>#)k3L!8bnTT`LTLW)fZE@yIQ@dqc$r)IiP7Hg2-RkumW5yXb1en% zS%VwC{_noE5PPG7Kkf{KR;W#+si!F1@&b>uK1jo3|ID`U&jQv5DcF~!gm;QQGLq41 zrN4%b97}ba(OhA$mQUNL0{+pB>oqTH>yIdPwX8&4bT^1L7xR1Vx^_&Q(8iV5JteBS zZP+i7<`|*KvH7a3bafY>uz9>=`?Xl9Lxwzu3!l47=A`hK@p@anjNl1^Iql)ksVpZb zDAdHrcLA7@6yHpVumG0&b`xy`tRJ`9g`7H9!|vEH?SD2Tts!}&vrP1p|AyZ8BaruG z7uq4PsTRLl(!Plw#yaBOM2Zc>XW6t`vHKFrh`B3|kK|2Ra<}G`C%qytd(HV-pKcCX z&OcXNM*Cy5m+BqwHIy1oPA0lJct}O4IQd`*&AGhu`@8&r&1b(SGz$vXu7nS`5ZAmI z7s?B`^7*1ku^GYy&p1DC;R{)gCq@>6)l>+pn+4V`YJ4$YV5_4;{ti(Q&-S5wsbas2 zT2HJbJzc}|v|EeqKy2e2la%7u0*NCqaEuBHhR%th4&Iz?gakYf{H4h zBJq1^xmVJw&KYIq#KbsXAHu73} zk!zA8z3vWA?>}nFIp&-3V2WIo`fgLpyDEiSOxSCqQvXP6AjQzKLhyip^T1V*hzrE7kdUtbU-Flzo<75nC6C1NN2`XS|#O|}Y)8gm4H)~fh7GbP_9 z?jbX;@$MX_Qv)?TlEy70|Cf<$B<92shUa=5j~)jOn^fp}zgccmEno^}CUKion>3Uf ztD1gXZdV+G<>Y%8HVvolflki@S2W|BCU-SR8dhQNm)|0D zImeFf5>~{b9`6s2de)#NOeY)|oNp4k)O>m;q@=!D#1kOd>udeQiB=moL#c##Yno-0_hw8~1|@!d_*v z_|fux8M{7qo+*@HaKheLwJnRQb|^9H17Yqs0q^NalMs3{2u|I5zg>@Kd?vbaRm6PL zulrs?(?l7=b|0Zz`7TJ$Vs?L2^U5)kL6PvC=cXdWM?@Q|MYmzMLDh>PmnZxLE4KWs z?V1C0Mr&D6dkK>|WztO5Y>F&*dhPQ>4*BG_&WA81H1}$k9eGjutJc0lF`%FFek5x+^W+tUUlZvwuD|%j%F-w80rRScPEhSkz+ES(V-+t7_-j}+v zBwn;&;j!d2@}TH$&c@jNcJFB@@6}tB)$nMi4}R!q(fM4K^SdVl@Av+t*87g4V{ zY*{kwH!G^Jk+Di&Tx;2w`q{OiCP~|ouFKhL=y?91?)x2i^$((=St7G-eA;TuFVpne z%-h;5)^}|88&&5;otRi`!K^Yyo^CMyTQn1i%1DycA%PetCvtA(Y6j6jwI;yl#XVZ9nUPJ?evhnX-z|@JN#FEyqdoLRp{%CCN96u^{fM&kw;j5}SGKUJV;3aN zew2G zIm@*7ws%;yWyE0DrT`&zsMLm7UES)N89W_Qw)CKQ>ExbaPj9HzfjYlWracljTC;|F zy+cO(vix+E>`2?I$I_0kL@Isf(qQGv9Tn+reZzL4l2Ygfaqz(H#cy35a&M=DqK`+t z1Yh(kx7}|csK`rLoFVTGeUld}&hWiSfTHIS@7f|Ze|1i286U|AXWspks-k*Pp7>Ur#feK3m?_%h{$@^_G_ytP#P9JjAyYQ55^np(mjqpYh9^k z38tO+S$@FL$iyGqL^7zzMcGRf8{+3B8M!@PC&UGc5uObzs^4>uYwAjfGd=AkYTIXm z_M*LWLWVV2=>Yw+gw5V_EMB+&QB5-|b@<-5)Rl8+EY9(fb5OnQ^0r8L>E`%*8#)Z%Spokt`K8 
zF1}@*PVWdNoe8%mW`-)ie!1@w{3PsR+5J3SbKX%yFAlD#hWG$cru)AfIyN7_jh5+q zx_qnVh;e!Gr_xh>JCA~nO72PVIy@O^xz!%UwKMv8Xw5w=Fyv`5QKqYw<&Hwxk6o;rTiDRcI4f`B&X`a*WKUU@6^iolBHt^T_k!lAXjr8R=5ID`mB=hD@En=0oC zS=Y7)^@PtKsJ@=mr1j45_oQ5WsB%X9frp_ra(Ti8Q;cZoh;<8O65ue=Mk@2Zs(Wqhx5$t3$>~09Ay-`J{rbyePfNYO z&)Y$)nRaqpE=sRW`f$mGpHGU`D!MKItZ`tVnc0@q6_J#NfYibcNZ?IZFWEJBKTEnG zZ5Ex#h>%PVHTw>sIDO;z*uBG0;=U41k@EAU=6=uz_GigLpr00pX zT2Tfd!zqJ1Ux}=uCa=CajrsV8%WSLn>$K$GT<6&TDnv+4Q=e|kZhTUjJR>K|A9Yuf z!)$rE_AIB!N|eoxcT7mXh)pr)xE-s^I)R0x_nwW3G<`K&9d%2pDxJfBt7(j{m^VE^WuIzH?EF9~%7bYis#d6`T{SN$u`eplc1t zacu9+X$o`;5M7p%iB4?(n8R_2`=0cXHZR2tW3eJQ;@D(V27B(yywXnt_4 z>>u#!?IK8*v`doMZ8rR?yd@qrsq`(SWFO}WJ5@Geyf=m^3oT}Cn5M{|+ot&R(R8n1 zzL$H|->v^qt9Xf$*n|BB8+J}b#TVqNu1N$Z`Bv$xsC zor_6(_U{PVN6&@blzJU_Jox~Fawo(2n8fj2&w^XR2ONuD$xn#?@qSmiPxIvlPxtw? z$TofDxz@ZW>|bWcD4O*4hR5AfleaNRVgEGrf6{I$Td-?8+JtJ$I7RXh^4C-I6YX8H z@TB9&)+Wn!lZLRdSJK%R6Ulkk4CC^9ZEeD4zBfbq(%{6|lPOiS}iMPix&YM3nqn{kF}C@)4AN2Yhud^H{g`NNfC{7DB& zn!de@Q<0C#*vos}z*fgAPv`8V>0$m(M9*>Fb@EYs(;IK$w`+B8KT9u2mH9V~(A?pw zQKg!wpk2udmhrzU_o2cxrq$WLk=D|^vGsPUGsi8vrTqQOw)^+0?ca2Lj^;egXk%ae z;B1y*y37Ef!y-26jDzPxO8pbnZ+=u)6itC8P|Tk{c%*PO|Gh=Tc)?>K6*lP$@+Bv< z9cRqrN>qm*elmW+wjW@7rImN3UALwn+&iZy{93eEbCs6KsK>=dNE{`M<^_n8{N0jc zsHI;ywg~FjHGK|i6g_OhX0uIDFAJf+Ryt50o)zc%{#(4Z|1r+!~Ua_No|g ziT@^r_2m7X;n4h0QIljSPOQ(%0 zWVFYd2fOk+n zWlsKW?^5+!)NW>|d(AC-Ns`_RFyx0*dLHfx1Mim%UPXxw3~(-#*|EH|UvPBtF7??x zrwwVxFUBv0r?f`{(xkg&JciPVBO3*NcWkb&!tFPDFJ{fO9sT-G2$>w;cJ@nEUMf{f z;Hx0osI+>Q+I)RjtG02Ly+XVc>N#)&(lj*;<<6Xio)s&DRg^lR=I(LUN!9HGDJxXH=f+BughnCeOA!0{ww*!69G>9r z6R+P)hDKUAniZaByRVsig1XM5c&XXOHfu)4yg(RB5Gaw#vNLF@O25CKCBr({oA*(y zH236pBh`%yDVj@#O20=$dw%1cZZ5eBcwgusUGOA?-kDsyVKOBA!%0ZGyX;`b=WzUF zF}psL97@=PD)pjK6`J3>Bw}<1qv_Dy_)m~%IB1GPr4L)@I@l^R_>jC$$Bdki2 zd)479Enm64Je5_(tnCSI{7v4q6P)AHY6Vi9Z3@TO)iuw|WksBd_NA-;v14_3U?w}r zxb{}4n#_2yqVu7ZTZ_k4+_RhB*%bHLR<7Cy6{%dG37CVEGIe4{aweU^UyAxuinm7v zy!ErUi+*78V&tn_(%g?GbJ{zH^x0fL8rCQHcFiH#2A(Yx{rJNFW|HR8rPqpiMUZWn zV6`-PrxY;xkJ^>PFW?pgzW^wA^IsY@+ZpZ?>sOoHHX|Q*4mNq#ML#px(L1os=Fv>l zrhQy+zC^{A*=0*bcBgo9q<`E0;l%{QAD?9v=2l+Zwlul|m5C-lZ2Hc+%q#z%VQPMV zCiOAwT?b$y*28r4NlGcKeY6^I?~Y}gcSS*k!D^HHug=+D$Cv^jJ4-(P9bMt-{LJSS zi-<|jx@GzPuktbRTebEZ^m~D=P}gjmJqa~EHnmTq+$}!u*oM_bTByxe@TQdCZHREZ zF!xQn@ikOVCZv$}8r3?qm)i3K)s`RXlqnA&*tnv)!rQL}lTR-M_tcHFlFx82 z>};f}y|b%0Dt*IJqdhhRHj|r+U;}*(xj?#6N>tU12>TZPQtfv8{TtqMs_FGl8hrCY z*KWCbhQ-`0m#hHY@@Vz$P#mecKAdExIBezZ;u9uizPq0F&%K!Sg|O|5AN{IBc_PIv zCKEzs3%s@MG#>@SuFdZ4+dIPy%`dop70TXcGeHF*mLC!Gl5g(y>MhreRNZB`q{u?7 zB<7G23u(_RZVO{^5V8Lzb{t->xTEOXuZeL9kBEB_x?YUb%{FVlY9i-;PK54Ho6q?( zP5_U_LWkiBSV1mwamZa^yqe`lsb>*C-o{P9DaUG~rw zlcjr~x@Xz;i{*X<#a!vPppUujL)%BJzo1Dv7}--Rd}pqmUSk|p>3DL>@qBSeVurjS zxnSkqA<7A+53t)`d<8SS?Od(zBG&Qf>NphF-M+kaX#ItBMcRot4+_Kqk*Q`x@IG$X zYi21daGD{S>Oy=dRelWL$bo~A=w8j@58v)3Gm@825H*i;Q6Fk1`BW+1Zf9jic#7Z; zSt~AVv^D3b6bZg{bT51Ix#-*LHJHKS`Dg`z|SW8`A@ zXDb$JmomI|2fLep>UcGaI*vXwU)Nz#SvZ$4T(a6{G z*NCEIZ3WuZ$px-gU%h{RmH+xvS0%IWy{Ue7(@8UX%P|K8b{T6ap z2MVY7u0Kamq6qx4#DD`ymLOE;q2HUd)l)|B_p}Xd!By-NAiHE;noWh!5ryk&RWC9x zGeRlV6=h~9F(oF#2=qK=P-#^Rbk&Y^ofM}|Gx6$}(`))Esh8^Jx^&v=KtU|llNu8% z;TYnD z8oSF;n(?9LQ`B_-DZ^JNfmQr&N0PFT1>ffG;m_@P)%YlUUpJvljn(`Bj zrJ~E9=O;QLSms>|9k;W25PKLb-+*HG;d4j+2ZKl8H2Z+aj)>~9EoC6Yx)n91c zXCgTZ9uB17{vJ!oqK+&y2D$stTKO40PfMv_G|KtE*?dxjkQbS`ve#NGjTp+KD7KAW z%FWbr<3zq6jpWQWJb-X!X>s2?+{VB{l`r?jIUMsXEI!ob^p-CYr)_&mu*bpxg~c%9 z&gcY{ z@8`v+AYOo3DCu-Y^ZWz2Wr!q--Mv5!t=77NUv;nj0QLP(QKboafM{c^SJ(pq?jTX^ zTVQr*KpZ)cC*{B#Fa!G+91+@WmTd2?sWHt!5$k`e+{{AZPZ>=zqe;aNuIYbBM$hq6 z;I6`=9{h)i4uQ_srAR-$lPf1*_GJAI+t|mtb!!8vL)x+G-iNN{81NHW5hk|_PY-6Z 
zAXccIj{h`WQ4dyDbMJ3sh&gR8NU*FAd?HTyhzpQ&0quE$Cpz38axW%q=qo zb66NuL&iQK;|R)%jJggRdI9`$Vz3-qH-$g?z+oBNw*4M;q`nE<^F2d@i9lt?LpLEd zEhQs;Akjd>uZF(I4lHS0O#Pq@r+Lw&I057KVHl}4bnj*qDlH#a_?!*9h8ClSZiW(9 z{j$@5ga?~;D8@n>M)Dcvc%0CQM**J7{#ecI?z8f`k#)w9i3Wl8;-uWkOSD$*PQYHF zmiuQ>_O0sk(3Q)Aq>qec&PiW%xG)ySfI^|QSU0XSkc{}R+Gy_c6RIqXKZC!<$pCT} zsGLQE^f7{$9ZXp%vqP=$PBF29bHfF1^I9T%2J6;BhJsn=gdsG^`kwF_hz1>qMwId` zFt&=qFcI(WO$({`P{vghoG%(8zx_Ejqaci&6O28*R;%rUT9FuHj(~dLM6fpMJa5Vo zxIz$sQNo!hv5}tm%_u&xmM5-^3Bq0p)DP=I5+rX_>)?^deBB&8a}NT!Xo#OcE_Kj; z_+VKknigRzL=_*(JXIX;;dj;D=D68R+z!v2dVwq z_+Xg9-702zB(4?oWc02`krP9FXqInjgAP6ndVl0%K7hnzz#QVO?M;Nlp; zx+r-H+#1MLUdQzwexwn|mGexWx4{USM1S8ag&Jo>#_rfETg%FUI7|t((9Ti zBZmKLztab}Y%5yXlbBe3KiWDleDVvwoDTvNb(CU32mbCtal3gP8QGN|rgsK3 zz0BseT=#<~x(6|m;8u#kdYv>ZCGcBZLL_7_xaDIAhn_X=*9#DqTwi9R5eJ}tniu1A zI}tkwJ&VukwawU4YPZd>8Eo>)IRGQxt%-)9h6*XP50^P_Q(Y(4US>N@fl33UjLKd& zJd98T7e(L8_Gz*pLDDsWl>%{xOi?&`AQAf-C^C_9F3g}-`fT|R{y$#^q&{##oq1V^2Dd{N ztx6ejLm7C(ztz~+D71@0p(1~sT*SR~gp{|13cIPLmf-ljWpp=_E#4I`Fq82>65n0aq80K^7PM07O zj6wxHfY`s?f?+{2Dt7rBTivipzLZ8G5j2~jh_b?AlQ)ZN5He&n{wzxLNWa+Kc9K9w z{VNAve|prLuFS~y3&8rJgfm@IAgh7oE?Hh`Sh!3rvekdgzch58i9x6OnSh77JUx^NW$im ze8Pt8QTH{T{0N@%-^Kp7+3f*-6dz!A!B~*xbHIk4D-O#Bl*mY1-v@(O*Q&1p-2VCW z!#sIzhr*EwSO)0Kld+1x4S-tbHTm>#A|C+zof8}cZdi+u8Mv07YFps=vi*3(+n(!z zt7Ku)kEOP`@C!wcOJu>`p{t{GYVE()1OX9aU8W)R{~NhKr4xD4zTkFsGD{;dQ#{&5 zHe@PTiu8d4?J{u!gf?XEs9L(RtoSeR-X92x=3A{aX~eHIwlQxNW`0$#!dEVW4P)6>WN_1Pt`kL#Kc&qbw|5_skRU6GXovWyHz0QZms`3on68p}%h}eq=iy`w15Ou3n7U#6XaX z9VLqR&_9kS1>zZ)Bwlv!0UTBu^v>75=2RJ)v{B<|L=J7hF^RG(H#~%BB3j&wm84e~jIl6=UJf?CR$~8O z0|M&XcKEc6?+!Q9h})|nSl(wf9pQZgG6Pr)G}H2)+av(vp1yve07Ez%}ox#Iy}EHuAA1AAq|A zz~>E^u)K&`|JSoS1Yq*r8g6+ioDpcD?d`%Iq5tUY9vOZP>CMDqFTsKpIsW_;s{DQS zU*xs$%s?aOo`^g(C?M3-y8KAddsOyN)L-fF8!`q0jHn89foniagcQeNoe@=k zd1kv2e;#my7IG`jon4Z!*4HVbdz6KE{+nOC#_X0Mbzvgo7~n1tVDq`p4e2i1KOQ*T z^$rOX8MkRRa0*UB=7xuv;Xen5uo2X92D!}vJzM|?u^QzxjaKO=1PE29%s;~iXXdX` z#CdQ$Tm^S4O%Tf`&p>_XKMpm%9r7!$9FKpdLs&y_b~?e~fj%!pIHRqE*PAiJZYO0@ z(VS<$y%2bhBHtM!5KD1e7$zGhAa`wl_SW#XW%0KCXiRrD3t7{K>7A+{TQ#?ThN~v# zC2?AG@zJj_H-YCl!qEL-ne*5ei+5>YX8(kwvlzK+xQ9H#f*c@T_o(S^UxAEJt4&d^ zhIX5`qFDq#e1Rv96SZ5YB>)GoB#hnAd$ihZ4X?{?bDjKGblRuohCbk~Vx%tvp4=%G zPF={;@P^bVNk_+r4*J|$)5g~!sg9yI72|Y4_N{U|F_0Gs^(@lSI|hD$SNBbSRCZh^ zhN|mgcA67R>y8M#r>BQ@RBXfTrp2X}%Mvs$Y7 z#}c+-{h5#mHnL~?mtF!$p_HPQMl0yoS7y$S5Q2OKscJi~kAk=lB#psswb6(S@(-lf zXmBZH7wu1|{Vx{{!{-c-!l>!K!GJ%Z-^4LNB%Cw{u{|WWe-pVVhgO2(0R^N4 z7SBLt27=x{!yZzynKD5-_IyL8+9vIX8rYA#* zSGVQzIWPdn+xErPk&zINedXe3QXpds*M^BWKM~xziK}sZz_4aA!Pux|s z3&(Y~!chUjfs}u2<3lOp>Y%eL;KU}I=uItr5TpxR>n_HJ{_LmXMwF>rwCgh9^jX|8 z{DLfb@cLcn?s+{J z1DU1set$!u*Z#rTmm#yY87{&Dwy~8A=m>Tfez_B{c+>SF!YB`5==s~zph+1JkE$^W z|H8V>18;Ornq5V6LbgP`g!&|)hzw{;TWo?YiVocGsPJlVk_w<(ftNBR_`slOZ*-_N z(VVI<7V%~l2KfTy;>P(dra$P8TKNOf&%iYE^7@{>iWLDaig25OKt%xE9~NDV$V4+Y z0GPo?59*)r?K&_%G0nsvxplxxJNuyUsvF>I`>w>y^_gu)lU0H;}A3p}Z z`Qd|wHKxijV8Y00!y*dCmmc$u4?Sd2?G;Ax+#B9O4p#AR5V5$pD0qq?@xK>~awh6L3xWK6&V*0mZodB}K zu747X!gjLY8O5(00|mJ!2!ms9!Hdy==t!kqk44AEih>#0KSy>)x4CF@OBq!Bq$pME zUf8G~Ou}A4v6607X&SOX9@v5xQF(c)q(uG`$K_Q2Z%M?j$!fSmKjxJKPX0+UE`qb?{FA%lGP(ycsl~J` zC54ccnILdiwxj3C`N>io^AUhHVBcUCNz9uj;fauD{KUrYtXwm3AOW(wpP)arU(CB6WVr0j|#_kVt{FM znE{HQAee;}Q3NN0g1na#Ym|-lLW4$9S%<#4EkL+*wd*jf{uOB3s5@(f!f7$%8M%Ze zd|s#n&t@k%?ui_|sLqTCb`ku?>^AxyJ^YkpC43NvT|hz1ZT8+rcO0tTjYrLlipxUN zTZXtqF9EiQqd;bEbN3*TK8AQ_FvPWlei2L890H%IdP_*GmsWrM3ra*Hsd0No?ea0v&TV+*#HDs{N=Z@Uk7?#4`a?RiO0W>^6w;OR+6^0gyK) zoT&XDX^9VIFbD^n`XB!#1iF0c?)yw^1s=Ovuvzpk_ZYJ9f3oSE3OM-<*(>{7e?!97 z0(zx=dBvH3aV$ysv#7D-GFTh)^n2%V(KAqUPD{-Hk#_y_`5BDEk*u 
zI}Lz&C$ARk@>?7R4w2=&7gFKHEUX{lc7PRARVpg-mU05%20)|Re|)YxU}AU=h781G zqk+PbWnQ)rA7F%r+16DGBv>M)*Rbh3Pj&`5BI9cWpoz^l{e+I2E(nPr!n^@}kDS3Z zmB>8v8T^Si=`=Us0)W5Yaa#nNT| zd&di)>d!p?<)CibpyET_gZJorFk`MI<~Ot2m6kZ$RMqPqzbk@NeXY+mlt-W`1T z@QJJYA~L?y5s5yja1aa{2Oco;wMM_=|zXjPzCfOc6$a543U6YLc z%ugh{fU@f-T1w!m_W?Ag6tuW)Gpiw~AmRJhO(Kyk)6*33scS&!q!XnOBhCeSN0ptl z`&T-y(~Z2LdF9LeHkD%o)644=+X60*9LZWZ+F% zK+P}~@M(Fca(x5*s9M+J)3^Vx{f_Ll?A?4e@$o{}d%HcK8_d9nyO>yI z2`C<+h5!=GuV+>5*%yqvzv7PU5q?Z<_#Qo&N(@NKA)CM)sDQA!-_#?C?1LD+{Bp=Z z9S=WYS1&Uh7X1ax-Oq2>G7z<%0AyU{a^;!hL&d8SOkfOafliG5U6Mc@T8s}+b#2(N z<05!%9ELMsCAf{j>8rOk|A(K*0lFXkk6oVuWHYMSqj!$a4HvTebeV?u9 zULW{uu0qL&V9Q4e*7UFE&Atc!Cbf_`c4l`>zrNd>P;&eNA`@Jx&AMyPwoT z@3OOjd^wv=6UKrbX!s1VJdVfOgQ~r9IQF?p6Z-E>8%UwOK>0&;Bumcd;LVYM{$Z-* zjK#G(8rHJ!u=oO`NZPw(g-2h+6_Gn+?;2_kQZFT4UMMS(+xlMp{#xQ6cN0)W2?+X6 z1;Pf|JnT_UFfs97gvrby6OMyMs6ylLeaVY7IC*%OS8wy6Kr}v7aVgyiQqBLZc!CU% zf&wQ#1KPf!rnvyJ5JH}`rR`@{geMt-&(>HSGym&xf_p(CYbWlRIE)|&vpA_6-t%8Z z2e$}~ww1L#;>bSors)fZ`{znvejUL*Cx(UeYoi3PbHlJbpo9Mo{N|oSQc&Pdj7MZ><3g*=np~7m zJ$Dq9Qbr&vXy*AXRiyP}JGC(HTupkck2d;0f|}`z$YzVWQJ>4 z&Hs?ySoBy*W7caA>AEV2b$bxYE1xrX28rT9GVgWcqbWA?ISg2XG)cC({d6%Jy*hDJB9C4q0pR6L>>M0 z_O{cV_$Q|)KOzfJEkZ>+1O@7NX< zoUw15e;LJgwqO(X*EY7*k>AxS_b!}1=EeBfJZcQDo<5+BbfB?nK{@i?DyMeB; zUGcxhVjE51w5?sv9_!BioFZ6{T0x3KZJv`^fof)Oo~m$=2WfL}ylrbr+q zj%Lp@n7B_#LGk;y*Q*9E3JMt~@|oF{iKw1XR|k&OpE#7j{E0Wp_SwN$v?RQvHY^u=o?#Y@&(KERWU$LlKV~*}4S%Jigvs^e z+~djJiJwn4U3}|&ok!>P=6-(s`Q#_bH@a4FbmtA@#E;dj^||5eny%|1-zJ0d93yE@ zEK9n6^X9xkfzvz48(K_}DlF=0e287zs{i*g@e1!Q?70rIG<(dkug2rn}Z1T=;R_b5B$T(!i`Ui#XH}5V`Dg#5R2ct`s@$_>2MIaVsmtq zgAGkcW&haVON$~kE?w9_GQdAc)btiQSYqv|^HoBt+E`-DKhvO1HHfRBRryP)Fq9tc zg|S8;K-y@IL?ppK&*G`>@ykhj{r+0F5!vDEXhP9iF{6e+Mxy3r)mhWE@U^kOSJF30 zzv~%e2zwRh!Te!rrh)Lr+vSm8DS(K$PUd=2t|G)oint+G&W0>@g(r>fldJ-H7QeAKj%UrRQ9X@S#ldSX^`9!A(PZ z;2B2DLt`xQWA_&kMD#(P%xLv=eSBdo@9CPEk+zgr{1cu@Lq_1}dg5E3F zA40zu#&~@@bBP7_6v5E@v_xOf!W7GR-zI3uSr1EexqJFcfkP*ML2EnvNw*dNqFe6| zpq81@gs}R@$yiqy-_`sM(>)PE#1GZvkuBiLRl8k8e`oL@CvU_Lb0t3qw63Pboj2 zM<7FK=VjX#J{ZJyX2tR{UpZ_-OLE<@_HF+t8$K=J>o={Dkp<@_i&v=KVhJqewFjqK=v9 zPb`8TggC?V=>0KR^D(y7%LrfEd4Au~x69P)u(nZ#+@d+aCuWC>v1qA91n_P8A=aRorxWH0~iev0OLy$(xM-;>?UI(&4`#A_sq z4jpK~fK?R2X~P1Rotb=WrL}7&mdAw2a0O}TcwuAURu0l|I_!1*!4<`&b0h!LKdC&p zQnwZN&DpZQx)SB(V)VQ$13A$_Sa@zp{6(HYG_L|bELLok@EU;)9VyC zG`#;c`FtcU8{xR8M>MlvyPQfQJ>RzR)kC7z*@-%lsoVgtqkmdsYCe~i9c)qxnolm` zp+xYLjg+SxHA4Jg(@rYBxZ(3Qdt+nrkD8Gb6nb77pR~@d$V-W+`_sDT(5tCK>*qg1 zWI3*aKx%`K@b|efBTv)#ZE$F*dG`L0_VHzzbF5)w!Vp117NmVnAdoJ!ej9n-TDy`R z{Y!f;96g%U=oXoW!|k<`(~e*V+rfT*wA&iz!O89kX1RP5^?EDbQps#**vbF+bYp)G z_5>_}i|K>c2EcBnu{sya&J;-CVqxB$w^@ra@y^v#xyEk7lu4hBhUIM$-(gbW ziuaWSwb~rIYw|j^K?D<0JRcwt;M-89p7t}k8d`tVQ3jU-QY&fw%8B|p3sGlgxSW)R zT=8LRc0tfmD+_T*q@&+N9QTjPZaX%B9FxL_#fkXVI>-H?&?o7Gqi8IkJgs^T8fiCC!RpcU# zZlBoI(23}9ui2YQ-^&tO3MgQ>vG{yspXbv`6IZzT&tNKCrZhTzNX)wkGmb(pv$c z2=87P((HpckFxqIdv%jbZ8`qD)yLB$01T7v%+!eo_fX)YTGbWfjEv4B6KRx%lGeCyVIx7iqLZZq8=&Da1+V$kwOF0C zYb8*h?Bshlj|RhV`F*GFrqmn!8~5s z3Y(32Qu;+P^b>b6){}DY?!}Fo;?5vS_0KKSPXsT&8~r0n(HD~cPjgouPu1S9S!bz@ zV%s}oCvD@dj3v=PdlyBFZlp$M#A)O?Wq0*?422wKY7fm#X3`wR=(-Cqi zD&anBcX*!X`?a|*1w2oMd!{OzB4!}R=53pD)3w_4 zM8T$9nztxNR(rleo1dD0`{2K4_yke)hqt#85oJbNc9@N(B>I! zda>h*O?YXFmN=cqtoCdwa<_>RSYh1LckkIn_pw%Ggwijr7N>s*%EYa`p99$yzuyK=^vhviIwvQgr z=rlc+$Zov@;tU?+OWDHDy=OZ0^dc)|2ahUH|%IJo2j6} z;_bIL^l=g8-rDd^CzB(OUnEa*X}jRX<;i7cvuLy?#q+$0nmw=W5n}qjJ&&a3$R3Wo%gj@$*!XO{8z1uMK`xR2-rCVu5LndGGxJo6r3~wGY*bL5#c}I{9D8^oZ?#-zec*tk(6#k$ zxLKzORN)=w66?~*XlxHx@>*LS*(J8ur;?;?kTg~=h@cqcbui?vmkk5($$?Am@b-WI zLolUUF;JKde(Z`dEZB*E)uy%9j~@RS;e-1oCkqKIO}oZ0Oi? 
z_g1n9|287zZAsoYl8{Vzs}~*87VVqa6l;_D$XkPBDnTVtSrH{S*?ou)1I9|bMfejD4}Q99N79-n0oF5avd$t7 zn#r+zaY;U(Do~L*+P|Kqb;k{~PRin?^Ukh+|ZM$|lhYq{4 zP_f?0S;U_pJe;c1`<;V~A>XnjIeffG?)V%VVAxNI$?NjZiV~&0xxy%5l9j$kJt!Md zOo;4*91tifJFE!&%|TP(fiKaVNVw>blA8+u0B{5Z1Yd9y1BY5yTJ!X_#3E+pI+ErN zRFZS`OOn717{^V${a5B1^Z4Z3yms?5^_dY3Z*u~r@;*-G6t@t#Ej0bDM(|B=fXasb^tTxQ{e<<##BIc7Z?S^rbyQ{y{JsS=*zTjN zk6y+!{wwlR>Z@fwW#l5f_ITx{Ewf7dGK7aw;wtUQVA)#;R9wzB9$MxF;4@1k|6^iw z-$eWWWpa+FuzoGGRpd_tNUvg_QeXTUpw4NdREE-tWY=WFt8sxMUxDOb5Z|isZ=M;f z59(Pl6|Lg{9*rU%DdqO>^;~4|vr5*sefEZ${CPNfp$wtJfBZE=7`p;fZUmp0&jw*c zF^5ioa@V|rYUX`FiptX57$1`ZtS-cqJ+J8+upw45+aKNJ{d)w~jrJClf-BCkH zClchiUQl~x?K6G9bI0e_qm)6-e}7beCb15S7{y0#RaPmin5fFjrgQ>UkQs1?+2djo zv8PXP8sT($2X^z{^?3obt%At9l7@j*x`H=KM7q81XZ4$*52_2|FeN% zR;^z*^z6nsFz^^HJO@u!clxV~5XYkDT5}~G|L=T`@$-z5NV4GF4Vw(BCCmqK< zN|*fMC^sMxQYI+Er=o7FGQI}=9KMn{P^jC5J?)$4+ia2XB~vy`LX%Un0GMU4Xx30<0SCfC|EX0LQ5*&2MeFI-yEyas7Q(!E>)5N$k?%sT zi=D=3MKx={V0Xdq%TBn~oDujA77Nk6QD?_e2y{OusZ@C{Cl5h^IfNJ+g4{QstF(tB z2EX4u!%dC93Fih@e-}_=4cIsBpKx;%fJ!&%r$ESp5y+n3u#dcha0!O~^9oM&Z!YrJ zROvtBW*!@yhk0r>Z6)+K$Fo-r847HK^lOH})>KA(9+}oK@VS|h?qp}PM&>O0gMHwA zN9!oa!zl)pdV}MI)`frWl47*U(-N{QqDupXxs4ZR1w$Np+_;*(;N+j0ru+sqxD7q8 zcEoMYW#IF-JZmRAzi3J@&H_ zJvi02p5Bt``ZkyjG?n2YJxCIM^ILY-(pCm|_{H;7NKF8FX0J)%E`i?AQsH|3Ka%+2 za?j)eO5<+a=l?5_g6vaSR}cbJ{weY0p8nrHNDhyP^!T%fLO6oQ_Jr^1Jn5SJxbgo`IwT@GyyUqui+XiJ zaBS{7AMUNV&Y`^_Z`k5>wiAKyoP{ z$jRaYgb9MDqYwpB7QZ&WlV+^ma4sc{5euQl>|*>}nMRmWPr+t3zW^RWK~53=IK+C< zs?($#!lC@Mr0F?oP{y(yeU`CKKpDF-5lo@~t~~_hRm5FAQ5k&+9*X zGe(3Ti#5eM%w!Xt2s)il&u=*+EI^4PO{xIyiu9snpo!VV`JdY>)^)ma-BprihJd_; zT8T5joJ+RjB?H+!x7+S^5N;%~4`Q+VN!n~Uy^XuoDs7&ggYH2M4i>U_mbb%$H~}!n zy?OcTkDSYTG2``iAY6CskRB&oos$I^76hDp^$^%pZ-n6ao9 zo1NRWjqbdn&3DIukVN_i9Io@}N_4;E{o0Ck2s?E_*=MnrAxtShJ}vMdfMZ(Ov>q6*R1YR(@dRS4_ z?F-0HP8SY;WyER#P@Ru?Mt8PBsR%>_W8OG< zCTwzdRM013RZt23vP3_%6zdqeAPl~(F3g44aC_hqdUY2W4~MvI#JT%QvT4f9iRMw9 z35dQ4^A3yg9q_1wmV97KbVyP9VHuRw5wY>zA+}klQY{bv=~wrXtQRu1_9U$Wq4dt# zoi21|5FD5M;NI6dyt^0T6d{fL3&SA2mws2A4Al5$?UEy;^1OX(6bJ|*G|s5lsqn

    PIZ9nW)o53A@nALw8@CInk?SJ@(&HCDcaKWJ)vJ$EM?180$-Nt zD$m~xE8m=HfMvBi6^0MozZ|#5B3>8mQi(*K1cofg~k#7aE8X{%5= zE`Dk{^VwQNv$bORKND=Y5PN4>^Hm_ltEUY%{gdq7W{O=lWjo1O{76r9A=Bu&8rN$6 z7oH%(A3#7K$aFVn{aB>D0llS|*KVC5kU?CLG3jT3Y&ZTyb~C9uTu1;!g{^JV%7@V&^%%TjX0AtRQ!2LIFGy`E_k_-yb6-`pa=b zW!?`s>DRA1M+K|#@Dcm!0HevyDD-oJII-{#E(5G@A<<$EvuQ*BSrt{kkO+5tYu?G|BO%yDJR1qtx{V%(BFXX zGZaTDa>qxU!w+n}t^LW4ee;$!fzXS8Pu?-`M%eTFZ--aOV4)Vrbq)$xa2!gW z{~35wnkYQNk;a{v6EkT~;cT7tEtPAq?kk;ZLd_wpW%Abad1?QIBQ zf(y#=XpMCOymJfF+OI>E>u(+VR?;|Yq49iCUfirSb-o7Bj7F7D4cS3^zrDvT^PSV- z``M9?g=t3ida5CK8vHTv-IskL{0tbC&5397;NmmdSVcb+(b*X9!vIy7R~_H}L($$8 zYJW`Y_~^cUjJdQQiMlwv`@1P5yySb_&>yPyb_f^k{ZuD_nN-;iT>|0g9+4`QrX2t4 zQ$vp)`I2Z&W3#r^_s))kh87<+%y2keQ*Y_z*2W$j5ayc#$(Ic`u?Y7acI?ZAu-@%r z507!CVr%!G{3Qu7>%PSLxuh)&z;0Z>F~P;@WbK}L>EHbbRxteGU_X+{M~T9N)_XaG z=%5ByMu#sjgG{R$S-o_4*RZj$++96owJRwBIY&~ywMP6`ki2;;&B=?ZZ5{2aR5$=j zi-Sv#%onGJeqJVhMN$_Fu#!QG#5Aq8MD5WEi1fR%!|UnX`BcAw2wx46<1g;_uYaDJ z`X?r)-V-cp+CUEtef6;3aK^?I@(%v4Z;kstDDr*7%=ZJO{R2`m0BNHI;Y-~nD8V+l z&E+e@@}9q{c?(@PpH88$Oo1clXn9K2t{fb5 z!^-fbQG#N^ZG3z2nj61~h!PDXRr8Oqd9W3K4Am8+F;j)G635%EbSl=0(`js|30Ug> z@Je>f&RHr1=`Fs&FxT0@a56CpikJJ__6*z;x=sF^^g2c(kZkkG*UD7U|E)@{#`n#T zKTWJ!HlyTE>8X~#69hR232Nfc3y&bfA1iyWf_4>|AdBu_HcL}&2A>ie7(;W>;&W7^ z=O!g`Lqbpw78NBX18t4_?U*g}K)cm_bOMs0vHA$AZNM#m@!?Fk?Bk?%}N>1_(Lb5I04tHKhgJA2K6aaaZ1zv#_LAs2hxxP9>G zbYB0p*$jR>aX_jpeyHA>bxlb8eOvr~YS83KR~XLbquwr6Ml>I?$VwL8OKsXdD5v#* z^&HjM_C3O*$7y|o+Xe}KjMI>oWBejll5a^IAga}g{W_W-)-3Ehuvm)<&ws&~2MDBZ zzJ<->*TG5Zw0J{fh-4Dnf7H7m0cT`|R|SG-7> z%PK}0q@+U5!Yf_K>+PP0L`-$x#_Ug-tpw;39U`m)8jv%5CvgW7ri7Fk%n0mYhMGh} zC*#gXC7ja9C|-Z`de=A88-KM5kX>V~0*|KsQ>#F9$XKg@K_w)ng)L|fs|_g{pDlB? zxu|B+lSMS^Ngq5fYj%YFD$bwU>nViLx>riIgU1>)l91VcsN9oXgCX6Jd+W5g&vjY) zDCa77f{8E2$x(@ZOlrtPm!6$&)Lo`A9BS&5&3d9l^mUW}T(K!2aWxXIqh7wu!0bUyPbk zRZW?!YWQ`xhTqTVydb+i{-Mj0zABeDLgjl*uS0}024$Pt;a})d9neuY?9CmB)N%rm z(`;+y6>PhS>PIT_nAQFR$B^|4QO@pyX_;&UMBgq;bd?BY(u7&C*0kkr z)A%Y_HQjM1)F=wfv`&pc0ep6@U)Y14;-}dze@DwYytI(9@e=d`X|Uh0h0`w&Ba0Y7q<-K=^UJx#^7mt1{k@CxE@ z9!0v5$1+97yLd5(zJWj7wE4CAt}F@ZAEc@^p7JY)2C^EK$mlNW4cc2$OYU%Cn`6L>O>v; z)JmL&mg9?bC@&+=?ScQ7$C-@21Ip3~SlE?6>aEAF=z}RJ+rAgY2qTZk!Z_m{3Vf(B zsViuZjqalwWXC3Q7N_TQvC#0ZsZBsQj$wxVH7kxe;;47srL?gt=zh&Ip}JqkW;FwW zTlPW#+RR3;lw;8L3gxjOSoS#W9Oo_mnqyZKbUzG>?zwK_H1}E76A}Dan%kP?Eb@x} E50IUtUjP6A literal 0 HcmV?d00001 diff --git a/docs/stable/_static/img/tensorboard/add_images.png b/docs/stable/_static/img/tensorboard/add_images.png new file mode 100644 index 0000000000000000000000000000000000000000..5fcbf36580b7e781ed61701dd7ff422c05360ece GIT binary patch literal 76694 zcmeFYWmsG7wl0ioDOTLu0&Rf=_uyKr_Tugy+ycR^I4y34wor<@OK_(Yid%6B?iSAM zTKnw1zq8(b*7xiC^>NKy2{ZF~<|AW{G4A^wiF~E5NJKzOfQE)fq^uKs+V zRPb}ODx&-Q~69_1(AviYb~Z@y=1ND3g-hD~x5=)d-4r z;Rh`TSqB-g`%IBdNmkc`EOe=YF!rR4yhgM&WdidfQkLT9g*KdTgrv}L+}f?%myg!p zc{ z#3&@w2%eV0ArI01-lb!!pVviWt6x~D9K9*kjElw_)BC{!{nNsOvtk)I<=R*)c14jK zb28c|*)ZXF@CrV2Y>1bt;UXt%m`6LyDq(hUo3mxK)3KM%6K#X2mwmaW4g4uWoA>s; z_sPxr-A)h0gsmUzm^W8iPS@#)RONk4#c|Gg`h<p6Y~v=Rdh+n%8vY=566s8uf8*J40!{^t|)BgmR^m* zrEJG8+~_s?g6$l+BYqTZzt|=0F5yKQfkRJ1pe=>2h?9z48Cc&^R!EF`Ga7=)7$3;V zj0K_{E?0bStAJ64|B3n*J%OZ8@mA(tmCP%=vmAx$og%`%cf*w5#a-IXB2J_p^@X-k zya4wrRhu8uO+?(2W)Zs%hsugvXfV*(<_XYtbAwf^rW2_Z9oQqn0pLEcT&(Bh1WsP9~!}I#cW`)<zvBqivp$xt06@tibWO2oLOYe%T zg>{=>5U?el;)1Y_Um>xPI^p<%v!N)@$aIcxb$fInkR?d-rtIlD24^%EbmmJmb5rXN;YXgJ(l(UCf) zpj6@>glPpoC;0~x!gAhXjG}JZa>`3dqwdCX2_!_li$<8xGmt}=Orztg;DHG1X*rJko&Z_u76cRitqf2mBT3{&b=)>49e@igbE#Gc8b zS66?c(x=i#v9_>gw#Ko>z9#)1)SbMd82J_Nnd>u-=X1}wJ`R3l9AN${&Q^oy6Wg1r 
z!l=Te37j389moy`7Z(=?mr+T-u4#$9Zj$az^@ts0!bfAgKx9m~+J+z70o#Fn4{J|w zk0MLG?lbQA_!jml#i{7^^J~m&F^8~oZHLlYDOj#YjmPJ6ioH$IM+|xlwIdI*OtT2G zFhx_`uic(EdNk4-Wbb1v*e-N73c2H+Qu{diRQP!N44!%H8vKBL*Z5gCPCQPvm9hmL zm;Aak+)h3D;9>CSWmrX)bJ&=f6o0wUGOan?N4i_{AC5nu-=M;a+%x5U<+F$1OsL4s zW_cQh^fz8^z&D6C!e3i{p8g#2IXT9b+TCJ8@AH@fLP6#;@8=eUsTc|Bs_4#W*Jv#| zA(|y2gj%L5TgH&!jJ-?G7icxY`hM(o%;NJ!T&vmpbkVe)j1hqa%UMfNV@pR$6E=Z4 zYmb5A(M%gg`)r5Sk=s=7GVIRBKV)BhGm-|sn`c>`>80x`=)T~r)2-Ee zs5_&_s>4+=Yv;a{Zo6leWz#a!_^WZLr^7OFSUkfgL#+0Z9+O_e9?qO=Ep!jL_XgQo zWmgHag_@7nfNRU@+BT&;@m%^mzq!|MD7l?F4>`}TP5%xN^%jlH{GJ68`#fSeV!Ej| zl4XWx=G&{(GZEXcE#qp^^r&&7Nt}L=o(|&CKb>DO+B*}iLRaTA-{72M05NbisB2nu zANS1Is_iQ2-uv40L2RjI_AVWF;mN#Nu3}Yh)X~Ul=&Dd$+j{?D=Gs-3RnaCp7z$3) z3f3yCi}7sqjJZ%_!psrQS(n)K!*6A2CG?BCY`r)+<~W=?xF~3g@{hfDye}X}vg5Ja z=lozWrf4Ueru{+DN1#f4%TmtK#~VTQj_&l?w5T>o^OH;bxrD6Y#ZEZm3hiS;kw?eL ztG#KXBBbv_6+)6jYs0t0SwsH{h2Yf@j1rU)AH*F-r_wn;P9&kBXb?SSn~EOXoOHfn z_tD6dJaT))^@8R5i=EfiZ>u>QEyzS|R=(7H*zcb1Q8G<8mF5)ka6sM(-_+mM8?lkI z_o|g<-wH2s!3}R4uoOCmLKg6$WU^#hY>R3xN^hRlutG-IitiUB9K8}&9P$j!m8?bvq=XjZ4zxp zl&o_sifiNR!`L`Zos0QSuY8NVw5heV4K5u$j**!1KJtlqwIb@S$-~}n=nZdI^?gD9 zj=SQ6-cg&7%+c&Ncl1hTGwEHK-B4CS7^OVtY)xvTu+>qSihfG5&hd4|N2u=0p_i0N zZvJ#f4t|9mg~g{Eo~R`Uo%di0u!GS?h5c&fs6|j^)OzVU#X3{UNs7_!;@-``LAa)Q zskPB|jmyc4l^6Zr>576i4|G4*<$I}J%qrW_O;t=LmB3oy8`p#ss~olVpB!J=Z+(w0 zwO_QZa{kp5_gCKc-Cr!fV0Nl@$-kNxo=&@2xE^kVaarmO)cIaY-*%Rikm}Ld?b&kF zlGaf;!kQu8Hw{dF@Zvd>%Fn*N1zCw%22CrTS*0hXMRN{~Fbjxo*|&oQJ_n6nja*3q zp?ZG%g~55fR$}B^{KM0$@WIY5og9{Z;7S3S2np@?k>E2t8O3$xpKQn z`^L998kPfbkM-NXyb3r`1YH^k`;KjzjscgBE?%z~!%zsa2!8hh0SCULP(j4;^O2 zRy~^*Z<-}9Sk^l~AZD19nK&dx;I>!vXIG1_7JtNlWM3_;#h|9Qwq!#Cdtyjwgu=of zqJ4Y&Yg0Uld8ew7An-oqmz8vq7kW7#b-)*SG`UX5fTgfKOFp8>r6`;F!7Jjpgj`bZ z{P3pqC>#y(5KmdzvLQ6)8ni=YL4N*v8|$rck5r@sn)+JNXv#~Ljw%GP!pjMdw^Y7d z_c*!raAmZ~Tmb-D;5#YlxT2v^vi^CYE5CVq2%wH^n|Io7+G?tzW>5!^i8<8N0_5f3 z1YC`VChjE)d~~pIGhy~}uy=G7^@2S9#}%T$=Rc>xkD32*iJKkdv9{VPW?86<1+xH% zAH@Awf`FNsS=`0kQuK|S!hbXe{)0TWc5`zQ1%o|3Jwcv)AgGHKm`6lJ1kB9~=H=xA zuHbU@c62lG;&OC-^3P8GcRz9#u4XPaPHr|(N9I5MnwUb}-5`%2|GCkB{{3^D7G5_0 zbtgyH|CkmqLGYh%z&s#s@PGCVG!_4IR`ivPmxaByoQ;EpqbqO^2_8XSVex-7_?NH# z>z4nmsm^~jYzMuK zoW?ug9plg50DtGe>)C(21D|uKC_dnOqoGNmDa%Q}^Fl{v;Wj^#pX}ak!F_K|LH-_t z;Kh59PXQuLcG{8#Q-3)Rmx_c7l!_FKY*uf9QmgE6RH=D_F@o}fz6IzU`nm5oXNcW3 zS~bFaEPX81uRdgNmtK~A-xs<#EwQz(MR3QX;s4v?DccVz1S^ji+f#Hh=6`c21_Tk} zV*L9)A1Ma}sU)DKWnw7>{F~!hz0-q#JH~ja1gvNEkMQIBmH%CTO280%|6fe|{~zN2 zePjMVUB!2-flG1MvR{0J433qGyol}FlCNYBx^=T8cdjmmEugA>B5i7A^24%X#aXVU zkBmy`P71GX#t(dS$||g>_>A5LdtXHzG`q5w&Glwhz>N-?P>1tt+m_?M!Mkra0{%+o z(SkO*j5Np?S>7+&Yc`*W@WFfH3?~*1W-|`_GaT|!mqnhAhlzw~n;tr&e)oqN2g5Jx zAO{AWxD)p=g?ug}S*?aft^)xQBAds4BA02N!!=NUTI%)n1)TI6s$E(yHTC>%^Ilvd zK4SQBlDVDe@KnzYCsV9pm8bRGh_831$lccpvR@40j#Z6UC&vq}BS|-dtYvXRz0HTx z>J>$9yY`FoW=;ZMni9=B!BJreuP^mJwrr(@}^H7Ub_<%SP-ivY{~>yq`)6~ zy(9rewzW+uSYBa^J^HJM>43?BF^gN^4I^&Z(fkaxu;HdDU+?7& z%LMEV6^@&UW0Bn?=Z$JAy$yr5P_cB+^Z9b)#TY6{eQxzuyhr-SW4{Id2Tu?8>ZvArSyD6zsknH z3VP3Isb0m?Bzc}sBXLf6Fsl*n;2@u?;95vg7+Y(a6f%Vpoi1**wmMk=w}~7n%+}2C zE)0?T>uTjuh?l~9@%T1&(*lnOXs*Z2^LCFW?uoeRF0_Tq)uq|*aJ(Zwge_x`7CV@I zPFp&cP9%9+&z!>pb2Qe}t$(YYtflvR_Z3ILm24|q>fSS}twd6J`C@60{t{IulJ6Rj z8j7XsBmzAo*qE+K79hc0%8(e0X`5TR9~!)n^l-e-$y$~Eog&iKH13=_TzwHz?sIh` z`U%E$P#lL>r%EMjznA2h@e+jnExDg@X(F~JG{4-pHQD9E;FkMY-(7?#70<@7xbd$y?B)->8g}`8My*{EEziaf)#{PUS+UFhT zm9U4~+)cEaC;60AA7#*Mej;ICXd|i{y3;~*$sp0jCUKyO^TRc;9-Yq)D z;yP6QkQ(6?E8=; z+KL;7n6;f^X-03i&GPu=?J0773)saj^E171)*VTmfzNc$Q+GlXFtDju5KsQcNdJgA zUd}0yN4J^FamiM|>cl};=+TS^i`-CHiQeOd%in$6l*jIe8kf<5U{i5}7 
z^Gr3^FGCMDh@62wcO6VFYP3duO*^bbDNHgG1=of^OQ@pYpAG3U4z5`{LX0j2wSH!Wz4ycqxC*&vMV(a*a0A*vL->u&D609e1;A zs^-b`I6xC)#Z7Wr<6PzJN*^yA139ADZEm7WcbGA=QA&TO5G@A2s zT3gSmUe4RZIZm_g=I4nvQ|0s@bS-7@aa;~L#cU=z_gUd`O`*{1=v1pgZNd7@C zEj6;eIyX6vBSROIjk)ssv9nS6iHjMBy)p?p#?o(-ikWgPjPA$!z1n_9U`*FkX{_s$ zCns2MyT*T;a>O&XDu6v-C^-eD@^ukBCSiD*$!Cr2+Dze@hXt_;!lGJW>O1Hl(&Sg1 zVcqk9ovNMW#06c+((l<~Z`5-J?PO>;1-A1uk5YHH*iAD{a>uwr43{C*-G_%LbQQcG z46}kX5epEPM=hN+{$mW11IYhTc#FAZD`)Zt|%E$$pnn~-?1}O7N+Q%r9((yb zlxTKJ?@1f-9xz^K8r)mV-zG6)ie4%-dyd^7<_IF#buXoog&Zs1U(LOQV7P#NUwmIq z6#Nj^dWSe|I&1hrkOzfU?#&iz?2~D7KfI{AnT-$@8uOt(eP>9mjFnLriG}xc zy3K6hqHQgLQc;^_*+`Qlf~ePr&2jE)PFJKPmE@I#+ii!7;|>A(c^&SEPyFHRFQfliVzig?_?LX)FG*fHZB% zE`LYa)h#A@IYj>Ebx(>WUKg+bA@}Tg>)3|!E)ffQ-m~PNKUIF2T=8(qhAmT($1mwl z5)$R*Rbw7?5ppRxU6oNlPO~95GdGAD*AzUtS()7U>a`* zOje=aT}zDJM>v==-WuA|gbNe1(5bxd&ukZojuERfWMFFelKT18hJWil3g&u$caagx zAeQb>w=h}lKokO4SyV0h>^4}^`)uWI91nua z&e1(a1MJqqBiJ5K25@v+K7baSFP8LjJQ5CV6XS{7tT>6Rq%_@dEtnkf?V}d z^X-O^K0@?CNrT4MgO*4RU4(K7NwisnFB#yn@t7a4Cr-+4q-Eg>gK@m{rSRgJRrH9O zVISEanHtVpU=x37I&0~-Wa4SfkJR<57N|qachVs8ym=uP^Q^1cP^pIY`MvFhFKCh}1 zw>a+KU#e)qkdi@PbIc2$km|dQ!^<;ZaXCky^;U3lzn;NS~B8q%c;;G z6fl%}4Gq2=(0E*LJX7}d7nz<{FawUAL$R8Q(@~oaYV<5ch?_GU+uoNbf5j%)OJi<$ zqrxp6s|EZ%v(p*y;{5(Qe9(UW21RUDeFw>kDGsi1TKmf?mImS zXZ(}Csx4uQi3ZBjP0An2%qsS12+TpaX@Lylg>iMb=Lbq6;F0mk>>x+WR`=g(Nj+&# z9i=UF|EE>y2c%YB*4(?!FyGrua_k!H`qfOfDb~AM4?1z^QtHkU!M^(n?ppEf6&THv z-drjLNLA%ic+MJLf6H`PWUcInM4xDg%wBsTvg1EZC;p&UN~zS|6RJtKBrAl1)*Tka5R4Mq9kT1HxV9RVw|bpx8gC zDZ5VP!Ol8*4fD6vpnF`H!0$az!>$%?7l6?jHT6j99Ok zGUTJucM1%?n`%S-vTwUPFAOXQSFwGTM*26=v5O5br3UY)ztryKMMXvlAK_shfA-hy z-Kz^aFg0JCqQzO~@+5ub#IQ0@=%JjUvNv~YBHcR$9qat$^VQ&?eH6^_DDizbQ5Pfi zIIEn_8&$fcis*^lq6?S)i%RI$AeohaZAOsU0UmYu+?>$XhY-7Bs$mw_7VCIM@5P2Q z+eO9)C~=bq-C>J9;K(8&gBhvYvD`;ND*9|W(`wxo_jCqAzLbb=e?$KZ@DVnqxH$9m z;&E}^Q?lk*&5S1EkxHTAxoL)*0DEXlqK}*Hrnrq;t4U5p_>VB#84v?8BNf#HL!Mq^ zgKrMHPNe82icQs7NP&}Y6ig=ilK!XcrO%?5)Q}rUpGbitCuuBBa*u&;6^oVTjqqcd zqb5CKUM@;g;v|`jemeyAO#!mXW$>q)b`QZ#PlOQGz*XHsCR|HQJ+5}pv0r~-`n{iq z&X`6xYw1nSltyo9O|i%MF<2`G;^Nvh(rLX+?2IqpF z;U}5?a=&xw!o5u$Me=FoR8W+E+n0LIat(EDJaYYZbc{zGX__A)%8`}uz+INzvy+xA z4*%DS8mJq?S65ndH)Cm<5o0Q`SF5j`j>w;{UT8>Mb?N(Te|1#miQpmxi)&bb%>-;) zzr7>?Y4bYvGI+X0kBV&4yRJmC=l5g;{a;OkG>)uAE^%>6{=Dr+7>w%^z2nkEip7Ul zCV2eZeXW))mR)GyR?TuEejQjJJ(Tiob_d-k+t;cJorD;m@DOdc)aMO9pxt)Pru72L zV2hYj%&=G&o6onwEt=(ko>vItKX>Sq~JIboJtR-{q+nf>c-9j7F z=pR%&r4xL}1NQy&dHpWhVzS&BHgUKyk+#u1#snKo+IAyR%bvC|*whI;(*9!^(h}V7 z?4TSG!Ww!9=rnV0A5IVL(L7-UD?I`Bf|yFblh?uk`9`zyAfc-_(Qj5|BWVFg1k9jM zOsMM=NR1XKhtXqTR8yRT>!*Ai^LM4^0NV(_wLPT(Y&7?XS$O`MPY zfB=#-*kbkzle6J*Zg|wIm5L3#S5AcuAdBF?AxA&0{K;E3^@!2zul;`8<)o4;_)dQ( zGQC{5Uetn^$YDLAZmh|@N{qP{DNDr37(~}JuXir(3t-J=oQ89Op;MG6iWOQW zy;U*UzLUBQ8%QY@lKL#ryz|vdpUoSk{J37+Jms%BgpZg~KsgrllW#V!F+nX^JMGXo zUMLooy{#bu+hL~H;5h{2+;@m`2sbd(_S?H09`Q^KuraTuCY z*;#Dj`k@n1&s9+|`^%32kz!nyr>-Tw_@W`&u1)w^P}I`g<{b-N8una`K+ec*|EXp+ zB-N}4JUcT~ko>4=>AjbKH7rJn+Tubok+mDJl8OTj2?X}GAj5g3_lVCbv(y(ft^18* z5~Uv+cx^fw#qz~y%&l0`*L zL#)czm@!uH2|XWoNa{Krfr*mA-Vz8_5MhH5b`lj^z5RW=-6D#&QZng_zuC@%(JGQn ziR(E9%SMWqlWlg{PU2^uh8N%>^{6{&)i9mwCPPSm4dt#WW!2zaVVtVrD{4Fao86Lo(v>+lCCiMnkWJ2}qRJ8#@wED5P_R)pv{)cjJtIWI|? 
[... base85-encoded GIT binary patch data omitted (not human-readable) ...]

literal 0
HcmV?d00001

diff --git a/docs/stable/_static/img/tensorboard/add_scalar.png b/docs/stable/_static/img/tensorboard/add_scalar.png
new file mode 100644
index 0000000000000000000000000000000000000000..a872b93eca32121acb79ca5780af2b8b38176ea8
GIT binary patch
literal 45941

[... base85-encoded GIT binary patch data omitted (not human-readable) ...]
zP>iG{<$>%01g-&eL|K_AeZVB!%;1Uqf3S5Xk3jaOL|9OTa~C|+t>Ez}O**-Df|9nC zclpta^XFPWPN?kfprr4709u&(u=E@d_k0Hrv*7;^7sPd1cl?SXK5k`9-i4ybH@=FM zUyg5}Ls00ZjE{c^8MozP^zG=!c8(LEvbFQTQZr@Q^sY0JWU!tmM{ngoc5L+ z`_%0Y!M+G@PrP0YBn_MRY#11?ZNY`0to&SyOL_Ts<}e7Hyw)hEDsjaM5<`~*o-&5{ zSABK}01WgMr>P?p2WcK=H_-S>YF7JpGeZFYC*9>Lm;jDgPJ;>K-JL%PeW1fZL2|`# zNP9>l4H4zEIU+%F6^g=*OkP}%DIp`^wuTTl^+wJDz(o=3q*GCL_+F%2hpFv(Hb{jY z7SD8)-PE&2=Dxi%evzxb5BL#QIfcj(3;ya__jCjx4A<}|A z8tWJNVtN!Ki>Att8Lqn2*|rSRo>1oqnMlz!E`sw z=xQ`iixXL)(PTquv`r$g0JCw$_fSf8F-tEh@82}Rx7eeC1`#4L=mssJ0@*-Ql&$^X zrx9gqW(e6q?-~?%1jE~}~^*oYmvdmjl6pM(gmQwYor?&!T z%5T1pu8M$`?mUE(Mr^Mfbg;*%ySP*cV>5 zTaE-e|Jqd87(t|s)Nb4uwRG*0zkd;Z{_5B03P0%a99| z*Jc@m>nz#HcE`;@X`%$Npw8iOiF(=!Q(Xhih!S`Z<4p^Vq+Ear34ek_s8W9=gLmeL zksevY*jsaUsSc$f;)hWWo!oZ_c707R5biTzZ-~bu31M=tpQe2a2XK@tBME@T&wLEo zQA-Uv)GIDZDoyr6TQgo(R^czJrYj`R&m*|3=ZhIk9#cgHcyfLQBn#PQZC;#Xn*E17 zGj`5P1kehJv_UI_t9--&g?n}&s{*h$I*{(UB6ARMscv|(q;Mz`fV4o9D<`p+%fE6g z$7QM=_3O&~TL%2ugWU)t30lAJ|C{x@ z=T*7cpS`wAN1g-{k^IwM{~p-;GK3nqQn)of{?lD$H&CUuHRV=g>y0X%vD6Ma@HS<( zDd*`O(9($tveAHzr<1!w7X)!sL@!?J8_*UfycE36rlIct4_iyaBu*ExI&A&`7P;F7#jKGV1YwRB`3Pd6f ze6RLo&o{I8SMm83cIey!PD;cJ@Wa6|wl{!rs;S98#kH&8Y`B-4`wNH(DgusPT6SK| znaSs6q(&Np*HJvrbj0$nmQ?Q_dR!xhH%DWo4t>J>KZB zjFgo3TE(VIQEXVBgnsXICZXFJoAjT_-sQ1y&@s}QzIZe~xQm@-F(j}c&(qX^bxGvk z-04AVsT6hNk?tMFW%aajwbez?GzYQe!}6X-xfSab4B}9{A(=2YV>^P0p3mJi0YF;f z&hxkEnxd#Lw6vsnXIzPd_vKD;%7EfjA86ej-1%V16*L1@gTte7*{FFeCS@w^X{r*% zXeE9Xy_z9B-5q9|&&lpKVL^jaoK|dLkJF%$qW+^}vL`vJ=xJZjgV~r~=cwyPr?;|l zbJGg+d#tl)$!jg+H4@iwfuBh|UhTpQC{JB9FSxz}r_RiMNB6sX^s|b)DM>o$$egpp zKI0g9rektYhVQ%eYTTeXLtRqR=Y$0`!9>)^Ba@EV>f76&nNC#{7YgIpxjz~87OD8- z0{w!W9vkY{T5T4zf>c9f6Ah7KLV8@BM67_FZM*o<%x`P}u-zaaBwvYWH52FQ@^S%P zsEqfQyOv)7e!Afdg}QaF zGHl56l|E=MQLD9_Y!-JcI!OOZmrd4LeKIPW46&f8)f^m70#MX@Zab>Nb^|u5-KJ&)>jDA@IkpB9&p6}3o zv46AsiT#)|NZ)!AJn-)h6cDq~_J`=#_RaWw0FJr$a@-rby?EatBye{Q{|N?F+9v*G z)4}s}gx7cq>}2S|OSv8n&n?|U?S`1ai-1nddQY@e1-e|z0WA8${T#@m0}Ork`8huoyOff6nQ-ztD^fx)*+^&i1oW3VyBI zNQ6Qipi#y4HLl#Izq}<-*wyR$W@t?L%=kn4cj#Ibp4*GPSu#r!xq|w~Rp~D3duRy6 zv{DieH7-Hr60`#UubBi0WgTb&eU-Vt+EHlB)K>{`SF!!z=YZ9kOdMATz?OkSDBn8j zZTTh+2KgdEID9=)ArUyJtid0~^`XSKYDbt^2$UTHoO>JvI5;_=EaBe~@jY?Mu7RfA zS5ZB*Xn)yWU`L~LNvi|qXpo^}JH#EGz+;_>dRvF zB~zVgg03_mIca!Sm{<+!ehpo0Y-}Jh(YI`K4FC-|wQU7BKf3j>hnaouDvfsX$`Iiq<-rifxR~B%qJi3C>wt@BY6eObrh#*xC$#iyVnya zu1frZCE^IEE2*hTOq5#o+@~loALCH%-RK3VW1!)Dj3-4pXLLRT@EdOmgntBXWHJ@{ z1>ig$+9l}buOw0NOq>ERs-^M{%1B$oK8}ECPll_Jg!C44?#70d8wrJGv}TB3gS~pf zc#UEX7M=$RIJ3lv``FMLmNT(W(G??54cbdU z6qUQn^Q4Zu@Tcqhz;Wo;`;^as4G}A`8_Y-VDIIo@g3etHR-N>5!x9rGnl?2nWcT8N zelGm_-0lFjKg5CnNAxqJlGHC4M4@=^UJsEy3Ufe(kXaQluK~Yoq7 z?tOL*Re?!I?an_QbPaX=CPY{CyZ`tf4-yk&Lc!$Aaa892$Ahk6X3zl;KaSylG^pTA z>!WSqU(H0a#=Ataps!n;t@zy0+G=u8QcO&Y401O*?}bw3xjkSBK4?8092pt#Bgi)C zOKL{CP24%Xv~KOc+3FErVYjocA!PuhHu<|^PDB7F;)iRFDiAPR75%+$yFO7Gv}JHi zz$ROxE#)ur`)k`Hx#gL&=>h;r#v$B?f;WZ{35udGB}j%XKNpdSoZFht)KqV!CcU8s z2b4>{#RXuEG7}|+{e++{P@ck#tEc28>B&0^QpM{Shovb-|wX9`HIx^}?o6muF z@sPchp%B>b99Zprg3&Y);rPV_K8iHqbmQBU>9X z;Ni5#%T{cwiZ;WtY0x+|XgWqDMC!9p$mN1)V0sR`bbv0NIU!ZHChcj^d7Oa#=9fM~ zR!vZ~R(VJmWR4aV>P+nE-J1X z-3gp7DZCb}>vnK^%46o8Z9V!*zjdoec!m9zc(!_OVy*DeU4fGnivHB_m&TcPQ{%U7 ze_!0~CpL;<96zI&m$Z|JcQ5>6_$lzRyV4?!I%x20d$RU?$ug~%RPd0Lk^Y`sr)6If z+uF=;=XX#^-H%5F^~y!T>G78&7aL(CtBpa^a^`7HF(&W`5#Y#`+6%`XH#UTpSU1gm zBvJzXNf%5aOMIYSB2Vs$7z(UY#M|Q@5Y2Cy?P36>yBe{TI2)b-oZq-l(#*YlB(1IA zVLmet5}K~94W$*`cQ{?~59vDMw{cBF%;kBwDV%#DKe3km)V|MU{GeHneH^(9ca|Kr z(Cw3c=`i)Y@MPHt{9yk)LOQT(?y!rU`=i0p+3w2)Wp~y7f(ITU~-G15MJgn7k zysdYH@80`;I;wE+EBZ&vb|*p^ 
z`9T+43$CVBLm>p{nA##qr1RQ*s)B`_?k+GbuhS6IRgzK+zhk&Js26I^$dNgqi=tbL z50Un3U*!}>=Rxx~_1-#gzX(npZ7g3 zVfxK}&l$EBxk?6_c5u1jUjhiJ3tJGS7sDEr_SK&`&X974u=D8Jt>KxLi4f4Gfb8hL zc?ZXval*9P-I86=-xq@KG-)v`NRD8jLGNx#8WR0w%>%3pNz#nv>${;CiRC!Q%?2a) zHj%Ln;I;KTop-3y-yn7;`6sO|a$5F#YK_&@)$ixYxpgVmeQJH)^80c}H4uU0AsI#0 z%l@Pd&0?eRuJE=7EM4?;C4FHnCVU|V6}BRJxr>v&*t<&NRz(lZlnn&^e)u~QbG}t7Mc`AKmik^v!xh% zlVT{501=cDdI|7NHf%W;=i>Z;ljq4zo{Y8TXmhUh&N<$ZwwV~R5BgZ^MvIHXsS`8( z$MH%p(|(HB}%}lLCi&Rv1hgd12hCJRhUjh!=#g-KUN_r=y;h9ei^W6P=3+z zKPPskZL_m-$`!Os9J!AQ!gM%G+F-FTEzK8aak{CfE8}}e6!o8EI4y(MPYMzVY4}k4YxGn5JK5XlxJ_f5z9$!ZC9(3zko}#QicXouQLX-&-i*e1SbMmDl^-Z7Y}w!4&GQ zk6WOhq{-86rl9*ZOCh2xqV?prTr(i#WK5prDi=Z^RRa1J#!#9~ZSu2;^t6 zY9LZ&q0@8c>Ld3Vfpf_){ta0~Sf*XN#Eh+k7C&4i?_fTzB$;FYw(wM^r#}fW9-oz# zgf#E1=^Fo?JoJn zv=vtWrZ|WK?*8N=v>(mMIR3}R*v=KhcPm~Hc1`QbCF)1GwE@HK%cj=uwT(PW;g-U%g}#yk%T70)l47yK%UkF#kaw z4;v^*SLoAjdkrmIhUkBwl^>Lw1YBfyI`3xGTx)0-=+==QV+eVW^kznG*9im1p?rR1 z;)Ky08aqt+bBiq@AC`HZHGPs*2%oa&U1|>5YiIok+SE;LH?7mQoR=PT=)zrIE8w#Y zJ#m6o{(!o(oVrtuXY>P>2Ey`PU&Y!fQ6W4@7@u>Ox_&ovB8}&r@bkozdME0qCW|KP z^R#i4-Ozv~y%4Da{o$Cl#jf|I4L@d^02&=INiYCE`W_HW;7f^sbdGi(1 zwT{es&OL9C#iMaJ*J>P@M`XP4@kvx&gWiQoBtA~l;?6X6jxEo*S0C9yqx)vuYSIcJ zqp6-U!@PmJspOGw8{k<1&P^nhehRStrsd%%ik{a<(=$F@$6kukVeOo(@;6wS!iMMt zGNUZ_x^U%vGlnT)Pgs-|YE`2vpVt<4yv@DKqpLIgIVU zF#^0SR*bCF{gE6NKZ-z6Z{Z&y)ms=Z;aYQ>Be?~{pg`i+=EaNbW4aec6;VQ4PyIT& zxWFt1#}}L;X6{oHYBv<8`u*@qS(S@+f$_OsDZiMEw!#I5HCR39Sd|;DQ%vR8oYEst zbJ$lk4-ey*&G4Din@?p% z5*Y9v&sFo`T;l@n?}Gv(N#*6DUY7GL5DR>-Afu8l=n~q;+B=B`OO73j|BEz^#l1OG zX=YHbTTlAQvfn;>KC~dqZK9xxT(m9GD`WI+VjM9~f6BCPl9kcWU=~tUbs4rJX zap>La#d0O0@0Q>hkey?cI8J0&8whNCuQEHI2a6SPy-i65j5i^fZ1Pn#L9@jA=3&*ryXv0JmXkJ7kviWQtTE%uN|E}vkz1vX<860`y2em* z)5R8}x5Rt}$)~B~|M?4=JJiQ#SPbLbHV1=wq#w_tXH_OJzDd3?5E<0FamO0GZ zE{615xoyl0#yxs^d}ZzOF?IXIczVt7rt-} z*@uNCkRc%3g*pBCs;sUNTO+TCMeeHSKRBGjhd&H5fOefx4^ID_?qz4>z;egz%Ml}s zY9{pP!*R%$xYqVne|vh2yx4n*>DU1Tx4UiKwbl+z#KWI?>8^)myR(rg20CJ_HM$G0O)!w+?;~MLYLpW15z0r(uN14JnYbbe~4t zal5n|>(Lt>6-~5N2lJH~!ZCgUbKowxxgRyZJZMn_r&`PUl%>+U~cxXjTJ2?{wl4L%V@gCu%fmcw7n{2!=ITs5sRqW3YZ(K zb~K2Y(+%-ihYWyDWzJvUCf&>1nqQeM4cYZnu<{yQCGH2M4i--+gZHC?^gl0vDyqoS z7ZqOwOOVU~aP6&78hetIETsc@8NeC+3F!D~j1y6K%41Vg<4kpRMwXsjBe- zX5;IBUr4~Uo$r1O{jmf|si2Lba8d{h?LR9`Wn8XE~NC$|4Y`F^6vD~eM6Q?YB zU8yfaj&X%909vGva6*pdxHUnkLIMU7LwO8sID$cRJ&2<`mW$^FTku$1xO&(zkRbs8 z2}danir;c!8UXExnu4q3{>DE*Q$m1=NU*#P{Vk`=3r4E8jLyUzTP!X9?UhgQy;Xur@%=l*^l0=+rZb153=^OQL+l%i zP_{t780=4SO)~7=+df;7|KVJ2tkcc?AOrSkYL#dILORe4V8RpBenH09YwOep;R}&vH7-wHN^3)e+U`GnXgkh#>ub~a= z**gYS@h(VLtszugL5Zq&mWH_kSCCIm9qx2^kF#Bf25!CB8d{QRw7rm^ih1p{g)8YCyS>FQ&w&Ft?lPDw;XJixIPv*9DmP$KvY61-*kGj&@%A2aYR8i@*m!>Uz&Qm zh{@ZRZU}CC-VXR;?7m(s0e$O@nYZaFMI=>?{>Rm>e@?r+Dg=(_ z@jdf63iX}}5Y?$KeP5TJreK=IJ!&~hC*|}5ZxpEsxm-4hTGqnOn`F0GOcIn$Ypv6I z5{M0?@rOKAzc%Jq+Pbe!$h){WP5m`kMLPR6l5z47Av71KD5fm=6t2U_kg|!=sijJ? 
zDP7yl5lj_k6nGfq?FhB;AGX<`74@nlo$=?DX4Q0vF)lRk$u&4sMwsWIzWga))bkU@ zk45@kfV!FJoMjfVnPA-e>k+yXt#@^qUuj*;Owrr29gs=^=U|-rRUn5-b%q}JJ?+(z z-wn@I_QKHFF5@Bv=L1=6BJ(X8#L^qBJbs8eek_nSG440Ckwj<##*1%WcPI|sFTr_m zcYf7|*}%twu4hiA^M>girH}^S0rNdCTwBYcDHK+CUx$+UXpYFz1P;4?b-~G|1I`kk zZ{Bq6dnMZICKsdFEu2A*+@W6DbUlK2FZf1w(YW?Rw6@KJP6HNQ3q^0s2;DEbn8LWe zGPZVI^5I!D?YVZ^=|fYdDgjgK4rvj+zv9TQm>ua=Zv3droy=5)37V)s1|-yD4)q>^ zfr+!>hJ}%(7Fjeg`@O3ONht}?P4MXq{5#=pp9L{>H%&p1KI?gpTBaoFV z3W=kLAH1D-*Ff+{z5uQMZg6f@(g!%#XlaAAUlbgi>EQza;JassrrN z#R^Xz>TiA-ECDBc70irI6>@vU7gEXW+0r?@=)a;i2ARJgd94&&pK$NIsJ>ri?XvH*O4Lsxxu0_1{Xr;B{-mB#Z`R z%HK~_kAf;41eHX6V{jenHbv`Dkc-ZFwnDmGci2Z#|9`Zs&&p6=NIzOqLN%3(LaTq~ z-kxlYL2!NH8ED$x_HE0a$QTq6yS?M?8T7a+SnmdjJy1MaG3R`YY+u!f5^a~a-9x5d zdsvE!2+4sf;juGO3j|u3BWOR2Q3G7kr9>eS_C%a?!&}(4kimSX^6E~6O&UXU98Z1^ zlzk$_Nl(p{2Xuat7xa(|^8t(sTAr{GMr2MlA8>w2H zlKdU^#!FThMvuy{ox2%ujSCe0sdjtL5E<>(%MmgaDxDO={H2m_`Qcv zmI`iaGNCBR{_pai#Rpbwmq=f>3Fd!);w%DrWxqCmlki*4{QoZnwqgEvTaiPu|GGly T;h&Iq0(>;pbZ?cY-VOgR`Wh@+ literal 0 HcmV?d00001 diff --git a/docs/stable/_static/img/tensorboard/add_scalars.png b/docs/stable/_static/img/tensorboard/add_scalars.png new file mode 100644 index 0000000000000000000000000000000000000000..2a31a4b76cf90c9c2c27347c57611befaeca5040 GIT binary patch literal 99156 zcmeFYbyQqS(>IDUxVuY$pn>4-5+FzjI!JJ5aCdhL?gR+Ioxv@*yL*t}?s6yRJn#FQ z^L=aGzwaNHwPx?#y?0l4RaaM6*RKdy{wRZnOpFW#1%)OjE2#(xOGiYuD8nAC-%qhjs<_KbN?`z$UU$k6G8E;5XxsG>%nxj(zy(nR zoFL7TpIVl{{GUXYz#lboQG24Th)^8S{fQ>fxvQwxW#T^gzb4z^t4k#*lc91YLU`lY zwvZ@eg4`7J)|lUfxO7r&qhyzL*qcUyF5I+eG<73C4CEO$ai;L>y;}FbCbt56L8t<} z=G0ootu>|#jXHd_`Cn5J>~m;o=PN@TIp3L0b-?;X&Ohz!J!7&n5Y z#jKI-!L&Sd+TX?pa6=ubLYUxKun9moTD*aoTE!B_uieJ8dZiHP3`|6{k`r~uwR z{6O$L%6jc5ud}clRu}>)CbFg|v0v5N#G;8*<*^( zCVO7uSr{m5LJBPOsa5jkvk>z;B&ROGW_tSj_A;Wzz_M&N06Ek>70=jDML!f_a%sbD zv3CvhGY*_-Or@{sk9J&Mlg`m(y-5M8(kcOO(e*to1uQ&csu(;UOhRXxOrx%n-g8_! zdyNrf;dgq@%qK6X9znUF3xw--#uzC21(WrsS92c~W5<@|ud9`s^J=#;=8z81yvPV< z(-qYuI?Q{_>&?d=G{Jm{zjy}l))qJ#Z!xPtN20AiiQ{W@QwW6YoE+?;_+1F(`zW>< zmb7C4F1PfXpKze=lEqBs^!c;7ph78NJKZi*#va~y393Vb>H;g{@JgJ)7vfNAcitRO zeVwG5u!x$m0%tHUl!QV4;gtBa0c!7HU9{llYf(oKE;|jCk%zh%tH{;-Sqw-P;C)~o z#c+6GJ_gioVDCfCcjwp{2X+$Eko8+jJ__zYEEXH#4jjS z9Az-2y~I`ccOv?|%~irjLLYw;@uIl;Gx1_8b>F)Qw?pa2Xl%BeNk4J=;Q5B#Zs#C5 z1-$J_+mPA7F2bw<)FKrIA$Oy9Q*{&Fab9DtVTMc6O=j<-<@Bi;a@(`o^V#Frvz^0L zCoU+GQpU^n(VD43y-of}gBSNfjzrE=wqH&|R;a+$n571OF^lx$M;iG7`2oCNtG|qX zG5w(4RLyUAN~sO zivQv51MGuEaWU${uJj2r?<^6&7`{7 zCorp)t3Ay;&WKlp9=0CU9_}8)*Di;;6P_hXQ;pLY(**k|`*zbJh3li8gmb9qf#Y`} z)mio-lSZPPRXiKS#w1@!o{T4KC+v#tc-P)7Rt;1wofUr}z%g24ZyEvae%STdMcWNk zG5xmiE$CZvv?Zal$&AjoN$DblHdC!Z!wzTL*Q8%3n^r}brwajlvznerQ$+Twqq<61U%87dpkv>><6wrL-G zN_DS<@1dR$x2_)B0{5OWOWb%insa~>jUQf3? 
zHp;SS8*83zUhnHRO&Ar-@W>Eoz}2D9i9beIc5JXaJ~~!CYOl4b@wBuv9sBG-m z6LkeR47e6MgLh?}uIxwbSAH#Q2JyS|hi8^#u?l<}(;G9~Qyj}O0vLJq%l6I0H2oHL z{L+HkJkugbI!sC;!{n00^9dVUr#VAjvwmI;F0D9}32{Hq`PLu`W3A_Ph z1&k-EDy9LBFakA_D~1JrP0UuBJEZ0KtkJa|AM!0?Y7{=)i{$P8v~fPHh+yfUOR#mH=AJdTWl57C!X{!^vN2g8;UXWxY!&$^FD%~!1{DJ^!olP zBSEW3cGwcw8gy%l4zjAW^>l}{O7a!*J{j2QHFSkmL*TcU-+>>vQ zy%__~8&^AWJ~pA#DoOnW8a|<6FZIc?k3VYb)RvM?Q#yn@^zq*;Q_1|A-Wi2Q0QIaD zft0;U-82a`HFfW7T`rDbr97k(@*DU*Iwp_0tCH$HZ3DeneQgf~hutIhgfhpoJDj0w zD2>Dp#Sequpm^d-F)!7pHuIXDSIPrZ%Cs&XGQQere;E0IpXB6Aa&F`O!{tZW)voKy zx{X!@8y=gD{%*DPcFnj+KuzRM#SY#MMapH0{?pp=i?dVkfqzQ<(gG-Sy8%d)9h&?q+MBjy87Nmsb48igo_8vX}vnUqSKCX^y%pF zdG9L6?kIL?-J^3)v`1_?kSmlhbci^622M43E;jWSN!Q_ZdAXbz7 zhp?ra(+>MVb%K#Ze`G91W?c5ZK$>$+i?iwB>Zea1bp?d(ogUMQ-|vlw^zp0duw#l{$^|^k!R&LSo+%`EK9wlohW68qdkJUwLcDS(IpRJt- z&$H^rHm$kS{O&8yn6`CU#a{975V*aG6bN=jIAv{?0c*JE-}>~zP9xU8Iy}v#E8tj5ZO= zYym-$JF~MKo99mto_Yg%sBOTPp}X0!`EC71^q**gZi%<1XEuuh+paBZkF6rNR69M1 zU5gZQ6igy~K9={S*Y|76YZGx_>9>D0zz~v}o6Bfc*+N=?%FO4=?^n5p9s8IUJ>%ZK5|fVd0ddA z1rkVB%Ml6+|IMEtw45sK859(BmW7(8lcu5qzmc5{>lb4?Llagv8xZ7cC@4WUe#oPZ ziPINKHydkPM}9XU>c76=hdlqOW}~M3>k}s{A!#sy_}SQ8U0qpS-?Q2|n6a_*@$s>}<6z_9V1azW;^=Pc^u>+E z){*9KBL722(!|lo!2;xDVP{MEhwc|cJ7*^$YU)27{qNu3<1}%z_-{|Pj{ji`!XVq9 zH*D;z@7VsAHl(TGpIUxp3pW#MO-TzI6I(|}AHv*x+=737|9`#tZ;$_IsrBEMTpYar zZ26Bj|7j`6_J@IgF!Z;y{;GwDOBh*@?SDltj6A+*pa_`_QVU5XHOL+2&(uIB4gk3@ z{C)rPJZ|bdg!vN+N)$>?QcTSa`bZbi3s36hwU8h|R9qa}gVqUG5*klw%t8Ivi8aH) zkM9O8wO{zI;i~N!^7phhf7S*>cmx=pZc)jea@E0MPvJ}r`}&fYtsfcRwt3sR(x0}y z+T8_ZEv99qJ-@i@dfzs7+$&0!V*9~Ri2^WTf}sBAhDisI#Oy-Dk(;vlXSFDx7=YPT zi0F?=2?u}{_xt-sS%yg&K%%}?kU{m2TFA=)`SJgy_!oUqO)611p0b~EJQDxvP82RB z`+v#(Mb*Db+z()@8J=hx@~^)90IWi={~hNas$YVkuvV(D=~q$zXY_ybf(7(I`0pV9 zQ0+#BrUrj`^Rb!ee}?`y2h`vz)PJ)Xga;!D>iLlA^#0#{NrD15i0*b$^mD7Kl#d6s z%(CDoCnv=wm=69W!Mt!NEFgtLX?c0D$#8Pl+wYk$)z#I)A4tsX7OJ2R4i08U@+3mV z4Gr@vHWtlt&~m(Q(mYN!x&q^pk_v~v!~Kg9KWDhGjdX*0!(L>!=j-{+Sq=T*k`l&+ zMmvhDgW2JNy8GWbJ`YHJF=VB9ck8b&i1eADfaE>^7Lbhe>p$nI6Cc79-yJeCvcYC| z@Y$3+Ti4C;auX>aY$Jcp~!rf}fepg6?x!fIn56_EIlOrY>2?PU_B^)=L5LH!oq{@ zlCUtqz}Oh~k+YN(JSm@p)`^WCmEZYC-TtCZe`ZWz8Q@&BgPY;q{{U|HJO66Zoy zM`GQov0ddK7#|PK%p@h`G>;1j)W&+2`Ip`rM2VK>h*6oBe^5MFUw^r2oXfCnmXqIh z51#qK#fb8w?h;w+y|u|yft+?)xlW^{$sM=pX!>l48=?8}TzQ6ia=q^u-(DR#FG#dy z>^ER;d`T5hv2(CDlJ-rP$8mRLj>N82MSi<2T+IFL#D(k8!sO6)_ma}bkG9@BNg6-p z-zxrVIFyu7;!9=;^f^&p=Q1<_I=Z?xnx};+dL>u_?*ptPt?zH7qlssirlt%vJ603@ zF;}t#TrF>%M%er7AbZfA6exb+;+DQIg;m$`wqf7npNpiM7+MB!a_V#2GS^S;S#}*v zpl8nV_N@x^HW%0hz)IuRxZ)sxNTBbP_p%D=LF32X$NznqvOzs5d|WqO*Z#JmPmTF$ za%1Z`hLo@7x@j-lO1Iwf^I`3{@N$G?+NPg7?mxAN5^e;mN)fa-o@1xkk_b1yKm9}# zE0`bO9!lY}P=(>5^dJwyLXnm{1#hF5yZZu8;;1CF@s^E`wWAnrM#@`F8{6CUQk0PX z$pHWw1Ev9RLh*VhqogQ4@;1P8@J$MUtMN^elnrrO>%&1Q+VFzVZ;Yzx+{(}F)##~4Cx;R7Jv#_rFx)RanSdVStS;Bi!kp?Wp>x102KsO%J~1Z zK0efx!5hgbDI_k#5fs4pBr68&DxFdwb)<657(m6Qkr@3y=~e~%t?OKGCAZRNyXAM! zLg8K@jSF5z5fi+&Yy_ZeKk03BQ6~8t!Okk!CT%$g%`$kif z<6eWF^X%tlL29s>h1&%6dslL&lBEAOd3mMWAnuX^+}l{i|MYmIL9-O1IUX03cvCV?ce+mqGN2oIsyh9Okg)<%_Y(!YmdDMZMHU+#>qfhsCk#>U3`7wmbd ze4UYO?Zw&p9X>*&-dB^yLlghUnu6*;vZc)B;^geC&~3@Bs6cXjX2=sZr-1jLiWMXpMgvi;q3$q11#`gTu84SXdOFr#ZwK#5wlezQm)-BDi1yZkvkh-%;dNd<=X` zO@l&NotUgn!zGh@J}1}gIQg22h^5=|d(2(F&UP74o`DOm^&;HKt%HKHgvXXHj1lAX zVRp>yjkMSYV8Me9CVB_ro#zqV;G2L_A^sDoHJ?YD`ipQJ`sVqtNUrfL0gY1cp|5YX z1;0{T%~yPGIovgHz=@RzMTdSdF){h!Yum7+zDf(*7+0^yHrUCpZkt3)1P(5{_Gla2 z7)VBIj=)Al^#^RCST8o_bd9wLb_9n%Blb)*3eW1Ay;1Q!gW^m|v5>kfT1T&U7pUV@ z$U$N6&n_->oW`Mg%c}j%lDiIzhCdB)A3vFA4pRmkk|z7l{jw29sr#5VkJXhF-e9jo z3u^>4&VEBQAh`@K2PKQu=BJe}x9GH-52O;x4v-ZTYSG&DsWH5mW66u}7W#&)efn-? 
zQ5xSjWoXVISd7Q^awo*#@rcKw>oBQLb%=}+FU&7Mluu0)wqXfykHze|j)|pdk_3_Y zPpZF>1le@$4fN8&${5srMjGtI_lf~3aDd>Xb&^sCMQJl92j61O*DAF%%qbC~6nmRc z-G(>CJuKI1PF<+b&deUFcem4<70>fXGo~+5(f6ptKFKT6R7(X!Qf2CM%#b+ki%^5n zp?{YXQ-jwg;uFod^9C(eeV91&7W*e`(P&{?081U&#s(A;EnGphsFI2P3sJV1iP=8_ zQHLqJc6y4FJnswi?bm4Yr6wuu+VV!fR&z6wqdN@+iLte##YdolRDrm^iW(PTLTgc< zV8&&>P=ke`9H=t@6*vq_rGbD7p{Go6y72~F#LWRE(L&F7c&Qx1$6WJD^!G%$&sHdn zoy_gyDwuDW-!TEtIF2$d!0DpHMhti0~pcs9;LQS#ok zGpY5_i7ln!s0pl;{o)#Oh$vd81&;;HfZ_-74R%6f z+Ng_hR3YWUrX-@0LQR_DNpoNxf1%mlrlR`p+mfsvGqwB+-TM+pR0+i&Ve;g5VAr|o zmg2U@|EEH6bWG^o8ea(+>t(LrF(!mFZU~FKi;Plm^M91Xnlq12wYuPgxp*R6U5cqJ zf?j9*S1FFTKZWkk2+_U46Cki`%jG`n56qO+Px3|uma`6{()iQWi$Vr>TRHEDL73sr zRz|SaO*b$L@mo|==Ai;d#R_535o*gw8bolY`GdidAVoj>3r;K`{UmA`D>c}EbNAP0 zTG+oz31S9dK6kvqPB~84(qUPIYs4GzPoE3bzgU}c|12o9vWUtJ4eSoStatpq%@&aP zTgmdbD9w}j`!b{39@n0bZW?W8@6#+K`m9Z@1!*{qz&@)WK!t`|<{)jrA!>|^C42hB z(G2~gcKU=d&04!qf|#d*aSj#HfOM<3=O$y|Qr2>=)MR#z-!77QQD>-PO3t_1X|x$3 zLVfza-e6PD^>hp?U+qG?L&q*jLeI-lZGNCh?rN?C*1y()44ek~*qSyKk(-3fNAf|@ z{}!?tU^!8WRRF9MKjBTKPr9Ui+Mx4s^_yG*!4qZX*T{G4Pa5@>(=9F`SVJF+&%GbS zhR*2|&?-WrZXrhH^o}<{9&R3s(B?DsNAm{8L%+!1(QUAz1nIOy06!Z^b(?R^blK${cXd$fKcgjzbEpa^CACJ{?moF{xKv?#9TAQx=T^FwF^BGC7z462co6M7j z6ciEbK4Za#irv80=Jj`WHDBf>V47@5cZ6>=`Qhkc`X%Jehx=B z%OTk29_VM$!oHq4d~@O*%C$YU6-^ivZ>2pOOs3KA8Jo8FP@D_>l;1NMkV5?xF{UrD`Ut#pb{2d_S_& zqLDgnNARTZ^?So50(Q&yE$sIaXF9W@y{r+DI29<0{2nOGvdxnQs0ix!1607pd2>=3 zy}REJC_e%{sZ_(uB0_BQKGZ}oog4xTC0QIQ57EY4>wd3aa9DA%3keS;v@`}XGwMz(V-O2dG6E91euU zSk{Fh^hOFi9RsI{UfMu(m~5+L<4*rcu}M&(e-gZDFp6G?wylk#_yCpw(2q%(-$m;_Z5 zOJ|HqN1hbmN$_gTL0G8AbO=$9lKWZyS}B3ZB{qI91}(<(+f-sj+mZ zXYzXjU&j4y4xdDbOx;k!{@(=44K!9>UqK!edXSKCL~(V^Hn+yLbliq`2uqF}vWA<~ zut7LT72rt1hYl$buouYFU+W%2zdJ^tVkbjwySQt#m?C^N<>MzLMv9vI>U@q<${fM%S@REylAq@+Y#4pMO2aEl0!Wi%Ufk zvO?CHx?1o&2#j<8vG%j84ism6#sMmomjtbfCzqxGSP`WWrA$!icgKi@Xm4*F2PrdC31nYS#UIwxU z4no!M+rm?WZK;N5<>69bzXj$)v^%2_|Dy<_?MHK3*i*m+?^#T2MiV&I+RPr!dc9rI zCIo9Gsz7t#L(HdfqQzf^eQ{+4xmsTM9~~zrpJOhXq$Ti#JA z2~Sgro=s}SifW=&N;)CPruy?3-qg#;s%RZnQJ%o^J2A~Ss-i+U`N;VFGIG#JfD#qs zTd5J`hq|aH$N}*GHYN`$Q~_3pDpk(x(Xcva@LufGYliR&fu1LF{S7pYW&Ts?(hC|z z^DFR?_&k}r`CY$%OsMU!Lx&x%;p7W^=U|6=7|``zn$(8$tDb(!Sj0pqYPi1nq1f|E zeb$ChZw(o1sN=?zDU~44`Y7&xLL_hge9VR*>Us#7W0x9YLJJ_F&3`UT-sfV7r*X}& z(45-m&vFOTo@!xN4MPh?nv4g%*u`9GukT9C^0TnA@(%w_-@hg%O55ZlKs#DN0@1(6 zm&8Ecf~o!a68L3bC#PmBhT@!WP`H)kLkAG%jXb`XkyiK@61i=*kOxQBkg_#zN+zb^ zDut??uj~X6fBOvg|1e4rErIxPLdXrQMfL_L?S8Dg^^Ka)*eltClc1^devfjcR&6;a15?QKJa6Ya4TO>7UhE0O!(V344Hd+8033=$eiUZKl#Nyf2@f^2RuA2@ z5|+$Y2JA*~m{;(@#GfN!t~XLR32)*uR9HcMg-y_*2Jfm;23W;J8}UJ$^FWS`i2YC@ zgMHGLIO7e%uh1Hj900;*KeiQhHMII@a^J>B*Hb^eL6s5=b^WA^bbaQG0?e8fr?yyi zeP`Y*3U0W@Jn{#6PtGlAa$J`cKBA$HM_}MYIlSot)PtmSxwor{&e})FsF=e{aomKh zjdNEbGmtT;!AEZ$&Sl}|r9dp|iXun&{tnfIgOHdMq{LAL_$tvGWog|D5o!J@(Zb$V;aPD{t>x1PjgrijN=5rDoIaW}wSq@(8J}@Yx)z%~ z`=D6`6Un-Rf%W9VaIhXX6B{^+(8g@<1k|)J_7VA17LyzMvBpnK;?K|H;->Wv>7~1+ z5Zu_Lx1q)bu);R@{qtk_!!{nEWW~9ktK|P|$h5E&P;vuwM6yu#^K_ESI=jPj3JB&2 zeoH!~&|Ki+!KA-?Tfu;RxS8F+5n>2K8R1}iet(tCugi4m6x7&wqW{Bp5R0HAI1&|8 zf*Sl$L@wcwCV+_u6u3L$qlOxRu=6&3f?toB7B<2EGu%P|RlDG&krgKaQM}H~RJ*-N z#`oYJ8DfK%i{~n9*SGp_1o?e1S9lv@PcoOoK}3kNiLO}t!7 zi!k0;C27DanqO3LTZJ1NghaJo=263%DhTgqCRiY{i73 z8&o|}TZD<7&imRb^eC!U6FMCfj5=5G*$%SFCGxwjbcj%p-#ya2vt7|7^nF>3i;qVS z#USW0%khD?YrAG0$>bvoiJdqV4VYJG+xFpojQ@A zbOwWsz5Iw~IzskCfleBf;wHR7mfg)UQ?R+sMYl7kATJFMv5Z8ng4q)&z%jwU+7(|f z!U8*saI1k<0ggivqK6!{12Y6txf<(A*xr@3MK!DLPbv2%P_p`=pb%kp&HgHltTAu)00DIq~MmE&GLwdm+t(Ce7!s*#qdYk%gO zTy()vpM#}hL(9YewimDFa8wXlpmSWRwtWSuwtcCv=X5bLq^n?CFfvDzE@7iJf%r~^ 
z{Znc~-6#(1uz;N`kK40`BZA9Vxlt~}D-rRZY1v(_g#JoEUetw$Q~>T|cK?O+ZojY* z)?#RiYeb5WDRWU3?d@!~?nr)1fjH1bDOoXcgp;;aM%*EOvE}rMz(=bd0fH=j@o=5~ zE`&gz&51B3{OD9al31G)e6m`*Q6op--e!80y1m(YImXF?Ba48wBC70oL;NDsyO_Q+ zgO4u4NDIq19?_`&(lMC(StVrVc)5HzCaFI(el>N*DJz;rzi%CEo*jpYg9O`B|5m$- zRS$vhefyM!-6>xi#fqbJg+-!yk>67Lp3eg8X}Lf#Qz#rkc*Qam>25xkKEv`cqYsFv z3r~n)K+k~AV?{R7Jg5Q>KAk}d&5?J&j{tVE1jnObFeA0Y{rkQfVRSPSZe#M$0??>CltpKl(&4cLE{3qhHkEtR(j!8^B+(-!Z(QhsPDf*4s*LK zlR>+5&2tA3qu(<)s;X|{_wiwp8DeVH1Mkw74xGLC^mmaolSidAeD!`T&DI=xLp&7H z%HKVoZ$IHT+|QUem$g0O&eEk6uJURp`IFcPeBMjPSpQUNNDRytH0+x@q!*t(*wJ>h zJuwK0K4W7|?Muo3`Rwr?7q}6kPx>w>5OslxHVUg2F__!&y(Ea-1)wZR>CC<4mnz$vP+g8FtyIu!!lt zbQ~>nM}k-X^V~2-LWhxwq*$>>Fllye3>8krx)t zM6V@`xW~2`++UOMR@tjB)nnU7aH_~NBZWsxgZnky)JIGASmHj}vnLeyCT(($M4*;! z&0_5+-yBX#iXq8@q6HbsNDE!txGxUTDi*A2ANO>FHk($iB*TR3l)OJ`5$O$+9&mJe zCY)!GI?y4%K5x)0w46d^I*t*&+wRf=J)_Nc7OT)uQBmo=X_Zg$K|k$igd_L)b^6YD zK=gXiO^RJWR#w(eo~AAin-(+s%wDPC8;_ zOhh~?KeD@0OS`cj@$!=H@{)yZuN1%LHx;dxBIG~3>1IZr+!dkD;kzr)vyv>5Ww&Ua zULy_@!voFxT3MM@k3cyZNA_BxvB<@}&hPIM{4;C>RCqk57yKeezWTvw)Y+9Uwi9vd zo@lj_Cw)bw>8NgpmZ9`EZkRpNzDirsR?B6H`gvKMwuqUST!D||OthgmaHojQ7x36e zatf9HwoI=LJ=@2FuAqRn6lU*Lr_uJCxX)|e!b&@;y}kX}Oy-`!*@iy|GV|WS-?Kf) z5zvk>_A(y@^rcw#AW=uuBrABxIcuK3QcVFTJ!4vNT1wx7-G-X$Jm*UsZ+tOc}Pk#ad0CF7kdyna%13(<+guKL*<*v2L2 z5bh z)tV9WRwSg|6@;C;Zr?=3B0 zUq#1xhrpN5;bzYR17&b-=dCBq%X`%-?ETB14<=v6bUz`BoLn+<3sm}YD2NnhjC;Va zU>-QC)EZ{AnH}O^-#p$N!^RV7VdjCOjPhJ2OOvC9p1e(vS`4|J*K-7)a~09Au>8$pX$6y z;5QWYN5&p<*C9+Mz(z~r}4ea1Qx^lrcql!R@CW5X^`Ao4akNmhcPn9REkmcmET8SK2*J#G9sGsZ>`K@qI7q4zoQDud0ZPThLjAyB@Xa5LlMa zFc@R=@LcO9a0)CS#0bY{ZA?PFZUUIJi72^asft(XwNX-ka&HQ|EnBqkIbk=~daMmm znfYl&brA)L=lzyd{KQ0Be9=m1!)hBa&;41#Mv?4F*g@HE>1rC;O?2|% z0Xan}9W_?u9P?^F{--W5HbFO!ZK7{pGRKedoRbbth!u;c6>iji#adQf8`LKtd89)L%eBV(x!ejeroIA%^0ih`f%#7u{4Qn z??@H&090%A#yMm2IEM{6P#((gqMA8gDaks#objc!|H;5|>{L+I3?1QRZ@`gW*y=6Nc?)K{BtabAgq` zo5`mq7!#&-nJ+I)<6>4U3`iN@&9Ks}0AhB!- z(WT-LV*^;hP*yYzB2HZLpgz@DQ{2_JZ>~s+n?7T2N-Qsz^C-Xpm5uSY9b&& zOgxnbDvM!vy|;(v=l36`8u=*rw|b2}6-E}9yR$L(lj);d11 z+x2Bd(7d|de+oI541PjQf2>eS8204ujQBZ9cDB-=5$Q+&uC1B2v)S`wSD9ZoBtYUP zNdRk$`|fxBY_msu%ML9sf6FtqHN@k_&J!fai>zvHYvBqIaQj*9chac255Pbkky_Xu zN*>LzJvYCtR)U1}sPcZqIAfsGnTk3)(9P!nNZPxS1f#mB$0GQvbDmnjlUB7?C)=EDa+l!1d8mayPe$A z=egRMt%rndGz98AAQ1%)j%Fb5P~e524Ej+`+V_LLLfh_7sOc5?TmG6?8&;!MA7p7k zAUpc@?X}o=T%0z!KPErpyZIk_|yuQ z{Sm?!i|BVAp)!%jv4`yHp-Dyz<5DB

    ;@wuYwZQZgq~@1-b)a+Kbg@dAi-%%Ari2 zb6E>!HALqh9FBoqtbS*I2D6=Q>fw(DJd)cOWl(0$y3?>ndaV=X^P zZVo#1h_858J0f1a1S*#!vbHux@2#0G7S^SuqzNrQI+}VF-dFYx5h#(vyR5k9u>ogD zYpWde`BrM(P~^q;?#wkk$9b=dvz#cgha9oqn|y(DU06t@qFa?fz5GwaO8G$eo_n9Y z@R+H}Woi@94A4o@hi>$pSOXTxOS~!IB==7alPwO92wmFeE&d#1csFeK&@HhSk{n7& zZd=25Unvlgl z8&rN--D5`Ln>LepxQnc>o5nJge;&QTcDuVghSu=iJhM)9{ml-ozm7#Plo~0Xym0~Z zmW3tId+5Z}`|&1(Rd=cM^uC5!PuCmn;^L-6=c131^~(w>Bs~7d0f%^~_1SRhj9-1# z7RVA&39-psAwW*`i}W_$q>ShSRJnC{mv zh|8wG-zvdRI%W!bY&n-6KZdFa_ zTlZK_g7)qd;WYE2ni!Y?wtjaXS|C|$oW+VH3GAr~B(9V+f0#J_4-@o6#!erCnqkSO z!m@EDMmibFa^B%9FApQWzv#{o3Zo@B28H^96~3R>8~O@tou7WuPm`&Am>AUEVB3q{ zY{5|`J(R>iXSU{7gZI^~i+|Gb3lgA;8kIxaUy5m#?E^|e;5euM1CE=e zz)jjNk!xnb zt$MHmY<~q$O-I)C&d1b3u%F=<>sVel&B+b^&5i6uP))PF|FXAyLw8N9stYpAb$#h) zN=b2NR`E;RqH_(T3d3|znPGmXxQv^!0c&>^#;aB}g`Q5{>rm&z?lEtX62aXsghzeY z*7{B9?7sj8k_4Os+5je~DQLMrhJ6_sGH!C51n5}l{+~Tk2&?>WMEB@o8-_=?G-pe8 zN{(4#Z`dPKh%7zsIn3vETC6fYnKB>~adT6EIlZ=ZJdhe0w&CFp${eB&IK)lxpQkZ;2frq;t`Tw&i|lJg z3rijy6CN6gI)!%`G;_y`_A|B4DNE#2vOmgj+^-q~DbL}d3vyy~-!uFT3KWINmL`6f zi415I2)@hOU$NTb-U~3HIg?G(>Z3$~9I0X;q=rO&Faf0pGmkNRvxAf}OCQa^zssG?6 zAODS;6u%Uo@hEa^Ozx+;X%6J(LQPpR=0Vb$h{KTho}EiK*?VL?1+GSw6bSMylAejb zEH@iY$wn?8H~dze&vp9<)<4hf>|V&^baQixN_UFbxNynTwAVI7&CEBC zj^D4@_*G$=$4_wXGRxRCdW25%kvCqfU}=5mD%arGu8m|YE`xHzN!wu$%UO6;Y1*91 zn<*YH^deMwKar>-SS>9rFZF?+CL>ai#VCmWf2_zqQlI{h)Pqr7PW(8{=KvLH?g7PZ zisbGATlhyd9ey%7U`Sv|4_T7MRPZak+&)ePgYxW;iYJZs5C*D+#) zL*r8!O|?q=*#d!p!H*iQcPQX)a*ZVcEQop=vUX#_CT-m8!_vfv=mknPAi}HIf<@o) ztQtx9YzvTV9CN{IzpCOYK3P0ZZ$|n525HAKi1JZ3K{Cn2W?04H8p&wKav&*Nf<-TL zg!m#yoO$<5E;7TR2N(hv;=ECTfh2R{^FeOW8^me!8++yu8;Y zFte2}0}qUj-82wKC|Gf#aJnBQvedrvjcm8KzvU3ll?j|`v$?5;WIC7WKh3nY=VIG> z`Xw0yHWm6i+(v$ENx7tRL~2*$_glhlBzS6^zzEmn&27Mb6!SpVdws>on??xi6WRe1 zwg@$$@40Qv{57;iY+tRapa%D}o8vKWY#th^ZQ4SG+Yfz8je6?)m@R}Ezba1y&Sz>8 z%^~ou!c&y5N!X@j>NV0}wS2uV&yr9e+$WfSbWnFF&(r^ z0Ia~JBLc&e>L=kJEg%GE@L0kTxaNsNRvn>*onuZa%8w4ANH$Zxv5dF5BEXrgxxX_5 zEXnp&@(#i4U6aWEJ3V-nmF%22AS2cxjU(58!ygot zl=R`BF{$$1dCk?-G7eeNd5RnzLEu!`2rO=^e{4$A_l`X#^*EMVcz(VG@wSgv`=lf) zg?2Tr!1L+i+}jrKB0b@^Tj!;aaCEv%WWb=9kMrp-)}*YHL;k_pdxO0fVZj#A+Z5)C z6>{6rcJ@xvNp2c&GIxk)M^Y-Z#_eSTXGkE?syEXN0Vu^)Gi7?RT~5uKO+T1<=uL>L zAT(#FVB1E)R-;jasi|2DAjvd;U^Pe@jgQM<zz1=RZMu$E*^9K)vxya2^qCSogXs04uH z``OPaB#!Zu5neL~dOytLwM5b0Cs_Yc2LHQ+nr#r0XoOiUn*IAY(WCn3-p^P1g0xIX zAb(1P$u`p0Cj%dbA*jbuM4moqeGv%c6A=dfe^ZB@5B6R2V!eX?5DcGOXeo_8tVKoY z&6}P9J8o^;o?=Sdht8JRZjKx!Pbf=yp_bq*kM-v`?`D}Ex@N>}9vzSs>ylS7G05ya ze)C)ItR7RJ;e><9a2rnvd(tzA$p|CEn}s^h9_2RnRPGcGLxG)?B1926VGTyQ6l{;` zl?upXfOWAu+MnY!xZaKju*QbS+f##Cp$}#1A)X$^>Uj%;qPdU6?j1ZWY(Ld?Y4-!H zXC6o?TN+dwih600y)oX0zQ+G4qftT^)HhV-{@PJ>ddN0oY;1)RMZ|*u!I1}R#OgYv zZVh-C>0k&~(jz4^J_74F-8Vh5GC1fPj?sGUJ6iw}L`S@;7?@<`=?oAL3HuQvB+t$} zq8Eq)NhYW&FCHa9HSx2}R`wj`1h@5N&Ww2mm}eW9*%PsmIpKv68{=Qh+xhA5 zwinYS5?ke(ISEBQdB6^H*LU>7*ba(12?Ueltf@Q|k3CqX%(2BYSP=P0ircVqOm&_X z+o_)t>eh#A`x24kdfj*a@mR7}+z7rboGT~#Pj2tb@op?bLiyh|RWkt<@|_PV<&dM3 zjO!EY2b}cvI6fcf!A^Tw7L9=wLH2-*3SRIjm-OkLIxXYJRQf9Ph|;qR>TjEgKN=W*rH442oGNf3_~zYyl`Mcd%nEPbK{ zlWmGY3+d%XbE1GBfu}QY$#9cW3;%O^*a=NXt&4HQYV3oahf8BPGXUe!2OQ$p%pF3k zZP{(#YXW3J#iso~jJ;(;RZ-WlOLupJbZ#1?yIZ=uTe=%b5owTCRJz%8qcli&H=FJ} zOYi4?KfTX6=O3&&=9nX|G1ipsC$ulJ)BZuq5kp8WvSS((mzkDc_%W-vU%1rF63aZp5iA|6* ziOxs<_Qwz!a^*g?ax@~m=fObhCnR-->-FCS|#i0j-^7?8VUGgQmoIR~6CWB04f*BX|H zkM|1cV-r2>GH`fyOOX4SzqsnZ;oqaC8C@@9xj8x(fIRe~H@Jt!Tz>urWA2GCL+A#?UB-PUocq78@2EB%aZmSv= zX#8SiHmZ5D_86;m#xLnCTe(+d00e?OAw#6*w!)`omRqsfh)iuD++l<1yomIz?qvX| zatm;k4c4?}wC0BDJutwi%B5kBKB z9RS6*=@SFsYMn-MFpW(4mVJYM3_?A!BK-Zd`2X)uD_dzsH%K)e;h^efOZ*s7-XTEJ 
zoo1fbC1}{@NJz`$+t}xvs1(YEt7f}!cwWMZ*;To1;_&4*WEiDcgfMm+nYWQ9{|@>l zW3Cny3bs=iuEs+C=H8XlRBd>~b7u#kE8YtuAEK@k(kJjF_MyMZ7g1Ac#l|+3l%@oC z7)6@rODZ7P5VeS4R2x%+ZT5wH^N=bd%n^EEaJSezVt%-=1$duB;{8a3zH%-5a(K8wmP-Q=93>t zdX@RUHQk$La~QD_tPnG~OIAn=%Z-+WHi5BNAr3dkX4UK;yZ+wHQ(bGo?EU4eoumh@ zv7=WCTKUGd6_dNUuD%Kzo?QPSQl(wq`Yf|=y{m{D80SG}@5rUbdrl{p0IQ+tM?Y_E zXFuvnUi?HS^Mk%Mv=7gtS??S0YjMRf9#NL6U6v?yArFQ$q50&{W(_zmh=;hZlLNO@ zhkfoO^PX!~6^wO0hgg~1D3WSrhETY)1+_#M?ik$>u@}28h-4G0JxHVs?LGR(nx{}d zd~imR7FKtIx7^4G+7ufbUd9WZq#J(}*P#RR75gm;M{fzcf{Zld4@_ssfMS3R4 zzS^Zn0RYQFNQ7`d3b}B!6~5gki~s~FU*P6}NZSsO-1Wo80Zq1?U*_dp57bEzWkEd_ zf4F(WNiX}_LznJq*Jp>Z%P5nM1uXW?;AvUtKGI2eVyE*zdx{fZyh*j{y3I*y1+Jcr z8Z5jC>IZ@#xN^#wzd?}auhSyHW}|0q5q?4=`!H=AB`-Sh@pIHrpA{o3%iLe70%Ip; zJGsKOYOlmz--gDlDHxvib-l*LUMjgYec%6{;cyws-x#O~TJ-9MG|NB1<^5~u)9F*I zLIfS%s8a{R?!xir+8LyMRA-P}wzk)fPSk;Cdc_$fawE3R-Gb{`j3O|wZG8wbgsYu#xdB1ucyL-FUBtCqYzaaW0$yIqLtoLGCV$M ze437cL73?*s#Vd5U!2Ks<_(r_^EnoPL%%gGzTR=O5d8AE!pt@*2c$9cy2sq8@HEF8G#J)@U zm!hyP{;pMa{wAe>usDi@O}qt=)le;@3h^%}=>OdX0MMd;*>NCV3lX`f;!&}C+1Qb_ z{~Sj8BYTd?VeRbly}NU}tQ$?oz20|Tuc>}bdli~>3{BQ;ozuDd-k@R*4B6Fdq4Ww0 z_a&3%4qY@LTR-RaO&U8t>u>B8yx`$uKu=dDrt==R71SCC%Zle{lSq}g+=QQ~wJP-P zbaBEc(jJpZs6S#|ii_u9C1vI^yS4UA<-`E+`ZuR%0>J11EQz=m7lJ0#N^Kyrid}l) z`wFVTE?&DOZ6|3}91lcEbA_L>Ze=!N1$6`L{{yyunF57w=>a2JkkycU96as879KY|f!hWS$n1hYa_B9Vaiq+^ zFDXgYkZW)4@Yb`niHjK=$c;4^X{!^?tXCr$TacT81=MraqMi(WGsF6u2vOQ?bi(Hh z;0_SG(W;q=Bbd4>^;;6?d6YPs4FJEy+}1L55Aoxq`5&w|c>XqDOx9-{8OOQ|i@ycvg*v?gCVy3F4nu-~%YSXJ4Te{-o2_|YMmyNWX0}AbH#{d$ZIV`b za|2D79Sb*PhYMZPY4KM9?$8RGP(87uCFsCIFx&uY5T3&>UyeRG4d1X2`a;Bj5I`;4T23)nhX}pB7@mbMU zblsY3$I<$xkUtN;5t{?YHRloa^=U{c(3e)`JJ}x@BUX8{hlcA*WWtxQK|Rt_*sF+@ z+CYFQ1$-xZU_{JcV=xEh`&|vY+lxfw^(Y0XIGmHf| z0V5HR59Bth@QJNKORbGn3$NTZ22G2(hK1j1uuY6>Oz(xD5CyG!q#X0F?LV+Mf^bHe z)iq;WepChqB_c>tp^gH#T_2$Eu5n+68y?MZHCSp-q8q{vh301TBW4^I6?}&YDdvm; z^+Yp)b~`#{I>V4!L0VQ_503x89+ekiV8c1};PD3O0?9nR1e~4*K z%+u7^llMf;$jBU>mv!)Is(3Vg9R163IX9lw#5)rdm41q)-@jAGuvXX%qu_r*xs2zW zEq3x`THtMdz8)b3`su$b{|NqYbeJiHEYV>I_RGQ??P;Gv!b$oungr>w-7>gWF$f=X zpD#ih9670#5O#(vYM&I78mm)ki}ly0-@dhcAz=J*fz|nHG><8JtNiDB(vVtfw+KO| zckF|nbuu+->&rb+U-11KCCMS{If*@mJQd1%v=1~wiidcweR;M&O%2HUUCQ(%MOEHCWb$!Y?>a@Hx1CRgt^R7$m3)UGsp45-Q z0+ahUr7r3(B!*C1Y4~aC8v4k&n(qo69c=frv5=WiCXv5kp>WxW1R@pe#vnLKJLra{ zf*eSGiT53iOZ^#0GYH;3*pr(6HfL}Don~oRm=^nJ41xn2=fe=%V!*gsM|tvEXyjZA z00|i1Zz6BSCUL**hm1tTRIW^O-Q}|quXDFH{n_GtBYWVfx@YIR{_DM7x@pz*-mYcJ zMh|pel&V3r0xj)sQ3<`>=&h3jbaFD*B#T^YaczibQ9yJNy$NcvEx4);JO3N>PfmmU zJVQl3H4gc!fme4~Bei$#hLZM<1b#l}feME&U+&Ic*V zeIm56fi2G%Y~g<$6$5Tm8-9!7P-23fY)wIEyh0HGz4?*i0Od@M#K26g`2!lY0@

    8eA(ZSC_!t)*FMU#uD_Sgu*Kc=<5`zuiO=fb)*t!00!N4()%>HT zo`?hm)1iI9Iq%+hN9Uua@(j2NPIeVkA5R|lH}%hPE7(INAOM=d43ySS4WPeg>^)|* zB>NN_PW*t82h=F2GNE|flzq-J#t*) zR+q~_J|3qSkz{pJ#J@h%+X9Jxp}k)zLMHwsL$A-N>8CfFRCVw+<**rI*FP=ZuTSEM z`rZ$^y*HwBYB#55`7=gYsZ1R6Y+H6Sb@HryL@BFioA*sAKq<1vXnPb9gm*jdI+J5L0IBkEhym9{;FAXkr z$~$czF<){@X>`hbLbP);N<^Ud-y3&p9Jj~!vZMIp`92^W-Nj`*KGE0k`s01jX&@$- zqY_7Hbdrk@KG(zThqU5{YR$vsIjo`7uq=cJJQ+jqWKpBK4~-%<-fq#~;+?{qTGl?V zQ{>6qrIEN&9k#jlQ1J8TzK3<-Kpim&zRQFKMnE&Q+|jCH_pa# zcUHFRaH!z+rVOBI4wv5IO1Zyisq!w3;fiu@k?znKKmaAHJjF>WIX!noUqJ3iwjFEzOD*Qm$AbwgW2V$-kGf& zw>w$2VEDoQD5A@`+^OVhf>6VW_y5VF@j0hTECY&wr2i#WN&=z7?O&Ao@Hrgz9vE8r zJ8BHXQKKOLb|4L?I-4KDgW%P0(ZUEf_j@QKjKFbPIE$X?rKOoNsq< z9Dpe-bUu_s_>tA~@jJFgaDMtbWFPZ`kypPih})^OEHreHegaB zRCd-S)Z37H{0D8whu^aVL}l|zqJbyk;vr>-rUBj47`i2BXFG{aM|~Yz>FoeJPJH&! z#)%B>Qq=~+hq94Z{M4*=U^NwV;XGkE1W_)pBySHw?`qy=j~5=lXqIZ}Ce@{IT+C4a z6lT8{GWzLnxcI*v6YdLBFkfF>!f&=b(}o~Q;d=`XS08F53dn()b?S3eRcD61M2-4JJMCm3)T?U2cfvw?6{%#gZ;G5Vq; zIPDLxan<)zm_Tp9sRE(e{xHawp9C925aU`yC%Gr4Ni!i#Oh=v3klF zOn|<{f!r_5aME9g5U@T4MT9Xzpy}x5|Lfxl#>WY{8#M}g)0_(+u3C&jQn7`)8E`UF ze=u)Yr1uDASfWvVP|6PbElPh%a(r4gqd+WLq3z^Ag6>Z{1;gaQ2X7Dv?QLrWihd%Z z#_kIpmmEu|n)U@sv{%CKkW629qkk^gw{p@BvNOq_Z^?UXGqVJirm*R0nGTm5uvJ^3M1_k-Pj>C4^M=3r2bsH0+z0 z7$yl{33!_}sdME*=1^UPRowAhu`){#8bWMn2^+Y{@x#5y3ijZx3c4h%l+kFnF)S1q z|7V3mx+;+e3R=L87eFvZ4klM+%z4@sO@u`Z1~4iN2{dg7PVmtRSFt^9pp*`szKDu$(u;51se$4Aq6d zZ4=fE+oRTt$-08l8h4$&`O(Oipd2s`W14?gZA@84FoMty->*O>ULTf5q|F}Nh?%VL z<6Rm}NHS3NZqA)r;~IMt`(^yAFo#46$twYE^LuAzLNzaPZ_qqG1pjD6o859&x!Xrb z2c#GyBhpx!lO19*lhvo^=9uKiTxP%&%D>XhTn-%B5KZ_I)+fHwWX?<{&+)x)K=mu~ z_dDoh@Vd8E)}sLsz(rIV@ck870xMquXg&CMEzzEs4OhQC&1Tt?u_bmv=@n8#S8b;hB>3N zECv*1JV#to+zFFI6exF#9s@<>bOd)qw*H4eW^u)K)*g-4zGLAA?C(au=@$Vv0CBk z>Cho{gl>esuHA*$f?_LD%^%kIy6|ETnHfyW#ZZTPKPBYWCapL%FUvoyu8pntmee+Czt@q_U(70e3 zx8fL=*{qAqi7ddhAm}5+*j)(UY4yT0Ex+GWsNZX}KqVjHs*VEyF<{VG+kT!=4sH&? 
z7sHs@l0#rxyVv2RQ`rujqpcLQq*L(&-h(T~0PiX@G!OTV3e+YM06tB_AJrQ(B|bAS za+<7*3>2U!bUl2Gl{^yiNf#Y+u@kz>WFlG543FCJu5E_ z?fwCDzA3@klyCpVso9>f<~&nZl#LhvYFP#05fEyV@3znOWmyfWJT{>x?{I8rc zmQA1{o9SwiemXn3bcyHk|MvCa_j3K!y$B5LgHXDY{OR~*!K!|rLmNH*PVW*EVonZG zlkf5k75GZFe-%EH!|n@R+WC=0l5rOQR3{NDSdVd8qpZ)mm@Lcq@yznxvma_#an z)%NXs)tpHk+9Q6(98}>FC$4q2J0Xm=nH|r(rvA7)>Zr!$z)u^nb56;W zM{-mri{bh}5E@AS55Eag=ne}6iix3ibtpi!)`qa%grXAHKvRZz-SiTB9ERLgJZ9{* zJ#FVw8p8G%wl}Pt*@WZPjflE`%27o{BbTYP#`^kqi%&U4s&HnL>`EyyG;+o~e8 zH`$B~QMQjm@l7~Ug1o{=8)gvbstz{i2gEAaQa^VShPzQBoLIv&=#ay%veF_$>Pl=^ zLVC`HDrG&pS%jObNsp_)m9$lSAk6y`BsXFeIvNM+nV8}G{^ecrwN-yoM>XGtkG=ar6?&}_pHSO7*l^pX`jX+tNQwXCmJu9riTMkpd6 zY-culS#i;gJLcYg_7${pnd#=9+E+ot)TL*hYjJYJ2 zfGZO%nlh+f_kFzT3uiqK9}2JUXS+(QoE2O*?AWTVr$4ptixB$l%(Ji$(6kV!%^!ko zk%jLLyl2U;f7ge$q=0BM(X1RW+gGB&-4S3$`DJugzaHryLLr~a;B=>NF4?74dGa>f zrI}l+=@?sUVk>`AtBFw-f!b9CyLnsLlg-|A%M>{hz>p)_TnqShtjs}K_oCYjuc>0h zvE8~)*$6DOnHz8FlCAE10h+u|4pw@F@3}Iq&1P-S+y2>tm=;u^AOi|Lv`qstf_&qP zGT(V+J2o~B4&DWIk9aZZeKDREkJ31FLpPmPN7B^5CyLNoI%Z1hA`7am&t+7Fu3|6s zAeXQ0{mzQ_ly0u5!*@e4X%hd~=RY}SPtRMv8PqCo!o{FBb9ZPWZ>}P7CiKeO(3imhqZM9T#M&i+^@hH2Z73BpXy1FLH zo}DKIzq5IWGgs*I*5Rwq;qe?*h!mk>K;iwMYYnkkj*VY9saU`C3# z+Ndq#)~9_cLJ*8Bqb+Ad1Ku1)!ZCbG9S@gBkg0+NuW-0}N3`0#iTxGzK2Ti9V!^;O z*^X1C=q}v$=Be&wi5RgQqv_)&D#EFYDi3~lf7~zCww{VgyX&}YkNPG#b^@Znb`JYA zq-3-7unFy{um3KP>1oUwSoDW6Eba)vzA%uSv?IwN$zIB5FVn!HdGh)$VEVh>kdKSE zD@rB`b+0VnyCGbZ+C8pW@|LOTFE?HPx#*e2=1dHf+3`QwjqJ=i1hRN_Xj}k%_;Y{ZH_>~oeqIfJu)G~ITWNq3PwjR053?76g$sgRi^viX z@!|J;aTRVgeRD5sXv8m7od~P36|H)oW!zxL9m~#+Jh!&P+>M`4{NhoY+}P>4t1svt zTXwg=y?Sy9uAOk&R?+mDYsux9QjPa6=y>7=?#^YkSl(@cT5PuDj#oULV8t%S>PtLF zj452^u)wTson`eVtk-XR_7$oxru(lf^{B5fajG`v+FX<){9wER<}8Yyn4X zaBdqgr40v1WbMCGYGaVCm9G7WI{NYPSDm*s@kE2{P5=G^EbmkxR;*CItq<9mmjE%L zDahBAWpgex#zX+C!fw(!Gd3nOX@7DT2J_UI7F@JPz#SN_jbQW4E6DyA@OF%B>)ita za5OupnZIpk&*2NYLczlGRxQqOA*(AQ@e~-*%w$8_MiH$kSj^`BBXyQQt7m4$mu=X$ zPu+%r+74}%wjujhcSl(FM=o}DN&>Xp$nzF&XvUE{et$D2j>+$=VP}5<%h^)9UuX<9 zR;9=_v{HUog`0YNwwp?MsyhuPuk$5Nm62{$bdzx=?Ie!56Iig*gbM_@7vfS*U`)rD zk-xq)1#&XEPZE3y0ueSHxukIlZm*i!>7!gj`LLJ$~i8bNvr4fXiCy$@H38;{K@>^(NZtmxj4NJD2-lXui z1Zh>6RN@N(OM5Eh(4#wUpQY%Uog~m%s6OMHE&^x~GI0zXGd(j*@a1^EUek23HrvM& z>iAeQ5=7;?@fHdI`3a^{0BC7d|m(V7P}~bWvPodVaM!8M(2OO?!zb*PFiT^BF9K#L@@FjQjoE#0kP8; zsG~WZZ#P@jj8iJ9Dqx~Ux%j-q(`T$IDy1Ljol`sGz#t!Ef0^a@wY{Mh+zXsi)O|9b z2Ue-6`_CwaCMv&iH2l`+YgGm}M@rlsydI4N)GNZX((A}4 z-w8(+ER3`uGf4Dy2YNR<$VTvOaYbY$*4*{%ISaq##8?K~x{hnECj1x)D`PVBN^%|QfhmiPtqLzGK zir|3T6=&p~5ciq4C$bGEdG3O1U;}f|W+Yl)l&D^iwK2`p##*Nv8&j()Q~aoU>#iqK z2@vaWY#9s2#@G6xK zen4Rm=8r5daJs~k`!=3bdLB}G_We%Qt>!SL zrCA-3?_&#Cxalo2=4_GfwSDK!VK~DM^5wexx?X|?(=QolZo!UUF6!P*-GbD+&ewf? 
zqa15uRNi4{*%_gMFwY(mp71xJ6T?BSX2o8H3iCMQ0^-x|lg2V;EqWe}fGB>Px%TfZ z&!GPUFtr5KOS#lgYeYoQKJE$BK@Ymv7{@N^9J-E&$8%LqEcE%&Ft=bZBY(FttIxET z*TF&a-6qW)=HMsD;Uoq>_kt`KO8l1WM=VLH&5VrB^MnCsfAwc6b0-sRdMNT$dh^4J zS=Jmnu%M-Q3$Z)V@(~G$h&T7%)7IUwW@Gz|r=9%bqbx-*S{a9-x%kdVvYZ%4p8;RD zH~teK`P?@d$ApBt@jD4N!uw}GWOP86USG&x6>EZjQY6PcsNcrw7sPw$?7NFRC+8QQd7>v;(g zgLNOxds+5c*3!!tRA<>rswhk0!r~RW6L{)YYdkQDzL)A~AicmwPIspgWdMI;6J;Ul z!#~Z+cp>&z%dOlMV1jMvF@41qTZupUB3(ydYV~zRvd;9BoroYicANs_-_x%~9m@~g zXgZp&-yJDoClW`iZMod^mn~GymBiap6|jn1e+K<33u1O;fjtsxM;?<7MiQX69U(gZ zF}pgn=5=WQ*&5mmV721>DrHDf$fI`V`W7eRF)^{)r;ZS{;evz0rGkdOEl#Kgrry;j zEn{mJ|4RbyxS`U&OE`=Vg6b$E53lp!jllQ4DwY)L!*@zlxv62mx)L}`4|86l`v;PW zS6`6p!-2tKtUN~otj5yE+kSQqTtc|>#UKXjt}Xkl&%i^=);4a~CbU|KGgbC-LuhyJfGpe4 zMSy9~;3R!EF{%<2L6JgLrKYUd+?2J@7{{qJASRMMEr6D25Tz8&|jkK z4Cld^Ar;)3)h%A%0U#&ZeZ8uk*5#~=55fH%g0Kzs!PYbVV_jFm3{%H(O^um!W2SfZ zUpb6;rOjwRy9{=X1?AX})QknGjfMVQaC+FP)`5PyY(~>O6=)O%=i4; z_cV_K!Ghb<@eej%<@rVJ+60*Z1Ic_&_If}(egLy?G}Ba1^4&Uxy|Ie zM22)evHu=(S-Ou#JnwvcCL&;g^L+^%H(8^j`^I5?QNiK-Ai@!N$I$^MG5u;6-ubj? zcsEyV5BbE@h1J@>zyLPUOvwK^f~n2!1__Fj!RVJD|j$jKX>_?tjeW zaQ!w*{(TfL+?*_Ck*^U6+jY??0udo*3UG4%hG?Fbt)!mpQ`fGi0mh+Fa@!h|QG#VfO;G04?xkKh(Id3v$lW|H9_yFy$(#B=96tmPYebdn~ zz=2DCUVpjFXP3+X8`~_0e(P)-8Asx^w-!IH)GFiX@skqZ8I7JJwa!S$1G_6V2%2iUmTPP3_o>m0#|DWIsk^;~5J5nle=yV>GOiJg z2FPI5?HN)^Pk>ye?|arA(v@YKhrp1_Lt4YCO7`l6RUj}#{#pDFcsI$l_SNE|iyS(q^gEa!;4U7n+|MZL@`r|pHoPTIB zbrb+rhyT>x)Bz9s5cWPF?MXJ78b_N)cYeHC`4lG4Gm$u9X?h$yBsJPnMdeVs6|z!$n=w2d zF$9&?MNgOG)1n-!{OEd2SU7(J8{?tkwlx8XBW!;$je72Q@$Q& z+5_dRKe z4ZpIlD8v4!>Ni%y!-mo zRXfk@$c=?2;GYNtvBh1t%v+VvmPsQP7fnQc>}1u+j3A=P|O<40Y+JMX2TK}<0t6!?3@ zje(rXYo-W8mu5s49fC@9vdp8p@)_W72^G{kCIWz?XstD54dj>Np9GB=rye0l5*gGO# zZ5o!{zTQ9-u7cp)esLN?%!Q;7euO&kcnUZ*G;+8M)8JTxEByXrsyA<-LG8zqrWrj~ z2izOSb!s1Q)yN?Km($ZPu3qFLV8;W41zNm}XoSm;A_fbvtAkPGMPmAm)MQK2iR$o4!!oh*XaXYx z6VR`!wKect%=6aG*m1n6bm0Cs!Mpr^SfeH*jWT?_)bmzmvudt;*1xIBkwDo*!q3yF zaR^NY4U^XEqP%+oM8dvQUxLPnk5doMf!`y2kbr&c71(aqs?C`v9ycjY(13mW`;>m)pny2bA+kMl5ad>$Bft)-oOZwiv zV!PcIulM@=b$#&hV z{lQ@|@ofn8nksV=EQo|Gpc)r;QIhgA((%GBPMCGB>lZ!cJfSCX z_&>q&&(`l-F+v@Ec&>-^CsN7Q;E);A{AvW(7d>ouHQlqvn>@PUQ(z%M{Q=zlNcNDA z7ePFS2ailO17d@5og1ogS^m|Jios*?kVe_9ThXI55{J|4B>T?%d^zidm-A_eAkyth zg0FI{fZfVJ3W$=jYpT+>8lB^(|a^CjcR@P*sFCW z0e@o1F>sI7nl{%I)6fIqj9uCS#boS;H`pry<5Hf$2Gfb=yn1R zenewUHP#k)PUEXl_~?%JGXd^x&V~he!~1zNWxATi#^j^4v&>Ebe|@-({QDX4v#9i4jKxe#SJNQFE>q8{D;FRPu@1i z3|KR$mqXQ$&IRWn&~c=B?a3{{z6OcesvN}D)L$8fg&KC+)lRXu#i(b{4ziG`EZyj;&!Q%2{z>WP0fov39Etmq|c0y7EbmuA=mCIzH4DV~rwZ$*b!ix&@-E z^LdkAc{0VJ?My3(Qb=IDvy41o1tYul@gNk=?HrP7^YBn|d)qjrw)*qWFz@%bQ53I3 zcF*%;WAbINLe@H`66C3=snXDuLqwt{hlfRvCjpU-POGX-YvPn5v~&?(4;pTBg!UsVX;Efk|L&gYfU8_zxCe?r>S@3a4PkE5-kP(<_aNt zT~8tG@T>SeET|5PP8El@tUKD_qc5J^b6v9TO%8_#`W@zgMoXGG(|(Qv>3|@Jz}F#} zy3B=@_9mVpwjo9xB~aS`I7Nf~k@$$I{`z^(e*K!zEnaq6_U7<`H*V*;j#roy;4d?2AUaJ?B!gA=XRfl z_Y|pCF?{GOeV3qRzi2qhv4)U|exNDv=W4{(;-Vni;zC)-Yy9NSr*Ro*m*3?ckZyS% z`Z*NGPZRP76A9Ej`ot)2rkS5qdoqVd1=S;HL#;VtVoBerL2*Q~(5l|W1h(gPd0rA`kq^n<7LJmc2B_q zTX*s3TI{g#{Yz|O*j;85Ady4Rurrm#wt~5bG4N-w6)ZD58-)G)_V)SGw_-FU+G2RZ&fJ*Pn*Y(_Ye@~$qFs*t7f3|bBmGq$kj7*X7eEv0Pu!or3l`ijC#NkCe+U0pXHl8s z%AczHeY3ZB=yUI)RF5;K->N}ZqRc~2L(k82tHYD}Z#bWT^%42%rd{je%t{b^IZ%Vw zy@fbZEQ%&dtNy4PHT;A^%foBrYjS)zn9$5X#%`omC{0Z^j{2F0x2y8|+Z^H8O#H4l z`9AW8YD=ClGfMDbDO8iI)s~l#+l8^fby)mG(pN!oKgl;$M^M&+w9qFaK4M>^*4wXY z*999!?skU7ihhOM@`&6|)1$qUd@q>oOab<{yrNvN3!)IE8Zc+lCSKOZTl7<%Chz zMkew}cD9GCX7P5`8yqOY`?6r}4Up5pjsL?ecUiOV;>CS7soHc>?q5NuUg%_eJ})Qh zR!09WXnWuqPjFRS@RLUJ^X0KBX{$T_y}M z(WC*_lyQKua@=y%bkn9jujm19^@|*%p;wRd4(wdnV53HJ1(XX|)Gc{Uz7)3jwh0)0 
z4w`&}==P>vWD9_sr1v?jt-da86bFxUhXjZ5IV~%TJuY0QrtNUYbjyS;k%qi4qi>e6 zGi_o*aa-B_ad|~OnfWDS=ml71E2W8utG_lXf2%{Gepcc#6DG(}SD(@x+f5w7gPVRm zo|&nT+uC%=+2xQ zdnIjuzIhji>+frAjcZ>F)pl@Tf0cOIDa()_DB0ESLJnAC!!Kwex$-NDKi)}H)FR0&0nNu_ORG)yk{c}P4oDpY#EC@}p2qjGmR+hDS8@kYQ23J`@-)CzhRsJxZ#U`|g^lmaL z40 zd+$dEc;NKQx0>I!^wMInAtd#OsaepgMppP(?Eh4xn5Bi+o#a0XqK{})^4bKIL5vma zjJGZuK0NCuUBs!MLDg2)yHfb`kD5Nr_vl`p&%nZ_l^?M3;@)$Q&v6KbI_VjgiC1ql z>6J^KlU9nA{E$OCi1fTwt|j0ppB(u#}Zfe-KWJWdhU0KyAFLbld4bl~duN zBa<~s@~{sj*v&F>ZC~Np7+P?>7rRz>27hO;2*1mx4nuO|`;+pcjcKH!sO`5oHqvvy z1q`iFmMS#xhCjxs;OE0ozf;d6YjQXJBB%_3jSW5&HxaAK(XTX|XJ7sT_3S&IGPu~=Apr_yDM&pb9;HHZ>^ibl>Ft4!>-n%&~ zns{dj5$MuhOJZA63%sLQq-vAmh_aYWvIPVNiddY$WgwE7$t6G-vs(g@3a zKzHHZi-mL$I(<{;JWj!ONJ;(n=YW^a>50QWu_ghvK$&NJ1vF(1Z*qZdte#F2zuDQoD=F zf;fqTHX4#G&ToZy@bLz0_prXIP__OsTt1Q|*Zaxvqo7azu%y(WI=g4FF6nz7EdKOo zeQ`EoYO*8#s)FUIafuqeXy@e|c^-;@bf$#0Lv5|iN?@>g+5c*2 z9_@t|dRPx~bsb*FSE|xeLqpG(o-$>!XINZ_pbHf;H zI8{7G5pl@@3=5y6W;`@S7=E`QH(_Xt0fGg0CqS^lUEkUJzV|%m%l!%6Rb9QRYOSgnKZD$44;h|dX1b`Rc!9K> zLkh!EiesLteRr;ea~-m91Dik%B&B431QzPm^SHA@INFXWb6o%Gd+gHqc_8BxO^tgN9P>2okdIZ@a*l2? zf;L!VQv~Y`O^Tnpt6~4J{XcwAkXufu21cl2SOT0P9WVnr=~obu@ECJ0E%r^w8pN`V z@|X$jB4nlQ>y5|dU=c1>LX#`}05 zgU5HDX8E>N37WJ-Yn$JBxBw6%b7Y+GBd&&r%tEQV0D9e>wl@0R3OLZY`sy##K+mEo zbSmKENT_jmaAo?_Qo{Km9@>@dq~br3#bg!8_!*mg>(V{?ePn>FZi7FeVbS-uQMUZ& zB94_$3UY1epCii@STM*-LHKpb}6N^z zRwKyH6qD@Ms6{$83f4T{GtA|OM!}*x9A}kri$cpLpaxzQRDEvg(HX5y8=HQqT`E^i}BFj}Kl|WP0)|^K#xQK3}z*GBJt!@p_lyh5vqn=qzBm)khbNU7n)+-&p|a zPdhnWb41!6iU0l&NRDk!Bq~DjyhGRUueJ?qkdFEkRa3q6oVp+?qHRtS?QJe zuxiQ@W5)tU&cVsStI0x$(J(`QYRAq@Ak>h*^bkj7i#P-r;{F;^L;tt&2Ujyh4}Cl~ z%P(pe{dsOijAk920|g=;{dDwGlnH3dM@XtB7w+W6&#bWPjw#5C;pb|H#hrwLecTHTjRw9T8bex6w0n=gd5*%T6 zWp7|Ev3P_5qM#J%8KL~xjvrwOOjo1nd!DOGo^xgx2}B(J z+HYAAs_UN$)7X7?cv4;;=NOnn0lFTRLyHiw)?{T@z1oTRL3VP0_N2^WvQ~N`q#CAu z2BR8~iz1ohuR8EBC|HaJipLUczd$y6eM!!W5e(cs3r{@9^s0ghBuc5w6EeFsO$eC4&Bai$Xo0f>4RXv05hZBecr|RUC0gZnDjxzo`{6^6g+{a>U!2Tq zotnSlAlsX+Z!P74hj5!;?21T*09p$_bCmh=E>XwX&=%dVO6LBx5w5mY{j4;%%VEX3JStVaYrs|5|5{Uh|9Yk@qZux z&E5RqS;R)DKvtm`c&ZLS(@H;qfa%$<7bx+c6*1>nZ;_55V?!~Aql)B3kLNFW7+WvB z_?>({1<6?s;EMKky=NCe6G!d8`AOAQp*IFRU^gmclu``I0I`*N6uaLDHeiCd)8t<1 z2| zp-dCe;(Ypxb13eT`Q`*2aP!&M>yNJ8Y;CIhA2IUjd0Z43xluV7Bc2~jC$eDQ5tSQh zsY=*fMhvfcXZf_o%B`ftlt~65zSWRSt94r$N-eQcS?e$Tn889=B#M6z$W z&!CTj)aK|3@}Fol{lBA8vK5D#L}Wv`bMSU6{8=im^GeZ%i1nCOs3qBy7@{qxgl zl%bFS%)1O&%2JBZ28c-tw&tu|<3QPUuSYMH;)10Hw{)ew#nONH)RT*)xByKskkuzL zoQzu|nUdX_e^v$dscC95be)bx!5DFg7!`YM^*m_705D_==Oya7Cu^$&|A2;u5p2{{ z``d=C;3OZiy3yOn^jJo!oN{cUutvvgMNR&QRtK1n0s9?CC+KfviWR=MaBEi<8wZAK zQy!Unv;@DxR0$7Q9Y;5fNTY2#Iik<^?Nbr7EUHPmiBKRs^lN)`{rhBGS0jGJ7efZU znM|5>$f#yJMEqa~1o4Af{TT{m|HtYIqcCXVyAg44VHpZ*t&R&S=4u0A$>ToT+~<=DGn8o;JHT53XRfK&lg6w zPR};LZFZYeDXfgOyOul}@31)@OyThzIcFNQQx!dE` z7tYdy`=7imI@zpBW}YuxMNGq*(KYzch@l1Fa>vKj@S%J4w@lQB>BU96NE;|tJ68B+ zi57nEB=@JN)jhv(@m=S3I%YrjNhr2*SsK-(BY8M4h+~M1m%0d`n_AsKUTdG|xU{-u zM|P+LYX3Q_@W{c-t{Dr?QA(4vLv$t5YQe+^`{6e@t|$wfBsEpA z)cA`g!nnL3-#Ruv%CpcEekw0vZ1BD}Ml{bl58J&cQ+GmM@I>DCZul6$X?V<_>CJD> z`P@1&U(s!gc>Fy^_@md_t5G{7_>niqknLDJApbu-kKF+m-0jZsc~*|5y7R-*Z>>&J zb}V5A0Ki-3eyaY0@A;RJE)-wIELed;M(!tWyi$n-=> zIzQ&CVdg38%=pxWsJjd^LqtItpff;K*BU72aD?1!_-!DIiQ{XrdgtLn%q zj0|>^P{e4ny=G{#^u^QoHO~r`OUNl_VivuW?6;l1j~Zxh%owTb@_O zk7dD|BMG3pUnnfErkj&P_>DLKcBd7#n43BPTJlqemMaQ$pz)N9uIX26rPw#$F5e2% z!=sGO#PSfC3U;C3*`pVS&wkFI1R7*Ocr#R2qCO=gMvenwwz+*BrttEEYcCbUe z_YTiTd2dxfji9wn@D1<$z9v0c7Bu~5>TxGN72g;5G2!Wjp^?wdt+{~nu$i1LCN)wQ z6eGKF#XN=R`3J|4t1@9`wt+0+yaZ#G*Q{ZkC#!#=kj0~9y6KEOx`0$Th*DWc{#h!f$QGZWi7xFLUoL-EFE@DW8k%Ko 
zu^P-Nl;hFQ5X9gxR(6G<*ay5MZV-Z#U7C8wVmu2}WcL?*8wk*w z`8H+cPOe?Eft!t`dMS@QKgarMD-v*)_1(tT!x4|Ajj<-;2}sjGuXIC%En!Un#yDXz z;EujnRp@&wRUZX9)d`ymo9pvyh%X)WI~|BMvNNW+eh5JDd~vFd92;3~lnT_Qf(a^} z?ul4qFW<9LYtT?dqUCn3Z@e-aJ4PlH*Ds3sf;wd8yOLj5xC>7(bIA1vy5!ExuG7O9 zo_sE54tYEuOkPXIgHG?UUGkz9Nz}MDPp(O?)EKhT3ElV3`j0<=9zk1W4ft=skjn&I z1>+QPxbNQ}Bi5t|$Fkh$k?rX_HqXdiTWI9qz+dRAQBNW+qc{-}WYeu)8ZXA*8r3VB zCk)>Xeq$+VQ-vHCG+4kN>+F;%A+%R3z~fhJ3QW+cnl!_5iP)qlriMnI1hTGs26LkG z`KTiv|L3b7!^Vs&2JpF;S*#$Af4>OFVn(G`pHMm376a^cesgsA1&3xnQP6_Ry7+8Kg#SZKBN=s^tON z9%1&zN)*j;1H(app^X_=2N^GbBX{i&|nhe|@mkOq+_p>6=Oo zdJt7+Rw_=)e=2GB-#n>d^7!QiUB`I&*$Emahio)kSR$CIR|+y+eD zmdQyL`EoxMxog~QUX+bB;=-_T_qkOan0N6u@;nvGx!nr)cN@s^7-H?p-spc^a7^En zJVMKuSQhA{6=9%@!wQ6T@ey2*g|^Dq1dG_0-P;8JzxT}<`9BT za$1~chw3)uElrGw)sbtNh;~a=7dT1!y<$PBHZqbO#IV@fx1|BszK^_|Trdi` z{)A+2>)F7212TR)M0>%+`9!U2n=J?Gks~Hc|%YKo!Ez}H1G>; zX7tSjkw6}dku;lB7hJ})v=b%Yw8*V;FGP41ybgjM_uZ3 z<%a%*0M3Q-0nP8;6gP>;L@u zT2q`hJ37yKc+V1Z-Pi6Qy1(Vmrp$lu-?V2WRtLkiB4|j)4+H^~CVo4SWr(ol|BD9x z83nWHAlNWPF?Vij5c|Da2g;vPwULN~EV#&au5 z4~LOg3KsroriV`zb49Ye6*BY%+FBSxQ zw>vE83)MYx7o&j8ZQ;!PLP00i(uJP3fR&6)DH#|zO!S~9D~eag8ZivPe~9vzNf9I7 zMd>G2W?OTcsvA5s#a?Mb4+?F^08~>pXGJ80M22deZB%(x!a`WR?*M0ALU10?`f(s zRt{wpK9^ioNBP+=5Gp-xzAg`})6!1(?cj1`(<;g&ZBu@4gz}G{*)`L5ayzr#W-<7K zHo-XbKFInDC!UtOzaGbC4Rc`oYtG`!r&ROhd~Tl^h3I2;n_T4_-4gFg3OZ3N8qiUFxvtgO%d5%K0q zW0}?6J?#q^{A^zrc*l;}z;MD^`5Ue#JI$~HxjXCkMHut<(F!2#IByUseEXsjd{lTM z0Syu-P^p(Hc<-9To@W6L`pAsj@ncl7tx&!`W`c-u#@f0HsR7>E@@^aqW+13i(IqqI zE5y_GYCHHQe|3Q|?ZF?6|FwXF=O=xt7Oi7@zHyji$CbZh|B!%M`>bJ!`G2c6e*$;s zpdC}sfIcc`=1`%c05Vu#P-JLl&uahUvV5t#pA>7&*NZybiRXv9=ItPg;^McS0VesS zy2|nVZht5-9O*!PP=ySRSr|al&!Soc>O%w7piMv>wfviw=UT{&P=)Y~#vt84HbrNb zz{fJ<8%aM5W0N!$`XL_F+y}x*ZNl1>RPkMtHhE*lgbkz04`*wtzss)Ui2ot?TelEU zAF=dZpXR_BFys0<5^kdPjK7T=J5P5lEkZ> z6z*U%Ox$jhemWmAW@#cj>;vbOz_{$%?O(SbnkJRMwkChB^-Kq&_k_z_aPzZK%U8&f zQ>1=jsyL?%)_r`#)P=NcnJGwUX@1}GAjbYR9vOe#Ff%b%klinBs_ISN#RQ?<2EoR@ z{fqZr#&up(bm)d>zjbOGI2;{rjP$zGo8 z`XuPCisCROn&%r{XGB+y0|;bIslk)e+%PhB_0Gcjl;Q&e@{t)*@*O|FD<2i%2KU8k z&=H=n8&>u~Gt_p{WdmglEz4<;arnnR&?+#G0(sAqjIc)&cbnK{Xx~je*pFSxe8y;I zzVt|YOm}NlgBh*!&TiHyc%`J7=GE0zmN+&`@SBB&<H?~1`^L~>s{fxYB35LsoyT!dK6g(pAnN6t~zrCx7~lVqKQ5bmwc_$b)Sij zkau@2tP{y1%BU19EA-d(gvQADHn5mEz_3|80Y!Y>fJl6`@dn@V!?9$w>Ua5p;$%dI zI!!sMNbxN2Ahw+=Xu_7txaVCwjCiIfWVt!3vSQ>ajp1ly{;n8yRZLLZ7mTJ1Vjg zQH=yDHF(#zzun*;!BxR(BdYP1X>6Rr&GEzH`D=Ov#HtTeu}+t@La zN@1SH&n*Zi9IU6-&3TD>OU}t-q(vGo#g;gH3UM!rB&o)z9vgqz^=theLcqkHLAcJr zB?ZkWR%1?G{~!Z)gy(B=Jqtv`EVXZC5GYk1zNECS22SVdY1)w6$a-CrMRr_jEPIZ~beyGi!6RY`V(Uos_^spHQxTH*VY< zcY=dCJK$Yw5IlX%#1H!LHK>9yxqR)N=u)*D`P|XV1Qjr4#%$7@>jl*|a5GtcQon+V zl}3|(6VTkk0oLdI#&~Yw_U5uQrQgp+3r1SKw$-Q)UpIW#*3v6 zlLhLjwfaXp2vtzp(RgntM~JVl7k!P>%?X{U2yHC>5Bah%w26`^Zxuk4zCiVA$O;#~ zGraixs3tqWA{Em#EDcLIGuU~XfW-I8V>7VND&Y^2aDhCi!FTstO7A1(DyNY+R>X9%$H=>_G`bdzNE@zVg`QN z3nwuPzl7&K&4x8^pOZw?_#b}(z{AJ1{`=d1_G4hrRm41v$ zgIGd2{Sj3Jha3uvHp>L-PXBqzTmrXXOd0ofI0eS+gUTEHO#OwB#Bf(I5;!3V>+|u4 ze7xW}(=q7sbMkhXG|4OKwzAXWzi$3{bb_;ul^?(0jxwROJD-iwx(9gTq<-p7qqz1* z#iTyXCd$lI22|gGYL5F4;Q_+LLwt)&gptl@1$TBITT%xQ^Mr!Fa=wBZGYZI;De5`< zO$0#k_ToHH>d*)_RB8&GZKLAa{`N(SKyRlVGm%Ei)L2_nQ@yDC9t;FD*Hnp?VE}E`c}nmA+p zaFQyatO0{LS39-CC75NWEKjPMtbk5W!<@t48bSsYJU{D-)E1ZI;wci92mz-F1hL4z z0Yb-u+^7buuN$iI` z8mNMut7O>YLO!J}o31Z;Oy3VpvG?r?lxFzx&w3?3-0`Nt--?-1L*d)4tS8;nZ1wzXKq zB7cVrTH}PX6@n{(U;~zZP?kbemW9ZZz-}qN`T?rvM-rPW~-{H1m z$UVl*rbb^Z%AW=3QAeJs0Kl9g!$jR!c^(r9BFfnAyM@?h2^W&TxEUW=(DICTw|Ze0 zwA3{}S9ZXsB&V0N{Dqtx7Hh`_0G|rIzab~Zm&Z>oqLs{-_LM6SbC2H>n*UDcv}3xR zC#4MEPaUm{*Lc(~4yY1Tqy73RSIhWx8F?}W_+ge@`tEeLb?{3S!1h@Pkvj}s3Fb#I 
z%b0&?2HHADZG0#5y+d6;fB^Pl*F$-pwtCAtD}O^(OpJ+BFL-+es3G%R^6MRtqP#G zwdxFOARaLVxu6BSryNN%r9DRUqT#p>NUqZL*09uoW|k-@8&Ywsj#%=4@;w%(vUJXT zITaUsb@k+(WHl`%8o3G>(5m{e3KSz5)8UEBHOWz_|ENZqj5lDs2MORb4SO@4qJOMXCgCHe19uu%|Y zxgy}sZWT3StU<)8g!T+gQM>ml-vJ1+_)0rhAyC|}_6p$pg@J}t>4;<9%!5VKj+Bhz z0iQ|UZWkAbB+pv()tdBH;5DcFueGH{Dfw?c{qSGL4}+ay;R|N?XIB9(|jG+DekRhwq$0a z=&a7$B*+fqBrKj0abSG`NB|CUJm@fre+}^JOUOcZ9J&+*Iz0wLY?6Z#JWW|hf2Y%; zj_xN!M!!A4396kYYt+=tue~%#j}G=Q|?=?bKlF8j0R-0YEt6Dvk%W zlPR?Q_8Ywc^N7f`gfCqO_@+S6MVmARxOAB+u zrHV&&LnASGQR3pLCe*Ri@N#!a7_&bo<%)8erO{LT_2C8qA*z{-$T!Pv|Lk$fI>{;W zL5|8TsywAm?2Lfwoh;))X-Zu}A{IQ>9~l5pU;2`~$U@-8nfGY_L^Keq+^!iNetQf@ zt$9FeN4JYcNdNET&{S>V)E$pQEn?ypHLA@lHtn0}xAB5d{qil6FvF7jlXs+eX0kWh z{bIqpxm@c8>fz*2w8dZL(#bLYXY+Zc0l|S!!}zhZpdD{%b*7Db=rUX9uBn35HJW=< zPx`b8%S{W{H|rU?zK?X<3EP|ly|8x9=Zb>A;dg6Noi*=%@?F;8euc4KBsfCmUrwZl z-`4f_CzU`Xcs-LkKQ9{*U8aV?dvt$d<98Cy_0_{3b^MS_U14wLO3pM;j3taprm$y2 zmeJ#dyPf4SJai3cH-?d($l&-nq(z-R4_4KkCf|VzT0tiGOf$Ea0TiSv3>(|+YBqI> z&sb^YoqHX1vo-D8x`$}@naWvriE@3?oNy^un+QGsK5Znbib_Z*p>lJFhJ#NZ!G`ql z^{RvIgnaexyW8PEc#~Th<7?xxmX^WP*+p{fYAT6`8|5-W;C3UIW(7BM;xXc1*fgfe zt0p&5zSejfl)T)OdC6s6yU(VaBC9&p`QPf6Vo*CP+Swx!-fxi5-~Nfb5+n0OEqA?* z1d$$)cS1}WnoW(3l|>}Mrh00WB~cvn)Pa;0ADYqV)UFHEOtD|8A@?D_VskA9=S9Vl zEi|z5_v*YL9|_*bl9y%6>G_DRVu(+goiSF#JrL$sKBlGJLkdsR-^Jru{C6o6V*L93 zN@DWWMYhR>!o>lU7f4&UwuYwPAI=bvB^>}-Z+G?xzoa$S`!o;DN6Jx_e0NE%LoWh$ zBE9@-xY9RI?qCPlqR!89Fo1!pw2WG5NZBls(@6k2*rfpG0=b6xz7L?U_XRaIu@{P} zs!M?{q}QmXqHU9`SC>F>OTh1+!GRY}n*A$6aZ@GS*q>m9H@4Clp0a5gC=7)5d65HC ze?8K}<}WnQHAVNPezMA2)05{Lb}HTD_$&PnTQST*v~XbL{4(8PYzNnGgY3@AW_mo9PyJS5B1)c zRn%s|q;sLmuz$91UGK#*d@&t+pK;yR3HHW;&nhFZXV=z&JEVcRupC+;F7p1C+c<5C z7qvQsKU$&#Cu80e0}G3B){b`P4IbyCwm$zyP&0kH(yI+%LxI-h%w0Vez}gl7V&V{` z*;S}?jfamf+en04#rbPgxwhNXWl6mqRg$W&vk;!g!c4}U!WaaD)SZ70^ZUW<*-eAL zTzR#rBt7dps&$(4($uKXYxHRIeF;~k&x_Wh`}Au_hTic~pbIyHV9h3BzEzzVuL%iY z;)AWcQu&{DD|=-UgKUXpdl8+b; zn=16DdI}8zJ-#koO;;6S9}{OWPfJIgg{$(12DI6`JM!op-&} z{GFG-Db&2W+?3mMWa0#~2vDcaHfh+ot58ME+03Men9gN+W!zaFQg5;f$UC4i2%RFU ztTd%G;vjMdmd@+WHS4|=^y#R|;yZS4M?N*=l2HUrvjzkZ+?)3d!WSzsD1Py8++;mT z`V}s%_H$aNcM0-JIzu-t>iX@8qbV@|dWeZ?Z0%F`@u@spsq%{ELsIXM)lO)7phVbQ zx%wIdhk3pHYFp{eBU#aFAseDgUxeE_9rc3OicbWQvfw+X2DS*XZf znsMTtAow|O%9cs&lz$%rp}_ZCj5d#EC@+_fDS{WzW<>}y6PNm9E;I0THA_HV%wume z{wM=H#ZcH2`(2l%c)nAdqS@ba@`q)`ZadqRc|SfGNw7{IAp-|P1*~}gN>~ozvpfyo(PgG;qvY(O+T>H}h@e1+qJ`r)so$Mq@7uB=*@l=T>ka)%7c#eriA?ps-kn6$j$h5s02l zuT(Sf+Yl;NS0m)iUlaH64EsFE0ILc^>VgJi%~|I3X*o2{M(YbCbCCg*U!zVbEU9JH=gGz z#NMwm92QpwDyA-sEjiDQWBCEidaUtlv;StiHMZ9BK-q<8`vh$+ADM1*fTO=4XinS9usM8d%eG^3WO;d(W;@m4_#e$APCB9)>CXD zdMjqjXOqnZXC2Q1x==wmW%<$+t-fZ%j>#L)XJ<~xnd;*4*bQAW>cd_C=f^LuRr^Oh z3m(k^yy=@t85J9d#Z!$>#_lrF0DrY3Ebfc(lc>A%nY&B2yUw-fvWe;V4w0-|bK7LK zCh@MlfLC)wq19uWRhlO@oLyez-U1QVwY{2A%Rj2Ry7rK&*8V=|>)Jfe&()u&)XI@X zDO8qRT8olJB`-xGhrVjR4CnmYvbm8B(ddSmCYygK7Jf$u6DUpN)OY?`GUmT@Vw*x= z`3N&FN!X0q6TGNaMDM`w4e$Ghu<>SQxY*Ra16i9l=PXGnt0cnPTmkv#Kvi z{-drGy`Og6jXZ^&9)sJXBnl0~sqjf+HGb)dsS~E+mC;Y1RNZN+reBb(uhclklcmaD zRAc>^CfY_Mvncod1?t_Ryg@SLygpLjk`6@1&VCJ#VI9X;x?y&V+Y1ncvW}zi}33r z4Ui#RQ6HwZUz9;V=MaBO#afeqrS7bV7SDKonN2bnRb<}`PD+^&ZSVr?xe;hl|0T@$ z-Rjl=j19QiJfMQ;0M?-$uA7+R7~A8U(NzIzRg}svIrRQG&jBN2kH4KZCX7->?+qcC zNUA6zWKhl#4;@b$XEM$q!;Gp#@a3Oxu+9F8EsJzr20~FiDte{=7I_E*VWxkw@!-55 z(SJ3N4|-S=x>>Z_`a6fcargbr|HFYnQQws6k3u^tapfc=HO(Yksq}a2mOsq6W1oJ9 zc`AEn@qc+77$h=kDjMRNJ!+=<2on7DyQcT|aZ9mayUx1HkpYyo^ixi6+T13w&MwjnBMHu5h{}8CP)ku%Fc?G|G z@e6r|xi#4GQqo?8i8gK6Ox`LA!0;nD_1I6})6XNJZkDTj=KkL!FvKuytS8do6Xz$L+ z^GkH5`x&z6#{Ks#F}YM#;Sz$6`_E#f;qd7vBg-N8sqd8SF&DHBOAsA#7%9Yy?}Y&l 
zQsSbFvhC|Q06y!eHYAgu$utYv%c;9-or?aJiIfj|Loz z2OPBom@a%Ptfk=On#KH)li{8Fsg>9pFb8>ZxKOXQe<9YU6+GNWXOL2B)fpS8v5V_c z>@-!?KTpVYO#R1M0-%-f??SA`F)&76g!<6&N#9@f7l%nV9 z3W=iO&c)0gt&91~Qk*#<{gzB^hBv}REQB$_$o($oa_duS(AZe)&XW&{A4paQJncsM z&LF4^kR+@Y4s6jGowB>E`{9)s^MBzMp2*O06mIam9PNwoQXS{% zsW3QcX=ssRJhxnV#T_-R$#}>smUKQ;mpHt1lNinqi0%}cJ1=^Fguj=m4b{bI&XxH> zR%NYzKddjJIs_{uF)CItrLP%$+}v3E24t zv-)G!%NJuXCeE=pFlXb=h4^SntVk8XyK%*Pc!)S?8?59uN?TIQ>EY#W3kKp0Nh42U zhaxqrCg7RwaeHt5&^?yU4cNYNvoWm3-|Q>Ry+gRfd%10<>ooJv`im#h1WuwzlWm>c|jYAVB^!Mqy+6jfarsa|*mI2%SNQo?bXg{E-oKCt9^ zOg+!f?gv46(7{GVd{kYqT`!-Y&HXrPSM&FLhx=*7=WH3dy-R+y==p&UAN29ItB}XZ zvX~-%^(1^~xxJYSU)x0Y<|cf=Y+u=jRl-`0pdlYGLIWwHT-&Bq)Oq!an!>i_Sg(Ys zs#^pWdLpT%F_ws|egFgPostE8c1M9!Ou0Aprju}6b7w>4$1CF=1^H;^pW)lxDihBL zA(&ia*b`&teX(FuZ-sH7Sfeq(1 zc`sJ1P*GAKkrr$Hav0DXjW41JbU3#^p|0!Z7gEjLxL zW121c@^H%n&m|Z1KEiEM!AO>#E_a<|U{}8*F304Mvj}qDsD^FwiiVQ8|FnCHLZ#W} zbO6@i!z6{i@Y^n`P}ET3-*9RgB*{e~hN68QD}LORFFiK8=cOsNI@pYp^b7g{V{JzZ zb3saugXx9X4RbF1_9GLUjH4205BmJTNSZxEDx`2Fo$&xq1Wy5$)218z=bk}p1CJfJ z)7iYPK!ptw9c=e^UeZ8Lh^VV*4GTwF0zMMQs%^tp(a&;Vpas#gJ+i^-nQ;q;Q}wY4|fjOa7+Pmuod!ad;s!^g-%#v8knTPw1>? zS}pFH+EsmIAX`f-4ihD;z0qW0iG&l%oLxPjkwEB5+Ig;O4b>06ewINOf(?+6HF-cz z9#4r4fc%rl99zaIoC9L~R|m|D3eWF{K}y$GIETZ7HFQ%nz0drZLN0*%9Ml`PIw z{2IFjnpo1fmu}0l>5?MS9ZkQ4P2W9^RsL5R4(!{ijEgQp#~EIqD>4OB+VV=3g6S`p zdqVf+&ZBJx$EgVGgu}r3>CW&AnD9|LM4UmFf6D0^=il#T-g(uza>(ABOx2Ig-`+m4 zaiqBpIm_{o#FI8>mE@xB0Odjx$YOT+ zLnD%EBq(c3D>$3=ih2B=$UpD)Srt}mW_>SNTL^%O0q>q<~8l-~_)9^vWJ5C)^B+OJ?qm*f}dRniddy)GdX`~U^ zK^0R!r>lj7LTiiq@n4Mnyyj~2-{ll}%P#@e!|2mFr6*F8!!vGbegiZTbQRvg>0sQK z@j29^6=$-_jWG`_ryEM<;A}na?;l*Rd2;2$4~ISo-K6}DcpAnn90sDmukQ*Jdrs4p zF(wyv+iFOsB>na1xbs8>w3d7#CjwO#>-Cu8ZX39alsc#f zpI}f6g&P*~3~Cde)O^&Qn0@!qsLdN7H`khGIqDup)ei6ObY1N+&-GL_KJ)fX*@PHTGj1(mpE;HOQywJzeT6&q@CGxg^8$md0Ads1}wD$V!r- zE%r`R<98LrP2gZA_oSX*+_3aw@yQPPjkVryrYI;yQ6Od5}xbRo>s_yL4vLXS)th;lFfrf_JArf~FKW zea5$zrY-Xy=tjNADxK`iRPN#mg2r3%IQxgbYfVno%??1hf1$VL(7&t2G@W+CB+$}c zsD5N9(Ief^!|Yb)e?t?v{)npjaovZ29V+W~6w6hxoC{ry_2?$}wb^cn{zl!4IAD5z zF8){<%VjV^h0+~%2lj4#>3q(AaM;>vcKO7q^o5H6lvL}eT?g#0u0FQ^D5YHlW$lK= zG()AEDI|-h2^FvBr37l_G#h!IH;GzwHw&IE&jWN)!V=>nPXwWkyPHnAs)KUFbllg_JJV%&=@K$6fYIzI?+=3Es&A>Kq>d#)6nv5 zj+Tgx4~S>(r-y@DL(+B``jOZfREB2I=;=$*+ij&!`SZJH2 zIGY02d-{xM)*~Npur$giOGd@|vgt57u4bJ$AO7hG1!pA)>;H2``#1kKq0B~RFU0OW zgdGIcLD&1vF!Z{Adw~J1SVVMf|ap=diKe9)em%FE>E=DN{3KQNeS88VGl+BRsd`B>*%N;aN;GTW>N z^o{qsVN{TtnuZa$7?+}QYH1$1k7$&MTx8Iaa~)1Ois$;R8yh%mRX)8cvuXI{1RlAu zsk}FpDPrIQjL2_5o7aBi{A057i7J$}Qc9iXUqRv50ZT2SzaV82%J*M`aKN@VjE<&o z4iHoW-kiOLZ4N&LUrwQMRC3Nn&pV?uQn!T#d0NfRb-g+w#KWwMx@x7-?QG@e@NbZR6#@MJt5y!!D)=3axQk0s^jgiwFdl;^b9 zTPseRt<};i-WJ6ez9=ZuDxRIEPWZD2RXwKW*^8U)-M%ggUIwA4b-PCuS;ZZmUTWBJ zreL&d_Geci5b?uE!pEPWS?KLE`Tp-9MYO%i><*eViXZ|S@rp{!v$i9`4kR2)ntQuS z+R?StnaXsXw_g@W`T||GD@jOlUcabP>sS8g&VQ7&L8(v8`(xJuoeok!HcI7e|2thC z4~$ROoOmd8$O41A%czo(tzG(ZykljDqfNgoONi2s;7M0k)|Msklm~Z>f$e`0glu0c z17jT5F@jOqcXG{Df_rdx8GNwd9^5Us1@|-e z^FC*t^{(>+t`FV4ud41{wRfoqJP4NPJAn+B3Jw^A;X5-*AMdRIN}AzO{kZMVIi>Jn zv=i*Y2I5uHJa~5OFPA7Dd5f^=>DN^}p605nB6qVA1sbA2EU!NV43&+AkrE^rKUQrF7iE8t*l!USIp+6WOLXPmg@O)D#pHp789Ub9e5VtA7Y3@1(O9K2VAbb#Ac zoBqLXTU*~hjB?r3h;F9u8q3<)p#1D)^g6IjhUrZTTUKh?lQy**MENpF2WE`^Eu!#V zPRQ~yrJ_6a6kM7;EXGM7j5+%}Oft;ZXKMno&tkVaoaX=hwg!nE zjXZr#VP~#!#sokd7>mAXhDR&H{A-5e{hDih3rw&>u4Gqe#lGVG-U?6+Scbrlv2Dtr60^_C4lXvd+vB+bpmR-gU0K1^bf0EQ_4e&AgUY%QB zoie0&Vnq73SZ2clW>I8F(0ah)C_GVOxwa4-s|)PyBj-7!eat`X)+PJVkor5%t{W-2 zqO;Y!>G^K;;mgtDcIw$g$AdC+rk{qyQY_}G9mwh@qo?%9seDrQCG?*Gd7!oeBVr1t^N3U446yq<^TZ(9+R8j1OXGp0 
[... base85-encoded binary image data omitted ...]

literal 0
HcmV?d00001

diff --git a/docs/stable/_static/img/tensorboard/hier_tags.png b/docs/stable/_static/img/tensorboard/hier_tags.png
new file mode 100644
index 0000000000000000000000000000000000000000..cbe895685cb8b2881f4df24f3b904a2a21c5d79e
GIT binary patch
literal 160926

[... base85-encoded binary image data omitted ...]
zyevsAPR~8|>2!Sm(M(|N&ZpD8a!pBCe&vggr{$An^1vOsXZMx5UA_NMdPPYt{~tel zEgnjp^=Tr>x+FL`vOJwqJu-bhCyNtYaN^u_ zdN%MBc)*wj>2p%R56>-A+fM5JXqwh!=G^MtxgWV+7%;l(c9{;vF4N5}=U48@pMR&0 z&PjQh4qqvI{`u#1mUBu5ZCU;eqY_^EWTgO7}`T-)xDi>O6&G9G+ExXUN^rz89Jq?IdIq?^v3$g=p1 zHobnOgU*OPs5QqkhY)*j4#`&C&^78!T={EW^Sole^n=fsZZ=Jg9?^@_v0wJ&!#ojG(eWqB$C z(}TbN%W1i;+kAT6*7TZs5byKPzo@*a57fiq$guNPfJJ{Xi1M@xmvT^^sXZ!(o+}O? z%1M2acpuHSC#Zp6Mdn={GDTclpu%S=OScH{*-iVdbG+ zInLDDLy0d0j{s*nab|RRZcFNnx-*$gtNBu4z_zx*YQnJ3dLH4#9@J-K67nmSf2&(IpKR%qne zh$eNfgn;0*J|^_luYNV{J9=C^xl&Q?o5Z(Mjrd-#P9yL#@*r}#w6fz zjV&`RYxT7*Piqx5kE=?#-m>+M^ui0<)6F;cX1K?W9Z##oXc0dCN1sY(&Yn%b^;;jx z-d}#=$+Z8#!Stp_zb?J0I|3FKB5=O7N=T2!GyB(TVpOfFKUtKQcZ+zvz%E(O%98i$p*t2Ry zT6xcdy2Bl@JpgiA&*s2&x863toGi(69bg?O@}UQH`18+wIwc8_Eo&qSN7v@~>o?t& z*59W({Uco{vV2)TtzBQtI1{(sktSaI+F}`1+7GioKF-!_HkU77u9b$n6VE=_^?d*R z);k|B!R*i7t*{DrJ@Be@*K5>fJL;r`15TUo%%LdNXT=j+wrCo_ET4UgC{0)`H5jF> z%F|-NApVldO}%g(9WrM*w&6xMIr zoYuozj*qYR4|e2Xrvw^xa`Vk)O3|PkIiOLt2*PZ+3D+t$IFlOi^UVTM!k_#^}6^lX~cO)w8UAQAqgct(TKy|;*$%=<${c^_Z1F1_yc60v(;EHtq8*c!fD zO8dE9i*ROb$CM$P6DA}cn;X28X-|toMmIq&xwwR}T z-aQw4v#})q&T?%TIGY}RNOTU}l=iF~Ppfsa#@ZuA7O$ANshb~(>;2tOWp#?Zs>tU9 zPs?)riL`0;%CyOO&>4gCPyInofAhVsNSpCT@#Ek+!2QDTFb8_8g>%EUd(sBJs`s&9 zPiv%D%^RPxTw5dAEPmA%2V}8}Z{Uw&Z5) zh5!2GX%{MEnfnyaqw1Hxr=1ZR$C`ZS{7Rnwm#IwQ{zt!Fd!BnM(+yr1{piQ4!rmRl zvMXc5!>_ovjFVWC_exFy?%V43llw}2tnk=}{30&~WIJ-B3tkevk15X@qhHM!y;(nG z;2=9>{W|FFH6+NUrXCG2L_fW_`5kH7Wg@qENr;jc@La>uh7e+77TH zXzR9KSJIvjJRr8ZGp$*pb_Nf84uR(zs_M9uBX|5sUgq%*8Hwy;n-P3l%YJmlM4CKx zI^A{GT_y5PrGZT6P-KN|kGwf;d&TQgI=DyL{t0`R=L3JQd4Da|?$#>LwJuMa!dJ)T zA2r%3T*miTzxu)SPyflkOpk_r*Idc>u6KV^PG!fAooU;)E$MCF@Rs~@(koXiPjC3T zN75~~^ptXkU2#tacLWoH)gO}~lbX@yrKEWomaqhn1t^JtX3z@);qJh3*~yXGbPzQ1 zo3rAmQHC}#$D)HEmIAOtc)3Mmkc6TZrQ@xKg*!rmn$veJq#%<$##)0oIDl7o_Dq*G`vZa>bw3 z;Ba!)EoCL3IdpWySTZ43LC=Z_JZ6YXfTYd9?CbQrba=}h*@`mEUTjt#yxaPCxm+J1ZPIX%$4C2{&2O47sqYT8Q zFImA_D<#mMqh>s4gXLtEQ=6oOk9_fCJ*?0E*W$K3PD;ZtFaHj>$fxPiH~Aqy=wHY} z%L0e7l>_{X-WF2Jf&5}64}$~kSS`T$u(XwS_4g6ir+LD8rKsdFzIPShtH&PikRdNV znK5U%*SpvaS}|2VuM9AL6362~91DfkfO)_k)K`Ss&drySQL-0=N}RL}mV>;v%bn>9 z7ry$3zf+7B9{1b0)SKmKrWJV0X()yOZ{{_(`a|Xb|k`??E_YX z%3Jw29sBXF-&{TfBTs~bmGgL@v2IatwB)PjtYo%@QiHgN!{zbvyq3o@+;LBT#ICaR zW0W_S&vgCXh1>SCFmnAN&?JPWnPf!pAnZIbp>UnqFKlUvQJ3uY9(y7TEJGEjZL4
    be$BYgD-`YJ5{dz2_zTC3fr7yXo4eR0yO=QyeY8!^L4dZ$CQ^*Ylb(M?^K{iFF3WXl{phMdVoE#M>0YzVlrZB+xu*3@ex2*Kq?q~7 zqRVuoI7;ZolM^QRfeqy?pGwWLAz@{|Se82&WKyEuEdL{Igr|8D zNrkga^ymKY?>hIk@2~mPQebiCx&)DwYwez2x-!sLJ&=x;4OQ1nLkhI;80%v6&DOg> zsqvTbDdgzy$wAc8XvD4$kx-O!E#W9E3VjXC4AVfa_vtm`G2}^g*XcOxyQh!EP;L%h-gt^ttYcYIrorvqa08|XBAM26*%a;+7RT>ekU zUB<`HeCBiR>8GA`Z+z41-Pto|-5>waCtId|{`{!>umANo+{u%10?`sXyqs>!HGQsY z8yeA{TLER9+y#g?2g2tffp}MgbWaL`mrrCo9toR~yu=)z0!ySDBh!RX)jg9w0zyX1SJP zN*u5ML0#}FviRIc$)kh=#bb2Pqjyy3SW=#$N(6)B_HmQv_LtlBTz!M^kNDrjt* zJHOr4t*~h#R-@8*r}pI=wlV%Z*zg!s=+bSxLd35wq#52zuCB_I<$C(uLL%okV_=4B zc1vsHft{fOxrw3(h%P(X!Z`ch8C@r$FrUydWv%E*=t%LCHJWj>Bx&Q@bcK6(k8?bH zA8_Io_xWqfAJNE=BzUrbaKp1_-}lqb{l`x@_wW7>=ic?BL2=ToJ}&8(v@|Fyy%b*7 zO@rkcX=zZFBI@xjgf!JANmlfjeYsG=u;^ew0vZscSTbD;Iu1}_nJ=6IlB*P3Tc5IA z=e1oD{gNJ)rplma5^Z*`1nql;wYw^g($kgv%P-ooPU&mSv#?uP>mr?+&vjHhNl#0~ z>QDX**>)MbJnzTG#@z6cqwfBt6(P4NM*iRJo|eXjEqX4aC@q&A~IoU4dOIi?A+WQ44fwk$CL zNdBE<8_Q2x1knyp#edUy>2q;PS3Tx6<$4O6aC$a0g5d@27@v$RYw9le6tlz88mdze zM5ozR9f9fG^}2xd^f$vbTCT?>IGWL5xH_v9KEL_taHzc9+$b70UB0IP;%pwX`YHyG zvzy=(-3~-Uc-eCtoU$o75a_Dc&P=5I!%Hp7BFUZb?R8-e|l5#CP zrYFTV8L&fS^Q*kddR|nymcKRYTQtNYA}?1Zor0hNMDH7}QMhK7H6Y06T9%-OI8zj$ zGFZM5X}$B$YE!+$wVMH7G*xH1ihsG3`|aNu5>c2*GQF)*2`5cX%t(uMEz9-7B>|nh zke*j^YW?u%!q5uGxH)hENv`ol|PkFh83<7=+qz{+sJ)7 z5Bx)of{aHDq0FYNMc*E~0V|{gewmn{RjcrX@eK;U#z89A^xYA)+c7>ESuf)au(xpMz99iNp`25}p!7wBVrlbVua4G_-U)GPxuW~BB@}RT6)Az#DRqvH2hRan( z%gMr>@@s4?@qxp)NdTLOj<|oV>kcflYQWO~6fo<5v;2UL+&+j(bCt z7x9&z+g<(SBCbk~3`cD8Xpgts%}S8c|&ShO_c>9H@h&zt0q3?y77 z(D?9YA9e1xKkVG!{sGzx{6>&*gZIaW`0J|JTQM;|t_=7P{OGHodZufLEZ00WNc)Op z$Pr%M6@^;9t}Y~FP872)FFcKeR(Baxl2{Fok?%?(-zcmxFn(1ct@3%~8?)mI{2twS z=-{KkVRUGxEG7h(fao|@gk~qx^UK+jNX;g7CMz!4M25!IC_2T0%7a#a1 z*ME4Ak9ca)a#uZnzGWSX4kR&-mK$o3qkU*W>B3t;#4D>%EG+pJ(|{ra2JO9H6}FV<6{p>Lkh) z>!HjIwbAcXVN5q`bA!w9^Jsr6iLLs237b}ZdTdAVsi%d(rGhg1``cavr2{gAe2cIu z`c^KW9meqf0{(ekc^QiJ=+VQz&IeD8gt{A;=gY$M=76un-?{WIQy>-T@$sDH?j(I~ zM*K&(gZ$|K_=Cxt#z*2l^w2|fFODBS?kJ1P)1a6Q4Gp>esCZ8F^tq>>c%m-th6m(t z?y>!DOMGz=t1{0%`7J+zFM-aS6Nj>Q=t}Oz7k9e;6Q|vR1=v8=0(!CGxgQg$acl%1 zpLfuw0eiX|9Rc6wKp#$#nS^pXG*-dK5zo24^QYYS-0KmyUU?ooaoCk7$H99&P<$rc zb!W~ytnGQI!1(dZj%Quwj+CUcXU`sY_Uu{L-w$OjLO(vJGUwPyw*k)=hYz_OsIP&O z!|qnNnN%VFo_t}aQ2n(ByghJg*i}%s%tr-h$zM2r4nFrMv8nZ7-!~SbK)AjKif0lo;y0?>sR>4E4^;N;mdw} zY-ZtwidzYU(>tGWCFmqfZ|~@&8!>o_&ucHnlK$T7+|cWP*ezUmz3YAUSvP@Im%))C zSLq!956tm-SDc=0Z+7Rt^%UM=6}Sxy!1umqV$2=ug^a?tEY3k@jUJ~Cz|p94n0s_z z7HJYIKVw5MGyrz(+U4a<4~;=n4CKSDTSNO!G3lSXcn5&9-<6?mJ({&G}*Y$gsQq%rQ5&Qg#&gXAbUl$J&&a87xDo z@5jJM!8&msb$A?g2w^%l&Qt>o;P^yZBgiwpj-l9%oSkq-j~;b{gLWDg@X14c_;AnU z;5j!AVLvc@)UAz6#s|-iy2%3hQrh;)^r)b;60ALFgFg5-vfbE5o`1%J8ai0rc$-13{?1@O+@u>s90KWQbip%hU(#k8c~HWN zRR#5Q;_P|127}35ARXJi%WuS8A6JkLoga4-sJmXQY-~V#%mn3n&Dz!OrI+@&B}<|u z6Zt-M>a?3TmnEpdSHJRgH;Q`v>%R_5;!i*Ij5~tzzWUx*x?8v2Ttkd^_uNzOYV!Ew zkGrKym%8oSw>RKkY5*0-vyXh)lj)tmpjaAvYwg$%hwW|Kw}sUNPUsH$skEop(^GPr zHg9$dR;+f57A4lmG~;yt;pQ}cB#YMM{YZmK#EGmIa{vH907*naRO-zsW_X6%+xf!_ z++6V8#`TccoX{_Vl~pbqigoxpXu(k6B9wYD@qFl0{;7;rlG|=a-kYq9aU#2M=++CeJ_S>6Ne_prMUuFycxd%R);nR|i(C_ws{%Yt0&}lv58h7B%Fh zXflVOszP4-jq0~;_4PI9*a5eF9(vuTZEpYm{k~3?ELl=dk-pV1j+DO{g4f@+-IeA* zMSA)x;TYF=blxr1bFNBW-ap5c{_1yK^}B!EmCgm(F=xeEw|*1a{A^r-?-~AsS>QY`!#OOhFkSmf91*otmFNt|HX?JySdBZvPR1~%SyRKcPO4T z#R)8Arw4A-FXDnmgA3+;NBt6@#BRF#Ueq;k=_axOW#y7G zEDSfV03NK+{%l&HZrFMo%A5#tPbB=z!)CI%#|GTO3Dp0SaL4N_yBph;mr?1iaSUam zI|(<+9fXyFmWovy17W|J2Dhr$y`j4=zHybCH*cQXvPJg0fbM_25Jwp)xak)N@_ulK z6=U-quP}(6-Ev~f^jZp5)SVkfy=j2pNe%Mx7wND*K$w#Vnqg@VY*L&E?X zu){Cgd>CnP0HFH^X|eCDMZL_4eTenRdL$o{pYs3%`T6p=n{(=nn>)M$gYNTg%_6kP 
z1`KXIbp|v8uYe{8@Se~Pd8969*tTt3-S7U+$K1Wf@0v&Q-S^fv`E2anyWg!_x5mBh z^{@5+%G=yIgYKSJ-VKF0>Bm6e$sp+jN@rY8hvk|y`{lBL)qz53P*LQErCxX(*E_K4 zglF=ct|}-eK?o1^)lsm3BVqN^kmJj4gYed+sq; zdhE+#e-nv84*NRfBq@%YC7dr!KFWY&I^9hecI+_5>YxmlF~0h+4AI=gcJTZ{o}pFQ z|APjK>BoE0pft1{p<7G zb?oVhh>~A}nOOtuF#s{Cy_W#@23f2ri1@T*wgu4;qMl-j|%FDZpe zM|F^p^N)Hb+%u;Fmi)o<;!IVT(v;<;&c3TkZjx9L9!aArJ(f8wx7rq~DjwLEnt`%L z8am31ACDQ@6X?WG@nm*V5^e0=FJWAF=RA#-wMPn^x zL`y5PgiIvFN0pO7$~$S$xIzO2(yijf9(y3j2y|K$h^Ep|IIjv5 z4z0{@{r=iX3mOn?ob+Ba6rNQ@b`#zxjRt(+#SMydY4PAUH`T(ug-4&QUT*6mrol~- zJcWcS06d{Z%5B?0AMu$VePBoFt1Qi~+E_!&(=Wi1LipeQo4@!UDTcJZ5?GIZ^OtFt z@TT@FVXyj;;MK1C8s;?I#d;KF4Br_&)MRfk*|q?-1P_ zUI4{BsGL4S@rQVHEdX$XhX)N_UdcuXeUIsXNU#$PrJ>D8RIc@fN3I+c0n*?HdIZs*Rm9Z zukf_)6h4!#S)Nh10#0ZNJbStsaNMIC7u&?IT0&lm7L{uPPvIc^_m9FI@O-ciTiLJ( z;}+v+qZD!Ndp`S>MqKUZ$Y}^m@rWykqFawX4dspb)tKE)`zT#c!_--pScf;t7o(;5 z?vT{gjDAhIo~f-ThW7)1%%tuu^V<9?{HOm<=YIaToqP9>g|`Z~xOw+knr2X8XMOEE zf)LTMWO;GZFbymkXj_Rdx`nTKd?;O==3vOZ|grdtR$2zpqtK3wgu zj`O3nLr3{_qFmD#j}syaa&9td2dZ0Ip9DA!AcIWcqHMw_C) z{^&o4ml})>>XhG#4@5Y9R=B-#`IG-$S=Pl{7*K4vNnd?|_Zkd&p9_YLEZ1B*rd5iT zu3PV^#VS&!8uqoj0yQuopX+w+SE6OxR$+NpuV<)j^AJp!(8xzwziTzTDgtPH@D^w`fbXoA7zf=#uA* z<=WyDxmTNtMn<s92_2J52)tj|k(*i;$tX#)>VY~5z2M2f8 z?Xvc?(jWO;pRmd`6k>tek%IXb-xor;S?{NQT^Nm5))DLeYoBuNpZ_!rHe6=B_3lR8 zsSZ&+W^~ndS-{lZC_R=rR2cJLq^H@FF6HZJh%7UV!{=}Y$VSn%@n8RL$actM zx;o^&x47xO@+9S&6LVRv<%ezKr1bNYJc~aQ?$bOoPudvGlb$3O)4WpdaZ@!xhLY6u zYBy6;F7Qn0o9}|l$=7_eD$cmBo;c&&tKJ&^8JgyY&uj;MNQOA6UpB*9fu|Xn2rT5? z{fOUAR^`!Dn`%5s^P3l^RhW%$?_0&0h3~E@*E6+I#grQKbyav?zV%MH5+VxEi(|ti zc+*d0Z{F_Q#;vUf3rmOhzLP^qm6EJHHOmXbiNk;1YZk3MrS$WjDQhTbEJXPeXh$tQ zHxEFR60P`pmRkrdk#zZ>vSf}I5_~v0Rf7-A$f(2!s1e^_xkvM9huqNNy~A z#r$P;sPdXY8)eTv6kteYw~TvWCq9<&u@JQ7PUn8`7w`dtwHeB^-K6c3K2LSRb-WQ7 zM``NHG}{eWqC;G};f&8)UzrT97MTdMA9PyCbd@yQ(5T69yM&}n5YM)=e@0(fe``>f zcnT1XqbvroUD#>g~I`b+|>N9~E?bGxr7H_!lIh&7mRl{J@#m_Q4NadgyaLz}mVn)}M;)!yeK z&)z^Y>dxLmUbQ6?W3uD00}&J!p6Y+~P8}_Kt6rD|xv2rg~9YHV@2~lxclSG7ehebm2>=HjH{!@qNR6l>;1 z^{9NADpt_cUTp%A?=)hGf0>}Y&&@!j!}^w62=VBqKt;{+TUy3kdb2B#mh0)XUiW2| zYt(lUsB2Hl1Bk5lk%1w3-IToI*)J~ErIW;r_@2D_qvaE!XLJ#ujcUFRM}D>M6{poz z_*S{ri9kHgvjLI{XjP_$xK}zreLChN?vv?_-zjLw9qUzcTdR<&FCx>Z!e>sH32*%F z5Qgs{xYxZaSfaJBC18Z#ymsZdMprw0^S;oM)NvqRshYF7$+)Nopco8Dz51AnM$HhyPzdUO0kq=2>_)b z+PW%Sc(J30WB6iOgxkqgV*}|bFG|bOVOVqv$%65*<pdZ-mX^|-sxn*!bf(j4jY@Yl%QaS7ih%9^X?y57Q?82;1WpzY zJfMEpif@E3Ts^1B*>EXfj3ZfQ-=}f&p2drlY0RJetp_kM5q%pkrLXuFXU5&fKAidp zE{Adrpj$|Ta;^53QACpuTYg!txv7x`2)>iTO~U@$2MDuq7@j^eOuV6ul9ng=K82Kq zo_Q57w4;&I)U)EX<3-`OyDD!UPi@gI((HSGO}U=QZ7L>Z=TQezSYE#Mt|)#=LxNL+ zM(@?>)Cm>d>MG%(xb5<9^G>>&MeBn!tM}VfeIKu2H|)QG9k{)2bfp+EY9bBdC31pDO;L`bdZEt$rtYnwOWD4$l_$ z&=l9?Ps*=P1L+XV>k!c9SLu)jzvicNmc3!12{GoC_yo!I@H8`jRSe~sxukVI=Din| z>jsbvZRK7is8j+eZCC_0={n zbs5WfPy^uACDY0wf0YC^F(Y0jxu5OXRHtRfFT(g@Wa1XVPwUUdx9{|&Mdio7Yj&>= zDS{oUqWGgLixi(!#vV?ghF3U;gseiNMX2s%x%5@Ke70mq}w0S`cq&1=8}Q#nXK= znl@ZC6{qMmHDFnqMQK<(8$YKhEb45&HyXlhF6LEQHqCB*2h{N0f?LJZURi+fBq%IA zwf~jy)J&c+bl*wm6|Uk+cs0#WESCl~#Z!LT=RCixO0P(r5Z4aSato0~nt7o0g6ETp zuFscg6P*Wr6g$!hmR<OGDHPkmvGb$;N*kRCsN zz#|#~r4zZ)?6|1lrz-u#XLj2}E|wR{Rj2n(vy3ou3ymwn^JGqir+L!F^qPlfMo~%m&nJ z%JoccMF~?X*KL*4q#*K}&g<9-YTctchSK9p37)hQ)rx*f+o0S8dlQgC=v|I0F5Da$o;qPk4)>vK8vPZ@8-!N9h; zCb=zBjqxJ;W|nYU9$%)GM6SK;v_HJ|H(8yY2>d|Jui-*tPL_RnNj6Io}|Z7W+% z{x}}k{Mq}W^`u3pP_Ry-fK^&4C5CI+l<$Nm5)9)Yq;Ni`Amv+bAq6ehRncVmGq;f4DfQa3 zCM(?SPvAMJv@~;bBf~ZzjCHslyV?nxwq+LWQY+<}ap<4O{Ai*OC|sUjrYjz>^*&vx zD?RZWp9z-liVRwcC$}c`pLEbnpF?W1=9ilnAAcO@{Tdh^9(L!>opS>NupCY?965Nv zZHTh&xjp;ccb{+rged7G=bf_ce@K0P}Zd0nNQ|--h~MV?_m$eFHX9lXP(KW 
zG%^cltDB#WVo(p60t~GJV+8-8*{5jSr8wiRjX&GB56*=h%|R zpMK08xGoqQNVjKf)b*blc5CoeS{$tBUOaTfkq!FJj-dWS8OKJ>`FiL-6=ZIux8IGO z8e{Yf#>U3np+kqU?Wx#vv|&UY7)-==_mHRtF)H#9Wl z&YnH%%A*xG3B%1G{Krf2D9Z79$KAxdXjo}VDE|fP=iHeyZgh0iovW1G#be#KE9<9+ zpN0pF&#$!s+mz6wiG_cfnxA9Gj=3#cwoDDNDWWo?l$p{yO)iGZoAV<{(6tfDZlicTGJlLyhTuT^}~RXJ2fi^qG$xAm*Imaad? zST76#;xG*<*Lu~e%);SS=~Bs)TL^svsYI*oExtCwP?09kaxEW9S+2=92aV+#gE^sO z&&Bp3=I7kWSPpD>wBeC_@rKgZmFYF(m%O})e|5EVh@U>^(YVUP zwr+(Bl7R{E>Y_!9+~&=jP3RQeLMf8vdc$qoi`6ea4pTaUi9TU)j;r#l;RvN;uzqu& zLWGI&G8TjuyM+tkHqi>u-X+c**uHFgT&r}|Y6keo)PGN zZhn*YDGj1blE=scfyxXO_tmRc2Le<4O9ir9ekoBV@)Q(U=Bx2M?-Hv3vbARj`QQ1t zTUNc`dTn{(N>@5I;QA(lvb=8VHdlo^V2SP{(S@W+xr}-!4aNF|;iQ^;OpJ9x<#Ejd z@E7GTz_+Vh41EZGFrg|PrWzjsmhx(wv#v`6JTD%0l*NnP;JS_A-Ffx=fcRBe+(ozG zm3(#f3d)N- zO<}#$M7LwIRG1(7=A-Vr-t`W5{P+p?2fzPu_v0V>QPQU253avhJ(qpJwp@9>_$2g}!n^Oxh`U50fogT2p(+f@+O zO!suKk~K^cvIx_BH#z{(+N4!^?HZz)95}n@r zKGbQXX~vJOKh3EWApgJv54s=v;Sa#Vu@E!Xjqdy2{~q`0Pkpv7COb0~?|bjP$IlKo z-?YhX!T*sXMp>wfag{%GrCi4*jh2Rv6%;80QVv*!Qan0QbcIVH&Myg)ZYviFrK@H2 zEdQObXpZ6{x>QkHM zH4mC?(*;0+qK&K$q zEFTR2{-1ICpWoqDV5zoOdD2yRY8Ia*Nd1&sO(<+V_;!MTS01TcKk=3PO^sR>rf)R! z#KD06?3r5#lxwz`&bPkx?vN7G;7KRgu-HfIwaUY{YGe3#dbm<2g;;*I?Mch}bb#7Y zb)o0FeAtXM%S6P9hU=_8MNiSvXNAw=>vVoZrX2mFD_9|lw$=7EX%z|eZN*C4Cw&%P z-j%rOzwP8!O4st6rA=!pjH=lL&*G&tY`j+YqOquExpr~szE!l=^1Efs%lk5|7pq)j z({~jX+A@s@H2hF*5-9&@lwkYai2W-98F}<Y)9Z`p}BMCX6f}F9d;!M4dRudL{;XFy2&vJj)KrLGK|dgla5nq3kvYfV{+jleyE8t8*s8z-fHpT9ND?=F@M;k>@k6 z@5qqXw@;7vIF@n2f(1?E>P09!tmJ_4-r$4P10hO5S{WU8B`Bq(tZX&E$}Ijd)E%ts z2N1VsvgFFh!zAMM!biD;<-*CaN#6z($m0%HU<1CasFM=@Ehis2b=FNBY+i1X;3kzh zm2p=oO}NS#q;p}?&7E_-gV(dWu;&GLJc(a|pK0&$VgIEu;*7(}JPe#cumv4MCUB1R z(Y?Ff@;%Rb)#31>)vki~C2XR)0iSfIjqk$#m)xNP`&}>UwuHfF0!x?`OavAV9dMlH zo+46yCuQi*al2!@?ynRPKS*#ohHmW<&rfwUA2O~VelyUqKbMa zZ;($W&y9IrVcihKOReZ!M87+YOiwJnw=(l#u+S0)h9 ztlp>$XUes~&uAn!btv({((Y-;a|u2Bs0kOtEKQ43^xnp|_eJB+56M|-mJq)}T2Q=tL8sc&hZ%oJ zi{&g*z8)x{+R4mwL(7QY40_;4#{8C0cNHv=`807Xp2}7vJ;o_T-$3S-z%+fmfiCNX zX@CiG^U?#{as_F|_G3Ns--nYn%1v6NE&3`?4=nX;qe?PJ^>8B#@LPc|BLp?5x(Wm& z)6_)U`+fl)A#ao>$h)Eck(LF9UxTTsfpS;Mb)A+{pOHiv zX!zP*J$CYB=zs}QT zwO?zwMn)$y<@!)<^Ej1UZ4O_uqE;Lln7F|;ckfIK?b_Cr>bpiY++m!_Hm5ejv2iSp zifC6}rpj_{^KR2GdS4em%V^Qqvj(qT>R+K^-EhMqSns0msL8|lc>OU$J9a#WBas$h zhw>_q#ew+kZ@wi5YHJu3T9cCTr`mWo9|74&%}u_U+r}2`pW@v|&?E z*OVt=x#RK`j}+=hXQXnZ^Nc~1ySABLwy~rN%PlOM+-dKlJA_~>vh3I0yG*@9qd@EqO17OERu)Q+x7tcV!>2q_Ccdj-R zN)J20od#RA@f?(;543rT2Q0Vl0_?WSu=l`&=&MWYAHAmx4d9LmdEm zj~{9*`zh2@8Jqrgf-e9AtJVjcehlcG(12eS?s~$%>%H|AwfH#x$*m0eTQGLUEm(}Y z{>$HVZr4+83HqSttGE$m!RqyHL0U#nvAZ9xc#u!qE9@v!+*IUP{~Y+c09vfMv2Mu{ zyN-}UsDi#i9w$GLH`rHF*PfhcY0$aRQ?3f$S;{(PU-i4?Q5N1WN#u>~7b&4x_YRV#aDAl`FU>Jt(UKp%W(N@t(2p3UqnGu^mn_l5el15CM3Z?Wzg|(t%({6=# zrQfM6DqEslPh1~mg1$w-{Cc;LVjO3wk83I>%Z!DEj;;cne}vwz&T?|{GZ zIHYfU;}N0w>+aoq-0C$!*{*vrTOQ=QtR3vOGv$?0@3ZBm4x3mVOM{M<#fl35ygWhS zYYWOZm+~ed^a&jci&i}ubhS*Vv_dN8H^x`lGjeHo_M$N^tr2yQhg*jbA0nRGjY>9! 
zI-k>XspWGmOAAj5A+TIh$3sX1iu&O_p?IXj^T+<0 zxrMB_^c>r|viFv<9V?pQSo}Ji*j_55#p~u?Ji)jHSfOPSdHHM^VRpWp5teIK?FT;a zUiXPl{JC4Zb~QdY0BbFn=DqLxZeQyk|HD6V|LK=M;{C1Xz#8?*PyU5lw|0#?c<8XV zguUf0Z}fPx@+VWSqZPl7adS$n_Zr;woP(MhBz!5Dy4o)n=0?m3q;g1QEuJ-+gHa30 z(uJ_^R-s?AGFbUlXQZCd#eG(mCekC~mNJ1A@kAr5(^eA}tvu<(&r2ENx&f z!K9J($DQr=3{(vkI#Q2kToag2D%bpg4e8MDfBmiDo!-Y)jDg{Ouv-6C?Hx}R*>YFy z>x^b~q=s}k?o8XL-lsEJrO}Qz+T_81JcKynUA3dy)y8LHYLtu7h%2#Yb4W z11Xm=w)mkNVm7W-R*KLnieHCk`bv>Bgd?6L&x;Rj-i4Q5i{PYjuEcYe&)Bw-mt1|t zs@Q~1LgELv1gr~gXe7iFMZ!auzgZp--`-ntnvgS0ZDQZ2_m+;DUgJ!P+v#&1U$q+F z3J_M_h-&j(AnI&FnK8;W4*~zshknF=eQ3@c?EE&fIx5zJcYf#n&|4~ShdAzD{hC*L zze>HEl~*Z>Stqibn^)razFTEMolK{NC|!l;CD|;DyRPld>t=X9Tpbz$F-tSG0=Fa( zjyF<$HK2lSHjpT-1WAG*!;5vPDp#H6P3^O|pvlcua&5@NO_KGGv~a?JoJ{~2xNMDc zyLUKu1j_X-cY8GdBg-{5HF~g?Ydeuyzcti}Vp4$9O@VF6I;J~>fM0TC`FH{eLDrq{ zboqW&k3L0J{4^%7^u%*|?lejD577~?omQq2!A>^H1bP_geuP*?grG?v$zS* zb<|cSu2q~e+U6#uTSU2HbQ{m!8`5oWwVFa+Rry=dT8c?N-R;s3p|nltN+HB{P}#Jh z5gUyumnS77Q_FSySd9N5*QI9N)~CI5D(xI5B@KwX}@ zqjt5%YNBOax#CdCwu*ILPU!Qy6sL=D!_usWR9S4tta>jCnx2bRlscJ|11Sqq9*WZG z6i@l2KvdqRBu&%NRVI}&&104a2`t5pSJj2^>f#F*`KJ&UC|J+UuF8`J1mbZ}BjA(A zbwK5sitz~cDNsP`3v;Al{e?H(9u#RsR(+}*+-Uc2em1;4ygO{Jp}#(9awE@`09&_3 z>uxIYnej%KiE5w}@2VZuu2Yf6X`*rP;35A_f#;up5xN%mw+Zmm$p7-$FSxIL?Hk^} z(FCm_m^53_ZHUl8fX=z*sVrSVT3*Y4of5RH)3)6$G7?hXY?r3R>o%{-8+l&kZ$*o; zfO}6mIgzDZIi~Y{QnmV0qTe@!bSD>Jb&qglU%2v@ZgP~1T(K+S##oD)e`o7c^_^lG z17hcv>sUz1t(11B&vkrtiE5!Od#CxBMfjIki?gUEW@=HAlLqQ67kPOWV@rY3a~^J9 znDEtUr7TE!$cr--VOhFVuA_206=_P)auAU)!aL*SoX$^zk46(I<0r@y%~M?7ohpPv7vy*SQrdmb>5l=hu4j6y zHdwCPG?|!Lf<=O~XgSr%)M<59cu_i?;wj%}PSyn8l%#0{k`z~^mFBS_Tyf)7ba_iV!{hjdzT?ND zl?M34<4GbrzZGbuyW?vqO#hwgfhpPh=}I|GBYiHKu6R|G(TvyH^w*;FrvYzlY|ITG zIqL3z`&)2U{v3DrJ$Jc{n>ILFuv6eZ7@o+)ZwhSOxZZ8sew#aR!1%1D5iZm7k^6qY zePYww+=YAY>n=sx-$XKsqh5kiD|wL{@*<+YWCm zZ`=D0$z0{=GgtCo6y;+c0a|G1BLD8B-^d(7jwr0`tj#3LIKu|Nr*R1Kg70 zy7P5ko);$P%tRP5WHFLJ1cOjUfFhDW*C@F! zP*B+->y_tR*SY+*fh0?_=ddh~&l$PJf;~7O{c>2O^Zq|k>Lju#U-BU%fX5-nX9XU? 
zS-I{!G!q%S`@qo#G1{H%P_9pVvr_-`W2OH0ovG_iD?Qck; zj+f)Zv=BIR8NQTHxc~j=yxeCX1wZ_4VT`th_*|zr4&!X`9kPSUGxKR42bELi;f?na zWnPj>&3@QvA}D6Y&RbZkMI}k8N01%JUkptBDoY}D+NcVVzJ}Snyesj zmVfFr{k9YCpCQvs&n=B?emorkiYGr@gq4*ojbi#+Y|+P28R6JS zm9Cd5-ufMuYqWQs9uk#uok}(L|7IKU!`}>Jl`)-!*)DRY>VDO1e`Y%Fq5b~YG!u^r?l+nT+c{{JG*6TQ!9pa(bN#FT$AN`|E`n_?5@VII^kpHf%D38QZZ)a zqJH_oY1x|8Ro;N7&7Vb*MMnC|Wq?rgIMPH6Va_z1=SI++hs>F|b2|ttFK1%WT!bR+ z+^EY-^1}~5rVbuFq&|Mlhh2lhO1)#p4%@Zv?rw#TAzNDfDdV=t9w$eRq^73q-@jjP zsO1qn^YY=NAtk^Su1-kTZK)6$r=_#l|v*zmNDuTQf5 zcj&M>e*8G~6bahnAzL83e+^CFYUu6Io|DbJYO;u>1&kGMa8a(~y;bL zT=-j;+#VykGiG3xCpzhwl5FYWvde>0W~@vfe)wdj7XNfTX2vMG&nnteVBfy|w2S4; zl#0H-e$~>9f!pq|GwXM0zkxH(*s89*_GdKZ>1BwYpj~XeWZB}=31ni^w>at) zX+c>ED?35TT>?A)lERs!zeHmVm2WPwTBK@d#vvXqn&zjbzO~ZMsw_0Gpg1vOnTxtvS=91iWu6Cd6nj`5%bG@4E>s1RDEKp4=X_?k29rag3 zgU4tovG#R|k}tVs1+pJdzaqeDaRo*crNTQM7g%}D9tl8 zPHce=m!S-#ysiJ8*K;a@R;-x=4$n$8D+^_-T&I;KE>EsmThd7MvNM>w3fl2s0`$@_a#p-RSb z&6$q#H{Cus3FgD+et!`UqQDf6f5Xa4Ns0A;O!fHYH*QqhjCt4h+f;RSYt_CUtBm+fM3pgfG9IT(G8+ZSo<6sY2g@s9Lh)!dEik$JN2+s zLmO_j)YEr9Sl^$@)4XckjGPchu*xFuXfrCAo|2NIjshhy+MI4LO{zI6nMMYth|44` z^QvXjLrG|tpgs`H;n2>~bKSX4<4}Y(?^KE<^S>z9B~ukSGLnAs(;=M7k~5lfFn69i z!#mF_ZdypXfR?i9P^QfCJ6Um1r#~y*W?aiJ59Nx}VZ{vcd972${riz~t`B}*GC*XDA5ru;%2a~X76TmdmA>`cOze!z5j7gIa{ zm4W3NityXtajC)%xT{Ht?(XVQ%U3M3EZOk?`~UUV>cojY_3SgxXY4C*;f3d`lTKRW z38^IKTxZL*S^ws7w(zDu_c5lQpW`6unD?`@?fOZJ!(dE-a*Z|f;ezBL6{B?jx5`3& zAa5X(R`B#1{*?11*6zj!^O$$v{(7n`k?u;H?ti1CWR)vQp)Rt_@yF$|rd~~@>32jyCpEo=;g+ggEBWuMt}oO zpYw{S*rgG!E4^aKt1qR_@gXBmMM(L~43Ij0Izudz!=xXc5u zFylA{{bi7ps2k^~^Wsdyc^)7=%+b{jN{iQ;>H*}&8Fb^u^$LGbpy$rDuMz{r8V>&7 zx$$eEvk}(@GNCh+E28s$$raXlE$qD?Bu!gb<6d#*#p@hn3YOpGr9Ja#_g+aA{5qEf z`YrF2%LXd$GjWe%^R4#Rl={k_rLvgEbdZ9VG0p_Twqwo{^T&A}O_arz>u-8ADA(*) zNxr#bKyl56>sEZ{b?3QT9H$@0fhC5mF|0T^!)IqEH+(STWlJwG6^Uu^l0MzB+{jz(%q3M|)UW4$)ppLy&D zhEgtheIF7{Ty%X#pQ-^0pF~c;~G8~uVRP! z=}C+S%qkKV&i*wLp=BXNEZ5~wO+l1vXD1nEa7TxO&WAfDvjZvzUb*s^Yd~^dv(6nI zgynA}#D3NQ8{)Mfb7sXD;<2F%=D)!LbJvZjDIxHG!#4bHcV-?qA3V*6L#D&v>u~L~ za!vKk7IJu+E0Cl@74Cm715$Kgk0ldkdOFSUiP|jJ%-<N7y#PZ7gVd2+v~*=Q(U1!lWH{94-q#IH3RJ@}@1bkvC6mqXZ}dkJob}`3JN32bvaZfcMLkO z9yge?F+0!wlvgX{6{1`_%b)9E!x@%YIsKjI9M1gZG1*U8j>lo0gXU7QDr3`_du~m2 zRu~Z0p2D$ae$Du-pgYUU=ltLVOAZDc%s2e+H>G5;oEgc5J2KNYkK@XLGY#i?kaU@C z8CtG`6f8LnU z!R2zZaiNh`QrUQV%D9kY<(0rCfO($TV z13U}&3eVxpUyj2H7=Nf`hAG!$whp`HpUchHoaedxIjl9{S=u$n^8U4C(6j>6nI9Nl zdA(9+oNr#8Ip(@&#Tr*wCWqoV*F3m@N7|ahK{Dc47YcA-%Wok%X-68i^n;aGEZ6S5 z%F1$WtADf&zK`9@X7gn$u+7uQi7ibZ;W>!a*RV%`Uvo@Bw6J*ykYvGvnzuCiw`ODY z8t&`NK9a{dCzFC@*k?j!xqL>EV8tUqEl*s@6TnU1OkX!+xil~5yaB^>nyi3wr^Dx7 zX_w-4XZfAyW}Y*yrC~b#S&22{TF2?Zn8##ieOTdK4p^>p&|I@QWof$pAC%g14jq%F z+08lg=~i#)G!m=`V6e#uSIlm~0A?Jz6*m$;C$oHnGDmH>PB#Eo`GqK0J_V`bxMqC? 
zttQOb#qw`KKaAu1J}`FfWCBObv|NV}QMyr#io-6qW}T+{PHS2m3c=%CsVmo|3tV#M zLZG58*W0&mw-tGkzKq7g`h9(UHvgzjIyzE2XYJg%Gh^ARsM1jFoiFd zxSH`%)kr&q!Rej92GUC{X&-<8HJ<1sIb&*aR+p+98BkLVv($JGEdfUki~Hj>FMbQ* zuqc0LXD5Bsp8fzYV4^jXKLvHmglF9y_9HsM8)jF-$vDrRruTwEd{7 zt4p_+<8?`!46K!mL7;8AK~2B!^J*kHOEtXq8tul_qQ<*U*oCCeW_2C3{PA(hXdkTz zU^vw0bk9-R8SVt_&Nrzh+IrPA4JwVi{V#=`Xr(fk)*d5%*h92aSTMbI=Q7*x$N<)M zHwMZ8iKERlHa6c3OhV>WqBv^#{m zQN{oOFvv+nK~x^kCUF>s6SEidL>qJ@r&um13*hg$oy|RjXFn{G&P<7#L7{_wH5e z*RR(L)kh^ammDaaNDqpHcGzL8Grc1hDObsr>vKhSy_V>v#rE+r1{5spGS$+ik_(pU zu-Iu0n+GhVUBwFFptPWDn>K9nDcVTg8cGu%chIhD zi5{9mmv+gev}|2NnsM4Mahx7#21wy0>lJnbOB|ti^g&H>`AME1h+qMHY_zciyNdN6 zr?t-(sU)U?4AMg&tf8lD(WxYG?F7{gc4nKmh_hq+O*B(@e&tE)xF(mawOz+L*`rOh zx_z#qUF4Drsa~m!iH`K5{}ehopDb|rASXgMHa12}zHRj0oy%yy$<-n2obrtg3$R(l zqDAhUtfCQ*1Z^atUC8?dadGeW&`W^TbEuu*EfaR?qrp14BJ=)Bv{1Ke4(_ScEB61g znif)wS}>KtAn7|lRt^%^X{8ODhahPdDlE!|w+CbtarV=ASl83H@OBVWIg)?1syfYB*7#4Mnn)2gH!&GN3x1B+pjb4StDg$V5Xk!bb=Uh*x^KYMP2Tb~sfX9hY*(aDSy`@aHIR>K*d{3VNl?W$iZ>oe6A$kEARaUi zxl2^st0)WlP5ikzR-*AXw1Mt<%W_@f>JY}0a&XWWqG+S#P}UrIEb-QqBj<9-oq*X% zn#b;;-F)g?+S^g#zM;M+r?HbmMXB>0humK6I)>ujk0{}Z# znqh6p=FIfC9?h`kb@)S;Q=T)789(l02gbOQv(3lMx7^H|@0)D$8tE+o{;I^4t>wBv zx;3voU-0GtC36~6uiy%OTAAAgw8C zG-WE!9MH|*N^*Dd?O8txoeOc@-mVt-Ag!aNR9VCKyaL{-HxJ1?On)2S4gidPUhr4ZK1zd=lIiq!o zc5SAR`UE+vH^) zn_>JOJGtd6&u_XV6b?%C?%lg>IXZs)xWcMGSTQDQhn05V4j(=o*(y+t9qn2sRth>a zn$&VphN#`p(1hxqs8f?i(jU}DoIra!tx7{}(E05VCsRkOO-m z&_D?}QLamz1;Qu~Y-P!%fSNL_{2S?+*Ufs>GSlvM~#`LshT?meFl3K$-k;;2((0nFo)J=S+z zwGAiK>{eQNiQXC{lH;_((>ygjC;#>dN;xnT!LZ5#PYYorxs*8$Q>wpC2F`Oc-_C1{l7Bg@ zsi0bZ=|HSh=VQ5sOl%lrebh)66OVX4tL zxG)psJ*yR!lxsDeUI!mm;sDAulo>o6#Fcj_k8#N!oI6joBqr4CX8Nd@EY|v?PEz{R z?EEd|c;JY%Et(A`-Of>`NF!>82PXW*#hvTabH%6-1sfR|2@xyaC^F|dUcQBwZVu?? zagdxim9k2*zSa7KrGj#Q4i7O!t5l?;~*(Gl>+B^5LrbJ>rj|NF)?Ihi_bwU%XNr? 
z^ZKCZ^r7s+l0=)T=mCH`e^YsLOTx^bdCXx=KU1N$$>qXuqze8ud6>sxiwvge=OAky zepp!*5-+YfoE3tnc*Wp81v!YK?H8{k0aK>_jr%*=I7~Z}ML=m1TnM-k5X-e2m5T8T zsU%g@v=B#SdOj$)Ff+Zf#^*rLuJh5bDLTKNv@>-L2^vxnK77yC=W2W zW`;GxaJ-8BHF!l)Zj))&wo_DxhdyMX;9Rn@F$o zZDZjTTdoTMZvtK;wYLNbQF#QMN`a|(m0f8agNBy^@ z@lD0d(ynbG0-x~QK+FrAPOs$QH3*pb^D3)(-KJbu(F-jbFS8B<*H$Wm$+6t;osbBe z(O|hQPsjIhdFI|%u9vLx6|B+%B~9fT?bEz(E3L3no}v&C<+>=?OFU6?T8OEDm3X#& zWzTb_Z+*RNeqn^W- zPB>Tb4o1vYN+X3uEIb-77Gb z?VM}tJ`UR$Bwoo9FfG@z^?AuwQ#og>qcyd_S1Vo10M1 z2d;TO7#SJ#4C|Iy1^Ah~%;T^{GdV^fFKj*}Uj795899GRBB8_x*rtV;igtFMaWE)&BhlU9O7P3;{!V zn(_2LG8F{#*gIwggm7VIDoN(Ct+X7^9Ff`27BJWef7CdhUL@b^6{eX#^C8JdHQ6$o z&M0r#K!KeZkNv9R*Qlc?7?=+~S@>54R-=%Z7E+#5L(F^GD3BbMKLJyr_Ucp1zqC?< zA`mDU<@(V_pHSC+;%eF+bFSL3VV(NWhu^R6x%<8hBs+KRQBOa$U47=W*JfN3k2nYz z%10dH3n??K@?;{&rZ90BjK{o$SJ?DSCOim)v0P`#YjXY)HM(R?dZmDLVm!aQ+6Ef4 zj?81T4uTw;52I1Y3zBxsVcXk}=stP(?pFgT%Vjq$q>*;+gX7=SnfzZt1jKS(L6upe zMJWm8T5lQJYFN<-*5btrRd4U{3~Z;IvPr$~ig&BJYEuF-NEFC}mBs=k7i%(plpWXd znx5vdnOpYP+O>HhzhkysvlOp(ZY|G;P=f=5>U%f;z<0LF&T_qUb?RQI(v>uPS8O>4H2cX2Zu@dZ zA$gC7KuN3?g89>n7B9*uVXc=UkrBi0;hd8_K0dCdrl!>J@UT5#L}!o>2l>;OCtCoA z2NR@+a{dAwHO9YnKU{8U?xRrbnrb)nxx3 zAu%#)g`cKlHx~s{H@Ug|xDM<1Bh6faBe({pQ0h=tFK5*);?#SGcaw+obQwp-6wA~6 zr?O8iXl`zH6IF74xF5PYyVWN?`B9y>vuWzyz~GRA1$*=6jh^`}-=+9GizWdofk5_K zv=T}uWhfGXl2ESe>+3x~U<|`&-@bjeXi$EKhK6EOd{3M>q0?(`Z&xT&AzVjChfb@Z zp}|V7sk1{LPt6)s6NBmPU9GGB4lEf>Eox$DX6}1;cehrqG2j-$L194|o0^(JC&y*M ztH80z+#hH*_H@u2lU@a$lXb0Dnk-|gu~|(FXDYWfsB^cSW6R)q=WSDW-0?GC%C#+`oCtwZA+T(1YUf+(v&9L4s0oyWay@_k zJW`-KGs-qGF{9LFT=d~_+G(fRLLEAENT07-70<%?0|NtU&z?PM#hP%P-CYC@oN|^HM~(EcNsNvizjn z*==u7)8k`m_3G8CeuJU>u#Dz5r4}vElrct%aR<%S#5_%!)2xnCS`Abu=)gT(Yip}k z1Q#z}oEdUtNU5&FDSy1l@tU^LPgALmccsE(num+S8y%#%Y11Z`tMPc@!FKcJ&8kqA zYbf$)H;Wc!z7fkPA=mBPg-Xp?ka5|M2k!m0wl=kK{W_%%r5~(U&~Xms+K=2)2m`D2 zmMzA;K$(K^!0qiE-WI(6`s=oA_x1IuojZ4`=bn4c<{!lguPSxtopDAGX;9nR@4=-+3}nx*E9v%eaaCnGD zUBvNV0Oc3?N4>`3@IV$vdqG~&UhoDjjyHEuP|;>!yokf$%{z3YINku_9><^a{*9^} z{r&yg01?N7L80EH2VqeX$~7{4_0=Cxx7>1vR-%W7Xx<||)qmiFSLm|b^xa$4-~IiU zgOp3c=1Tx`W3fU-C4DjPN4fS4lSN^MRUFnW%(b6Pzj5)D<)e~J<=W3T2%=m2fEsfU zZU<4+f6Q6+BYqVbHumk?Pj3OpvY&Q+ef_G%WzIMTPUl-KOq0V~#Iw&no1;heyKC1j zb??3Rs!K1uR4rH#?yW#QuP=jV+#*uLLt1xyM|FdcQvmMsBe`U{RXYKYmx=-niVem9;KAE{G z*7&GKT#N)(tdCK6u|#K4jHAj99z-yWV>H}Hajw}2s-Oi~A;dL7@ywgRD({c{@=l|G z0=915qQ3c!8`U;r-Zhp<`#<+0zyCtR1@u8P?u?#5lZBxMPKl_eGy-i!Nw?)TQ`A9%mK`)z0DI&><#ho7q63tAYD=KNN^`9(?rU>Skygf9T)1eC1Iv&)iLKU(`9QK zpq33)qk@b@(2Dg&LE_pm4RuNA@&uuu5?8~5 ziHNRwU00M!+7ZW4I^t!B*Lt$)wq8b*G3c3{FD#Fs;tQEFhf-UP#zazF7sU(VWu+hV zOw-G?2Hn5_4I)1Y*ssq7D^-nt>4=W#U64)Q2^vk5*T2isCX@RbzaKY-%(y|5A7IWH zKhkY4T24G%N&!yu#d7meOmM)szMa6M4Ps20b_g&_HVs?%+aNlyYB+xMA?3;3 zLY{K~?0LE|1tysquyP?Y>(P_~Xi?vkZScC003~3maY@tEr>z!Mi>a_(6l|@nWq|>t^hR0HyBknO`PfpKCwu$*({>LRW$F(PT?NF&jn#`4 zKDV@(Ou|SveQ8h5#>e9y&~OgR7nOj;C1r5rjrT4Lk(99SJ6Ty@w}$1{H7_lhhCgAC4Z0o3Yi%VS z*%jhXy@%~fLro1+1~3c8G11iA{iw(LFCkk^R1Y37jESLmF7lk#T>8(eOqwhyvD$Ao z>ajO;z6@s8T&>6b3|!^9lkr78TA(K1Dmyznwg}6!La|BZMo#D60NC$)$GDX}yz_6K_`w>2lOi>kWp$o!&(I^yZ z2K8Q;)ax_mnmVn4B!X7&aw0C&-}vrck?}m^Hh-1tUFB6jg*!$mIz`4UKaksj7YO9`To9{*yx*1N1Ywr`-6r zuFdtIRP6^LH%Hi?ThhK2{w?*d4-t&?$vYRPim(36FefJ3n3H*kd+iner9=7=1(FM&je#mV9pLzmO4+NyeP<6HZMjKl@J9>oT2Sz&bO%5uoC6lwZS$R{H zCN#zRRj%#G;tVI9obv@%q-W-G;7OppJ#U@ZI)ZEn*6<^H?}1PxYkkY<>d;g%XvSkr zV&6A{oETRNul4G3xwNXvX-?e+e=oiW#++hY2!({gCkkk`XKiz)5oc}Frv4^@E4RS; z-pX%7;Y)gETHLKixD}3-rgfTGfum`nXTquZS(km)KXvLn>uM>-QVn^)*^Rqj1MpW~W!Lj3urXAbt*8LfQVwd+B8T zYc~Q16V)UOaBy)ZjktXvleqMAD_wirMHJp!l3vi_JGAe3xkG!j)FdlJMMWn!6NSpO zRMQL!>7$$7qi22w1O&VmoJ^~3$uGr%tsJhg3gg%LAs@}LG1t8p=*_jw_EjCp5Yo#E 
zLY3tS(!+OmHRsFJMB#;X+ew^!RE6sqCxPe3kB_{IGbH@K%ry!C(cZ0|(RbeQR9S_V z$M7F#`)vFLG!9jER2Y1Nm`F?uQ9+$VdyOO@J=%S1{3}<2e&N8aXlXkKru?;NhJH!c6zFY$ZO|l>+c&iXW`~IMDgh)q<+2vD2f;=s>#Ow;?{jN6gp_vkc zb7<((Dc@Gy!eExR!x$>gC!o`gxI_twzDoD?F6&n&cs>WvoCO=J!HG4?Xt$Qr&Y?`@ zcchE!pU1>Y#@1d$%bG5J7CP&^#SK4NwM1p@2dEAGxN>Si#xm_CksFE0%`-BGCO((E zx_CYS%yN}-C|;MncX>}2a6T`@Y8K#%JK1B7pzubQOzN$NZuBP<41g{davT~sa6QpT z@VOVFJ>;pPpMpw$3ou@!a-XC2e*RQkLZInb?7U%2?E3@NK^x2O3Wc}pkJ{6k_gf=8 zmQRkL^NGt>OM9)>=^?Gi5wP03Pbo|rEN4KJqU2ct-C`NHe1bP0O|A3#dlOR6;Pa>? z?HxiS{#S=-+7ZlV(?De1dRyS$8>ZeVdZ+Va3V!c?sjP>bUT2$GZ|~`5CxC0^HnW|x zEGy5a!Dw=%g^Axn*UYa(x+`N_iD=QD`!}+%t=JHk@2dk;mgN=bUoGWPN!_I^yZ`ZW zgqeF;w1!#Rl9iW}>2+V$^2H`Ga_OYFGLC3^W+qqnTcS`Uo<^givtL2u@mz&n#;fz> z(I&6`j~u}1D7BcDmKI-D34gDQpmH1OA-fFESMs!azA4;hFFqezLG<)}OA~c$Gh1~g zVZ;2>-Yw_6$OC6^pTW=$1v#ERodtVDXt>>N@_|}UoFEEg7g_`BoGuH~*q`8$Bpjox zySE?^xa6k?InT)HlJM=SjlSw;DJ8$JPs=xETcf+IfpDlAS=r3Zee1lB+^;nFtnKJ% z@@kKi+yPqd+e>LQN$0i8fv068U2(eJaJ{@7m3*4zHAhv-We7X3^7-0nL{R&y#_7kX z#2o58ZOqi+-XX+n;e#YP&0!$FQIpfKRy{DeYT17u>kE%5t5|7Zz(qP?1+W}*3~zaZ zI_wKN@elt>FR|DMvq?cG00I4{y`N#c+sh~WpzqAz8ZSv&>O_7GLkD=9-pmDl5WTt( zD$DD57$^bsZW%&}qZ+@VjIj+ZGC7)fw-R!0gTm$Vl@Y8<^&A&M&_yb$n-x^<>o`-R zYJ#31{5IchW&}SMIYsa1=w72toK_oyWZRrSS`>|R3;M-+5`Rq4PnN zPAwaEJ?9-k`4Mfs`f0paZ1nqua19q%UX!+<52&%Li#mp#SQN6FPW%4Rfgy;iIPqSh zu`C)AL9QGa^*L*fBjyCw6Lx8;vw4lMe4wrCSD2 zm#3ti4MlMs!KN|t$>K?$AY7tmL+;PH2N5zFzL#edo)L#laysLwJ{NgI z_I=SBTPRDlTEwPD5dBMqP-$jcpaNC`3XI84ykZ5T4C?%+D^M1lDqf_@bR{DGs2E%W zXLIb=n3%V;GnG4T|I&3ek{SqyK-jGb`%1G50HZ-@ z9&#ZqRka|pK(Y85_6l>%TgKkr-s?X0_IwcoQ(wp>uI8LAHv?vgH3JT5x@V81mQ{Y4 zTCnlZ&<6Ff_Nr88_NmhY_?Ab|%k3ysMcV}xGLO)tXiYZ3d%=iM;xrOf@SLl5gE3mP z-CK$0ow2ktt>i(+e*yp2L+Lg@XQ%_3%zy>t2i=wF^l4FWpGU&-gj3yC4i4dTCTh+p*rx@Ll)o zX=-^D7wX}Nz=xgrS*P^}$Lyw4n%odriD=Ua7z3(^U(Kwoks> z$&CeT6aB+lr}ZL`?}Vac=aHB>jrrgkc&<6L`DK=At8sBjv4D{>JhaP0k%RwWt3a*Z z9Y583^h6Cl^1Vb7=_(mhsA>jA2L~A)c!RHOI4K;@fhnQZw8;tkNi%LGv_e7Ss(0EM z=KSig*e4epEE%?VUvY_($b+ahu+Nh)!}k%)^+-Y^&&^Z@-d-MEoeo^RWwk*NdLB+u z%+Zo;tBmWUd$(Da?}uc#;scw;l{fp_XZk*SU0kt<)#Apr*qkzlsxv&+0<&d{pFp75 z&xM8|!Rbs%E(VBNPz9FxHvVQta3`i_=W5{DHyM@xGI|Z5gj)Vf5 zoo#9)*3fQ$TLSBm0N~`b44b6_PZ{@Z6HwBx7?HF3S=}+x7(rgzYLXvF{037IWx{SU^Bn+#7^zDiyv+u|KM8uaGD+f7LwN zqlTX}dBMtiOT8&w$lK|avPjlMf`%ll?Sg>EzSsfaeCUeH$gtKAm(!$RMoic_Wa|s`VU9an?L}e1LXCF0(ax-2f;RDRxPc>f=40zyfG0Hbb4S!XAcyIapTi}*(j@V$) zpaTq){`;l;XTa2RtYKYF2ls_vkw<2`W&p6ZeN-g=-o}(CuI_Vz5xD+S3P)b*TDVBC zYyCd>$_ohI4jN{8P+evr9c*(1L@tGMFa7eBBY^azv>ko*YpqHf@1ZcuhFBWnkap};U)6HqR*I$C2Xn8A#d>}7tJ#4WEqxjpHeBjs(Nh-6W&Y@4a2OVI zm#&)QmnWg*X-!p*fP-Ecx|$9~Ksj|k91_Zo!`l2}Q8x73>1n;FtozPoswt=CK{SLN zr-!Q5Gx5vY)aF%7f(`P}Ekn&(i&@H&qbHt>T*W>XiI%+ zI`xLI+>Kw!PrlL-;7YOtPB$%)N4eb@VnL}^%67&kC%~o@17Np@_E;qkHCi5#nw_A~ zofMZ!YIM9^S#EKx}q2%OCh##^Fw89V%r9A0MGvQ-DsycH3GCWZKRTM!2_MF~cJ z75XtTc;Dr0b8#3qX@Ew|bfO}o9keXk9y&;9w`=$uExI7B$p#7__%)~iP!=WOd3 zYPyG&R5q-O*2G;M=pynWXf0_~Rx+{ngF>7(Ton*Cf7>{k>hEa)+exZ#w zlIm^vEpj%tsnjVqS5)5PWl=e?^9}BO-3q-A-yTEizST_Ut*iNwCv;PTagVSHng zV;%Git9wnApR>t@?OQ})fj}veR@01d&le=^Cz0v-=CN5^<{F7)LY*^xqfyi}0rqxZ z=lW%>cw{L*1|+&~s~Rgm5$^CaE>r%veJgd8xk4N)J3vNGp|!B{9ezw?wBK z{q|H0BS)a?arN03%5_5C%*Tnj9Il(d0;T<3!0FA|hHY{LtsLELE8^C3m!%4gbsFE{ zO~eN+WutB^yy80|ebH>&wef-ag5<8#(q9^*=FO6J>hU1~zhfdHE9flIEk0AS`qm|W{06eh1v3D}n#5q+T6|LS`b9H zFyLxd9gk!HHrd=U<@o#?JqK08C)I$t!&r3P=eMI5bX2P4io8zTb}GW$v!flGTT=YG zE*a(`09u^8i+2itj1FT@dHo!0$R)shMv6J9nEP_j({pvy#GY+3r_n**g5hGU%1OhM zmr+uPr{S+A_R9e5a+r4;0SN94KB-B=sLg;tb%TM4Wr^1Jo0o`IVr1t8d)D@{IDh%& znWGB!Xb2PVT#A&)J)U!ATajUhZ#kmy{)THI(u-os0ry-#js_`i-dN*)wjzUG_8!$d zy$6&BnuLq_EhqUYry}ij`}i@^eAHjAzRUhY`aL9U=^LV@J&Z3;UkQ*;?u-W@_@wzj 
zm#7Y6BzTE-SSiMi@yZ9zp<|Zu!DE#hU_*ceZ;lT8PEJhOQORt7WE((kQW-$Tv8&Kk zd4;I&ac^L8rrCIzysPwF){%ejM2G=$u4utlI12~P-T><6-;r};{$1VbZ*E2lau(`W z=KIk+5WIpIk!+XH*-b!YUJsu3Px+;hlaQu|0X9Z22sEkpjqXhV+f(no8C`7r%UGf< z%YFQy$e1;ikH(`_VDKJ`D!Y4pGXRtm-3iG>Ta6z~tn<>c1K)M&6cPZb*-{fpI|l}% z+$@BT&J;BU_n%6)!WI7NM^a@Z@t3vQ4$lw(ds0vEwX@tzlxIxsz=xL_$gY%L&ziXG zsocvA-dWjRNE)yy0Q8Z1;`mEgO!O&Gp*r5RK2(MxpbuV|LI~dQ#gwi;d-#Kn%Tcqb zQA><}RX_7Dwc%lW9`iW%+^nPLbr1I}PI&w(uUZjo_#f>k=WY!O*vbaIFE6Mwa%#>u zV74ogp#FPP2QS3sW+CWKpNb_=T|6kH8#sxW2n<()W}0aVk%NBsV3xm%_ivXoVnGu$ z4VQEavYV>}G}9QAukHPNO7KxhkfP!>zFHn7HOW5RQ@(J1qFN3P^3ZOXqKbLl5)>4J z`i8Bt{pk(@k*DwZ2o4BLof;52c|=p;wfSCytLJM>dH`116VdcL_9mq>1 zIe;^nlK!dd?-~x6=m+aq8SsCv4#>Hu(PfH-eG`)EV YV)+2S5qX-6bp!Y*%Bjj$$(X= 0 && j < len ? [ this[ j ] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. + push: push, + sort: arr.sort, + splice: arr.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !jQuery.isFunction( target ) ) { + target = {}; + } + + // Extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + + // Only deal with non-null/undefined values + if ( ( options = arguments[ i ] ) != null ) { + + // Extend the base object + for ( name in options ) { + src = target[ name ]; + copy = options[ name ]; + + // Prevent never-ending loop + if ( target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = Array.isArray( copy ) ) ) ) { + + if ( copyIsArray ) { + copyIsArray = false; + clone = src && Array.isArray( src ) ? src : []; + + } else { + clone = src && jQuery.isPlainObject( src ) ? 
src : {}; + } + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend( { + + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + isFunction: function( obj ) { + return jQuery.type( obj ) === "function"; + }, + + isWindow: function( obj ) { + return obj != null && obj === obj.window; + }, + + isNumeric: function( obj ) { + + // As of jQuery 3.0, isNumeric is limited to + // strings and numbers (primitives or objects) + // that can be coerced to finite numbers (gh-2662) + var type = jQuery.type( obj ); + return ( type === "number" || type === "string" ) && + + // parseFloat NaNs numeric-cast false positives ("") + // ...but misinterprets leading-number strings, particularly hex literals ("0x...") + // subtraction forces infinities to NaN + !isNaN( obj - parseFloat( obj ) ); + }, + + isPlainObject: function( obj ) { + var proto, Ctor; + + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { + return false; + } + + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; + } + + // Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; + }, + + isEmptyObject: function( obj ) { + + /* eslint-disable no-unused-vars */ + // See https://github.com/eslint/eslint/issues/6125 + var name; + + for ( name in obj ) { + return false; + } + return true; + }, + + type: function( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + return typeof obj === "object" || typeof obj === "function" ? + class2type[ toString.call( obj ) ] || "object" : + typeof obj; + }, + + // Evaluates a script in a global context + globalEval: function( code ) { + DOMEval( code ); + }, + + // Convert dashed to camelCase; used by the css and data modules + // Support: IE <=9 - 11, Edge 12 - 13 + // Microsoft forgot to hump their vendor prefix (#9572) + camelCase: function( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); + }, + + each: function( obj, callback ) { + var length, i = 0; + + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } else { + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } + + return obj; + }, + + // Support: Android <=4.0 only + trim: function( text ) { + return text == null ? + "" : + ( text + "" ).replace( rtrim, "" ); + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArrayLike( Object( arr ) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? 
+ [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + return arr == null ? -1 : indexOf.call( arr, elem, i ); + }, + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var length, value, + i = 0, + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return concat.apply( [], ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // Bind a function to a context, optionally partially applying any + // arguments. + proxy: function( fn, context ) { + var tmp, args, proxy; + + if ( typeof context === "string" ) { + tmp = fn[ context ]; + context = fn; + fn = tmp; + } + + // Quick check to determine if target is callable, in the spec + // this throws a TypeError, but we will just return undefined. + if ( !jQuery.isFunction( fn ) ) { + return undefined; + } + + // Simulated bind + args = slice.call( arguments, 2 ); + proxy = function() { + return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); + }; + + // Set the guid of unique handler to the same of original handler, so it can be removed + proxy.guid = fn.guid = fn.guid || jQuery.guid++; + + return proxy; + }, + + now: Date.now, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. + support: support +} ); + +if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; +} + +// Populate the class2type map +jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), +function( i, name ) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); +} ); + +function isArrayLike( obj ) { + + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = jQuery.type( obj ); + + if ( type === "function" || jQuery.isWindow( obj ) ) { + return false; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! 
+ * Sizzle CSS Selector Engine v2.3.3 + * https://sizzlejs.com/ + * + * Copyright jQuery Foundation and other contributors + * Released under the MIT license + * http://jquery.org/license + * + * Date: 2016-08-08 + */ +(function( window ) { + +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + 1 * new Date(), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // Instance methods + hasOwn = ({}).hasOwnProperty, + arr = [], + pop = arr.pop, + push_native = arr.push, + push = arr.push, + slice = arr.slice, + // Use a stripped-down indexOf as it's faster than native + // https://jsperf.com/thor-indexof-vs-for/5 + indexOf = function( list, elem ) { + var i = 0, + len = list.length; + for ( ; i < len; i++ ) { + if ( list[i] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + + // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier + identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + + "*\\]", + + pseudos = ":(" + identifier + ")(?:\\((" + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + // 3. 
anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rwhitespace = new RegExp( whitespace + "+", "g" ), + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), + + rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + identifier + ")" ), + "CLASS": new RegExp( "^\\.(" + identifier + ")" ), + "TAG": new RegExp( "^(" + identifier + "|[*])" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + + whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + + // CSS escapes + // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), + funescape = function( _, escaped, escapedWhitespace ) { + var high = "0x" + escaped - 0x10000; + // NaN means non-codepoint + // Support: Firefox<24 + // Workaround erroneous numeric interpretation of +"0x" + return high !== high || escapedWhitespace ? + escaped : + high < 0 ? 
+ // BMP codepoint + String.fromCharCode( high + 0x10000 ) : + // Supplemental Plane codepoint (surrogate pair) + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }, + + // CSS string/identifier serialization + // https://drafts.csswg.org/cssom/#common-serializing-idioms + rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, + fcssescape = function( ch, asCodePoint ) { + if ( asCodePoint ) { + + // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER + if ( ch === "\0" ) { + return "\uFFFD"; + } + + // Control characters and (dependent upon position) numbers get escaped as code points + return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + } + + // Other potentially-special ASCII characters get backslash-escaped + return "\\" + ch; + }, + + // Used for iframes + // See setDocument() + // Removing the function wrapper causes a "Permission Denied" + // error in IE + unloadHandler = function() { + setDocument(); + }, + + disabledAncestor = addCombinator( + function( elem ) { + return elem.disabled === true && ("form" in elem || "label" in elem); + }, + { dir: "parentNode", next: "legend" } + ); + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + (arr = slice.call( preferredDoc.childNodes )), + preferredDoc.childNodes + ); + // Support: Android<4.0 + // Detect silently failing push.apply + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + push_native.apply( target, slice.call(els) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + // Can't trust NodeList.length + while ( (target[j++] = els[i++]) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var m, i, elem, nid, match, groups, newSelector, + newContext = context && context.ownerDocument, + + // nodeType defaults to 9, since context defaults to document + nodeType = context ? context.nodeType : 9; + + results = results || []; + + // Return early from calls with invalid selector or context + if ( typeof selector !== "string" || !selector || + nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { + + return results; + } + + // Try to shortcut find operations (as opposed to filters) in HTML documents + if ( !seed ) { + + if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { + setDocument( context ); + } + context = context || document; + + if ( documentIsHTML ) { + + // If the selector is sufficiently simple, try using a "get*By*" DOM method + // (excepting DocumentFragment context, where the methods don't exist) + if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { + + // ID selector + if ( (m = match[1]) ) { + + // Document context + if ( nodeType === 9 ) { + if ( (elem = context.getElementById( m )) ) { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + + // Element context + } else { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( newContext && (elem = newContext.getElementById( m )) && + contains( context, elem ) && + elem.id === m ) { + + results.push( elem ); + return results; + } + } + + // Type selector + } else if ( match[2] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Class selector + } else if ( (m = match[3]) && support.getElementsByClassName && + context.getElementsByClassName ) { + + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // Take advantage of querySelectorAll + if ( support.qsa && + !compilerCache[ selector + " " ] && + (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { + + if ( nodeType !== 1 ) { + newContext = context; + newSelector = selector; + + // qSA looks outside Element context, which is not what we want + // Thanks to Andrew Dupont for this workaround technique + // Support: IE <=8 + // Exclude object elements + } else if ( context.nodeName.toLowerCase() !== "object" ) { + + // Capture the context ID, setting it first if necessary + if ( (nid = context.getAttribute( "id" )) ) { + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", (nid = expando) ); + } + + // Prefix every selector in the list + groups = tokenize( selector ); + i = groups.length; + while ( i-- ) { + groups[i] = "#" + nid + " " + toSelector( groups[i] ); + } + newSelector = groups.join( "," ); + + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + } + + if ( newSelector ) { + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch ( qsaError ) { + } finally { + if ( nid === expando ) { + context.removeAttribute( "id" ); + } + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {function(string, object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return (cache[ key + " " ] = value); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ 
+function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support testing using an element + * @param {Function} fn Passed the created element and returns a boolean result + */ +function assert( fn ) { + var el = document.createElement("fieldset"); + + try { + return !!fn( el ); + } catch (e) { + return false; + } finally { + // Remove from its parent by default + if ( el.parentNode ) { + el.parentNode.removeChild( el ); + } + // release memory in IE + el = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split("|"), + i = arr.length; + + while ( i-- ) { + Expr.attrHandle[ arr[i] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + a.sourceIndex - b.sourceIndex; + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( (cur = cur.nextSibling) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return (name === "input" || name === "button") && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for :enabled/:disabled + * @param {Boolean} disabled true for :disabled; false for :enabled + */ +function createDisabledPseudo( disabled ) { + + // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable + return function( elem ) { + + // Only certain elements can match :enabled or :disabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled + if ( "form" in elem ) { + + // Check for inherited disabledness on relevant non-disabled elements: + // * listed form-associated elements in a disabled fieldset + // https://html.spec.whatwg.org/multipage/forms.html#category-listed + // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled + // * option elements in a disabled optgroup + // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled + // All such elements have a "form" property. 
+ if ( elem.parentNode && elem.disabled === false ) { + + // Option elements defer to a parent optgroup if present + if ( "label" in elem ) { + if ( "label" in elem.parentNode ) { + return elem.parentNode.disabled === disabled; + } else { + return elem.disabled === disabled; + } + } + + // Support: IE 6 - 11 + // Use the isDisabled shortcut property to check for disabled fieldset ancestors + return elem.isDisabled === disabled || + + // Where there is no isDisabled, check manually + /* jshint -W018 */ + elem.isDisabled !== !disabled && + disabledAncestor( elem ) === disabled; + } + + return elem.disabled === disabled; + + // Try to winnow out elements that can't be disabled before trusting the disabled property. + // Some victims get caught in our net (label, legend, menu, track), but it shouldn't + // even exist on them, let alone have a boolean value. + } else if ( "label" in elem ) { + return elem.disabled === disabled; + } + + // Remaining elements are neither :enabled nor :disabled + return false; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction(function( argument ) { + argument = +argument; + return markFunction(function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ (j = matchIndexes[i]) ] ) { + seed[j] = !(matches[j] = seed[j]); + } + } + }); + }); +} + +/** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== "undefined" && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + // documentElement is verified for cases where it doesn't yet exist + // (such as loading iframes in IE - #4833) + var documentElement = elem && (elem.ownerDocument || elem).documentElement; + return documentElement ? documentElement.nodeName !== "HTML" : false; +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, subWindow, + doc = node ? 
node.ownerDocument || node : preferredDoc; + + // Return early if doc is invalid or already selected + if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Update global variables + document = doc; + docElem = document.documentElement; + documentIsHTML = !isXML( document ); + + // Support: IE 9-11, Edge + // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) + if ( preferredDoc !== document && + (subWindow = document.defaultView) && subWindow.top !== subWindow ) { + + // Support: IE 11, Edge + if ( subWindow.addEventListener ) { + subWindow.addEventListener( "unload", unloadHandler, false ); + + // Support: IE 9 - 10 only + } else if ( subWindow.attachEvent ) { + subWindow.attachEvent( "onunload", unloadHandler ); + } + } + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties + // (excepting IE8 booleans) + support.attributes = assert(function( el ) { + el.className = "i"; + return !el.getAttribute("className"); + }); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert(function( el ) { + el.appendChild( document.createComment("") ); + return !el.getElementsByTagName("*").length; + }); + + // Support: IE<9 + support.getElementsByClassName = rnative.test( document.getElementsByClassName ); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programmatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert(function( el ) { + docElem.appendChild( el ).id = expando; + return !document.getElementsByName || !document.getElementsByName( expando ).length; + }); + + // ID filter and find + if ( support.getById ) { + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute("id") === attrId; + }; + }; + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var elem = context.getElementById( id ); + return elem ? [ elem ] : []; + } + }; + } else { + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== "undefined" && + elem.getAttributeNode("id"); + return node && node.value === attrId; + }; + }; + + // Support: IE 6 - 7 only + // getElementById is not reliable as a find shortcut + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var node, i, elems, + elem = context.getElementById( id ); + + if ( elem ) { + + // Verify the id attribute + node = elem.getAttributeNode("id"); + if ( node && node.value === id ) { + return [ elem ]; + } + + // Fall back on getElementsByName + elems = context.getElementsByName( id ); + i = 0; + while ( (elem = elems[i++]) ) { + node = elem.getAttributeNode("id"); + if ( node && node.value === id ) { + return [ elem ]; + } + } + } + + return []; + } + }; + } + + // Tag + Expr.find["TAG"] = support.getElementsByTagName ? 
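+
+ /*
+ * Both branches below implement the same contract: a find function taking
+ * ( tag, context ) and returning an array-like of candidate elements.
+ * A minimal sketch of that contract (hypothetical, for illustration only):
+ *
+ *   Expr.find["TAG"] = function( tag, context ) {
+ *       return context.getElementsByTagName( tag );
+ *   };
+ *
+ * The fallback branch is chosen when the support test above found that
+ * getElementsByTagName("*") wrongly includes comment nodes.
+ */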
+ function( tag, context ) {
+ if ( typeof context.getElementsByTagName !== "undefined" ) {
+ return context.getElementsByTagName( tag );
+
+ // DocumentFragment nodes don't have gEBTN
+ } else if ( support.qsa ) {
+ return context.querySelectorAll( tag );
+ }
+ } :
+
+ function( tag, context ) {
+ var elem,
+ tmp = [],
+ i = 0,
+ // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too
+ results = context.getElementsByTagName( tag );
+
+ // Filter out possible comments
+ if ( tag === "*" ) {
+ while ( (elem = results[i++]) ) {
+ if ( elem.nodeType === 1 ) {
+ tmp.push( elem );
+ }
+ }
+
+ return tmp;
+ }
+ return results;
+ };
+
+ // Class
+ Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) {
+ if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) {
+ return context.getElementsByClassName( className );
+ }
+ };
+
+ /* QSA/matchesSelector
+ ---------------------------------------------------------------------- */
+
+ // QSA and matchesSelector support
+
+ // matchesSelector(:active) reports false when true (IE9/Opera 11.5)
+ rbuggyMatches = [];
+
+ // qSa(:focus) reports false when true (Chrome 21)
+ // We allow this because of a bug in IE8/9 that throws an error
+ // whenever `document.activeElement` is accessed on an iframe
+ // So, we allow :focus to pass through QSA all the time to avoid the IE error
+ // See https://bugs.jquery.com/ticket/13378
+ rbuggyQSA = [];
+
+ if ( (support.qsa = rnative.test( document.querySelectorAll )) ) {
+ // Build QSA regex
+ // Regex strategy adopted from Diego Perini
+ assert(function( el ) {
+ // Select is set to empty string on purpose
+ // This is to test IE's treatment of not explicitly
+ // setting a boolean content attribute,
+ // since its presence should be enough
+ // https://bugs.jquery.com/ticket/12359
+ docElem.appendChild( el ).innerHTML = "<a id='" + expando + "'></a>" +
+ "<select id='" + expando + "-\r\\' msallowcapture=''>" +
+ "<option selected=''></option></select>";
+
+ // Support: IE8, Opera 11-12.16
+ // Nothing should be selected when empty strings follow ^= or $= or *=
+ // The test attribute must be unknown in Opera but "safe" for WinRT
+ // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section
+ if ( el.querySelectorAll("[msallowcapture^='']").length ) {
+ rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" );
+ }
+
+ // Support: IE8
+ // Boolean attributes and "value" are not treated correctly
+ if ( !el.querySelectorAll("[selected]").length ) {
+ rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" );
+ }
+
+ // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+
+ if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) {
+ rbuggyQSA.push("~=");
+ }
+
+ // Webkit/Opera - :checked should return selected option elements
+ // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
+ // IE8 throws error here and will not see later tests
+ if ( !el.querySelectorAll(":checked").length ) {
+ rbuggyQSA.push(":checked");
+ }
+
+ // Support: Safari 8+, iOS 8+
+ // https://bugs.webkit.org/show_bug.cgi?id=136851
+ // In-page `selector#id sibling-combinator selector` fails
+ if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) {
+ rbuggyQSA.push(".#.+[+~]");
+ }
+ });
+
+ assert(function( el ) {
+ el.innerHTML = "<a href='' disabled='disabled'></a>" +
+ "<select disabled='disabled'><option/></select>";
+
+ // Support: Windows 8 Native Apps
+ // The type and name attributes are restricted during .innerHTML assignment
+ var input = document.createElement("input");
+ input.setAttribute( "type", "hidden" );
+ el.appendChild( input ).setAttribute( "name", "D" );
+
+ // Support: IE8
+
// Enforce case-sensitivity of name attribute + if ( el.querySelectorAll("[name=d]").length ) { + rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); + } + + // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) + // IE8 throws error here and will not see later tests + if ( el.querySelectorAll(":enabled").length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: IE9-11+ + // IE's :disabled selector does not pick up the children of disabled fieldsets + docElem.appendChild( el ).disabled = true; + if ( el.querySelectorAll(":disabled").length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Opera 10-11 does not throw on post-comma invalid pseudos + el.querySelectorAll("*,:x"); + rbuggyQSA.push(",.*:"); + }); + } + + if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || + docElem.webkitMatchesSelector || + docElem.mozMatchesSelector || + docElem.oMatchesSelector || + docElem.msMatchesSelector) )) ) { + + assert(function( el ) { + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9) + support.disconnectedMatch = matches.call( el, "*" ); + + // This should fail with an exception + // Gecko does not error, returns false instead + matches.call( el, "[s!='']:x" ); + rbuggyMatches.push( "!=", pseudos ); + }); + } + + rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); + rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); + + /* Contains + ---------------------------------------------------------------------- */ + hasCompare = rnative.test( docElem.compareDocumentPosition ); + + // Element contains another + // Purposefully self-exclusive + // As in, an element does not contain itself + contains = hasCompare || rnative.test( docElem.contains ) ? + function( a, b ) { + var adown = a.nodeType === 9 ? a.documentElement : a, + bup = b && b.parentNode; + return a === bup || !!( bup && bup.nodeType === 1 && ( + adown.contains ? + adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + )); + } : + function( a, b ) { + if ( b ) { + while ( (b = b.parentNode) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? + function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? + a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { + + // Choose the first element that is related to our preferred document + if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { + return -1; + } + if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + } + + return compare & 4 ? 
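+
+ /*
+ * compareDocumentPosition() returns a bitmask; bit 4
+ * (DOCUMENT_POSITION_FOLLOWING) in a.compareDocumentPosition( b ) means b
+ * follows a in document order. E.g. for adjacent siblings
+ * <p id="x"></p><p id="y"></p>, x.compareDocumentPosition( y ) === 4,
+ * so x sorts first (-1).
+ */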
-1 : 1; + } : + function( a, b ) { + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + return a === document ? -1 : + b === document ? 1 : + aup ? -1 : + bup ? 1 : + sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( (cur = cur.parentNode) ) { + ap.unshift( cur ); + } + cur = b; + while ( (cur = cur.parentNode) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[i] === bp[i] ) { + i++; + } + + return i ? + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[i], bp[i] ) : + + // Otherwise nodes in our document sort first + ap[i] === preferredDoc ? -1 : + bp[i] === preferredDoc ? 1 : + 0; + }; + + return document; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + // Make sure that attribute selectors are quoted + expr = expr.replace( rattributeQuotes, "='$1']" ); + + if ( support.matchesSelector && documentIsHTML && + !compilerCache[ expr + " " ] && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch (e) {} + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + // Set document vars if needed + if ( ( context.ownerDocument || context ) !== document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + (val = elem.getAttributeNode(name)) && val.specified ? 
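+
+ /*
+ * Illustrative note: the getAttributeNode fallback below covers old IE,
+ * where getAttribute() can reflect the live property instead of the markup
+ * attribute. For any attribute never set in markup, node.specified is
+ * false, so Sizzle.attr( elem, name ) still reports null as elsewhere.
+ */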
+ val.value : + null; +}; + +Sizzle.escape = function( sel ) { + return (sel + "").replace( rcssescape, fcssescape ); +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( (elem = results[i++]) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + // If no nodeType, this is expected to be an array + while ( (node = elem[i++]) ) { + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + // Use textContent for elements + // innerText usage removed for consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[1] = match[1].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); + + if ( match[2] === "~=" ) { + match[3] = " " + match[3] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[1] = match[1].toLowerCase(); + + if ( match[1].slice( 0, 3 ) === "nth" ) { + // nth-* requires argument + if ( !match[3] ) { + Sizzle.error( match[0] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[4] = +( match[4] ? 
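+
+ /*
+ * Worked example (hypothetical selector): for ":nth-child(2n+1)" the sign
+ * and digit captures concatenate to "2", so match[4] (the "a" of an+b)
+ * becomes 2; "even"/"odd" have no xn-component and fall through to
+ * 2 * true === 2. match[5] (the "b") below becomes 1 for both "2n+1" and
+ * "odd", and 0 for "even".
+ */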
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); + match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); + + // other types prohibit arguments + } else if ( match[3] ) { + Sizzle.error( match[0] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[6] && match[2]; + + if ( matchExpr["CHILD"].test( match[0] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[3] ) { + match[2] = match[4] || match[5] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + // Get excess from tokenize (recursively) + (excess = tokenize( unquoted, true )) && + // advance to the next closing parenthesis + (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { + + // excess is a negative index + match[0] = match[0].slice( 0, excess ); + match[2] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? + function() { return true; } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && + classCache( className, function( elem ) { + return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); + }); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + }; + }, + + "CHILD": function( type, what, argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, context, xml ) { + var cache, uniqueCache, outerCache, node, nodeIndex, start, + dir = simple !== forward ? "nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType, + diff = false; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( (node = node[ dir ]) ) { + if ( ofType ? 
+ node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) { + + return false; + } + } + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + + // Seek `elem` from a previously-cached index + + // ...in a gzip-friendly way + node = parent; + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex && cache[ 2 ]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( (node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + (diff = nodeIndex = 0) || start.pop()) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + } else { + // Use previously-cached element index if available + if ( useCache ) { + // ...in a gzip-friendly way + node = elem; + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex; + } + + // xml :nth-child(...) + // or :nth-last-child(...) or :nth(-last)?-of-type(...) + if ( diff === false ) { + // Use the same loop as above to seek `elem` from the start + while ( (node = ++nodeIndex && node && node[ dir ] || + (diff = nodeIndex = 0) || start.pop()) ) { + + if ( ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) && + ++diff ) { + + // Cache the index of each encountered element + if ( useCache ) { + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + uniqueCache[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
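+
+ /*
+ * For comparison, the preferred modern form (hypothetical pseudo, using
+ * the createPseudo/markFunction API exposed above):
+ *
+ *   Expr.pseudos["data"] = markFunction(function( key ) {
+ *       return function( elem ) {
+ *           return !!elem.getAttribute( "data-" + key );
+ *       };
+ *   });
+ *
+ * Such pseudos carry fn[ expando ] and were handled before this legacy
+ * branch.
+ */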
+ markFunction(function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf( seed, matched[i] ); + seed[ idx ] = !( matches[ idx ] = matched[i] ); + } + }) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + // Potentially complex pseudos + "not": markFunction(function( selector ) { + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? + markFunction(function( seed, matches, context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( (elem = unmatched[i]) ) { + seed[i] = !(matches[i] = elem); + } + } + }) : + function( elem, context, xml ) { + input[0] = elem; + matcher( input, null, xml, results ); + // Don't keep the element (issue #299) + input[0] = null; + return !results.pop(); + }; + }), + + "has": markFunction(function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + }), + + "contains": markFunction(function( text ) { + text = text.replace( runescape, funescape ); + return function( elem ) { + return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; + }; + }), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." + // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + // lang value must be a valid identifier + if ( !ridentifier.test(lang || "") ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( (elemLang = documentIsHTML ? 
+ elem.lang : + elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); + return false; + }; + }), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); + }, + + // Boolean properties + "enabled": createDisabledPseudo( false ), + "disabled": createDisabledPseudo( true ), + + "checked": function( elem ) { + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); + }, + + "selected": function( elem ) { + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but not by others (comment: 8; processing instruction: 7; etc.) + // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos["empty"]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo(function() { + return [ 0 ]; + }), + + "last": createPositionalPseudo(function( matchIndexes, length ) { + return [ length - 1 ]; + }), + + "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + }), + + "even": createPositionalPseudo(function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "odd": createPositionalPseudo(function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? 
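+
+ /*
+ * E.g. with five matched elements, "li:gt(1)" collects indexes 2-4;
+ * a negative argument counts from the end, so "li:gt(-2)" collects only
+ * index 4.
+ */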
argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }) + } +}; + +Expr.pseudos["nth"] = Expr.pseudos["eq"]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || (match = rcomma.exec( soFar )) ) { + if ( match ) { + // Don't consume trailing commas as valid + soFar = soFar.slice( match[0].length ) || soFar; + } + groups.push( (tokens = []) ); + } + + matched = false; + + // Combinators + if ( (match = rcombinators.exec( soFar )) ) { + matched = match.shift(); + tokens.push({ + value: matched, + // Cast descendant combinators to space + type: match[0].replace( rtrim, " " ) + }); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || + (match = preFilters[ type ]( match ))) ) { + matched = match.shift(); + tokens.push({ + value: matched, + type: type, + matches: match + }); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? + Sizzle.error( selector ) : + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[i].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + skip = combinator.next, + key = skip || dir, + checkNonElements = base && key === "parentNode", + doneName = done++; + + return combinator.first ? 
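+
+ /*
+ * "first" combinators ( ">", "+" ) check exactly one node: for "ul > li"
+ * the matcher returned below tests only li.parentNode, whereas the
+ * descendant form "ul li" falls through to the caching walker that visits
+ * every ancestor.
+ */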
+ // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + return false; + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, uniqueCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching + if ( xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || (elem[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); + + if ( skip && skip === elem.nodeName.toLowerCase() ) { + elem = elem[ dir ] || elem; + } else if ( (oldCache = uniqueCache[ key ]) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return (newCache[ 2 ] = oldCache[ 2 ]); + } else { + // Reuse newcache so results back-propagate to previous elements + uniqueCache[ key ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { + return true; + } + } + } + } + } + return false; + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? + function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[i]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[0]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[i], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( (elem = unmatched[i]) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction(function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
+ + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( (elem = temp[i]) ) { + matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) ) { + // Restore matcherIn since elem is not yet a final match + temp.push( (matcherIn[i] = elem) ); + } + } + postFinder( null, (matcherOut = []), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) && + (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { + + seed[temp] = !(results[temp] = elem); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + }); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[0].type ], + implicitRelative = leadingRelative || Expr.relative[" "], + i = leadingRelative ? 1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + (checkContext = context).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + // Avoid hanging onto element (issue #299) + checkContext = null; + return ret; + } ]; + + for ( ; i < len; i++ ) { + if ( (matcher = Expr.relative[ tokens[i].type ]) ) { + matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; + } else { + matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[j].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
"*" : "" }) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), + len = elems.length; + + if ( outermost ) { + outermostContext = context === document || context || outermost; + } + + // Add elements passing elementMatchers directly to results + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id + for ( ; i !== len && (elem = elems[i]) != null; i++ ) { + if ( byElement && elem ) { + j = 0; + if ( !context && elem.ownerDocument !== document ) { + setDocument( elem ); + xml = !documentIsHTML; + } + while ( (matcher = elementMatchers[j++]) ) { + if ( matcher( elem, context || document, xml) ) { + results.push( elem ); + break; + } + } + if ( outermost ) { + dirruns = dirrunsUnique; + } + } + + // Track unmatched elements for set filters + if ( bySet ) { + // They will have gone through all possible matchers + if ( (elem = !matcher && elem) ) { + matchedCount--; + } + + // Lengthen the array for every element, matched or not + if ( seed ) { + unmatched.push( elem ); + } + } + } + + // `i` is now the count of elements visited above, and adding it to `matchedCount` + // makes the latter nonnegative. + matchedCount += i; + + // Apply set filters to unmatched elements + // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` + // equals `i`), unless we didn't visit _any_ elements in the above loop because we have + // no element matchers and no seed. + // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that + // case, which will result in a "00" `matchedCount` that differs from `i` but is also + // numerically zero. + if ( bySet && i !== matchedCount ) { + j = 0; + while ( (matcher = setMatchers[j++]) ) { + matcher( unmatched, setMatched, context, xml ); + } + + if ( seed ) { + // Reintegrate element matches to eliminate the need for sorting + if ( matchedCount > 0 ) { + while ( i-- ) { + if ( !(unmatched[i] || setMatched[i]) ) { + setMatched[i] = pop.call( results ); + } + } + } + + // Discard index placeholder values to get only actual matches + setMatched = condense( setMatched ); + } + + // Add matches to results + push.apply( results, setMatched ); + + // Seedless set matches succeeding multiple successful matchers stipulate sorting + if ( outermost && !seed && setMatched.length > 0 && + ( matchedCount + setMatchers.length ) > 1 ) { + + Sizzle.uniqueSort( results ); + } + } + + // Override manipulation of globals by nested matchers + if ( outermost ) { + dirruns = dirrunsUnique; + outermostContext = contextBackup; + } + + return unmatched; + }; + + return bySet ? 
+ markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ selector + " " ]; + + if ( !cached ) { + // Generate a function of recursive functions that can be used to check each element + if ( !match ) { + match = tokenize( selector ); + } + i = match.length; + while ( i-- ) { + cached = matcherFromTokens( match[i] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); + + // Save selector and tokenization + cached.selector = selector; + } + return cached; +}; + +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param {Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { + var i, tokens, token, type, find, + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( (selector = compiled.selector || selector) ); + + results = results || []; + + // Try to minimize operations if there is only one selector in the list and no seed + // (the latter of which guarantees us context) + if ( match.length === 1 ) { + + // Reduce context if the leading compound selector is an ID + tokens = match[0] = match[0].slice( 0 ); + if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && + context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) { + + context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; + if ( !context ) { + return results; + + // Precompiled matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; + } + + selector = selector.slice( tokens.shift().value.length ); + } + + // Fetch a seed set for right-to-left matching + i = matchExpr["needsContext"].test( selector ) ? 
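+
+ /*
+ * Illustrative walk-through (hypothetical selector): for "#main .list li",
+ * the ID shortcut above has already narrowed context to #main; the loop
+ * below then seeds right-to-left, e.g. via Expr.find["TAG"]( "li", context ),
+ * leaving only the ".list" ancestry for the compiled matcher to verify
+ * against each seeded element.
+ */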
0 : tokens.length;
+ while ( i-- ) {
+ token = tokens[i];
+
+ // Abort if we hit a combinator
+ if ( Expr.relative[ (type = token.type) ] ) {
+ break;
+ }
+ if ( (find = Expr.find[ type ]) ) {
+ // Search, expanding context for leading sibling combinators
+ if ( (seed = find(
+ token.matches[0].replace( runescape, funescape ),
+ rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context
+ )) ) {
+
+ // If seed is empty or no tokens remain, we can return early
+ tokens.splice( i, 1 );
+ selector = seed.length && toSelector( tokens );
+ if ( !selector ) {
+ push.apply( results, seed );
+ return results;
+ }
+
+ break;
+ }
+ }
+ }
+ }
+
+ // Compile and execute a filtering function if one is not provided
+ // Provide `match` to avoid retokenization if we modified the selector above
+ ( compiled || compile( selector, match ) )(
+ seed,
+ context,
+ !documentIsHTML,
+ results,
+ !context || rsibling.test( selector ) && testContext( context.parentNode ) || context
+ );
+ return results;
+};
+
+// One-time assignments
+
+// Sort stability
+support.sortStable = expando.split("").sort( sortOrder ).join("") === expando;
+
+// Support: Chrome 14-35+
+// Always assume duplicates if they aren't passed to the comparison function
+support.detectDuplicates = !!hasDuplicate;
+
+// Initialize against the default document
+setDocument();
+
+// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27)
+// Detached nodes confoundingly follow *each other*
+support.sortDetached = assert(function( el ) {
+ // Should return 1, but returns 4 (following)
+ return el.compareDocumentPosition( document.createElement("fieldset") ) & 1;
+});
+
+// Support: IE<8
+// Prevent attribute/property "interpolation"
+// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx
+if ( !assert(function( el ) {
+ el.innerHTML = "<a href='#'></a>";
+ return el.firstChild.getAttribute("href") === "#" ;
+}) ) {
+ addHandle( "type|href|height|width", function( elem, name, isXML ) {
+ if ( !isXML ) {
+ return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 );
+ }
+ });
+}
+
+// Support: IE<9
+// Use defaultValue in place of getAttribute("value")
+if ( !support.attributes || !assert(function( el ) {
+ el.innerHTML = "<input/>";
+ el.firstChild.setAttribute( "value", "" );
+ return el.firstChild.getAttribute( "value" ) === "";
+}) ) {
+ addHandle( "value", function( elem, name, isXML ) {
+ if ( !isXML && elem.nodeName.toLowerCase() === "input" ) {
+ return elem.defaultValue;
+ }
+ });
+}
+
+// Support: IE<9
+// Use getAttributeNode to fetch booleans when getAttribute lies
+if ( !assert(function( el ) {
+ return el.getAttribute("disabled") == null;
+}) ) {
+ addHandle( booleans, function( elem, name, isXML ) {
+ var val;
+ if ( !isXML ) {
+ return elem[ name ] === true ? name.toLowerCase() :
+ (val = elem.getAttributeNode( name )) && val.specified ?
+ val.value : + null; + } + }); +} + +return Sizzle; + +})( window ); + + + +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; + +// Deprecated +jQuery.expr[ ":" ] = jQuery.expr.pseudos; +jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; +jQuery.escapeSelector = Sizzle.escape; + + + + +var dir = function( elem, dir, until ) { + var matched = [], + truncate = until !== undefined; + + while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { + if ( elem.nodeType === 1 ) { + if ( truncate && jQuery( elem ).is( until ) ) { + break; + } + matched.push( elem ); + } + } + return matched; +}; + + +var siblings = function( n, elem ) { + var matched = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + matched.push( n ); + } + } + + return matched; +}; + + +var rneedsContext = jQuery.expr.match.needsContext; + + + +function nodeName( elem, name ) { + + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + +}; +var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); + + + +var risSimple = /^.[^:#\[\.,]*$/; + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( jQuery.isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + return !!qualifier.call( elem, i, elem ) !== not; + } ); + } + + // Single element + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + } ); + } + + // Arraylike of elements (jQuery, arguments, Array) + if ( typeof qualifier !== "string" ) { + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not; + } ); + } + + // Simple selector that can be filtered directly, removing non-Elements + if ( risSimple.test( qualifier ) ) { + return jQuery.filter( qualifier, elements, not ); + } + + // Complex selector, compare the two sets, removing non-Elements + qualifier = jQuery.filter( qualifier, elements ); + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not && elem.nodeType === 1; + } ); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + if ( elems.length === 1 && elem.nodeType === 1 ) { + return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; + } + + return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + } ) ); +}; + +jQuery.fn.extend( { + find: function( selector ) { + var i, ret, + len = this.length, + self = this; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter( function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + } ) ); + } + + ret = this.pushStack( [] ); + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + return len > 1 ? 
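+
+ /*
+ * E.g. $( ".outer" ).find( "span" ) with nested .outer elements can
+ * collect the same span once per context, so results are passed through
+ * uniqueSort below whenever more than one context was searched.
+ */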
jQuery.uniqueSort( ret ) : ret;
+ },
+ filter: function( selector ) {
+ return this.pushStack( winnow( this, selector || [], false ) );
+ },
+ not: function( selector ) {
+ return this.pushStack( winnow( this, selector || [], true ) );
+ },
+ is: function( selector ) {
+ return !!winnow(
+ this,
+
+ // If this is a positional/relative selector, check membership in the returned set
+ // so $("p:first").is("p:last") won't return true for a doc with two "p".
+ typeof selector === "string" && rneedsContext.test( selector ) ?
+ jQuery( selector ) :
+ selector || [],
+ false
+ ).length;
+ }
+} );
+
+
+// Initialize a jQuery object
+
+
+// A central reference to the root jQuery(document)
+var rootjQuery,
+
+ // A simple way to check for HTML strings
+ // Prioritize #id over <tag> to avoid XSS via location.hash (#9521)
+ // Strict HTML recognition (#11290: must start with <)
+ // Shortcut simple #id case for speed
+ rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/,
+
+ init = jQuery.fn.init = function( selector, context, root ) {
+ var match, elem;
+
+ // HANDLE: $(""), $(null), $(undefined), $(false)
+ if ( !selector ) {
+ return this;
+ }
+
+ // Method init() accepts an alternate rootjQuery
+ // so migrate can support jQuery.sub (gh-2101)
+ root = root || rootjQuery;
+
+ // Handle HTML strings
+ if ( typeof selector === "string" ) {
+ if ( selector[ 0 ] === "<" &&
+ selector[ selector.length - 1 ] === ">" &&
+ selector.length >= 3 ) {
+
+ // Assume that strings that start and end with <> are HTML and skip the regex check
+ match = [ null, selector, null ];
+
+ } else {
+ match = rquickExpr.exec( selector );
+ }
+
+ // Match html or make sure no context is specified for #id
+ if ( match && ( match[ 1 ] || !context ) ) {
+
+ // HANDLE: $(html) -> $(array)
+ if ( match[ 1 ] ) {
+ context = context instanceof jQuery ? context[ 0 ] : context;
+
+ // Option to run scripts is true for back-compat
+ // Intentionally let the error be thrown if parseHTML is not present
+ jQuery.merge( this, jQuery.parseHTML(
+ match[ 1 ],
+ context && context.nodeType ? context.ownerDocument || context : document,
+ true
+ ) );
+
+ // HANDLE: $(html, props)
+ if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) {
+ for ( match in context ) {
+
+ // Properties of context are called as methods if possible
+ if ( jQuery.isFunction( this[ match ] ) ) {
+ this[ match ]( context[ match ] );
+
+ // ...and otherwise set as attributes
+ } else {
+ this.attr( match, context[ match ] );
+ }
+ }
+ }
+
+ return this;
+
+ // HANDLE: $(#id)
+ } else {
+ elem = document.getElementById( match[ 2 ] );
+
+ if ( elem ) {
+
+ // Inject the element directly into the jQuery object
+ this[ 0 ] = elem;
+ this.length = 1;
+ }
+ return this;
+ }
+
+ // HANDLE: $(expr, $(...))
+ } else if ( !context || context.jquery ) {
+ return ( context || root ).find( selector );
+
+ // HANDLE: $(expr, context)
+ // (which is just equivalent to: $(context).find(expr) )
+ } else {
+ return this.constructor( context ).find( selector );
+ }
+
+ // HANDLE: $(DOMElement)
+ } else if ( selector.nodeType ) {
+ this[ 0 ] = selector;
+ this.length = 1;
+ return this;
+
+ // HANDLE: $(function)
+ // Shortcut for document ready
+ } else if ( jQuery.isFunction( selector ) ) {
+ return root.ready !== undefined ?
+ root.ready( selector ) : + + // Execute immediately if ready is not present + selector( jQuery ); + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + + // Methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.fn.extend( { + has: function( target ) { + var targets = jQuery( target, this ), + l = targets.length; + + return this.filter( function() { + var i = 0; + for ( ; i < l; i++ ) { + if ( jQuery.contains( this, targets[ i ] ) ) { + return true; + } + } + } ); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + targets = typeof selectors !== "string" && jQuery( selectors ); + + // Positional selectors never match, since there's no _selection_ context + if ( !rneedsContext.test( selectors ) ) { + for ( ; i < l; i++ ) { + for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { + + // Always skip document fragments + if ( cur.nodeType < 11 && ( targets ? + targets.index( cur ) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector( cur, selectors ) ) ) { + + matched.push( cur ); + break; + } + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); + }, + + // Determine the position of an element within the set + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; + } + + // Index in selector + if ( typeof elem === "string" ) { + return indexOf.call( jQuery( elem ), this[ 0 ] ); + } + + // Locate the position of the desired element + return indexOf.call( this, + + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[ 0 ] : elem + ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.uniqueSort( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter( selector ) + ); + } +} ); + +function sibling( cur, dir ) { + while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} + return cur; +} + +jQuery.each( { + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? 
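+
+ /*
+ * nodeType 11 is a DocumentFragment; e.g. an element appended to
+ * document.createDocumentFragment() has a parentNode, but
+ * jQuery( el ).parent() intentionally yields an empty set for it.
+ */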
parent : null; + }, + parents: function( elem ) { + return dir( elem, "parentNode" ); + }, + parentsUntil: function( elem, i, until ) { + return dir( elem, "parentNode", until ); + }, + next: function( elem ) { + return sibling( elem, "nextSibling" ); + }, + prev: function( elem ) { + return sibling( elem, "previousSibling" ); + }, + nextAll: function( elem ) { + return dir( elem, "nextSibling" ); + }, + prevAll: function( elem ) { + return dir( elem, "previousSibling" ); + }, + nextUntil: function( elem, i, until ) { + return dir( elem, "nextSibling", until ); + }, + prevUntil: function( elem, i, until ) { + return dir( elem, "previousSibling", until ); + }, + siblings: function( elem ) { + return siblings( ( elem.parentNode || {} ).firstChild, elem ); + }, + children: function( elem ) { + return siblings( elem.firstChild ); + }, + contents: function( elem ) { + if ( nodeName( elem, "iframe" ) ) { + return elem.contentDocument; + } + + // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only + // Treat the template element as a regular one in browsers that + // don't support it. + if ( nodeName( elem, "template" ) ) { + elem = elem.content || elem; + } + + return jQuery.merge( [], elem.childNodes ); + } +}, function( name, fn ) { + jQuery.fn[ name ] = function( until, selector ) { + var matched = jQuery.map( this, fn, until ); + + if ( name.slice( -5 ) !== "Until" ) { + selector = until; + } + + if ( selector && typeof selector === "string" ) { + matched = jQuery.filter( selector, matched ); + } + + if ( this.length > 1 ) { + + // Remove duplicates + if ( !guaranteedUnique[ name ] ) { + jQuery.uniqueSort( matched ); + } + + // Reverse order for parents* and prev-derivatives + if ( rparentsprev.test( name ) ) { + matched.reverse(); + } + } + + return this.pushStack( matched ); + }; +} ); +var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); + + + +// Convert String-formatted options into Object-formatted ones +function createOptions( options ) { + var object = {}; + jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { + object[ flag ] = true; + } ); + return object; +} + +/* + * Create a callback list using the following parameters: + * + * options: an optional list of space-separated options that will change how + * the callback list behaves or a more traditional option object + * + * By default a callback list will act like an event callback list and can be + * "fired" multiple times. + * + * Possible options: + * + * once: will ensure the callback list can only be fired once (like a Deferred) + * + * memory: will keep track of previous values and will call any callback added + * after the list has been fired right away with the latest "memorized" + * values (like a Deferred) + * + * unique: will ensure a callback can only be added once (no duplicate in the list) + * + * stopOnFalse: interrupt callings when a callback returns false + * + */ +jQuery.Callbacks = function( options ) { + + // Convert options from String-formatted to Object-formatted if needed + // (we check in cache first) + options = typeof options === "string" ? 
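+
+ /*
+ * Usage sketch of the documented flags (illustrative only):
+ *
+ *   var cb = jQuery.Callbacks( "once memory" );
+ *   cb.add(function( v ) { console.log( v ); });
+ *   cb.fire( "go" );                       // logs "go"
+ *   cb.add(function( v ) { console.log( v, "(late)" ); });
+ *                                          // logs "go (late)" via memory
+ *   cb.fire( "again" );                    // no-op: "once" locked the list
+ */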
+ createOptions( options ) : + jQuery.extend( {}, options ); + + var // Flag to know if list is currently firing + firing, + + // Last fire value for non-forgettable lists + memory, + + // Flag to know if list was already fired + fired, + + // Flag to prevent firing + locked, + + // Actual callback list + list = [], + + // Queue of execution data for repeatable lists + queue = [], + + // Index of currently firing callback (modified by add/remove as needed) + firingIndex = -1, + + // Fire callbacks + fire = function() { + + // Enforce single-firing + locked = locked || options.once; + + // Execute callbacks for all pending executions, + // respecting firingIndex overrides and runtime changes + fired = firing = true; + for ( ; queue.length; firingIndex = -1 ) { + memory = queue.shift(); + while ( ++firingIndex < list.length ) { + + // Run callback and check for early termination + if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && + options.stopOnFalse ) { + + // Jump to end and forget the data so .add doesn't re-fire + firingIndex = list.length; + memory = false; + } + } + } + + // Forget the data if we're done with it + if ( !options.memory ) { + memory = false; + } + + firing = false; + + // Clean up if we're done firing for good + if ( locked ) { + + // Keep an empty list if we have data for future add calls + if ( memory ) { + list = []; + + // Otherwise, this object is spent + } else { + list = ""; + } + } + }, + + // Actual Callbacks object + self = { + + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + + // If we have memory from a past run, we should fire after adding + if ( memory && !firing ) { + firingIndex = list.length - 1; + queue.push( memory ); + } + + ( function add( args ) { + jQuery.each( args, function( _, arg ) { + if ( jQuery.isFunction( arg ) ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && jQuery.type( arg ) !== "string" ) { + + // Inspect recursively + add( arg ); + } + } ); + } )( arguments ); + + if ( memory && !firing ) { + fire(); + } + } + return this; + }, + + // Remove a callback from the list + remove: function() { + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + + // Handle firing indexes + if ( index <= firingIndex ) { + firingIndex--; + } + } + } ); + return this; + }, + + // Check if a given callback is in the list. + // If no argument is given, return whether or not list has callbacks attached. + has: function( fn ) { + return fn ? + jQuery.inArray( fn, list ) > -1 : + list.length > 0; + }, + + // Remove all callbacks from the list + empty: function() { + if ( list ) { + list = []; + } + return this; + }, + + // Disable .fire and .add + // Abort any current/pending executions + // Clear all callbacks and values + disable: function() { + locked = queue = []; + list = memory = ""; + return this; + }, + disabled: function() { + return !list; + }, + + // Disable .fire + // Also disable .add unless we have memory (since it would have no effect) + // Abort any pending executions + lock: function() { + locked = queue = []; + if ( !memory && !firing ) { + list = memory = ""; + } + return this; + }, + locked: function() { + return !!locked; + }, + + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( !locked ) { + args = args || []; + args = [ context, args.slice ? 
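+				// Editor's note: slice() snapshots array arguments here, so a
+				// caller mutating its array later cannot change an execution
+				// that is still queued.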
args.slice() : args ]; + queue.push( args ); + if ( !firing ) { + fire(); + } + } + return this; + }, + + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; + + +function Identity( v ) { + return v; +} +function Thrower( ex ) { + throw ex; +} + +function adoptValue( value, resolve, reject, noValue ) { + var method; + + try { + + // Check for promise aspect first to privilege synchronous behavior + if ( value && jQuery.isFunction( ( method = value.promise ) ) ) { + method.call( value ).done( resolve ).fail( reject ); + + // Other thenables + } else if ( value && jQuery.isFunction( ( method = value.then ) ) ) { + method.call( value, resolve, reject ); + + // Other non-thenables + } else { + + // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: + // * false: [ value ].slice( 0 ) => resolve( value ) + // * true: [ value ].slice( 1 ) => resolve() + resolve.apply( undefined, [ value ].slice( noValue ) ); + } + + // For Promises/A+, convert exceptions into rejections + // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in + // Deferred#then to conditionally suppress rejection. + } catch ( value ) { + + // Support: Android 4.0 only + // Strict mode functions invoked without .call/.apply get global-object context + reject.apply( undefined, [ value ] ); + } +} + +jQuery.extend( { + + Deferred: function( func ) { + var tuples = [ + + // action, add listener, callbacks, + // ... .then handlers, argument index, [final state] + [ "notify", "progress", jQuery.Callbacks( "memory" ), + jQuery.Callbacks( "memory" ), 2 ], + [ "resolve", "done", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 0, "resolved" ], + [ "reject", "fail", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 1, "rejected" ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + "catch": function( fn ) { + return promise.then( null, fn ); + }, + + // Keep pipe for back-compat + pipe: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + + return jQuery.Deferred( function( newDefer ) { + jQuery.each( tuples, function( i, tuple ) { + + // Map tuples (progress, done, fail) to arguments (done, fail, progress) + var fn = jQuery.isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; + + // deferred.progress(function() { bind to newDefer or newDefer.notify }) + // deferred.done(function() { bind to newDefer or newDefer.resolve }) + // deferred.fail(function() { bind to newDefer or newDefer.reject }) + deferred[ tuple[ 1 ] ]( function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && jQuery.isFunction( returned.promise ) ) { + returned.promise() + .progress( newDefer.notify ) + .done( newDefer.resolve ) + .fail( newDefer.reject ); + } else { + newDefer[ tuple[ 0 ] + "With" ]( + this, + fn ? 
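+							// Editor's note: when a filter function was given,
+							// forward only its return value; otherwise pass
+							// the original arguments straight through.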
[ returned ] : arguments + ); + } + } ); + } ); + fns = null; + } ).promise(); + }, + then: function( onFulfilled, onRejected, onProgress ) { + var maxDepth = 0; + function resolve( depth, deferred, handler, special ) { + return function() { + var that = this, + args = arguments, + mightThrow = function() { + var returned, then; + + // Support: Promises/A+ section 2.3.3.3.3 + // https://promisesaplus.com/#point-59 + // Ignore double-resolution attempts + if ( depth < maxDepth ) { + return; + } + + returned = handler.apply( that, args ); + + // Support: Promises/A+ section 2.3.1 + // https://promisesaplus.com/#point-48 + if ( returned === deferred.promise() ) { + throw new TypeError( "Thenable self-resolution" ); + } + + // Support: Promises/A+ sections 2.3.3.1, 3.5 + // https://promisesaplus.com/#point-54 + // https://promisesaplus.com/#point-75 + // Retrieve `then` only once + then = returned && + + // Support: Promises/A+ section 2.3.4 + // https://promisesaplus.com/#point-64 + // Only check objects and functions for thenability + ( typeof returned === "object" || + typeof returned === "function" ) && + returned.then; + + // Handle a returned thenable + if ( jQuery.isFunction( then ) ) { + + // Special processors (notify) just wait for resolution + if ( special ) { + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ) + ); + + // Normal processors (resolve) also hook into progress + } else { + + // ...and disregard older resolution values + maxDepth++; + + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ), + resolve( maxDepth, deferred, Identity, + deferred.notifyWith ) + ); + } + + // Handle all other returned values + } else { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Identity ) { + that = undefined; + args = [ returned ]; + } + + // Process the value(s) + // Default process is resolve + ( special || deferred.resolveWith )( that, args ); + } + }, + + // Only normal processors (resolve) catch and reject exceptions + process = special ? + mightThrow : + function() { + try { + mightThrow(); + } catch ( e ) { + + if ( jQuery.Deferred.exceptionHook ) { + jQuery.Deferred.exceptionHook( e, + process.stackTrace ); + } + + // Support: Promises/A+ section 2.3.3.3.4.1 + // https://promisesaplus.com/#point-61 + // Ignore post-resolution exceptions + if ( depth + 1 >= maxDepth ) { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Thrower ) { + that = undefined; + args = [ e ]; + } + + deferred.rejectWith( that, args ); + } + } + }; + + // Support: Promises/A+ section 2.3.3.3.1 + // https://promisesaplus.com/#point-57 + // Re-resolve promises immediately to dodge false rejection from + // subsequent errors + if ( depth ) { + process(); + } else { + + // Call an optional hook to record the stack, in case of exception + // since it's otherwise lost when execution goes async + if ( jQuery.Deferred.getStackHook ) { + process.stackTrace = jQuery.Deferred.getStackHook(); + } + window.setTimeout( process ); + } + }; + } + + return jQuery.Deferred( function( newDefer ) { + + // progress_handlers.add( ... ) + tuples[ 0 ][ 3 ].add( + resolve( + 0, + newDefer, + jQuery.isFunction( onProgress ) ? + onProgress : + Identity, + newDefer.notifyWith + ) + ); + + // fulfilled_handlers.add( ... 
) + tuples[ 1 ][ 3 ].add( + resolve( + 0, + newDefer, + jQuery.isFunction( onFulfilled ) ? + onFulfilled : + Identity + ) + ); + + // rejected_handlers.add( ... ) + tuples[ 2 ][ 3 ].add( + resolve( + 0, + newDefer, + jQuery.isFunction( onRejected ) ? + onRejected : + Thrower + ) + ); + } ).promise(); + }, + + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 5 ]; + + // promise.progress = list.add + // promise.done = list.add + // promise.fail = list.add + promise[ tuple[ 1 ] ] = list.add; + + // Handle state + if ( stateString ) { + list.add( + function() { + + // state = "resolved" (i.e., fulfilled) + // state = "rejected" + state = stateString; + }, + + // rejected_callbacks.disable + // fulfilled_callbacks.disable + tuples[ 3 - i ][ 2 ].disable, + + // progress_callbacks.lock + tuples[ 0 ][ 2 ].lock + ); + } + + // progress_handlers.fire + // fulfilled_handlers.fire + // rejected_handlers.fire + list.add( tuple[ 3 ].fire ); + + // deferred.notify = function() { deferred.notifyWith(...) } + // deferred.resolve = function() { deferred.resolveWith(...) } + // deferred.reject = function() { deferred.rejectWith(...) } + deferred[ tuple[ 0 ] ] = function() { + deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); + return this; + }; + + // deferred.notifyWith = list.fireWith + // deferred.resolveWith = list.fireWith + // deferred.rejectWith = list.fireWith + deferred[ tuple[ 0 ] + "With" ] = list.fireWith; + } ); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! + return deferred; + }, + + // Deferred helper + when: function( singleValue ) { + var + + // count of uncompleted subordinates + remaining = arguments.length, + + // count of unprocessed arguments + i = remaining, + + // subordinate fulfillment data + resolveContexts = Array( i ), + resolveValues = slice.call( arguments ), + + // the master Deferred + master = jQuery.Deferred(), + + // subordinate callback factory + updateFunc = function( i ) { + return function( value ) { + resolveContexts[ i ] = this; + resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; + if ( !( --remaining ) ) { + master.resolveWith( resolveContexts, resolveValues ); + } + }; + }; + + // Single- and empty arguments are adopted like Promise.resolve + if ( remaining <= 1 ) { + adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, + !remaining ); + + // Use .then() to unwrap secondary thenables (cf. gh-3000) + if ( master.state() === "pending" || + jQuery.isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { + + return master.then(); + } + } + + // Multiple arguments are aggregated like Promise.all array elements + while ( i-- ) { + adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); + } + + return master.promise(); + } +} ); + + +// These usually indicate a programmer mistake during development, +// warn about them ASAP rather than swallowing them by default. 
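+// For instance (editor's illustration, not upstream code): with dev tools open,
+// the rejection below is logged via console.warn instead of disappearing:
+//   jQuery.Deferred().resolve().then( function() { throw new TypeError( "boom" ); } );
+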
+var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; + +jQuery.Deferred.exceptionHook = function( error, stack ) { + + // Support: IE 8 - 9 only + // Console exists when dev tools are open, which can happen at any time + if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { + window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); + } +}; + + + + +jQuery.readyException = function( error ) { + window.setTimeout( function() { + throw error; + } ); +}; + + + + +// The deferred used on DOM ready +var readyList = jQuery.Deferred(); + +jQuery.fn.ready = function( fn ) { + + readyList + .then( fn ) + + // Wrap jQuery.readyException in a function so that the lookup + // happens at the time of error handling instead of callback + // registration. + .catch( function( error ) { + jQuery.readyException( error ); + } ); + + return this; +}; + +jQuery.extend( { + + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + } +} ); + +jQuery.ready.then = readyList.then; + +// The ready event handler and self cleanup method +function completed() { + document.removeEventListener( "DOMContentLoaded", completed ); + window.removeEventListener( "load", completed ); + jQuery.ready(); +} + +// Catch cases where $(document).ready() is called +// after the browser event has already occurred. +// Support: IE <=9 - 10 only +// Older IE sometimes signals "interactive" too soon +if ( document.readyState === "complete" || + ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { + + // Handle it asynchronously to allow scripts the opportunity to delay ready + window.setTimeout( jQuery.ready ); + +} else { + + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed ); +} + + + + +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + len = elems.length, + bulk = key == null; + + // Sets many values + if ( jQuery.type( key ) === "object" ) { + chainable = true; + for ( i in key ) { + access( elems, fn, i, key[ i ], true, emptyGet, raw ); + } + + // Sets one value + } else if ( value !== undefined ) { + chainable = true; + + if ( !jQuery.isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing function values + } else { + bulk = fn; + fn = function( elem, key, value ) { + return bulk.call( jQuery( elem ), value ); + }; + } + } + + if ( fn ) { + for ( ; i < len; i++ ) { + fn( + elems[ i ], key, raw ? 
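+					// Editor's note: raw values are assigned as-is; function
+					// values are invoked with the index and the current value.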
+ value : + value.call( elems[ i ], i, fn( elems[ i ], key ) ) + ); + } + } + } + + if ( chainable ) { + return elems; + } + + // Gets + if ( bulk ) { + return fn.call( elems ); + } + + return len ? fn( elems[ 0 ], key ) : emptyGet; +}; +var acceptData = function( owner ) { + + // Accepts only: + // - Node + // - Node.ELEMENT_NODE + // - Node.DOCUMENT_NODE + // - Object + // - Any + return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); +}; + + + + +function Data() { + this.expando = jQuery.expando + Data.uid++; +} + +Data.uid = 1; + +Data.prototype = { + + cache: function( owner ) { + + // Check if the owner object already has a cache + var value = owner[ this.expando ]; + + // If not, create one + if ( !value ) { + value = {}; + + // We can accept data for non-element nodes in modern browsers, + // but we should not, see #8335. + // Always return an empty object. + if ( acceptData( owner ) ) { + + // If it is a node unlikely to be stringify-ed or looped over + // use plain assignment + if ( owner.nodeType ) { + owner[ this.expando ] = value; + + // Otherwise secure it in a non-enumerable property + // configurable must be true to allow the property to be + // deleted when data is removed + } else { + Object.defineProperty( owner, this.expando, { + value: value, + configurable: true + } ); + } + } + } + + return value; + }, + set: function( owner, data, value ) { + var prop, + cache = this.cache( owner ); + + // Handle: [ owner, key, value ] args + // Always use camelCase key (gh-2257) + if ( typeof data === "string" ) { + cache[ jQuery.camelCase( data ) ] = value; + + // Handle: [ owner, { properties } ] args + } else { + + // Copy the properties one-by-one to the cache object + for ( prop in data ) { + cache[ jQuery.camelCase( prop ) ] = data[ prop ]; + } + } + return cache; + }, + get: function( owner, key ) { + return key === undefined ? + this.cache( owner ) : + + // Always use camelCase key (gh-2257) + owner[ this.expando ] && owner[ this.expando ][ jQuery.camelCase( key ) ]; + }, + access: function( owner, key, value ) { + + // In cases where either: + // + // 1. No key was specified + // 2. A string key was specified, but no value provided + // + // Take the "read" path and allow the get method to determine + // which value to return, respectively either: + // + // 1. The entire cache object + // 2. The data stored at the key + // + if ( key === undefined || + ( ( key && typeof key === "string" ) && value === undefined ) ) { + + return this.get( owner, key ); + } + + // When the key is not a string, or both a key and value + // are specified, set or extend (existing objects) with either: + // + // 1. An object of properties + // 2. A key and value + // + this.set( owner, key, value ); + + // Since the "set" path can have two possible entry points + // return the expected data based on which path was taken[*] + return value !== undefined ? value : key; + }, + remove: function( owner, key ) { + var i, + cache = owner[ this.expando ]; + + if ( cache === undefined ) { + return; + } + + if ( key !== undefined ) { + + // Support array or space separated string of keys + if ( Array.isArray( key ) ) { + + // If key is an array of keys... + // We always set camelCase keys, so remove that. + key = key.map( jQuery.camelCase ); + } else { + key = jQuery.camelCase( key ); + + // If a key with the spaces exists, use it. + // Otherwise, create an array by matching non-whitespace + key = key in cache ? 
+ [ key ] : + ( key.match( rnothtmlwhite ) || [] ); + } + + i = key.length; + + while ( i-- ) { + delete cache[ key[ i ] ]; + } + } + + // Remove the expando if there's no more data + if ( key === undefined || jQuery.isEmptyObject( cache ) ) { + + // Support: Chrome <=35 - 45 + // Webkit & Blink performance suffers when deleting properties + // from DOM nodes, so set to undefined instead + // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) + if ( owner.nodeType ) { + owner[ this.expando ] = undefined; + } else { + delete owner[ this.expando ]; + } + } + }, + hasData: function( owner ) { + var cache = owner[ this.expando ]; + return cache !== undefined && !jQuery.isEmptyObject( cache ); + } +}; +var dataPriv = new Data(); + +var dataUser = new Data(); + + + +// Implementation Summary +// +// 1. Enforce API surface and semantic compatibility with 1.9.x branch +// 2. Improve the module's maintainability by reducing the storage +// paths to a single mechanism. +// 3. Use the same single mechanism to support "private" and "user" data. +// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) +// 5. Avoid exposing implementation details on user objects (eg. expando properties) +// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /[A-Z]/g; + +function getData( data ) { + if ( data === "true" ) { + return true; + } + + if ( data === "false" ) { + return false; + } + + if ( data === "null" ) { + return null; + } + + // Only convert to a number if it doesn't change the string + if ( data === +data + "" ) { + return +data; + } + + if ( rbrace.test( data ) ) { + return JSON.parse( data ); + } + + return data; +} + +function dataAttr( elem, key, data ) { + var name; + + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = getData( data ); + } catch ( e ) {} + + // Make sure we set the data so it isn't changed later + dataUser.set( elem, key, data ); + } else { + data = undefined; + } + } + return data; +} + +jQuery.extend( { + hasData: function( elem ) { + return dataUser.hasData( elem ) || dataPriv.hasData( elem ); + }, + + data: function( elem, name, data ) { + return dataUser.access( elem, name, data ); + }, + + removeData: function( elem, name ) { + dataUser.remove( elem, name ); + }, + + // TODO: Now that all calls to _data and _removeData have been replaced + // with direct calls to dataPriv methods, these can be deprecated. 
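+	// Illustrative usage of the public data API above (editor's sketch, not
+	// part of the upstream source):
+	//   var el = document.createElement( "div" );
+	//   jQuery.data( el, "mode", "dark" ); // stored in the dataUser cache
+	//   jQuery.data( el, "mode" );         // "dark"
+	//   jQuery.hasData( el );              // true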
+ _data: function( elem, name, data ) { + return dataPriv.access( elem, name, data ); + }, + + _removeData: function( elem, name ) { + dataPriv.remove( elem, name ); + } +} ); + +jQuery.fn.extend( { + data: function( key, value ) { + var i, name, data, + elem = this[ 0 ], + attrs = elem && elem.attributes; + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = dataUser.get( elem ); + + if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { + i = attrs.length; + while ( i-- ) { + + // Support: IE 11 only + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = jQuery.camelCase( name.slice( 5 ) ); + dataAttr( elem, name, data[ name ] ); + } + } + } + dataPriv.set( elem, "hasDataAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each( function() { + dataUser.set( this, key ); + } ); + } + + return access( this, function( value ) { + var data; + + // The calling jQuery object (element matches) is not empty + // (and therefore has an element appears at this[ 0 ]) and the + // `value` parameter was not undefined. An empty jQuery object + // will result in `undefined` for elem = this[ 0 ] which will + // throw an exception if an attempt to read a data cache is made. + if ( elem && value === undefined ) { + + // Attempt to get data from the cache + // The key will always be camelCased in Data + data = dataUser.get( elem, key ); + if ( data !== undefined ) { + return data; + } + + // Attempt to "discover" the data in + // HTML5 custom data-* attrs + data = dataAttr( elem, key ); + if ( data !== undefined ) { + return data; + } + + // We tried really hard, but the data doesn't exist. + return; + } + + // Set the data... 
+ this.each( function() { + + // We always store the camelCased key + dataUser.set( this, key, value ); + } ); + }, null, value, arguments.length > 1, null, true ); + }, + + removeData: function( key ) { + return this.each( function() { + dataUser.remove( this, key ); + } ); + } +} ); + + +jQuery.extend( { + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = dataPriv.get( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || Array.isArray( data ) ) { + queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // Clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // Not public - generate a queueHooks object, or return the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { + empty: jQuery.Callbacks( "once memory" ).add( function() { + dataPriv.remove( elem, [ type + "queue", key ] ); + } ) + } ); + } +} ); + +jQuery.fn.extend( { + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( arguments.length < setter ) { + return jQuery.queue( this[ 0 ], type ); + } + + return data === undefined ? 
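+			// Editor's note: with no data this is a pass-through for chaining;
+			// with data, enqueue on every element and kick off the "fx" queue
+			// when it is idle.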
+ this : + this.each( function() { + var queue = jQuery.queue( this, type, data ); + + // Ensure a hooks for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + } ); + }, + dequeue: function( type ) { + return this.each( function() { + jQuery.dequeue( this, type ); + } ); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while ( i-- ) { + tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +} ); +var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; + +var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); + + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var isHiddenWithinTree = function( elem, el ) { + + // isHiddenWithinTree might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + + // Inline style trumps all + return elem.style.display === "none" || + elem.style.display === "" && + + // Otherwise, check computed style + // Support: Firefox <=43 - 45 + // Disconnected elements can have computed display: none, so first confirm that elem is + // in the document. + jQuery.contains( elem.ownerDocument, elem ) && + + jQuery.css( elem, "display" ) === "none"; + }; + +var swap = function( elem, options, callback, args ) { + var ret, name, + old = {}; + + // Remember the old values, and insert the new ones + for ( name in options ) { + old[ name ] = elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + ret = callback.apply( elem, args || [] ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + + return ret; +}; + + + + +function adjustCSS( elem, prop, valueParts, tween ) { + var adjusted, + scale = 1, + maxIterations = 20, + currentValue = tween ? + function() { + return tween.cur(); + } : + function() { + return jQuery.css( elem, prop, "" ); + }, + initial = currentValue(), + unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ), + + // Starting value computation is required for potential unit mismatches + initialInUnit = ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && + rcssNum.exec( jQuery.css( elem, prop ) ); + + if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { + + // Trust units reported by jQuery.css + unit = unit || initialInUnit[ 3 ]; + + // Make sure we update the tween properties later on + valueParts = valueParts || []; + + // Iteratively approximate from a nonzero starting point + initialInUnit = +initial || 1; + + do { + + // If previous iteration zeroed out, double until we get *something*. 
+			// Use string for doubling so we don't accidentally see scale as unchanged below
+			scale = scale || ".5";
+
+			// Adjust and apply
+			initialInUnit = initialInUnit / scale;
+			jQuery.style( elem, prop, initialInUnit + unit );
+
+		// Update scale, tolerating zero or NaN from tween.cur()
+		// Break the loop if scale is unchanged or perfect, or if we've just had enough.
+		} while (
+			scale !== ( scale = currentValue() / initial ) && scale !== 1 && --maxIterations
+		);
+	}
+
+	if ( valueParts ) {
+		initialInUnit = +initialInUnit || +initial || 0;
+
+		// Apply relative offset (+=/-=) if specified
+		adjusted = valueParts[ 1 ] ?
+			initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] :
+			+valueParts[ 2 ];
+		if ( tween ) {
+			tween.unit = unit;
+			tween.start = initialInUnit;
+			tween.end = adjusted;
+		}
+	}
+	return adjusted;
+}
+
+
+var defaultDisplayMap = {};
+
+function getDefaultDisplay( elem ) {
+	var temp,
+		doc = elem.ownerDocument,
+		nodeName = elem.nodeName,
+		display = defaultDisplayMap[ nodeName ];
+
+	if ( display ) {
+		return display;
+	}
+
+	temp = doc.body.appendChild( doc.createElement( nodeName ) );
+	display = jQuery.css( temp, "display" );
+
+	temp.parentNode.removeChild( temp );
+
+	if ( display === "none" ) {
+		display = "block";
+	}
+	defaultDisplayMap[ nodeName ] = display;
+
+	return display;
+}
+
+function showHide( elements, show ) {
+	var display, elem,
+		values = [],
+		index = 0,
+		length = elements.length;
+
+	// Determine new display value for elements that need to change
+	for ( ; index < length; index++ ) {
+		elem = elements[ index ];
+		if ( !elem.style ) {
+			continue;
+		}
+
+		display = elem.style.display;
+		if ( show ) {
+
+			// Since we force visibility upon cascade-hidden elements, an immediate (and slow)
+			// check is required in this first loop unless we have a nonempty display value (either
+			// inline or about-to-be-restored)
+			if ( display === "none" ) {
+				values[ index ] = dataPriv.get( elem, "display" ) || null;
+				if ( !values[ index ] ) {
+					elem.style.display = "";
+				}
+			}
+			if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) {
+				values[ index ] = getDefaultDisplay( elem );
+			}
+		} else {
+			if ( display !== "none" ) {
+				values[ index ] = "none";
+
+				// Remember what we're overwriting
+				dataPriv.set( elem, "display", display );
+			}
+		}
+	}
+
+	// Set the display of the elements in a second loop to avoid constant reflow
+	for ( index = 0; index < length; index++ ) {
+		if ( values[ index ] != null ) {
+			elements[ index ].style.display = values[ index ];
+		}
+	}
+
+	return elements;
+}
+
+jQuery.fn.extend( {
+	show: function() {
+		return showHide( this, true );
+	},
+	hide: function() {
+		return showHide( this );
+	},
+	toggle: function( state ) {
+		if ( typeof state === "boolean" ) {
+			return state ? this.show() : this.hide();
+		}
+
+		return this.each( function() {
+			if ( isHiddenWithinTree( this ) ) {
+				jQuery( this ).show();
+			} else {
+				jQuery( this ).hide();
+			}
+		} );
+	}
+} );
+var rcheckableType = ( /^(?:checkbox|radio)$/i );
+
+var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]+)/i );
+
+var rscriptType = ( /^$|\/(?:java|ecma)script/i );
+
+
+
+// We have to close these tags to support XHTML (#13200)
+var wrapMap = {
+
+	// Support: IE <=9 only
+	option: [ 1, "<select multiple='multiple'>", "</select>" ],
+
+	// XHTML parsers do not magically insert elements in the
+	// same way that tag soup parsers do. So we cannot shorten
+	// this by omitting <tbody> or other required elements.
+	thead: [ 1, "<table>", "</table>" ],
+	col: [ 2, "<table><colgroup>", "</colgroup></table>" ],
+	tr: [ 2, "<table><tbody>", "</tbody></table>" ],
+	td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ],
+
+	_default: [ 0, "", "" ]
+};
+
+// Support: IE <=9 only
+wrapMap.optgroup = wrapMap.option;
+
+wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead;
+wrapMap.th = wrapMap.td;
+
+
+function getAll( context, tag ) {
+
+	// Support: IE <=9 - 11 only
+	// Use typeof to avoid zero-argument method invocation on host objects (#15151)
+	var ret;
+
+	if ( typeof context.getElementsByTagName !== "undefined" ) {
+		ret = context.getElementsByTagName( tag || "*" );
+
+	} else if ( typeof context.querySelectorAll !== "undefined" ) {
+		ret = context.querySelectorAll( tag || "*" );
+
+	} else {
+		ret = [];
+	}
+
+	if ( tag === undefined || tag && nodeName( context, tag ) ) {
+		return jQuery.merge( [ context ], ret );
+	}
+
+	return ret;
+}
+
+
+// Mark scripts as having already been evaluated
+function setGlobalEval( elems, refElements ) {
+	var i = 0,
+		l = elems.length;
+
+	for ( ; i < l; i++ ) {
+		dataPriv.set(
+			elems[ i ],
+			"globalEval",
+			!refElements || dataPriv.get( refElements[ i ], "globalEval" )
+		);
+	}
+}
+
+
+var rhtml = /<|&#?\w+;/;
+
+function buildFragment( elems, context, scripts, selection, ignored ) {
+	var elem, tmp, tag, wrap, contains, j,
+		fragment = context.createDocumentFragment(),
+		nodes = [],
+		i = 0,
+		l = elems.length;
+
+	for ( ; i < l; i++ ) {
+		elem = elems[ i ];
+
+		if ( elem || elem === 0 ) {
+
+			// Add nodes directly
+			if ( jQuery.type( elem ) === "object" ) {
+
+				// Support: Android <=4.0 only, PhantomJS 1 only
+				// push.apply(_, arraylike) throws on ancient WebKit
+				jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem );
+
+			// Convert non-html into a text node
+			} else if ( !rhtml.test( elem ) ) {
+				nodes.push( context.createTextNode( elem ) );
+
+			// Convert html into DOM nodes
+			} else {
+				tmp = tmp || fragment.appendChild( context.createElement( "div" ) );
+
+				// Deserialize a standard representation
+				tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase();
+				wrap = wrapMap[ tag ] || wrapMap._default;
+				tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ];
+
+				// Descend through wrappers to the right content
+				j = wrap[ 0 ];
+				while ( j-- ) {
+					tmp = tmp.lastChild;
+				}
+
+				// Support: Android <=4.0 only, PhantomJS 1 only
+				// push.apply(_, arraylike) throws on ancient WebKit
+				jQuery.merge( nodes, tmp.childNodes );
+
+				// Remember the top-level container
+				tmp = fragment.firstChild;
+
+				// Ensure the created nodes are orphaned (#12392)
+				tmp.textContent = "";
+			}
+		}
+	}
+
+	// Remove wrapper from fragment
+	fragment.textContent = "";
+
+	i = 0;
+	while ( ( elem = nodes[ i++ ] ) ) {
+
+		// Skip elements already in the context collection (trac-4087)
+		if ( selection && jQuery.inArray( elem, selection ) > -1 ) {
+			if ( ignored ) {
+				ignored.push( elem );
+			}
+			continue;
+		}
+
+		contains = jQuery.contains( elem.ownerDocument, elem );
+
+		// Append to fragment
+		tmp = getAll( fragment.appendChild( elem ), "script" );
+
+		// Preserve script evaluation history
+		if ( contains ) {
+			setGlobalEval( tmp );
+		}
+
+		// Capture executables
+		if ( scripts ) {
+			j = 0;
+			while ( ( elem = tmp[ j++ ] ) ) {
+				if ( rscriptType.test( elem.type || "" ) ) {
+					scripts.push( elem );
+				}
+			}
+		}
+	}
+
+	return fragment;
+}
+
+( function() {
+	var fragment = document.createDocumentFragment(),
+		div = fragment.appendChild( document.createElement( "div" ) ),
+		input = document.createElement( "input" );
+
+	// Support: Android 4.0 - 4.3 only
+	// Check state lost if the name is set (#11217)
+	// Support: Windows Web Apps (WWA)
+	// `name` and `type` must use .setAttribute for WWA (#14901)
+	input.setAttribute( "type", "radio" );
+	input.setAttribute( "checked", "checked" );
+	input.setAttribute( "name", "t" );
+
+	div.appendChild( input );
+
+	// Support: Android <=4.1 only
+	// Older WebKit doesn't clone checked state correctly in fragments
+	support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked;
+
+	// Support: IE <=11 only
+	// Make sure textarea (and checkbox) defaultValue is properly cloned
+	div.innerHTML = "<textarea>x</textarea>";
+	support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue;
+} )();
+var documentElement = document.documentElement;
+
+
+
+var
+	rkeyEvent = /^key/,
+	rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/,
+	rtypenamespace = /^([^.]*)(?:\.(.+)|)/;
+
+function returnTrue() {
+	return true;
+}
+
+function returnFalse() {
+	return false;
+}
+
+// Support: IE <=9 only
+// See #13393 for more info
+function safeActiveElement() {
+	try {
+		return document.activeElement;
+	} catch ( err ) { }
+}
+
+function on( elem, types, selector, data, fn, one ) {
+	var origFn, type;
+
+	// Types can be a map of types/handlers
+	if ( typeof types === "object" ) {
+
+		// ( types-Object, selector, data )
+		if ( typeof selector !== "string" ) {
+
+			// ( types-Object, data )
+			data = data || selector;
+			selector = undefined;
+		}
+		for ( type in types ) {
+			on( elem, type, selector, data, types[ type ], one );
+		}
+		return elem;
+	}
+
+	if ( data == null && fn == null ) {
+
+		// ( types, fn )
+		fn = selector;
+		data = selector = undefined;
+	} else if ( fn == null ) {
+		if ( typeof selector === "string" ) {
+
+			// ( types, selector, fn )
+			fn = data;
+			data = undefined;
+		} else {
+
+			// ( types, data, fn )
+			fn = data;
+			data = selector;
+			selector = undefined;
+		}
+	}
+	if ( fn === false ) {
+		fn = returnFalse;
+	} else if ( !fn ) {
+		return elem;
+	}
+
+	if ( one === 1 ) {
+		origFn = fn;
+		fn = function( event ) {
+
+			// Can use an empty set, since event contains the info
+			jQuery().off( event );
+			return origFn.apply( this, arguments );
+		};
+
+		// Use same guid so caller can remove using origFn
+		fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ );
+	}
+	return elem.each( function() {
+		jQuery.event.add( this, types, fn, data, selector );
+	} );
+}
+
+/*
+ * Helper functions for managing events -- not part of the public interface.
+ * Props to Dean Edwards' addEvent library for many of the ideas.
+ */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + + var handleObjIn, eventHandle, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.get( elem ); + + // Don't attach events to noData or text/comment nodes (but allow plain objects) + if ( !elemData ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Ensure that invalid selectors throw exceptions at attach time + // Evaluate against documentElement in case elem is a non-element node (e.g., document) + if ( selector ) { + jQuery.find.matchesSelector( documentElement, selector ); + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !( events = elemData.events ) ) { + events = elemData.events = {}; + } + if ( !( eventHandle = elemData.handle ) ) { + eventHandle = elemData.handle = function( e ) { + + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? + jQuery.event.dispatch.apply( elem, arguments ) : undefined; + }; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend( { + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join( "." 
) + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !( handlers = events[ type ] ) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener if the special events handler returns false + if ( !special.setup || + special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + + var j, origCount, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); + + if ( !elemData || !( events = elemData.events ) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? 
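+			// Editor's note: resolve the same mapped type add() used (e.g.
+			// "focusin" for delegated "focus") so the right handler list is
+			// searched.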
special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[ 2 ] && + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || + selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || + special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove data and the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + dataPriv.remove( elem, "handle events" ); + } + }, + + dispatch: function( nativeEvent ) { + + // Make a writable jQuery.Event from the native event object + var event = jQuery.event.fix( nativeEvent ); + + var i, j, ret, matched, handleObj, handlerQueue, + args = new Array( arguments.length ), + handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[ 0 ] = event; + + for ( i = 1; i < arguments.length; i++ ) { + args[ i ] = arguments[ i ]; + } + + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( ( handleObj = matched.handlers[ j++ ] ) && + !event.isImmediatePropagationStopped() ) { + + // Triggered event must either 1) have no namespace, or 2) have namespace(s) + // a subset or equal to those in the bound event (both can have no namespace). 
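+				// E.g. (editor's illustration): a handler bound as "click.menu"
+				// runs for .trigger( "click" ) and .trigger( "click.menu" ),
+				// but not for .trigger( "click.other" ).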
+ if ( !event.rnamespace || event.rnamespace.test( handleObj.namespace ) ) { + + event.handleObj = handleObj; + event.data = handleObj.data; + + ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || + handleObj.handler ).apply( matched.elem, args ); + + if ( ret !== undefined ) { + if ( ( event.result = ret ) === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + handlers: function( event, handlers ) { + var i, handleObj, sel, matchedHandlers, matchedSelectors, + handlerQueue = [], + delegateCount = handlers.delegateCount, + cur = event.target; + + // Find delegate handlers + if ( delegateCount && + + // Support: IE <=9 + // Black-hole SVG instance trees (trac-13180) + cur.nodeType && + + // Support: Firefox <=42 + // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) + // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click + // Support: IE 11 only + // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) + !( event.type === "click" && event.button >= 1 ) ) { + + for ( ; cur !== this; cur = cur.parentNode || this ) { + + // Don't check non-elements (#13208) + // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { + matchedHandlers = []; + matchedSelectors = {}; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + + // Don't conflict with Object.prototype properties (#13203) + sel = handleObj.selector + " "; + + if ( matchedSelectors[ sel ] === undefined ) { + matchedSelectors[ sel ] = handleObj.needsContext ? + jQuery( sel, this ).index( cur ) > -1 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( matchedSelectors[ sel ] ) { + matchedHandlers.push( handleObj ); + } + } + if ( matchedHandlers.length ) { + handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); + } + } + } + } + + // Add the remaining (directly-bound) handlers + cur = this; + if ( delegateCount < handlers.length ) { + handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); + } + + return handlerQueue; + }, + + addProp: function( name, hook ) { + Object.defineProperty( jQuery.Event.prototype, name, { + enumerable: true, + configurable: true, + + get: jQuery.isFunction( hook ) ? + function() { + if ( this.originalEvent ) { + return hook( this.originalEvent ); + } + } : + function() { + if ( this.originalEvent ) { + return this.originalEvent[ name ]; + } + }, + + set: function( value ) { + Object.defineProperty( this, name, { + enumerable: true, + configurable: true, + writable: true, + value: value + } ); + } + } ); + }, + + fix: function( originalEvent ) { + return originalEvent[ jQuery.expando ] ? 
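+		// Editor's note: events already wrapped once carry the jQuery.expando
+		// marker (set in the constructor below), so they are reused rather
+		// than wrapped twice.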
+ originalEvent : + new jQuery.Event( originalEvent ); + }, + + special: { + load: { + + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + focus: { + + // Fire native event if possible so blur/focus sequence is correct + trigger: function() { + if ( this !== safeActiveElement() && this.focus ) { + this.focus(); + return false; + } + }, + delegateType: "focusin" + }, + blur: { + trigger: function() { + if ( this === safeActiveElement() && this.blur ) { + this.blur(); + return false; + } + }, + delegateType: "focusout" + }, + click: { + + // For checkbox, fire native event so checked state will be right + trigger: function() { + if ( this.type === "checkbox" && this.click && nodeName( this, "input" ) ) { + this.click(); + return false; + } + }, + + // For cross-browser consistency, don't fire native .click() on links + _default: function( event ) { + return nodeName( event.target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + } +}; + +jQuery.removeEvent = function( elem, type, handle ) { + + // This "if" is needed for plain objects + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle ); + } +}; + +jQuery.Event = function( src, props ) { + + // Allow instantiation without the 'new' keyword + if ( !( this instanceof jQuery.Event ) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + + // Support: Android <=2.3 only + src.returnValue === false ? + returnTrue : + returnFalse; + + // Create target properties + // Support: Safari <=6 - 7 only + // Target should not be a text node (#504, #13143) + this.target = ( src.target && src.target.nodeType === 3 ) ? 
+ src.target.parentNode : + src.target; + + this.currentTarget = src.currentTarget; + this.relatedTarget = src.relatedTarget; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || jQuery.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + constructor: jQuery.Event, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + isSimulated: false, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + + if ( e && !this.isSimulated ) { + e.preventDefault(); + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopPropagation(); + } + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Includes all common event props including KeyEvent and MouseEvent specific props +jQuery.each( { + altKey: true, + bubbles: true, + cancelable: true, + changedTouches: true, + ctrlKey: true, + detail: true, + eventPhase: true, + metaKey: true, + pageX: true, + pageY: true, + shiftKey: true, + view: true, + "char": true, + charCode: true, + key: true, + keyCode: true, + button: true, + buttons: true, + clientX: true, + clientY: true, + offsetX: true, + offsetY: true, + pointerId: true, + pointerType: true, + screenX: true, + screenY: true, + targetTouches: true, + toElement: true, + touches: true, + + which: function( event ) { + var button = event.button; + + // Add which for key events + if ( event.which == null && rkeyEvent.test( event.type ) ) { + return event.charCode != null ? event.charCode : event.keyCode; + } + + // Add which for click: 1 === left; 2 === middle; 3 === right + if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { + if ( button & 1 ) { + return 1; + } + + if ( button & 2 ) { + return 3; + } + + if ( button & 4 ) { + return 2; + } + + return 0; + } + + return event.which; + } +}, jQuery.event.addProp ); + +// Create mouseenter/leave events using mouseover/out and event-time checks +// so that event delegation works in jQuery. +// Do the same for pointerenter/pointerleave and pointerover/pointerout +// +// Support: Safari 7 only +// Safari sends mouseenter too often; see: +// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 +// for the description of the bug (it existed in older Chrome versions as well). +jQuery.each( { + mouseenter: "mouseover", + mouseleave: "mouseout", + pointerenter: "pointerover", + pointerleave: "pointerout" +}, function( orig, fix ) { + jQuery.event.special[ orig ] = { + delegateType: fix, + bindType: fix, + + handle: function( event ) { + var ret, + target = this, + related = event.relatedTarget, + handleObj = event.handleObj; + + // For mouseenter/leave call the handler if related is outside the target. 
+			// NB: No relatedTarget if the mouse left/entered the browser window
+			if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) {
+				event.type = handleObj.origType;
+				ret = handleObj.handler.apply( this, arguments );
+				event.type = fix;
+			}
+			return ret;
+		}
+	};
+} );
+
+jQuery.fn.extend( {
+
+	on: function( types, selector, data, fn ) {
+		return on( this, types, selector, data, fn );
+	},
+	one: function( types, selector, data, fn ) {
+		return on( this, types, selector, data, fn, 1 );
+	},
+	off: function( types, selector, fn ) {
+		var handleObj, type;
+		if ( types && types.preventDefault && types.handleObj ) {
+
+			// ( event )  dispatched jQuery.Event
+			handleObj = types.handleObj;
+			jQuery( types.delegateTarget ).off(
+				handleObj.namespace ?
+					handleObj.origType + "." + handleObj.namespace :
+					handleObj.origType,
+				handleObj.selector,
+				handleObj.handler
+			);
+			return this;
+		}
+		if ( typeof types === "object" ) {
+
+			// ( types-object [, selector] )
+			for ( type in types ) {
+				this.off( type, selector, types[ type ] );
+			}
+			return this;
+		}
+		if ( selector === false || typeof selector === "function" ) {
+
+			// ( types [, fn] )
+			fn = selector;
+			selector = undefined;
+		}
+		if ( fn === false ) {
+			fn = returnFalse;
+		}
+		return this.each( function() {
+			jQuery.event.remove( this, types, fn, selector );
+		} );
+	}
+} );
+
+
+var
+
+	/* eslint-disable max-len */
+
+	// See https://github.com/eslint/eslint/issues/3229
+	rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi,
+
+	/* eslint-enable */
+
+	// Support: IE <=10 - 11, Edge 12 - 13
+	// In IE/Edge using regex groups here causes severe slowdowns.
+	// See https://connect.microsoft.com/IE/feedback/details/1736512/
+	rnoInnerhtml = /<script|<style|<link/i,
+
+	// checked="checked" or checked
+	rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
+	rscriptTypeMasked = /^true\/(.*)/,
+	rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;
+
+// Prefer a tbody over its parent table for containing new rows
+function manipulationTarget( elem, content ) {
+	if ( nodeName( elem, "table" ) &&
+		nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) {
+
+		return jQuery( ">tbody", elem )[ 0 ] || elem;
+	}
+
+	return elem;
+}
+
+// Replace/restore the type attribute of script elements for safe DOM manipulation
+function disableScript( elem ) {
+	elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type;
+	return elem;
+}
+function restoreScript( elem ) {
+	var match = rscriptTypeMasked.exec( elem.type );
+
+	if ( match ) {
+		elem.type = match[ 1 ];
+	} else {
+		elem.removeAttribute( "type" );
+	}
+
+	return elem;
+}
+
+function cloneCopyEvent( src, dest ) {
+	var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events;
+
+	if ( dest.nodeType !== 1 ) {
+		return;
+	}
+
+	// 1. Copy private data: events, handlers, etc.
+	if ( dataPriv.hasData( src ) ) {
+		pdataOld = dataPriv.access( src );
+		pdataCur = dataPriv.set( dest, pdataOld );
+		events = pdataOld.events;
+
+		if ( events ) {
+			delete pdataCur.handle;
+			pdataCur.events = {};
+
+			for ( type in events ) {
+				for ( i = 0, l = events[ type ].length; i < l; i++ ) {
+					jQuery.event.add( dest, type, events[ type ][ i ] );
+				}
+			}
+		}
+	}
+
+	// 2. Copy user data
+	if ( dataUser.hasData( src ) ) {
+		udataOld = dataUser.access( src );
+		udataCur = jQuery.extend( {}, udataOld );
+
+		dataUser.set( dest, udataCur );
+	}
+}
+
+// Fix IE bugs, see support tests
+function fixInput( src, dest ) {
+	var nodeName = dest.nodeName.toLowerCase();
+
+	// Fails to persist the checked state of a cloned checkbox or radio button.
+ if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + dest.checked = src.checked; + + // Fails to return the selected option to the default selected state when cloning options + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +function domManip( collection, args, callback, ignored ) { + + // Flatten any nested arrays + args = concat.apply( [], args ); + + var fragment, first, scripts, hasScripts, node, doc, + i = 0, + l = collection.length, + iNoClone = l - 1, + value = args[ 0 ], + isFunction = jQuery.isFunction( value ); + + // We can't cloneNode fragments that contain checked, in WebKit + if ( isFunction || + ( l > 1 && typeof value === "string" && + !support.checkClone && rchecked.test( value ) ) ) { + return collection.each( function( index ) { + var self = collection.eq( index ); + if ( isFunction ) { + args[ 0 ] = value.call( this, index, self.html() ); + } + domManip( self, args, callback, ignored ); + } ); + } + + if ( l ) { + fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + // Require either new content or an interest in ignored elements to invoke the callback + if ( first || ignored ) { + scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); + hasScripts = scripts.length; + + // Use the original fragment for the last item + // instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). + for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( collection[ i ], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !dataPriv.access( node, "globalEval" ) && + jQuery.contains( doc, node ) ) { + + if ( node.src ) { + + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl ) { + jQuery._evalUrl( node.src ); + } + } else { + DOMEval( node.textContent.replace( rcleanScript, "" ), doc ); + } + } + } + } + } + } + + return collection; +} + +function remove( elem, selector, keepData ) { + var node, + nodes = selector ? 
jQuery.filter( selector, elem ) : elem, + i = 0; + + for ( ; ( node = nodes[ i ] ) != null; i++ ) { + if ( !keepData && node.nodeType === 1 ) { + jQuery.cleanData( getAll( node ) ); + } + + if ( node.parentNode ) { + if ( keepData && jQuery.contains( node.ownerDocument, node ) ) { + setGlobalEval( getAll( node, "script" ) ); + } + node.parentNode.removeChild( node ); + } + } + + return elem; +} + +jQuery.extend( { + htmlPrefilter: function( html ) { + return html.replace( rxhtmlTag, "<$1>" ); + }, + + clone: function( elem, dataAndEvents, deepDataAndEvents ) { + var i, l, srcElements, destElements, + clone = elem.cloneNode( true ), + inPage = jQuery.contains( elem.ownerDocument, elem ); + + // Fix IE cloning issues + if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && + !jQuery.isXMLDoc( elem ) ) { + + // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 + destElements = getAll( clone ); + srcElements = getAll( elem ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + fixInput( srcElements[ i ], destElements[ i ] ); + } + } + + // Copy the events from the original to the clone + if ( dataAndEvents ) { + if ( deepDataAndEvents ) { + srcElements = srcElements || getAll( elem ); + destElements = destElements || getAll( clone ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + cloneCopyEvent( srcElements[ i ], destElements[ i ] ); + } + } else { + cloneCopyEvent( elem, clone ); + } + } + + // Preserve script evaluation history + destElements = getAll( clone, "script" ); + if ( destElements.length > 0 ) { + setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); + } + + // Return the cloned set + return clone; + }, + + cleanData: function( elems ) { + var data, elem, type, + special = jQuery.event.special, + i = 0; + + for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { + if ( acceptData( elem ) ) { + if ( ( data = elem[ dataPriv.expando ] ) ) { + if ( data.events ) { + for ( type in data.events ) { + if ( special[ type ] ) { + jQuery.event.remove( elem, type ); + + // This is a shortcut to avoid jQuery.event.remove's overhead + } else { + jQuery.removeEvent( elem, type, data.handle ); + } + } + } + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataPriv.expando ] = undefined; + } + if ( elem[ dataUser.expando ] ) { + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataUser.expando ] = undefined; + } + } + } + } +} ); + +jQuery.fn.extend( { + detach: function( selector ) { + return remove( this, selector, true ); + }, + + remove: function( selector ) { + return remove( this, selector ); + }, + + text: function( value ) { + return access( this, function( value ) { + return value === undefined ? 
+ jQuery.text( this ) : + this.empty().each( function() { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + this.textContent = value; + } + } ); + }, null, value, arguments.length ); + }, + + append: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + } ); + }, + + prepend: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + } ); + }, + + before: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + } ); + }, + + after: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + } ); + }, + + empty: function() { + var elem, + i = 0; + + for ( ; ( elem = this[ i ] ) != null; i++ ) { + if ( elem.nodeType === 1 ) { + + // Prevent memory leaks + jQuery.cleanData( getAll( elem, false ) ); + + // Remove any remaining nodes + elem.textContent = ""; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map( function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + } ); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined && elem.nodeType === 1 ) { + return elem.innerHTML; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { + + value = jQuery.htmlPrefilter( value ); + + try { + for ( ; i < l; i++ ) { + elem = this[ i ] || {}; + + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch ( e ) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function() { + var ignored = []; + + // Make the changes, replacing each non-ignored context element with the new content + return domManip( this, arguments, function( elem ) { + var parent = this.parentNode; + + if ( jQuery.inArray( this, ignored ) < 0 ) { + jQuery.cleanData( getAll( this ) ); + if ( parent ) { + parent.replaceChild( elem, this ); + } + } + + // Force callback invocation + }, ignored ); + } +} ); + +jQuery.each( { + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = function( selector ) { + var elems, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1, + i = 0; + + for ( ; i <= last; i++ ) { + elems = i === last ? 
this : this.clone( true ); + jQuery( insert[ i ] )[ original ]( elems ); + + // Support: Android <=4.0 only, PhantomJS 1 only + // .get() because push.apply(_, arraylike) throws on ancient WebKit + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +} ); +var rmargin = ( /^margin/ ); + +var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); + +var getStyles = function( elem ) { + + // Support: IE <=11 only, Firefox <=30 (#15098, #14150) + // IE throws on elements created in popups + // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" + var view = elem.ownerDocument.defaultView; + + if ( !view || !view.opener ) { + view = window; + } + + return view.getComputedStyle( elem ); + }; + + + +( function() { + + // Executing both pixelPosition & boxSizingReliable tests require only one layout + // so they're executed at the same time to save the second computation. + function computeStyleTests() { + + // This is a singleton, we need to execute it only once + if ( !div ) { + return; + } + + div.style.cssText = + "box-sizing:border-box;" + + "position:relative;display:block;" + + "margin:auto;border:1px;padding:1px;" + + "top:1%;width:50%"; + div.innerHTML = ""; + documentElement.appendChild( container ); + + var divStyle = window.getComputedStyle( div ); + pixelPositionVal = divStyle.top !== "1%"; + + // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 + reliableMarginLeftVal = divStyle.marginLeft === "2px"; + boxSizingReliableVal = divStyle.width === "4px"; + + // Support: Android 4.0 - 4.3 only + // Some styles come back with percentage values, even though they shouldn't + div.style.marginRight = "50%"; + pixelMarginRightVal = divStyle.marginRight === "4px"; + + documentElement.removeChild( container ); + + // Nullify the div so it wouldn't be stored in the memory and + // it will also be a sign that checks already performed + div = null; + } + + var pixelPositionVal, boxSizingReliableVal, pixelMarginRightVal, reliableMarginLeftVal, + container = document.createElement( "div" ), + div = document.createElement( "div" ); + + // Finish early in limited (non-browser) environments + if ( !div.style ) { + return; + } + + // Support: IE <=9 - 11 only + // Style of cloned element affects source element cloned (#8908) + div.style.backgroundClip = "content-box"; + div.cloneNode( true ).style.backgroundClip = ""; + support.clearCloneStyle = div.style.backgroundClip === "content-box"; + + container.style.cssText = "border:0;width:8px;height:0;top:0;left:-9999px;" + + "padding:0;margin-top:1px;position:absolute"; + container.appendChild( div ); + + jQuery.extend( support, { + pixelPosition: function() { + computeStyleTests(); + return pixelPositionVal; + }, + boxSizingReliable: function() { + computeStyleTests(); + return boxSizingReliableVal; + }, + pixelMarginRight: function() { + computeStyleTests(); + return pixelMarginRightVal; + }, + reliableMarginLeft: function() { + computeStyleTests(); + return reliableMarginLeftVal; + } + } ); +} )(); + + +function curCSS( elem, name, computed ) { + var width, minWidth, maxWidth, ret, + + // Support: Firefox 51+ + // Retrieving style before computed somehow + // fixes an issue with getting wrong values + // on detached elements + style = elem.style; + + computed = computed || getStyles( elem ); + + // getPropertyValue is needed for: + // .css('filter') (IE 9 only, #12537) + // .css('--customProperty) (#3144) + if ( computed ) { + ret = computed.getPropertyValue( name ) || computed[ name ]; + + if ( 
ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) { + ret = jQuery.style( elem, name ); + } + + // A tribute to the "awesome hack by Dean Edwards" + // Android Browser returns percentage for some values, + // but width seems to be reliably pixels. + // This is against the CSSOM draft spec: + // https://drafts.csswg.org/cssom/#resolved-values + if ( !support.pixelMarginRight() && rnumnonpx.test( ret ) && rmargin.test( name ) ) { + + // Remember the original values + width = style.width; + minWidth = style.minWidth; + maxWidth = style.maxWidth; + + // Put in the new values to get a computed value out + style.minWidth = style.maxWidth = style.width = ret; + ret = computed.width; + + // Revert the changed values + style.width = width; + style.minWidth = minWidth; + style.maxWidth = maxWidth; + } + } + + return ret !== undefined ? + + // Support: IE <=9 - 11 only + // IE returns zIndex value as an integer. + ret + "" : + ret; +} + + +function addGetHookIf( conditionFn, hookFn ) { + + // Define the hook, we'll check on the first run if it's really needed. + return { + get: function() { + if ( conditionFn() ) { + + // Hook not needed (or it's not possible to use it due + // to missing dependency), remove it. + delete this.get; + return; + } + + // Hook needed; redefine it so that the support test is not executed again. + return ( this.get = hookFn ).apply( this, arguments ); + } + }; +} + + +var + + // Swappable if display is none or starts with table + // except "table", "table-cell", or "table-caption" + // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display + rdisplayswap = /^(none|table(?!-c[ea]).+)/, + rcustomProp = /^--/, + cssShow = { position: "absolute", visibility: "hidden", display: "block" }, + cssNormalTransform = { + letterSpacing: "0", + fontWeight: "400" + }, + + cssPrefixes = [ "Webkit", "Moz", "ms" ], + emptyStyle = document.createElement( "div" ).style; + +// Return a css property mapped to a potentially vendor prefixed property +function vendorPropName( name ) { + + // Shortcut for names that are not vendor prefixed + if ( name in emptyStyle ) { + return name; + } + + // Check for vendor prefixed names + var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), + i = cssPrefixes.length; + + while ( i-- ) { + name = cssPrefixes[ i ] + capName; + if ( name in emptyStyle ) { + return name; + } + } +} + +// Return a property mapped along what jQuery.cssProps suggests or to +// a vendor prefixed property. +function finalPropName( name ) { + var ret = jQuery.cssProps[ name ]; + if ( !ret ) { + ret = jQuery.cssProps[ name ] = vendorPropName( name ) || name; + } + return ret; +} + +function setPositiveNumber( elem, value, subtract ) { + + // Any relative (+/-) values have already been + // normalized at this point + var matches = rcssNum.exec( value ); + return matches ? + + // Guard against undefined "subtract", e.g., when used as in cssHooks + Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : + value; +} + +function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) { + var i, + val = 0; + + // If we already have the right measurement, avoid augmentation + if ( extra === ( isBorderBox ? "border" : "content" ) ) { + i = 4; + + // Otherwise initialize for horizontal or vertical properties + } else { + i = name === "width" ? 
1 : 0; + } + + for ( ; i < 4; i += 2 ) { + + // Both box models exclude margin, so add it if we want it + if ( extra === "margin" ) { + val += jQuery.css( elem, extra + cssExpand[ i ], true, styles ); + } + + if ( isBorderBox ) { + + // border-box includes padding, so remove it if we want content + if ( extra === "content" ) { + val -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + } + + // At this point, extra isn't border nor margin, so remove border + if ( extra !== "margin" ) { + val -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } else { + + // At this point, extra isn't content, so add padding + val += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + + // At this point, extra isn't content nor padding, so add border + if ( extra !== "padding" ) { + val += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } + } + + return val; +} + +function getWidthOrHeight( elem, name, extra ) { + + // Start with computed style + var valueIsBorderBox, + styles = getStyles( elem ), + val = curCSS( elem, name, styles ), + isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; + + // Computed unit is not pixels. Stop here and return. + if ( rnumnonpx.test( val ) ) { + return val; + } + + // Check for style in case a browser which returns unreliable values + // for getComputedStyle silently falls back to the reliable elem.style + valueIsBorderBox = isBorderBox && + ( support.boxSizingReliable() || val === elem.style[ name ] ); + + // Fall back to offsetWidth/Height when value is "auto" + // This happens for inline elements with no explicit setting (gh-3571) + if ( val === "auto" ) { + val = elem[ "offset" + name[ 0 ].toUpperCase() + name.slice( 1 ) ]; + } + + // Normalize "", auto, and prepare for extra + val = parseFloat( val ) || 0; + + // Use the active box-sizing model to add/subtract irrelevant styles + return ( val + + augmentWidthOrHeight( + elem, + name, + extra || ( isBorderBox ? "border" : "content" ), + valueIsBorderBox, + styles + ) + ) + "px"; +} + +jQuery.extend( { + + // Add in style property hooks for overriding the default + // behavior of getting and setting a style property + cssHooks: { + opacity: { + get: function( elem, computed ) { + if ( computed ) { + + // We should always get a number back from opacity + var ret = curCSS( elem, "opacity" ); + return ret === "" ? "1" : ret; + } + } + } + }, + + // Don't automatically add "px" to these possibly-unitless properties + cssNumber: { + "animationIterationCount": true, + "columnCount": true, + "fillOpacity": true, + "flexGrow": true, + "flexShrink": true, + "fontWeight": true, + "lineHeight": true, + "opacity": true, + "order": true, + "orphans": true, + "widows": true, + "zIndex": true, + "zoom": true + }, + + // Add in properties whose names you wish to fix before + // setting or getting the value + cssProps: { + "float": "cssFloat" + }, + + // Get and set the style property on a DOM Node + style: function( elem, name, value, extra ) { + + // Don't set styles on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { + return; + } + + // Make sure that we're working with the right name + var ret, type, hooks, + origName = jQuery.camelCase( name ), + isCustomProp = rcustomProp.test( name ), + style = elem.style; + + // Make sure that we're working with the right name. We don't + // want to query the value if it is a CSS custom property + // since they are user-defined. 
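+ // (For instance, jQuery( elem ).css( "--main-color", "#333" ) is handled
+ // below via style.setProperty() rather than a camelCased property lookup;
+ // "--main-color" is an illustrative placeholder.)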
+ if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Gets hook for the prefixed version, then unprefixed version + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // Check if we're setting a value + if ( value !== undefined ) { + type = typeof value; + + // Convert "+=" or "-=" to relative numbers (#7345) + if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { + value = adjustCSS( elem, name, ret ); + + // Fixes bug #9237 + type = "number"; + } + + // Make sure that null and NaN values aren't set (#7116) + if ( value == null || value !== value ) { + return; + } + + // If a number was passed in, add the unit (except for certain CSS properties) + if ( type === "number" ) { + value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); + } + + // background-* props affect original clone's values + if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { + style[ name ] = "inherit"; + } + + // If a hook was provided, use that value, otherwise just set the specified value + if ( !hooks || !( "set" in hooks ) || + ( value = hooks.set( elem, value, extra ) ) !== undefined ) { + + if ( isCustomProp ) { + style.setProperty( name, value ); + } else { + style[ name ] = value; + } + } + + } else { + + // If a hook was provided get the non-computed value from there + if ( hooks && "get" in hooks && + ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { + + return ret; + } + + // Otherwise just get the value from the style object + return style[ name ]; + } + }, + + css: function( elem, name, extra, styles ) { + var val, num, hooks, + origName = jQuery.camelCase( name ), + isCustomProp = rcustomProp.test( name ); + + // Make sure that we're working with the right name. We don't + // want to modify the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Try prefixed name followed by the unprefixed name + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // If a hook was provided get the computed value from there + if ( hooks && "get" in hooks ) { + val = hooks.get( elem, true, extra ); + } + + // Otherwise, if a way to get the computed value exists, use that + if ( val === undefined ) { + val = curCSS( elem, name, styles ); + } + + // Convert "normal" to computed value + if ( val === "normal" && name in cssNormalTransform ) { + val = cssNormalTransform[ name ]; + } + + // Make numeric if forced or a qualifier was provided and val looks numeric + if ( extra === "" || extra ) { + num = parseFloat( val ); + return extra === true || isFinite( num ) ? num || 0 : val; + } + + return val; + } +} ); + +jQuery.each( [ "height", "width" ], function( i, name ) { + jQuery.cssHooks[ name ] = { + get: function( elem, computed, extra ) { + if ( computed ) { + + // Certain elements can have dimension info if we invisibly show them + // but it must have a current display style that would benefit + return rdisplayswap.test( jQuery.css( elem, "display" ) ) && + + // Support: Safari 8+ + // Table columns in Safari have non-zero offsetWidth & zero + // getBoundingClientRect().width unless display is changed. + // Support: IE <=11 only + // Running getBoundingClientRect on a disconnected node + // in IE throws an error. + ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? 
+ swap( elem, cssShow, function() { + return getWidthOrHeight( elem, name, extra ); + } ) : + getWidthOrHeight( elem, name, extra ); + } + }, + + set: function( elem, value, extra ) { + var matches, + styles = extra && getStyles( elem ), + subtract = extra && augmentWidthOrHeight( + elem, + name, + extra, + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + styles + ); + + // Convert to pixels if value adjustment is needed + if ( subtract && ( matches = rcssNum.exec( value ) ) && + ( matches[ 3 ] || "px" ) !== "px" ) { + + elem.style[ name ] = value; + value = jQuery.css( elem, name ); + } + + return setPositiveNumber( elem, value, subtract ); + } + }; +} ); + +jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, + function( elem, computed ) { + if ( computed ) { + return ( parseFloat( curCSS( elem, "marginLeft" ) ) || + elem.getBoundingClientRect().left - + swap( elem, { marginLeft: 0 }, function() { + return elem.getBoundingClientRect().left; + } ) + ) + "px"; + } + } +); + +// These hooks are used by animate to expand properties +jQuery.each( { + margin: "", + padding: "", + border: "Width" +}, function( prefix, suffix ) { + jQuery.cssHooks[ prefix + suffix ] = { + expand: function( value ) { + var i = 0, + expanded = {}, + + // Assumes a single number if not a string + parts = typeof value === "string" ? value.split( " " ) : [ value ]; + + for ( ; i < 4; i++ ) { + expanded[ prefix + cssExpand[ i ] + suffix ] = + parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; + } + + return expanded; + } + }; + + if ( !rmargin.test( prefix ) ) { + jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; + } +} ); + +jQuery.fn.extend( { + css: function( name, value ) { + return access( this, function( elem, name, value ) { + var styles, len, + map = {}, + i = 0; + + if ( Array.isArray( name ) ) { + styles = getStyles( elem ); + len = name.length; + + for ( ; i < len; i++ ) { + map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); + } + + return map; + } + + return value !== undefined ? + jQuery.style( elem, name, value ) : + jQuery.css( elem, name ); + }, name, value, arguments.length > 1 ); + } +} ); + + +function Tween( elem, options, prop, end, easing ) { + return new Tween.prototype.init( elem, options, prop, end, easing ); +} +jQuery.Tween = Tween; + +Tween.prototype = { + constructor: Tween, + init: function( elem, options, prop, end, easing, unit ) { + this.elem = elem; + this.prop = prop; + this.easing = easing || jQuery.easing._default; + this.options = options; + this.start = this.now = this.cur(); + this.end = end; + this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); + }, + cur: function() { + var hooks = Tween.propHooks[ this.prop ]; + + return hooks && hooks.get ? 
+ hooks.get( this ) : + Tween.propHooks._default.get( this ); + }, + run: function( percent ) { + var eased, + hooks = Tween.propHooks[ this.prop ]; + + if ( this.options.duration ) { + this.pos = eased = jQuery.easing[ this.easing ]( + percent, this.options.duration * percent, 0, 1, this.options.duration + ); + } else { + this.pos = eased = percent; + } + this.now = ( this.end - this.start ) * eased + this.start; + + if ( this.options.step ) { + this.options.step.call( this.elem, this.now, this ); + } + + if ( hooks && hooks.set ) { + hooks.set( this ); + } else { + Tween.propHooks._default.set( this ); + } + return this; + } +}; + +Tween.prototype.init.prototype = Tween.prototype; + +Tween.propHooks = { + _default: { + get: function( tween ) { + var result; + + // Use a property on the element directly when it is not a DOM element, + // or when there is no matching style property that exists. + if ( tween.elem.nodeType !== 1 || + tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { + return tween.elem[ tween.prop ]; + } + + // Passing an empty string as a 3rd parameter to .css will automatically + // attempt a parseFloat and fallback to a string if the parse fails. + // Simple values such as "10px" are parsed to Float; + // complex values such as "rotate(1rad)" are returned as-is. + result = jQuery.css( tween.elem, tween.prop, "" ); + + // Empty strings, null, undefined and "auto" are converted to 0. + return !result || result === "auto" ? 0 : result; + }, + set: function( tween ) { + + // Use step hook for back compat. + // Use cssHook if its there. + // Use .style if available and use plain properties where available. + if ( jQuery.fx.step[ tween.prop ] ) { + jQuery.fx.step[ tween.prop ]( tween ); + } else if ( tween.elem.nodeType === 1 && + ( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null || + jQuery.cssHooks[ tween.prop ] ) ) { + jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); + } else { + tween.elem[ tween.prop ] = tween.now; + } + } + } +}; + +// Support: IE <=9 only +// Panic based approach to setting things on disconnected nodes +Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { + set: function( tween ) { + if ( tween.elem.nodeType && tween.elem.parentNode ) { + tween.elem[ tween.prop ] = tween.now; + } + } +}; + +jQuery.easing = { + linear: function( p ) { + return p; + }, + swing: function( p ) { + return 0.5 - Math.cos( p * Math.PI ) / 2; + }, + _default: "swing" +}; + +jQuery.fx = Tween.prototype.init; + +// Back compat <1.8 extension point +jQuery.fx.step = {}; + + + + +var + fxNow, inProgress, + rfxtypes = /^(?:toggle|show|hide)$/, + rrun = /queueHooks$/; + +function schedule() { + if ( inProgress ) { + if ( document.hidden === false && window.requestAnimationFrame ) { + window.requestAnimationFrame( schedule ); + } else { + window.setTimeout( schedule, jQuery.fx.interval ); + } + + jQuery.fx.tick(); + } +} + +// Animations created synchronously will run synchronously +function createFxNow() { + window.setTimeout( function() { + fxNow = undefined; + } ); + return ( fxNow = jQuery.now() ); +} + +// Generate parameters to create a standard animation +function genFx( type, includeWidth ) { + var which, + i = 0, + attrs = { height: type }; + + // If we include width, step value is 1 to do all cssExpand values, + // otherwise step value is 2 to skip over Left and Right + includeWidth = includeWidth ? 
1 : 0; + for ( ; i < 4; i += 2 - includeWidth ) { + which = cssExpand[ i ]; + attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; + } + + if ( includeWidth ) { + attrs.opacity = attrs.width = type; + } + + return attrs; +} + +function createTween( value, prop, animation ) { + var tween, + collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), + index = 0, + length = collection.length; + for ( ; index < length; index++ ) { + if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { + + // We're done with this property + return tween; + } + } +} + +function defaultPrefilter( elem, props, opts ) { + var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, + isBox = "width" in props || "height" in props, + anim = this, + orig = {}, + style = elem.style, + hidden = elem.nodeType && isHiddenWithinTree( elem ), + dataShow = dataPriv.get( elem, "fxshow" ); + + // Queue-skipping animations hijack the fx hooks + if ( !opts.queue ) { + hooks = jQuery._queueHooks( elem, "fx" ); + if ( hooks.unqueued == null ) { + hooks.unqueued = 0; + oldfire = hooks.empty.fire; + hooks.empty.fire = function() { + if ( !hooks.unqueued ) { + oldfire(); + } + }; + } + hooks.unqueued++; + + anim.always( function() { + + // Ensure the complete handler is called before this completes + anim.always( function() { + hooks.unqueued--; + if ( !jQuery.queue( elem, "fx" ).length ) { + hooks.empty.fire(); + } + } ); + } ); + } + + // Detect show/hide animations + for ( prop in props ) { + value = props[ prop ]; + if ( rfxtypes.test( value ) ) { + delete props[ prop ]; + toggle = toggle || value === "toggle"; + if ( value === ( hidden ? "hide" : "show" ) ) { + + // Pretend to be hidden if this is a "show" and + // there is still data from a stopped show/hide + if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { + hidden = true; + + // Ignore all other no-op show/hide data + } else { + continue; + } + } + orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); + } + } + + // Bail out if this is a no-op like .hide().hide() + propTween = !jQuery.isEmptyObject( props ); + if ( !propTween && jQuery.isEmptyObject( orig ) ) { + return; + } + + // Restrict "overflow" and "display" styles during box animations + if ( isBox && elem.nodeType === 1 ) { + + // Support: IE <=9 - 11, Edge 12 - 13 + // Record all 3 overflow attributes because IE does not infer the shorthand + // from identically-valued overflowX and overflowY + opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; + + // Identify a display type, preferring old show/hide data over the CSS cascade + restoreDisplay = dataShow && dataShow.display; + if ( restoreDisplay == null ) { + restoreDisplay = dataPriv.get( elem, "display" ); + } + display = jQuery.css( elem, "display" ); + if ( display === "none" ) { + if ( restoreDisplay ) { + display = restoreDisplay; + } else { + + // Get nonempty value(s) by temporarily forcing visibility + showHide( [ elem ], true ); + restoreDisplay = elem.style.display || restoreDisplay; + display = jQuery.css( elem, "display" ); + showHide( [ elem ] ); + } + } + + // Animate inline elements as inline-block + if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { + if ( jQuery.css( elem, "float" ) === "none" ) { + + // Restore the original display value at the end of pure show/hide animations + if ( !propTween ) { + anim.done( function() { + style.display = restoreDisplay; + } ); + if ( 
restoreDisplay == null ) { + display = style.display; + restoreDisplay = display === "none" ? "" : display; + } + } + style.display = "inline-block"; + } + } + } + + if ( opts.overflow ) { + style.overflow = "hidden"; + anim.always( function() { + style.overflow = opts.overflow[ 0 ]; + style.overflowX = opts.overflow[ 1 ]; + style.overflowY = opts.overflow[ 2 ]; + } ); + } + + // Implement show/hide animations + propTween = false; + for ( prop in orig ) { + + // General show/hide setup for this element animation + if ( !propTween ) { + if ( dataShow ) { + if ( "hidden" in dataShow ) { + hidden = dataShow.hidden; + } + } else { + dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); + } + + // Store hidden/visible for toggle so `.stop().toggle()` "reverses" + if ( toggle ) { + dataShow.hidden = !hidden; + } + + // Show elements before animating them + if ( hidden ) { + showHide( [ elem ], true ); + } + + /* eslint-disable no-loop-func */ + + anim.done( function() { + + /* eslint-enable no-loop-func */ + + // The final step of a "hide" animation is actually hiding the element + if ( !hidden ) { + showHide( [ elem ] ); + } + dataPriv.remove( elem, "fxshow" ); + for ( prop in orig ) { + jQuery.style( elem, prop, orig[ prop ] ); + } + } ); + } + + // Per-property setup + propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); + if ( !( prop in dataShow ) ) { + dataShow[ prop ] = propTween.start; + if ( hidden ) { + propTween.end = propTween.start; + propTween.start = 0; + } + } + } +} + +function propFilter( props, specialEasing ) { + var index, name, easing, value, hooks; + + // camelCase, specialEasing and expand cssHook pass + for ( index in props ) { + name = jQuery.camelCase( index ); + easing = specialEasing[ name ]; + value = props[ index ]; + if ( Array.isArray( value ) ) { + easing = value[ 1 ]; + value = props[ index ] = value[ 0 ]; + } + + if ( index !== name ) { + props[ name ] = value; + delete props[ index ]; + } + + hooks = jQuery.cssHooks[ name ]; + if ( hooks && "expand" in hooks ) { + value = hooks.expand( value ); + delete props[ name ]; + + // Not quite $.extend, this won't overwrite existing keys. 
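+ // (e.g. the "margin" expand hook turns { margin: "10px 20px" } into marginTop,
+ // marginRight, marginBottom and marginLeft, each inheriting the shorthand's easing.)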
+ // Reusing 'index' because we have the correct "name" + for ( index in value ) { + if ( !( index in props ) ) { + props[ index ] = value[ index ]; + specialEasing[ index ] = easing; + } + } + } else { + specialEasing[ name ] = easing; + } + } +} + +function Animation( elem, properties, options ) { + var result, + stopped, + index = 0, + length = Animation.prefilters.length, + deferred = jQuery.Deferred().always( function() { + + // Don't match elem in the :animated selector + delete tick.elem; + } ), + tick = function() { + if ( stopped ) { + return false; + } + var currentTime = fxNow || createFxNow(), + remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), + + // Support: Android 2.3 only + // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) + temp = remaining / animation.duration || 0, + percent = 1 - temp, + index = 0, + length = animation.tweens.length; + + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( percent ); + } + + deferred.notifyWith( elem, [ animation, percent, remaining ] ); + + // If there's more to do, yield + if ( percent < 1 && length ) { + return remaining; + } + + // If this was an empty animation, synthesize a final progress notification + if ( !length ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + } + + // Resolve the animation and report its conclusion + deferred.resolveWith( elem, [ animation ] ); + return false; + }, + animation = deferred.promise( { + elem: elem, + props: jQuery.extend( {}, properties ), + opts: jQuery.extend( true, { + specialEasing: {}, + easing: jQuery.easing._default + }, options ), + originalProperties: properties, + originalOptions: options, + startTime: fxNow || createFxNow(), + duration: options.duration, + tweens: [], + createTween: function( prop, end ) { + var tween = jQuery.Tween( elem, animation.opts, prop, end, + animation.opts.specialEasing[ prop ] || animation.opts.easing ); + animation.tweens.push( tween ); + return tween; + }, + stop: function( gotoEnd ) { + var index = 0, + + // If we are going to the end, we want to run all the tweens + // otherwise we skip this part + length = gotoEnd ? 
animation.tweens.length : 0; + if ( stopped ) { + return this; + } + stopped = true; + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( 1 ); + } + + // Resolve when we played the last frame; otherwise, reject + if ( gotoEnd ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + deferred.resolveWith( elem, [ animation, gotoEnd ] ); + } else { + deferred.rejectWith( elem, [ animation, gotoEnd ] ); + } + return this; + } + } ), + props = animation.props; + + propFilter( props, animation.opts.specialEasing ); + + for ( ; index < length; index++ ) { + result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); + if ( result ) { + if ( jQuery.isFunction( result.stop ) ) { + jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = + jQuery.proxy( result.stop, result ); + } + return result; + } + } + + jQuery.map( props, createTween, animation ); + + if ( jQuery.isFunction( animation.opts.start ) ) { + animation.opts.start.call( elem, animation ); + } + + // Attach callbacks from options + animation + .progress( animation.opts.progress ) + .done( animation.opts.done, animation.opts.complete ) + .fail( animation.opts.fail ) + .always( animation.opts.always ); + + jQuery.fx.timer( + jQuery.extend( tick, { + elem: elem, + anim: animation, + queue: animation.opts.queue + } ) + ); + + return animation; +} + +jQuery.Animation = jQuery.extend( Animation, { + + tweeners: { + "*": [ function( prop, value ) { + var tween = this.createTween( prop, value ); + adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); + return tween; + } ] + }, + + tweener: function( props, callback ) { + if ( jQuery.isFunction( props ) ) { + callback = props; + props = [ "*" ]; + } else { + props = props.match( rnothtmlwhite ); + } + + var prop, + index = 0, + length = props.length; + + for ( ; index < length; index++ ) { + prop = props[ index ]; + Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; + Animation.tweeners[ prop ].unshift( callback ); + } + }, + + prefilters: [ defaultPrefilter ], + + prefilter: function( callback, prepend ) { + if ( prepend ) { + Animation.prefilters.unshift( callback ); + } else { + Animation.prefilters.push( callback ); + } + } +} ); + +jQuery.speed = function( speed, easing, fn ) { + var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { + complete: fn || !fn && easing || + jQuery.isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing + }; + + // Go to the end state if fx are off + if ( jQuery.fx.off ) { + opt.duration = 0; + + } else { + if ( typeof opt.duration !== "number" ) { + if ( opt.duration in jQuery.fx.speeds ) { + opt.duration = jQuery.fx.speeds[ opt.duration ]; + + } else { + opt.duration = jQuery.fx.speeds._default; + } + } + } + + // Normalize opt.queue - true/undefined/null -> "fx" + if ( opt.queue == null || opt.queue === true ) { + opt.queue = "fx"; + } + + // Queueing + opt.old = opt.complete; + + opt.complete = function() { + if ( jQuery.isFunction( opt.old ) ) { + opt.old.call( this ); + } + + if ( opt.queue ) { + jQuery.dequeue( this, opt.queue ); + } + }; + + return opt; +}; + +jQuery.fn.extend( { + fadeTo: function( speed, to, easing, callback ) { + + // Show any hidden elements after setting opacity to 0 + return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() + + // Animate to the value specified + .end().animate( { opacity: to }, speed, easing, callback ); + }, + animate: function( prop, speed, easing, callback ) { + var empty = jQuery.isEmptyObject( prop ), + optall = jQuery.speed( speed, easing, callback ), + doAnimation = function() { + + // Operate on a copy of prop so per-property easing won't be lost + var anim = Animation( this, jQuery.extend( {}, prop ), optall ); + + // Empty animations, or finishing resolves immediately + if ( empty || dataPriv.get( this, "finish" ) ) { + anim.stop( true ); + } + }; + doAnimation.finish = doAnimation; + + return empty || optall.queue === false ? + this.each( doAnimation ) : + this.queue( optall.queue, doAnimation ); + }, + stop: function( type, clearQueue, gotoEnd ) { + var stopQueue = function( hooks ) { + var stop = hooks.stop; + delete hooks.stop; + stop( gotoEnd ); + }; + + if ( typeof type !== "string" ) { + gotoEnd = clearQueue; + clearQueue = type; + type = undefined; + } + if ( clearQueue && type !== false ) { + this.queue( type || "fx", [] ); + } + + return this.each( function() { + var dequeue = true, + index = type != null && type + "queueHooks", + timers = jQuery.timers, + data = dataPriv.get( this ); + + if ( index ) { + if ( data[ index ] && data[ index ].stop ) { + stopQueue( data[ index ] ); + } + } else { + for ( index in data ) { + if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { + stopQueue( data[ index ] ); + } + } + } + + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && + ( type == null || timers[ index ].queue === type ) ) { + + timers[ index ].anim.stop( gotoEnd ); + dequeue = false; + timers.splice( index, 1 ); + } + } + + // Start the next in the queue if the last step wasn't forced. + // Timers currently will call their complete callbacks, which + // will dequeue but only if they were gotoEnd. + if ( dequeue || !gotoEnd ) { + jQuery.dequeue( this, type ); + } + } ); + }, + finish: function( type ) { + if ( type !== false ) { + type = type || "fx"; + } + return this.each( function() { + var index, + data = dataPriv.get( this ), + queue = data[ type + "queue" ], + hooks = data[ type + "queueHooks" ], + timers = jQuery.timers, + length = queue ? 
queue.length : 0; + + // Enable finishing flag on private data + data.finish = true; + + // Empty the queue first + jQuery.queue( this, type, [] ); + + if ( hooks && hooks.stop ) { + hooks.stop.call( this, true ); + } + + // Look for any active animations, and finish them + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && timers[ index ].queue === type ) { + timers[ index ].anim.stop( true ); + timers.splice( index, 1 ); + } + } + + // Look for any animations in the old queue and finish them + for ( index = 0; index < length; index++ ) { + if ( queue[ index ] && queue[ index ].finish ) { + queue[ index ].finish.call( this ); + } + } + + // Turn off finishing flag + delete data.finish; + } ); + } +} ); + +jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { + var cssFn = jQuery.fn[ name ]; + jQuery.fn[ name ] = function( speed, easing, callback ) { + return speed == null || typeof speed === "boolean" ? + cssFn.apply( this, arguments ) : + this.animate( genFx( name, true ), speed, easing, callback ); + }; +} ); + +// Generate shortcuts for custom animations +jQuery.each( { + slideDown: genFx( "show" ), + slideUp: genFx( "hide" ), + slideToggle: genFx( "toggle" ), + fadeIn: { opacity: "show" }, + fadeOut: { opacity: "hide" }, + fadeToggle: { opacity: "toggle" } +}, function( name, props ) { + jQuery.fn[ name ] = function( speed, easing, callback ) { + return this.animate( props, speed, easing, callback ); + }; +} ); + +jQuery.timers = []; +jQuery.fx.tick = function() { + var timer, + i = 0, + timers = jQuery.timers; + + fxNow = jQuery.now(); + + for ( ; i < timers.length; i++ ) { + timer = timers[ i ]; + + // Run the timer and safely remove it when done (allowing for external removal) + if ( !timer() && timers[ i ] === timer ) { + timers.splice( i--, 1 ); + } + } + + if ( !timers.length ) { + jQuery.fx.stop(); + } + fxNow = undefined; +}; + +jQuery.fx.timer = function( timer ) { + jQuery.timers.push( timer ); + jQuery.fx.start(); +}; + +jQuery.fx.interval = 13; +jQuery.fx.start = function() { + if ( inProgress ) { + return; + } + + inProgress = true; + schedule(); +}; + +jQuery.fx.stop = function() { + inProgress = null; +}; + +jQuery.fx.speeds = { + slow: 600, + fast: 200, + + // Default speed + _default: 400 +}; + + +// Based off of the plugin by Clint Helfers, with permission. +// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ +jQuery.fn.delay = function( time, type ) { + time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; + type = type || "fx"; + + return this.queue( type, function( next, hooks ) { + var timeout = window.setTimeout( next, time ); + hooks.stop = function() { + window.clearTimeout( timeout ); + }; + } ); +}; + + +( function() { + var input = document.createElement( "input" ), + select = document.createElement( "select" ), + opt = select.appendChild( document.createElement( "option" ) ); + + input.type = "checkbox"; + + // Support: Android <=4.3 only + // Default value for a checkbox should be "on" + support.checkOn = input.value !== ""; + + // Support: IE <=11 only + // Must access selectedIndex to make default options select + support.optSelected = opt.selected; + + // Support: IE <=11 only + // An input loses its value after becoming a radio + input = document.createElement( "input" ); + input.value = "t"; + input.type = "radio"; + support.radioValue = input.value === "t"; +} )(); + + +var boolHook, + attrHandle = jQuery.expr.attrHandle; + +jQuery.fn.extend( { + attr: function( name, value ) { + return access( this, jQuery.attr, name, value, arguments.length > 1 ); + }, + + removeAttr: function( name ) { + return this.each( function() { + jQuery.removeAttr( this, name ); + } ); + } +} ); + +jQuery.extend( { + attr: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set attributes on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + // Fallback to prop when attributes are not supported + if ( typeof elem.getAttribute === "undefined" ) { + return jQuery.prop( elem, name, value ); + } + + // Attribute hooks are determined by the lowercase version + // Grab necessary hook if one is defined + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + hooks = jQuery.attrHooks[ name.toLowerCase() ] || + ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); + } + + if ( value !== undefined ) { + if ( value === null ) { + jQuery.removeAttr( elem, name ); + return; + } + + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + elem.setAttribute( name, value + "" ); + return value; + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + ret = jQuery.find.attr( elem, name ); + + // Non-existent attributes return null, we normalize to undefined + return ret == null ? 
undefined : ret; + }, + + attrHooks: { + type: { + set: function( elem, value ) { + if ( !support.radioValue && value === "radio" && + nodeName( elem, "input" ) ) { + var val = elem.value; + elem.setAttribute( "type", value ); + if ( val ) { + elem.value = val; + } + return value; + } + } + } + }, + + removeAttr: function( elem, value ) { + var name, + i = 0, + + // Attribute names can contain non-HTML whitespace characters + // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 + attrNames = value && value.match( rnothtmlwhite ); + + if ( attrNames && elem.nodeType === 1 ) { + while ( ( name = attrNames[ i++ ] ) ) { + elem.removeAttribute( name ); + } + } + } +} ); + +// Hooks for boolean attributes +boolHook = { + set: function( elem, value, name ) { + if ( value === false ) { + + // Remove boolean attributes when set to false + jQuery.removeAttr( elem, name ); + } else { + elem.setAttribute( name, name ); + } + return name; + } +}; + +jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { + var getter = attrHandle[ name ] || jQuery.find.attr; + + attrHandle[ name ] = function( elem, name, isXML ) { + var ret, handle, + lowercaseName = name.toLowerCase(); + + if ( !isXML ) { + + // Avoid an infinite loop by temporarily removing this function from the getter + handle = attrHandle[ lowercaseName ]; + attrHandle[ lowercaseName ] = ret; + ret = getter( elem, name, isXML ) != null ? + lowercaseName : + null; + attrHandle[ lowercaseName ] = handle; + } + return ret; + }; +} ); + + + + +var rfocusable = /^(?:input|select|textarea|button)$/i, + rclickable = /^(?:a|area)$/i; + +jQuery.fn.extend( { + prop: function( name, value ) { + return access( this, jQuery.prop, name, value, arguments.length > 1 ); + }, + + removeProp: function( name ) { + return this.each( function() { + delete this[ jQuery.propFix[ name ] || name ]; + } ); + } +} ); + +jQuery.extend( { + prop: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set properties on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + + // Fix name and attach hooks + name = jQuery.propFix[ name ] || name; + hooks = jQuery.propHooks[ name ]; + } + + if ( value !== undefined ) { + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + return ( elem[ name ] = value ); + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + return elem[ name ]; + }, + + propHooks: { + tabIndex: { + get: function( elem ) { + + // Support: IE <=9 - 11 only + // elem.tabIndex doesn't always return the + // correct value when it hasn't been explicitly set + // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + // Use proper attribute retrieval(#12072) + var tabindex = jQuery.find.attr( elem, "tabindex" ); + + if ( tabindex ) { + return parseInt( tabindex, 10 ); + } + + if ( + rfocusable.test( elem.nodeName ) || + rclickable.test( elem.nodeName ) && + elem.href + ) { + return 0; + } + + return -1; + } + } + }, + + propFix: { + "for": "htmlFor", + "class": "className" + } +} ); + +// Support: IE <=11 only +// Accessing the selectedIndex property +// forces the browser to respect setting selected +// on the option +// The getter ensures a default option is selected +// when in an 
optgroup +// eslint rule "no-unused-expressions" is disabled for this code +// since it considers such accessions noop +if ( !support.optSelected ) { + jQuery.propHooks.selected = { + get: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent && parent.parentNode ) { + parent.parentNode.selectedIndex; + } + return null; + }, + set: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent ) { + parent.selectedIndex; + + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + } + }; +} + +jQuery.each( [ + "tabIndex", + "readOnly", + "maxLength", + "cellSpacing", + "cellPadding", + "rowSpan", + "colSpan", + "useMap", + "frameBorder", + "contentEditable" +], function() { + jQuery.propFix[ this.toLowerCase() ] = this; +} ); + + + + + // Strip and collapse whitespace according to HTML spec + // https://html.spec.whatwg.org/multipage/infrastructure.html#strip-and-collapse-whitespace + function stripAndCollapse( value ) { + var tokens = value.match( rnothtmlwhite ) || []; + return tokens.join( " " ); + } + + +function getClass( elem ) { + return elem.getAttribute && elem.getAttribute( "class" ) || ""; +} + +jQuery.fn.extend( { + addClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( jQuery.isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( typeof value === "string" && value ) { + classes = value.match( rnothtmlwhite ) || []; + + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + if ( cur.indexOf( " " + clazz + " " ) < 0 ) { + cur += clazz + " "; + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + removeClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( jQuery.isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( !arguments.length ) { + return this.attr( "class", "" ); + } + + if ( typeof value === "string" && value ) { + classes = value.match( rnothtmlwhite ) || []; + + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + + // This expression is here for better compressibility (see addClass) + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + + // Remove *all* instances + while ( cur.indexOf( " " + clazz + " " ) > -1 ) { + cur = cur.replace( " " + clazz + " ", " " ); + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + toggleClass: function( value, stateVal ) { + var type = typeof value; + + if ( typeof stateVal === "boolean" && type === "string" ) { + return stateVal ? 
this.addClass( value ) : this.removeClass( value ); + } + + if ( jQuery.isFunction( value ) ) { + return this.each( function( i ) { + jQuery( this ).toggleClass( + value.call( this, i, getClass( this ), stateVal ), + stateVal + ); + } ); + } + + return this.each( function() { + var className, i, self, classNames; + + if ( type === "string" ) { + + // Toggle individual class names + i = 0; + self = jQuery( this ); + classNames = value.match( rnothtmlwhite ) || []; + + while ( ( className = classNames[ i++ ] ) ) { + + // Check each className given, space separated list + if ( self.hasClass( className ) ) { + self.removeClass( className ); + } else { + self.addClass( className ); + } + } + + // Toggle whole class name + } else if ( value === undefined || type === "boolean" ) { + className = getClass( this ); + if ( className ) { + + // Store className if set + dataPriv.set( this, "__className__", className ); + } + + // If the element has a class name or if we're passed `false`, + // then remove the whole classname (if there was one, the above saved it). + // Otherwise bring back whatever was previously saved (if anything), + // falling back to the empty string if nothing was stored. + if ( this.setAttribute ) { + this.setAttribute( "class", + className || value === false ? + "" : + dataPriv.get( this, "__className__" ) || "" + ); + } + } + } ); + }, + + hasClass: function( selector ) { + var className, elem, + i = 0; + + className = " " + selector + " "; + while ( ( elem = this[ i++ ] ) ) { + if ( elem.nodeType === 1 && + ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { + return true; + } + } + + return false; + } +} ); + + + + +var rreturn = /\r/g; + +jQuery.fn.extend( { + val: function( value ) { + var hooks, ret, isFunction, + elem = this[ 0 ]; + + if ( !arguments.length ) { + if ( elem ) { + hooks = jQuery.valHooks[ elem.type ] || + jQuery.valHooks[ elem.nodeName.toLowerCase() ]; + + if ( hooks && + "get" in hooks && + ( ret = hooks.get( elem, "value" ) ) !== undefined + ) { + return ret; + } + + ret = elem.value; + + // Handle most common string cases + if ( typeof ret === "string" ) { + return ret.replace( rreturn, "" ); + } + + // Handle cases where value is null/undef or number + return ret == null ? "" : ret; + } + + return; + } + + isFunction = jQuery.isFunction( value ); + + return this.each( function( i ) { + var val; + + if ( this.nodeType !== 1 ) { + return; + } + + if ( isFunction ) { + val = value.call( this, i, jQuery( this ).val() ); + } else { + val = value; + } + + // Treat null/undefined as ""; convert numbers to string + if ( val == null ) { + val = ""; + + } else if ( typeof val === "number" ) { + val += ""; + + } else if ( Array.isArray( val ) ) { + val = jQuery.map( val, function( value ) { + return value == null ? "" : value + ""; + } ); + } + + hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; + + // If set returns undefined, fall back to normal setting + if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { + this.value = val; + } + } ); + } +} ); + +jQuery.extend( { + valHooks: { + option: { + get: function( elem ) { + + var val = jQuery.find.attr( elem, "value" ); + return val != null ? 
+ val : + + // Support: IE <=10 - 11 only + // option.text throws exceptions (#14686, #14858) + // Strip and collapse whitespace + // https://html.spec.whatwg.org/#strip-and-collapse-whitespace + stripAndCollapse( jQuery.text( elem ) ); + } + }, + select: { + get: function( elem ) { + var value, option, i, + options = elem.options, + index = elem.selectedIndex, + one = elem.type === "select-one", + values = one ? null : [], + max = one ? index + 1 : options.length; + + if ( index < 0 ) { + i = max; + + } else { + i = one ? index : 0; + } + + // Loop through all the selected options + for ( ; i < max; i++ ) { + option = options[ i ]; + + // Support: IE <=9 only + // IE8-9 doesn't update selected after form reset (#2551) + if ( ( option.selected || i === index ) && + + // Don't return options that are disabled or in a disabled optgroup + !option.disabled && + ( !option.parentNode.disabled || + !nodeName( option.parentNode, "optgroup" ) ) ) { + + // Get the specific value for the option + value = jQuery( option ).val(); + + // We don't need an array for one selects + if ( one ) { + return value; + } + + // Multi-Selects return an array + values.push( value ); + } + } + + return values; + }, + + set: function( elem, value ) { + var optionSet, option, + options = elem.options, + values = jQuery.makeArray( value ), + i = options.length; + + while ( i-- ) { + option = options[ i ]; + + /* eslint-disable no-cond-assign */ + + if ( option.selected = + jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 + ) { + optionSet = true; + } + + /* eslint-enable no-cond-assign */ + } + + // Force browsers to behave consistently when non-matching value is set + if ( !optionSet ) { + elem.selectedIndex = -1; + } + return values; + } + } + } +} ); + +// Radios and checkboxes getter/setter +jQuery.each( [ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = { + set: function( elem, value ) { + if ( Array.isArray( value ) ) { + return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); + } + } + }; + if ( !support.checkOn ) { + jQuery.valHooks[ this ].get = function( elem ) { + return elem.getAttribute( "value" ) === null ? "on" : elem.value; + }; + } +} ); + + + + +// Return jQuery for attributes-only inclusion + + +var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/; + +jQuery.extend( jQuery.event, { + + trigger: function( event, data, elem, onlyHandlers ) { + + var i, cur, tmp, bubbleType, ontype, handle, special, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; + + cur = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf( "." ) > -1 ) { + + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split( "." ); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf( ":" ) < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? + event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 
2 : 3; + event.namespace = namespaces.join( "." ); + event.rnamespace = event.namespace ? + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? + [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === ( elem.ownerDocument || document ) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { + + event.type = i > 1 ? + bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && + dataPriv.get( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( ( !special._default || + special._default.apply( eventPath.pop(), data ) === false ) && + acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name as the event. 
+ // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && jQuery.isFunction( elem[ type ] ) && !jQuery.isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + elem[ type ](); + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + // Piggyback on a donor event to simulate a different one + // Used only for `focus(in | out)` events + simulate: function( type, elem, event ) { + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true + } + ); + + jQuery.event.trigger( e, null, elem ); + } + +} ); + +jQuery.fn.extend( { + + trigger: function( type, data ) { + return this.each( function() { + jQuery.event.trigger( type, data, this ); + } ); + }, + triggerHandler: function( type, data ) { + var elem = this[ 0 ]; + if ( elem ) { + return jQuery.event.trigger( type, data, elem, true ); + } + } +} ); + + +jQuery.each( ( "blur focus focusin focusout resize scroll click dblclick " + + "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + + "change select submit keydown keypress keyup contextmenu" ).split( " " ), + function( i, name ) { + + // Handle event binding + jQuery.fn[ name ] = function( data, fn ) { + return arguments.length > 0 ? + this.on( name, null, data, fn ) : + this.trigger( name ); + }; +} ); + +jQuery.fn.extend( { + hover: function( fnOver, fnOut ) { + return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); + } +} ); + + + + +support.focusin = "onfocusin" in window; + + +// Support: Firefox <=44 +// Firefox doesn't have focus(in | out) events +// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 +// +// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 +// focus(in | out) events fire after focus & blur events, +// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order +// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 +if ( !support.focusin ) { + jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + var doc = this.ownerDocument || this, + attaches = dataPriv.access( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this, + attaches = dataPriv.access( doc, fix ) - 1; + + if ( !attaches ) { + doc.removeEventListener( orig, handler, true ); + dataPriv.remove( doc, fix ); + + } else { + dataPriv.access( doc, fix, attaches ); + } + } + }; + } ); +} +var location = window.location; + +var nonce = jQuery.now(); + +var rquery = ( /\?/ ); + + + +// Cross-browser xml parsing +jQuery.parseXML = function( data ) { + var xml; + if ( !data || typeof data !== "string" ) { + return null; + } + + // Support: IE 9 - 11 only + // IE throws on parseFromString with invalid input. 
+ try { + xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); + } catch ( e ) { + xml = undefined; + } + + if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { + jQuery.error( "Invalid XML: " + data ); + } + return xml; +}; + + +var + rbracket = /\[\]$/, + rCRLF = /\r?\n/g, + rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, + rsubmittable = /^(?:input|select|textarea|keygen)/i; + +function buildParams( prefix, obj, traditional, add ) { + var name; + + if ( Array.isArray( obj ) ) { + + // Serialize array item. + jQuery.each( obj, function( i, v ) { + if ( traditional || rbracket.test( prefix ) ) { + + // Treat each array item as a scalar. + add( prefix, v ); + + } else { + + // Item is non-scalar (array or object), encode its numeric index. + buildParams( + prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", + v, + traditional, + add + ); + } + } ); + + } else if ( !traditional && jQuery.type( obj ) === "object" ) { + + // Serialize object item. + for ( name in obj ) { + buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); + } + + } else { + + // Serialize scalar item. + add( prefix, obj ); + } +} + +// Serialize an array of form elements or a set of +// key/values into a query string +jQuery.param = function( a, traditional ) { + var prefix, + s = [], + add = function( key, valueOrFunction ) { + + // If value is a function, invoke it and use its return value + var value = jQuery.isFunction( valueOrFunction ) ? + valueOrFunction() : + valueOrFunction; + + s[ s.length ] = encodeURIComponent( key ) + "=" + + encodeURIComponent( value == null ? "" : value ); + }; + + // If an array was passed in, assume that it is an array of form elements. + if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { + + // Serialize the form elements + jQuery.each( a, function() { + add( this.name, this.value ); + } ); + + } else { + + // If traditional, encode the "old" way (the way 1.3.2 or older + // did it), otherwise encode params recursively. + for ( prefix in a ) { + buildParams( prefix, a[ prefix ], traditional, add ); + } + } + + // Return the resulting serialization + return s.join( "&" ); +}; + +jQuery.fn.extend( { + serialize: function() { + return jQuery.param( this.serializeArray() ); + }, + serializeArray: function() { + return this.map( function() { + + // Can add propHook for "elements" to filter or add form elements + var elements = jQuery.prop( this, "elements" ); + return elements ? 
jQuery.makeArray( elements ) : this; + } ) + .filter( function() { + var type = this.type; + + // Use .is( ":disabled" ) so that fieldset[disabled] works + return this.name && !jQuery( this ).is( ":disabled" ) && + rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && + ( this.checked || !rcheckableType.test( type ) ); + } ) + .map( function( i, elem ) { + var val = jQuery( this ).val(); + + if ( val == null ) { + return null; + } + + if ( Array.isArray( val ) ) { + return jQuery.map( val, function( val ) { + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ); + } + + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ).get(); + } +} ); + + +var + r20 = /%20/g, + rhash = /#.*$/, + rantiCache = /([?&])_=[^&]*/, + rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, + + // #7653, #8125, #8152: local protocol detection + rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, + rnoContent = /^(?:GET|HEAD)$/, + rprotocol = /^\/\//, + + /* Prefilters + * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) + * 2) These are called: + * - BEFORE asking for a transport + * - AFTER param serialization (s.data is a string if s.processData is true) + * 3) key is the dataType + * 4) the catchall symbol "*" can be used + * 5) execution will start with transport dataType and THEN continue down to "*" if needed + */ + prefilters = {}, + + /* Transports bindings + * 1) key is the dataType + * 2) the catchall symbol "*" can be used + * 3) selection will start with transport dataType and THEN go to "*" if needed + */ + transports = {}, + + // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression + allTypes = "*/".concat( "*" ), + + // Anchor tag for parsing the document origin + originAnchor = document.createElement( "a" ); + originAnchor.href = location.href; + +// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport +function addToPrefiltersOrTransports( structure ) { + + // dataTypeExpression is optional and defaults to "*" + return function( dataTypeExpression, func ) { + + if ( typeof dataTypeExpression !== "string" ) { + func = dataTypeExpression; + dataTypeExpression = "*"; + } + + var dataType, + i = 0, + dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; + + if ( jQuery.isFunction( func ) ) { + + // For each dataType in the dataTypeExpression + while ( ( dataType = dataTypes[ i++ ] ) ) { + + // Prepend if requested + if ( dataType[ 0 ] === "+" ) { + dataType = dataType.slice( 1 ) || "*"; + ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); + + // Otherwise append + } else { + ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); + } + } + } + }; +} + +// Base inspection function for prefilters and transports +function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { + + var inspected = {}, + seekingTransport = ( structure === transports ); + + function inspect( dataType ) { + var selected; + inspected[ dataType ] = true; + jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { + var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); + if ( typeof dataTypeOrTransport === "string" && + !seekingTransport && !inspected[ dataTypeOrTransport ] ) { + + options.dataTypes.unshift( dataTypeOrTransport ); + inspect( dataTypeOrTransport ); + return false; + } else if ( seekingTransport ) { + return !( selected = dataTypeOrTransport 
); + } + } ); + return selected; + } + + return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); +} + +// A special extend for ajax options +// that takes "flat" options (not to be deep extended) +// Fixes #9887 +function ajaxExtend( target, src ) { + var key, deep, + flatOptions = jQuery.ajaxSettings.flatOptions || {}; + + for ( key in src ) { + if ( src[ key ] !== undefined ) { + ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; + } + } + if ( deep ) { + jQuery.extend( true, target, deep ); + } + + return target; +} + +/* Handles responses to an ajax request: + * - finds the right dataType (mediates between content-type and expected dataType) + * - returns the corresponding response + */ +function ajaxHandleResponses( s, jqXHR, responses ) { + + var ct, type, finalDataType, firstDataType, + contents = s.contents, + dataTypes = s.dataTypes; + + // Remove auto dataType and get content-type in the process + while ( dataTypes[ 0 ] === "*" ) { + dataTypes.shift(); + if ( ct === undefined ) { + ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); + } + } + + // Check if we're dealing with a known content-type + if ( ct ) { + for ( type in contents ) { + if ( contents[ type ] && contents[ type ].test( ct ) ) { + dataTypes.unshift( type ); + break; + } + } + } + + // Check to see if we have a response for the expected dataType + if ( dataTypes[ 0 ] in responses ) { + finalDataType = dataTypes[ 0 ]; + } else { + + // Try convertible dataTypes + for ( type in responses ) { + if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { + finalDataType = type; + break; + } + if ( !firstDataType ) { + firstDataType = type; + } + } + + // Or just use first one + finalDataType = finalDataType || firstDataType; + } + + // If we found a dataType + // We add the dataType to the list if needed + // and return the corresponding response + if ( finalDataType ) { + if ( finalDataType !== dataTypes[ 0 ] ) { + dataTypes.unshift( finalDataType ); + } + return responses[ finalDataType ]; + } +} + +/* Chain conversions given the request and the original response + * Also sets the responseXXX fields on the jqXHR instance + */ +function ajaxConvert( s, response, jqXHR, isSuccess ) { + var conv2, current, conv, tmp, prev, + converters = {}, + + // Work with a copy of dataTypes in case we need to modify it for conversion + dataTypes = s.dataTypes.slice(); + + // Create converters map with lowercased keys + if ( dataTypes[ 1 ] ) { + for ( conv in s.converters ) { + converters[ conv.toLowerCase() ] = s.converters[ conv ]; + } + } + + current = dataTypes.shift(); + + // Convert to each sequential dataType + while ( current ) { + + if ( s.responseFields[ current ] ) { + jqXHR[ s.responseFields[ current ] ] = response; + } + + // Apply the dataFilter if provided + if ( !prev && isSuccess && s.dataFilter ) { + response = s.dataFilter( response, s.dataType ); + } + + prev = current; + current = dataTypes.shift(); + + if ( current ) { + + // There's only work to do if current dataType is non-auto + if ( current === "*" ) { + + current = prev; + + // Convert response if prev dataType is non-auto and differs from current + } else if ( prev !== "*" && prev !== current ) { + + // Seek a direct converter + conv = converters[ prev + " " + current ] || converters[ "* " + current ]; + + // If none found, seek a pair + if ( !conv ) { + for ( conv2 in converters ) { + + // If conv2 outputs current + tmp = conv2.split( " " ); + if ( tmp[ 1 ] === current ) { + + 
// If prev can be converted to accepted input + conv = converters[ prev + " " + tmp[ 0 ] ] || + converters[ "* " + tmp[ 0 ] ]; + if ( conv ) { + + // Condense equivalence converters + if ( conv === true ) { + conv = converters[ conv2 ]; + + // Otherwise, insert the intermediate dataType + } else if ( converters[ conv2 ] !== true ) { + current = tmp[ 0 ]; + dataTypes.unshift( tmp[ 1 ] ); + } + break; + } + } + } + } + + // Apply converter (if not an equivalence) + if ( conv !== true ) { + + // Unless errors are allowed to bubble, catch and return them + if ( conv && s.throws ) { + response = conv( response ); + } else { + try { + response = conv( response ); + } catch ( e ) { + return { + state: "parsererror", + error: conv ? e : "No conversion from " + prev + " to " + current + }; + } + } + } + } + } + } + + return { state: "success", data: response }; +} + +jQuery.extend( { + + // Counter for holding the number of active queries + active: 0, + + // Last-Modified header cache for next request + lastModified: {}, + etag: {}, + + ajaxSettings: { + url: location.href, + type: "GET", + isLocal: rlocalProtocol.test( location.protocol ), + global: true, + processData: true, + async: true, + contentType: "application/x-www-form-urlencoded; charset=UTF-8", + + /* + timeout: 0, + data: null, + dataType: null, + username: null, + password: null, + cache: null, + throws: false, + traditional: false, + headers: {}, + */ + + accepts: { + "*": allTypes, + text: "text/plain", + html: "text/html", + xml: "application/xml, text/xml", + json: "application/json, text/javascript" + }, + + contents: { + xml: /\bxml\b/, + html: /\bhtml/, + json: /\bjson\b/ + }, + + responseFields: { + xml: "responseXML", + text: "responseText", + json: "responseJSON" + }, + + // Data converters + // Keys separate source (or catchall "*") and destination types with a single space + converters: { + + // Convert anything to text + "* text": String, + + // Text to html (true = no transformation) + "text html": true, + + // Evaluate text as a json expression + "text json": JSON.parse, + + // Parse text as xml + "text xml": jQuery.parseXML + }, + + // For options that shouldn't be deep extended: + // you can add your own custom options here if + // and when you create one that shouldn't be + // deep extended (see ajaxExtend) + flatOptions: { + url: true, + context: true + } + }, + + // Creates a full fledged settings object into target + // with both ajaxSettings and settings fields. + // If target is omitted, writes into ajaxSettings. + ajaxSetup: function( target, settings ) { + return settings ? 
+ + // Building a settings object + ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : + + // Extending ajaxSettings + ajaxExtend( jQuery.ajaxSettings, target ); + }, + + ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), + ajaxTransport: addToPrefiltersOrTransports( transports ), + + // Main method + ajax: function( url, options ) { + + // If url is an object, simulate pre-1.5 signature + if ( typeof url === "object" ) { + options = url; + url = undefined; + } + + // Force options to be an object + options = options || {}; + + var transport, + + // URL without anti-cache param + cacheURL, + + // Response headers + responseHeadersString, + responseHeaders, + + // timeout handle + timeoutTimer, + + // Url cleanup var + urlAnchor, + + // Request state (becomes false upon send and true upon completion) + completed, + + // To know if global events are to be dispatched + fireGlobals, + + // Loop variable + i, + + // uncached part of the url + uncached, + + // Create the final options object + s = jQuery.ajaxSetup( {}, options ), + + // Callbacks context + callbackContext = s.context || s, + + // Context for global events is callbackContext if it is a DOM node or jQuery collection + globalEventContext = s.context && + ( callbackContext.nodeType || callbackContext.jquery ) ? + jQuery( callbackContext ) : + jQuery.event, + + // Deferreds + deferred = jQuery.Deferred(), + completeDeferred = jQuery.Callbacks( "once memory" ), + + // Status-dependent callbacks + statusCode = s.statusCode || {}, + + // Headers (they are sent all at once) + requestHeaders = {}, + requestHeadersNames = {}, + + // Default abort message + strAbort = "canceled", + + // Fake xhr + jqXHR = { + readyState: 0, + + // Builds headers hashtable if needed + getResponseHeader: function( key ) { + var match; + if ( completed ) { + if ( !responseHeaders ) { + responseHeaders = {}; + while ( ( match = rheaders.exec( responseHeadersString ) ) ) { + responseHeaders[ match[ 1 ].toLowerCase() ] = match[ 2 ]; + } + } + match = responseHeaders[ key.toLowerCase() ]; + } + return match == null ? null : match; + }, + + // Raw string + getAllResponseHeaders: function() { + return completed ? 
responseHeadersString : null; + }, + + // Caches the header + setRequestHeader: function( name, value ) { + if ( completed == null ) { + name = requestHeadersNames[ name.toLowerCase() ] = + requestHeadersNames[ name.toLowerCase() ] || name; + requestHeaders[ name ] = value; + } + return this; + }, + + // Overrides response content-type header + overrideMimeType: function( type ) { + if ( completed == null ) { + s.mimeType = type; + } + return this; + }, + + // Status-dependent callbacks + statusCode: function( map ) { + var code; + if ( map ) { + if ( completed ) { + + // Execute the appropriate callbacks + jqXHR.always( map[ jqXHR.status ] ); + } else { + + // Lazy-add the new callbacks in a way that preserves old ones + for ( code in map ) { + statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; + } + } + } + return this; + }, + + // Cancel the request + abort: function( statusText ) { + var finalText = statusText || strAbort; + if ( transport ) { + transport.abort( finalText ); + } + done( 0, finalText ); + return this; + } + }; + + // Attach deferreds + deferred.promise( jqXHR ); + + // Add protocol if not provided (prefilters might expect it) + // Handle falsy url in the settings object (#10093: consistency with old signature) + // We also use the url parameter if available + s.url = ( ( url || s.url || location.href ) + "" ) + .replace( rprotocol, location.protocol + "//" ); + + // Alias method option to type as per ticket #12004 + s.type = options.method || options.type || s.method || s.type; + + // Extract dataTypes list + s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; + + // A cross-domain request is in order when the origin doesn't match the current origin. + if ( s.crossDomain == null ) { + urlAnchor = document.createElement( "a" ); + + // Support: IE <=8 - 11, Edge 12 - 13 + // IE throws exception on accessing the href property if url is malformed, + // e.g. 
http://example.com:80x/ + try { + urlAnchor.href = s.url; + + // Support: IE <=8 - 11 only + // Anchor's host property isn't correctly set when s.url is relative + urlAnchor.href = urlAnchor.href; + s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== + urlAnchor.protocol + "//" + urlAnchor.host; + } catch ( e ) { + + // If there is an error parsing the URL, assume it is crossDomain, + // it can be rejected by the transport if it is invalid + s.crossDomain = true; + } + } + + // Convert data if not already a string + if ( s.data && s.processData && typeof s.data !== "string" ) { + s.data = jQuery.param( s.data, s.traditional ); + } + + // Apply prefilters + inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); + + // If request was aborted inside a prefilter, stop there + if ( completed ) { + return jqXHR; + } + + // We can fire global events as of now if asked to + // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) + fireGlobals = jQuery.event && s.global; + + // Watch for a new set of requests + if ( fireGlobals && jQuery.active++ === 0 ) { + jQuery.event.trigger( "ajaxStart" ); + } + + // Uppercase the type + s.type = s.type.toUpperCase(); + + // Determine if request has content + s.hasContent = !rnoContent.test( s.type ); + + // Save the URL in case we're toying with the If-Modified-Since + // and/or If-None-Match header later on + // Remove hash to simplify url manipulation + cacheURL = s.url.replace( rhash, "" ); + + // More options handling for requests with no content + if ( !s.hasContent ) { + + // Remember the hash so we can put it back + uncached = s.url.slice( cacheURL.length ); + + // If data is available, append data to url + if ( s.data ) { + cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; + + // #9682: remove data so that it's not used in an eventual retry + delete s.data; + } + + // Add or update anti-cache param if needed + if ( s.cache === false ) { + cacheURL = cacheURL.replace( rantiCache, "$1" ); + uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; + } + + // Put hash and anti-cache on the URL that will be requested (gh-1732) + s.url = cacheURL + uncached; + + // Change '%20' to '+' if this is encoded form body content (gh-2658) + } else if ( s.data && s.processData && + ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { + s.data = s.data.replace( r20, "+" ); + } + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. + if ( s.ifModified ) { + if ( jQuery.lastModified[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); + } + if ( jQuery.etag[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); + } + } + + // Set the correct header, if data is being sent + if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { + jqXHR.setRequestHeader( "Content-Type", s.contentType ); + } + + // Set the Accepts header for the server, depending on the dataType + jqXHR.setRequestHeader( + "Accept", + s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? + s.accepts[ s.dataTypes[ 0 ] ] + + ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : + s.accepts[ "*" ] + ); + + // Check for headers option + for ( i in s.headers ) { + jqXHR.setRequestHeader( i, s.headers[ i ] ); + } + + // Allow custom headers/mimetypes and early abort + if ( s.beforeSend && + ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { + + // Abort if not done already and return + return jqXHR.abort(); + } + + // Aborting is no longer a cancellation + strAbort = "abort"; + + // Install callbacks on deferreds + completeDeferred.add( s.complete ); + jqXHR.done( s.success ); + jqXHR.fail( s.error ); + + // Get transport + transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); + + // If no transport, we auto-abort + if ( !transport ) { + done( -1, "No Transport" ); + } else { + jqXHR.readyState = 1; + + // Send global event + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); + } + + // If request was aborted inside ajaxSend, stop there + if ( completed ) { + return jqXHR; + } + + // Timeout + if ( s.async && s.timeout > 0 ) { + timeoutTimer = window.setTimeout( function() { + jqXHR.abort( "timeout" ); + }, s.timeout ); + } + + try { + completed = false; + transport.send( requestHeaders, done ); + } catch ( e ) { + + // Rethrow post-completion exceptions + if ( completed ) { + throw e; + } + + // Propagate others as results + done( -1, e ); + } + } + + // Callback for when everything is done + function done( status, nativeStatusText, responses, headers ) { + var isSuccess, success, error, response, modified, + statusText = nativeStatusText; + + // Ignore repeat invocations + if ( completed ) { + return; + } + + completed = true; + + // Clear timeout if it exists + if ( timeoutTimer ) { + window.clearTimeout( timeoutTimer ); + } + + // Dereference transport for early garbage collection + // (no matter how long the jqXHR object will be used) + transport = undefined; + + // Cache response headers + responseHeadersString = headers || ""; + + // Set readyState + jqXHR.readyState = status > 0 ? 4 : 0; + + // Determine if successful + isSuccess = status >= 200 && status < 300 || status === 304; + + // Get response data + if ( responses ) { + response = ajaxHandleResponses( s, jqXHR, responses ); + } + + // Convert no matter what (that way responseXXX fields are always set) + response = ajaxConvert( s, response, jqXHR, isSuccess ); + + // If successful, handle type chaining + if ( isSuccess ) { + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + modified = jqXHR.getResponseHeader( "Last-Modified" ); + if ( modified ) { + jQuery.lastModified[ cacheURL ] = modified; + } + modified = jqXHR.getResponseHeader( "etag" ); + if ( modified ) { + jQuery.etag[ cacheURL ] = modified; + } + } + + // if no content + if ( status === 204 || s.type === "HEAD" ) { + statusText = "nocontent"; + + // if not modified + } else if ( status === 304 ) { + statusText = "notmodified"; + + // If we have data, let's convert it + } else { + statusText = response.state; + success = response.data; + error = response.error; + isSuccess = !error; + } + } else { + + // Extract error from statusText and normalize for non-aborts + error = statusText; + if ( status || !statusText ) { + statusText = "error"; + if ( status < 0 ) { + status = 0; + } + } + } + + // Set data for the fake xhr object + jqXHR.status = status; + jqXHR.statusText = ( nativeStatusText || statusText ) + ""; + + // Success/Error + if ( isSuccess ) { + deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); + } else { + deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); + } + + // Status-dependent callbacks + jqXHR.statusCode( statusCode ); + statusCode = undefined; + + if ( fireGlobals ) { + globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", + [ jqXHR, s, isSuccess ? success : error ] ); + } + + // Complete + completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); + + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); + + // Handle the global AJAX counter + if ( !( --jQuery.active ) ) { + jQuery.event.trigger( "ajaxStop" ); + } + } + } + + return jqXHR; + }, + + getJSON: function( url, data, callback ) { + return jQuery.get( url, data, callback, "json" ); + }, + + getScript: function( url, callback ) { + return jQuery.get( url, undefined, callback, "script" ); + } +} ); + +jQuery.each( [ "get", "post" ], function( i, method ) { + jQuery[ method ] = function( url, data, callback, type ) { + + // Shift arguments if data argument was omitted + if ( jQuery.isFunction( data ) ) { + type = type || callback; + callback = data; + data = undefined; + } + + // The url can be an options object (which then must have .url) + return jQuery.ajax( jQuery.extend( { + url: url, + type: method, + dataType: type, + data: data, + success: callback + }, jQuery.isPlainObject( url ) && url ) ); + }; +} ); + + +jQuery._evalUrl = function( url ) { + return jQuery.ajax( { + url: url, + + // Make this explicit, since user can override this through ajaxSetup (#11264) + type: "GET", + dataType: "script", + cache: true, + async: false, + global: false, + "throws": true + } ); +}; + + +jQuery.fn.extend( { + wrapAll: function( html ) { + var wrap; + + if ( this[ 0 ] ) { + if ( jQuery.isFunction( html ) ) { + html = html.call( this[ 0 ] ); + } + + // The elements to wrap the target around + wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); + + if ( this[ 0 ].parentNode ) { + wrap.insertBefore( this[ 0 ] ); + } + + wrap.map( function() { + var elem = this; + + while ( elem.firstElementChild ) { + elem = elem.firstElementChild; + } + + return elem; + } ).append( this ); + } + + return this; + }, + + wrapInner: function( html ) { + if ( jQuery.isFunction( html ) ) { + return this.each( function( i ) { + jQuery( this ).wrapInner( html.call( this, i ) ); + } ); + } + + return this.each( function() { + var self = jQuery( this ), + contents = self.contents(); + + if ( contents.length ) { + 
contents.wrapAll( html ); + + } else { + self.append( html ); + } + } ); + }, + + wrap: function( html ) { + var isFunction = jQuery.isFunction( html ); + + return this.each( function( i ) { + jQuery( this ).wrapAll( isFunction ? html.call( this, i ) : html ); + } ); + }, + + unwrap: function( selector ) { + this.parent( selector ).not( "body" ).each( function() { + jQuery( this ).replaceWith( this.childNodes ); + } ); + return this; + } +} ); + + +jQuery.expr.pseudos.hidden = function( elem ) { + return !jQuery.expr.pseudos.visible( elem ); +}; +jQuery.expr.pseudos.visible = function( elem ) { + return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); +}; + + + + +jQuery.ajaxSettings.xhr = function() { + try { + return new window.XMLHttpRequest(); + } catch ( e ) {} +}; + +var xhrSuccessStatus = { + + // File protocol always yields status code 0, assume 200 + 0: 200, + + // Support: IE <=9 only + // #1450: sometimes IE returns 1223 when it should be 204 + 1223: 204 + }, + xhrSupported = jQuery.ajaxSettings.xhr(); + +support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); +support.ajax = xhrSupported = !!xhrSupported; + +jQuery.ajaxTransport( function( options ) { + var callback, errorCallback; + + // Cross domain only allowed if supported through XMLHttpRequest + if ( support.cors || xhrSupported && !options.crossDomain ) { + return { + send: function( headers, complete ) { + var i, + xhr = options.xhr(); + + xhr.open( + options.type, + options.url, + options.async, + options.username, + options.password + ); + + // Apply custom fields if provided + if ( options.xhrFields ) { + for ( i in options.xhrFields ) { + xhr[ i ] = options.xhrFields[ i ]; + } + } + + // Override mime type if needed + if ( options.mimeType && xhr.overrideMimeType ) { + xhr.overrideMimeType( options.mimeType ); + } + + // X-Requested-With header + // For cross-domain requests, seeing as conditions for a preflight are + // akin to a jigsaw puzzle, we simply never set it to be sure. + // (it can always be set on a per-request basis or even using ajaxSetup) + // For same-domain requests, won't change header if already provided. + if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { + headers[ "X-Requested-With" ] = "XMLHttpRequest"; + } + + // Set headers + for ( i in headers ) { + xhr.setRequestHeader( i, headers[ i ] ); + } + + // Callback + callback = function( type ) { + return function() { + if ( callback ) { + callback = errorCallback = xhr.onload = + xhr.onerror = xhr.onabort = xhr.onreadystatechange = null; + + if ( type === "abort" ) { + xhr.abort(); + } else if ( type === "error" ) { + + // Support: IE <=9 only + // On a manual native abort, IE9 throws + // errors on any property access that is not readyState + if ( typeof xhr.status !== "number" ) { + complete( 0, "error" ); + } else { + complete( + + // File: protocol always yields status 0; see #8605, #14207 + xhr.status, + xhr.statusText + ); + } + } else { + complete( + xhrSuccessStatus[ xhr.status ] || xhr.status, + xhr.statusText, + + // Support: IE <=9 only + // IE9 has no XHR2 but throws on binary (trac-11426) + // For XHR2 non-text, let the caller handle it (gh-2498) + ( xhr.responseType || "text" ) !== "text" || + typeof xhr.responseText !== "string" ? 
+ { binary: xhr.response } : + { text: xhr.responseText }, + xhr.getAllResponseHeaders() + ); + } + } + }; + }; + + // Listen to events + xhr.onload = callback(); + errorCallback = xhr.onerror = callback( "error" ); + + // Support: IE 9 only + // Use onreadystatechange to replace onabort + // to handle uncaught aborts + if ( xhr.onabort !== undefined ) { + xhr.onabort = errorCallback; + } else { + xhr.onreadystatechange = function() { + + // Check readyState before timeout as it changes + if ( xhr.readyState === 4 ) { + + // Allow onerror to be called first, + // but that will not handle a native abort + // Also, save errorCallback to a variable + // as xhr.onerror cannot be accessed + window.setTimeout( function() { + if ( callback ) { + errorCallback(); + } + } ); + } + }; + } + + // Create the abort callback + callback = callback( "abort" ); + + try { + + // Do send the request (this may raise an exception) + xhr.send( options.hasContent && options.data || null ); + } catch ( e ) { + + // #14683: Only rethrow if this hasn't been notified as an error yet + if ( callback ) { + throw e; + } + } + }, + + abort: function() { + if ( callback ) { + callback(); + } + } + }; + } +} ); + + + + +// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) +jQuery.ajaxPrefilter( function( s ) { + if ( s.crossDomain ) { + s.contents.script = false; + } +} ); + +// Install script dataType +jQuery.ajaxSetup( { + accepts: { + script: "text/javascript, application/javascript, " + + "application/ecmascript, application/x-ecmascript" + }, + contents: { + script: /\b(?:java|ecma)script\b/ + }, + converters: { + "text script": function( text ) { + jQuery.globalEval( text ); + return text; + } + } +} ); + +// Handle cache's special case and crossDomain +jQuery.ajaxPrefilter( "script", function( s ) { + if ( s.cache === undefined ) { + s.cache = false; + } + if ( s.crossDomain ) { + s.type = "GET"; + } +} ); + +// Bind script tag hack transport +jQuery.ajaxTransport( "script", function( s ) { + + // This transport only deals with cross domain requests + if ( s.crossDomain ) { + var script, callback; + return { + send: function( _, complete ) { + script = jQuery( " + + +

    Automatic differentiation package - torch.autograd


torch.autograd provides classes and functions implementing automatic differentiation of arbitrary scalar valued functions. It requires minimal changes to the existing code - you only need to declare Tensors for which gradients should be computed with the requires_grad=True keyword.

torch.autograd.backward(tensors, grad_tensors=None, retain_graph=None, create_graph=False, grad_variables=None)

    Computes the sum of gradients of given tensors w.r.t. graph leaves.


The graph is differentiated using the chain rule. If any of the tensors are non-scalar (i.e. their data has more than one element) and require gradient, then the Jacobian-vector product is computed; in this case the function additionally requires specifying grad_tensors. It should be a sequence of matching length that contains the “vector” in the Jacobian-vector product, usually the gradient of the differentiated function w.r.t. corresponding tensors (None is an acceptable value for all tensors that don’t need gradient tensors).


This function accumulates gradients in the leaves - you might need to zero them before calling it.

Parameters

• tensors (sequence of Tensor) – Tensors of which the derivative will be computed.
• grad_tensors (sequence of (Tensor or None)) – The “vector” in the Jacobian-vector product, usually gradients w.r.t. each element of corresponding tensors. None values can be specified for scalar Tensors or ones that don’t require grad. If a None value would be acceptable for all grad_tensors, then this argument is optional.
• retain_graph (bool, optional) – If False, the graph used to compute the grad will be freed. Note that in nearly all cases setting this option to True is not needed and often can be worked around in a much more efficient way. Defaults to the value of create_graph.
• create_graph (bool, optional) – If True, graph of the derivative will be constructed, allowing computation of higher order derivative products. Defaults to False.
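
Example (a minimal sketch, assuming torch is imported; the tensor values are illustrative). A non-scalar output needs an explicit grad_tensors argument:

>>> x = torch.tensor([1., 2., 3.], requires_grad=True)
>>> y = x * 2
>>> torch.autograd.backward([y], grad_tensors=[torch.ones_like(y)])
>>> x.grad
tensor([2., 2., 2.])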
torch.autograd.grad(outputs, inputs, grad_outputs=None, retain_graph=None, create_graph=False, only_inputs=True, allow_unused=False)

    Computes and returns the sum of gradients of outputs w.r.t. the inputs.


grad_outputs should be a sequence of length matching outputs, containing the “vector” in the Jacobian-vector product, usually the pre-computed gradients w.r.t. each of the outputs. If an output doesn’t require_grad, then the gradient can be None.


If only_inputs is True, the function will only return a list of gradients w.r.t. the specified inputs. If it’s False, then gradients w.r.t. all remaining leaves will still be computed, and will be accumulated into their .grad attribute.

Parameters

• outputs (sequence of Tensor) – outputs of the differentiated function.
• inputs (sequence of Tensor) – Inputs w.r.t. which the gradient will be returned (and not accumulated into .grad).
• grad_outputs (sequence of Tensor) – The “vector” in the Jacobian-vector product. Usually gradients w.r.t. each output. None values can be specified for scalar Tensors or ones that don’t require grad. If a None value would be acceptable for all grad_outputs, then this argument is optional. Default: None.
• retain_graph (bool, optional) – If False, the graph used to compute the grad will be freed. Note that in nearly all cases setting this option to True is not needed and often can be worked around in a much more efficient way. Defaults to the value of create_graph.
• create_graph (bool, optional) – If True, graph of the derivative will be constructed, allowing computation of higher order derivative products. Default: False.
• allow_unused (bool, optional) – If False, specifying inputs that were not used when computing outputs (and therefore their grad is always zero) is an error. Defaults to False.
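
Example (a minimal sketch; values are illustrative). Unlike backward(), the gradients are returned rather than accumulated into .grad:

>>> x = torch.tensor([1., 2., 3.], requires_grad=True)
>>> y = (x ** 2).sum()
>>> gx, = torch.autograd.grad(y, x)
>>> gx
tensor([2., 4., 6.])
>>> x.grad is None
True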

    Locally disabling gradient computation

class torch.autograd.no_grad

Context-manager that disables gradient calculation.


Disabling gradient calculation is useful for inference, when you are sure that you will not call Tensor.backward(). It will reduce memory consumption for computations that would otherwise have requires_grad=True.


In this mode, the result of every computation will have requires_grad=False, even when the inputs have requires_grad=True.


This mode has no effect when using the enable_grad context manager.


This context manager is thread local; it will not affect computation in other threads.


    Also functions as a decorator.


    Example:

>>> x = torch.tensor([1], requires_grad=True)
>>> with torch.no_grad():
...   y = x * 2
>>> y.requires_grad
False
>>> @torch.no_grad()
... def doubler(x):
...     return x * 2
>>> z = doubler(x)
>>> z.requires_grad
False
class torch.autograd.enable_grad

    Context-manager that enables gradient calculation.


Enables gradient calculation, if it has been disabled via no_grad or set_grad_enabled.


This context manager is thread local; it will not affect computation in other threads.


    Also functions as a decorator.


    Example:

>>> x = torch.tensor([1], requires_grad=True)
>>> with torch.no_grad():
...   with torch.enable_grad():
...     y = x * 2
>>> y.requires_grad
True
>>> y.backward()
>>> x.grad
>>> @torch.enable_grad()
... def doubler(x):
...     return x * 2
>>> with torch.no_grad():
...     z = doubler(x)
>>> z.requires_grad
True
class torch.autograd.set_grad_enabled(mode)

    Context-manager that sets gradient calculation to on or off.


set_grad_enabled will enable or disable grads based on its argument mode. It can be used as a context-manager or as a function.


When using the enable_grad context manager, set_grad_enabled(False) has no effect.


This context manager is thread local; it will not affect computation in other threads.

Parameters

mode (bool) – Flag whether to enable grad (True), or disable (False). This can be used to conditionally enable gradients.

    Example:

>>> x = torch.tensor([1], requires_grad=True)
>>> is_train = False
>>> with torch.set_grad_enabled(is_train):
...   y = x * 2
>>> y.requires_grad
False
>>> torch.set_grad_enabled(True)
>>> y = x * 2
>>> y.requires_grad
True
>>> torch.set_grad_enabled(False)
>>> y = x * 2
>>> y.requires_grad
False

    In-place operations on Tensors


Supporting in-place operations in autograd is a hard matter, and we discourage their use in most cases. Autograd’s aggressive buffer freeing and reuse makes it very efficient and there are very few occasions when in-place operations actually lower memory usage by any significant amount. Unless you’re operating under heavy memory pressure, you might never need to use them.


    In-place correctness checks


All Tensors keep track of in-place operations applied to them, and if the implementation detects that a tensor was saved for backward in one of the functions, but it was modified in-place afterwards, an error will be raised once the backward pass is started. This ensures that if you’re using in-place functions and not seeing any errors, you can be sure that the computed gradients are correct.
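
Example (a minimal sketch of the kind of error this check raises; the exact message may vary between versions):

>>> x = torch.tensor([1., 2.], requires_grad=True)
>>> y = x.sigmoid()   # sigmoid saves its output for the backward pass
>>> y.add_(1.)        # in-place modification after y was saved
>>> y.sum().backward()
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation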


    Variable (deprecated)


    Warning


The Variable API has been deprecated: Variables are no longer necessary to use autograd with tensors. Autograd automatically supports Tensors with requires_grad set to True. Below please find a quick guide on what has changed:

• Variable(tensor) and Variable(tensor, requires_grad) still work as expected, but they return Tensors instead of Variables.
• var.data is the same thing as tensor.data.
• Methods such as var.backward(), var.detach(), var.register_hook() now work on tensors with the same method names.

In addition, one can now create tensors with requires_grad=True using factory methods such as torch.randn(), torch.zeros(), torch.ones(), and others like the following:


    autograd_tensor = torch.randn((2, 3, 4), requires_grad=True)


    Tensor autograd functions

class torch.Tensor

backward(gradient=None, retain_graph=None, create_graph=False)

    Computes the gradient of current tensor w.r.t. graph leaves.


The graph is differentiated using the chain rule. If the tensor is non-scalar (i.e. its data has more than one element) and requires gradient, the function additionally requires specifying gradient. It should be a tensor of matching type and location that contains the gradient of the differentiated function w.r.t. self.


This function accumulates gradients in the leaves - you might need to zero them before calling it.

Parameters

• gradient (Tensor or None) – Gradient w.r.t. the tensor. If it is a tensor, it will be automatically converted to a Tensor that does not require grad unless create_graph is True. None values can be specified for scalar Tensors or ones that don’t require grad. If a None value would be acceptable then this argument is optional.
• retain_graph (bool, optional) – If False, the graph used to compute the grads will be freed. Note that in nearly all cases setting this option to True is not needed and often can be worked around in a much more efficient way. Defaults to the value of create_graph.
• create_graph (bool, optional) – If True, graph of the derivative will be constructed, allowing computation of higher order derivative products. Defaults to False.
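
Example (a minimal sketch of supplying gradient for a non-scalar tensor; values are illustrative):

>>> x = torch.tensor([1., 2., 3.], requires_grad=True)
>>> y = x ** 2                       # non-scalar, so gradient must be given
>>> y.backward(gradient=torch.ones_like(y))
>>> x.grad
tensor([2., 4., 6.])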
detach()

    Returns a new Tensor, detached from the current graph.


    The result will never require gradient.


    Note


Returned Tensor shares the same storage with the original one. In-place modifications on either of them will be seen, and may trigger errors in correctness checks.

IMPORTANT NOTE: Previously, in-place size / stride / storage changes (such as resize_ / resize_as_ / set_ / transpose_) to the returned tensor would also update the original tensor. Now, these in-place changes will not update the original tensor anymore, and will instead trigger an error. For sparse tensors: in-place indices / values changes (such as zero_ / copy_ / add_) to the returned tensor will not update the original tensor anymore, and will instead trigger an error.
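
Example (a minimal sketch of the shared storage; values are illustrative):

>>> x = torch.tensor([1., 2., 3.], requires_grad=True)
>>> y = x.detach()
>>> y.requires_grad
False
>>> y[0] = 10.   # modifies the storage shared with x
>>> x
tensor([10.,  2.,  3.], requires_grad=True)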

detach_()

Detaches the Tensor from the graph that created it, making it a leaf. Views cannot be detached in-place.

grad

This attribute is None by default and becomes a Tensor the first time a call to backward() computes gradients for self. The attribute will then contain the gradients computed and future calls to backward() will accumulate (add) gradients into it.
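
Example (a minimal sketch of the accumulation behavior; values are illustrative):

>>> x = torch.tensor([1., 2.], requires_grad=True)
>>> (x * 3).sum().backward()
>>> x.grad
tensor([3., 3.])
>>> (x * 3).sum().backward()   # gradients are added to the existing .grad
>>> x.grad
tensor([6., 6.])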

is_leaf

    All Tensors that have requires_grad which is False will be leaf Tensors by convention.


For Tensors that have requires_grad which is True, they will be leaf Tensors if they were created by the user. This means that they are not the result of an operation and so grad_fn is None.


Only leaf Tensors will have their grad populated during a call to backward(). To get grad populated for non-leaf Tensors, you can use retain_grad().


    Example:

>>> a = torch.rand(10, requires_grad=True)
>>> a.is_leaf
True
>>> b = torch.rand(10, requires_grad=True).cuda()
>>> b.is_leaf
False
# b was created by the operation that cast a cpu Tensor into a cuda Tensor
>>> c = torch.rand(10, requires_grad=True) + 2
>>> c.is_leaf
False
# c was created by the addition operation
>>> d = torch.rand(10).cuda()
>>> d.is_leaf
True
# d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)
>>> e = torch.rand(10).cuda().requires_grad_()
>>> e.is_leaf
True
# e requires gradients and has no operations creating it
>>> f = torch.rand(10, requires_grad=True, device="cuda")
>>> f.is_leaf
True
# f requires grad, has no operation creating it
register_hook(hook)

    Registers a backward hook.


The hook will be called every time a gradient with respect to the Tensor is computed. The hook should have the following signature:

hook(grad) -> Tensor or None

The hook should not modify its argument, but it can optionally return a new gradient which will be used in place of grad.


This function returns a handle with a method handle.remove() that removes the hook from the tensor.


    Example:

>>> v = torch.tensor([0., 0., 0.], requires_grad=True)
>>> h = v.register_hook(lambda grad: grad * 2)  # double the gradient
>>> v.backward(torch.tensor([1., 2., 3.]))
>>> v.grad
tensor([2., 4., 6.])
>>> h.remove()  # removes the hook
requires_grad

    Is True if gradients need to be computed for this Tensor, False otherwise.


    Note


The fact that gradients need to be computed for a Tensor does not mean that the grad attribute will be populated; see is_leaf for more details.

retain_grad()

Enables the .grad attribute for non-leaf Tensors.
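
Example (a minimal sketch; values are illustrative):

>>> x = torch.tensor([1., 2.], requires_grad=True)
>>> y = x * 2            # y is a non-leaf Tensor
>>> y.retain_grad()
>>> y.sum().backward()
>>> y.grad
tensor([1., 1.])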

    +
    + +
    + +
    +
    +

    Function

class torch.autograd.Function

    Records operation history and defines formulas for differentiating ops.


Every operation performed on Tensors creates a new function object that performs the computation and records that it happened. The history is retained in the form of a DAG of functions, with edges denoting data dependencies (input <- output). Then, when backward is called, the graph is processed in the topological ordering, by calling backward() methods of each Function object, and passing returned gradients on to the next Functions.


Normally, the only way users interact with functions is by creating subclasses and defining new operations. This is a recommended way of extending torch.autograd.


    Each function object is meant to be used only once (in the forward pass).


    Examples:

>>> class Exp(Function):
>>>
>>>     @staticmethod
>>>     def forward(ctx, i):
>>>         result = i.exp()
>>>         ctx.save_for_backward(result)
>>>         return result
>>>
>>>     @staticmethod
>>>     def backward(ctx, grad_output):
>>>         result, = ctx.saved_tensors
>>>         return grad_output * result
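
A custom Function is invoked through its apply method rather than by instantiating it; a minimal sketch continuing the Exp example above (values are illustrative):

>>> x = torch.tensor([0., 1.], requires_grad=True)
>>> y = Exp.apply(x)
>>> y.sum().backward()
>>> x.grad              # equals exp(x)
tensor([1.0000, 2.7183])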
static backward(ctx, *grad_outputs)

    Defines a formula for differentiating the operation.


    This function is to be overridden by all subclasses.


It must accept a context ctx as the first argument, followed by as many outputs as forward() returned, and it should return as many tensors as there were inputs to forward(). Each argument is the gradient w.r.t. the given output, and each returned value should be the gradient w.r.t. the corresponding input.

    +

The context can be used to retrieve tensors saved during the forward pass. It also has an attribute ctx.needs_input_grad as a tuple of booleans representing whether each input needs gradient. E.g., backward() will have ctx.needs_input_grad[0] = True if the first input to forward() needs the gradient computed w.r.t. the output.

    +
    + +
    +
    +static forward(ctx, *args, **kwargs)[source]
    +

    Performs the operation.

    +

    This function is to be overridden by all subclasses.

    +

    It must accept a context ctx as the first argument, followed by any +number of arguments (tensors or other types).

    +

    The context can be used to store tensors that can be then retrieved +during the backward pass.

    +
    + +
    + +
    +
    +

    Numerical gradient checking

    +
    +
    +torch.autograd.gradcheck(func, inputs, eps=1e-06, atol=1e-05, rtol=0.001, raise_exception=True, check_sparse_nnz=False, nondet_tol=0.0)[source]
    +

    Check gradients computed via small finite differences against analytical +gradients w.r.t. tensors in inputs that are of floating point type +and with requires_grad=True.

    +

    The check between numerical and analytical gradients uses allclose().

    +
    +

    Note

    +

    The default values are designed for input of double precision. +This check will likely fail if input is of less precision, e.g., +FloatTensor.

    +
    +
    +

    Warning

    +

    If any checked tensor in input has overlapping memory, i.e., +different indices pointing to the same memory address (e.g., from +torch.expand()), this check will likely fail because the numerical +gradients computed by point perturbation at such indices will change +values at all other indices that share the same memory address.

    +
    +
    +
    Parameters
    +
      +
    • func (function) – a Python function that takes Tensor inputs and returns +a Tensor or a tuple of Tensors

    • +
    • inputs (tuple of Tensor or Tensor) – inputs to the function

    • +
    • eps (float, optional) – perturbation for finite differences

    • +
    • atol (float, optional) – absolute tolerance

    • +
    • rtol (float, optional) – relative tolerance

    • +
    • raise_exception (bool, optional) – indicating whether to raise an exception if +the check fails. The exception gives more information about the +exact nature of the failure. This is helpful when debugging gradchecks.

    • +
    • check_sparse_nnz (bool, optional) – if True, gradcheck allows for SparseTensor input, +and for any SparseTensor at input, gradcheck will perform check at nnz positions only.

    • +
    • nondet_tol (float, optional) – tolerance for non-determinism. When running +identical inputs through the differentiation, the results must either match +exactly (default, 0.0) or be within this tolerance.

    • +
    +
    +
    Returns
    +

    True if all differences satisfy allclose condition

    +
    +
    +
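A minimal usage sketch (the function and input here are our own; note the double-precision input, per the note above):

>>> from torch.autograd import gradcheck
+>>> inp = torch.randn(4, dtype=torch.double, requires_grad=True)
+>>> gradcheck(torch.sigmoid, (inp,))
+True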
    + +
    +
    +torch.autograd.gradgradcheck(func, inputs, grad_outputs=None, eps=1e-06, atol=1e-05, rtol=0.001, gen_non_contig_grad_outputs=False, raise_exception=True, nondet_tol=0.0)[source]
    +

    Check gradients of gradients computed via small finite differences +against analytical gradients w.r.t. tensors in inputs and +grad_outputs that are of floating point type and with +requires_grad=True.

    +

    This function checks that backpropagating through the gradients computed +to the given grad_outputs are correct.

    +

    The check between numerical and analytical gradients uses allclose().

    +
    +

    Note

    +

    The default values are designed for input and +grad_outputs of double precision. This check will likely fail if +they are of less precision, e.g., FloatTensor.

    +
    +
    +

    Warning

    +

    If any checked tensor in input and grad_outputs has +overlapping memory, i.e., different indices pointing to the same memory +address (e.g., from torch.expand()), this check will likely fail +because the numerical gradients computed by point perturbation at such +indices will change values at all other indices that share the same +memory address.

    +
    +
    +
    Parameters
    +
      +
    • func (function) – a Python function that takes Tensor inputs and returns +a Tensor or a tuple of Tensors

    • +
    • inputs (tuple of Tensor or Tensor) – inputs to the function

    • +
    • grad_outputs (tuple of Tensor or Tensor, optional) – The gradients with +respect to the function’s outputs.

    • +
    • eps (float, optional) – perturbation for finite differences

    • +
    • atol (float, optional) – absolute tolerance

    • +
    • rtol (float, optional) – relative tolerance

    • +
    • gen_non_contig_grad_outputs (bool, optional) – if grad_outputs is +None and gen_non_contig_grad_outputs is True, the +randomly generated gradient outputs are made to be noncontiguous

    • +
    • raise_exception (bool, optional) – indicating whether to raise an exception if +the check fails. The exception gives more information about the +exact nature of the failure. This is helpful when debugging gradchecks.

    • +
    • nondet_tol (float, optional) – tolerance for non-determinism. When running +identical inputs through the differentiation, the results must either match +exactly (default, 0.0) or be within this tolerance. Note that a small amount +of nondeterminism in the gradient will lead to larger inaccuracies in +the second derivative.

    • +
    +
    +
    Returns
    +

    True if all differences satisfy allclose condition

    +
    +
    +
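A minimal usage sketch mirroring the gradcheck example above (the function and input are our own):

>>> from torch.autograd import gradgradcheck
+>>> inp = torch.randn(4, dtype=torch.double, requires_grad=True)
+>>> gradgradcheck(torch.tanh, (inp,))
+True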
    + +
    +
    +

    Profiler

    +

Autograd includes a profiler that lets you inspect the cost of different operators inside your model - both on the CPU and GPU. There are two modes implemented at the moment - CPU-only, using profile, and nvprof-based (registering both CPU and GPU activity), using emit_nvtx.

    +
    +
    +class torch.autograd.profiler.profile(enabled=True, use_cuda=False, record_shapes=False)[source]
    +

    Context manager that manages autograd profiler state and holds a summary of results. +Under the hood it just records events of functions being executed in C++ and +exposes those events to Python. You can wrap any code into it and it will +only report runtime of PyTorch functions.

    +
    +
    Parameters
    +
      +
    • enabled (bool, optional) – Setting this to False makes this context manager a no-op. +Default: True.

    • +
    • use_cuda (bool, optional) – Enables timing of CUDA events as well using the cudaEvent API. +Adds approximately 4us of overhead to each tensor operation. +Default: False

    • +
    • record_shapes (bool, optional) – If shapes recording is set, information +about input dimensions will be collected. This allows one to see which +dimensions have been used under the hood and further group by them +using prof.key_averages(group_by_input_shape=True). Please note that +shape recording might skew your profiling data. It is recommended to +use separate runs with and without shape recording to validate the timing. +Most likely the skew will be negligible for bottom most events (in a case +of nested function calls). But for higher level functions the total +self cpu time might be artificially increased because of the shape +collection.

    • +
    +
    +
    +

    Example

    +
    >>> x = torch.randn((1, 1), requires_grad=True)
    +>>> with torch.autograd.profiler.profile() as prof:
    +>>>     for _ in range(100):  # any normal python code, really!
    +>>>         y = x ** 2
+>>>         y.backward()
    +>>> # NOTE: some columns were removed for brevity
    +>>> print(prof.key_averages().table(sort_by="self_cpu_time_total"))
    +-----------------------------------  ---------------  ---------------  ---------------
    +Name                                 Self CPU total   CPU time avg     Number of Calls
    +-----------------------------------  ---------------  ---------------  ---------------
    +mul                                  32.048ms         32.048ms         200
    +pow                                  27.041ms         27.041ms         200
    +PowBackward0                         9.727ms          55.483ms         100
    +torch::autograd::AccumulateGrad      9.148ms          9.148ms          100
    +torch::autograd::GraphRoot           691.816us        691.816us        100
    +-----------------------------------  ---------------  ---------------  ---------------
    +
    +
    +
    +
    +export_chrome_trace(path)[source]
    +

    Exports an EventList as a Chrome tracing tools file.

    +

    The checkpoint can be later loaded and inspected under chrome://tracing URL.

    +
    +
    Parameters
    +

    path (str) – Path where the trace will be written.

    +
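For example, continuing the profiling example above (the file name is our own choice):

>>> prof.export_chrome_trace("trace.json")  # then open chrome://tracing and load trace.json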
    +
    +
    + +
    +
    +key_averages(group_by_input_shape=False)[source]
    +

    Averages all function events over their keys.

    +

Parameters

group_by_input_shape (bool, optional) – The key would become (event name, input dimensions) rather than just the event name. This is useful to see which dimensionality contributes to the runtime the most, and may help with dimension-specific optimizations or choosing the best candidates for quantization (aka fitting a roof line).

    +
    +
    Returns
    +

    An EventList containing FunctionEventAvg objects.

    +
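For example, when the profile was collected with record_shapes=True, a usage sketch grouping events by input shapes:

>>> print(prof.key_averages(group_by_input_shape=True).table())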
    +
    +
    + +
    +
    +property self_cpu_time_total
    +

    Returns total time spent on CPU obtained as a sum of +all self times across all the events.

    +
    + +
    +
    +table(sort_by=None, row_limit=100, header=None)[source]
    +

    Prints an EventList as a nicely formatted table.

    +
    +
    Parameters
    +

    sort_by (str, optional) – Attribute used to sort entries. By default +they are printed in the same order as they were registered. +Valid keys include: cpu_time, cuda_time, cpu_time_total, +cuda_time_total, count.

    +
    +
    Returns
    +

    A string containing the table.

    +
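A short usage sketch, sorting by one of the valid keys listed above:

>>> print(prof.table(sort_by="cpu_time_total", row_limit=10))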
    +
    +
    + +
    +
    +total_average()[source]
    +

    Averages all events.

    +
    +
    Returns
    +

    A FunctionEventAvg object.

    +
    +
    +
    + +
    + +
    +
    +class torch.autograd.profiler.emit_nvtx(enabled=True, record_shapes=False)[source]
    +

    Context manager that makes every autograd operation emit an NVTX range.

    +

    It is useful when running the program under nvprof:

    +
    nvprof --profile-from-start off -o trace_name.prof -- <regular command here>
    +
    +
    +

    Unfortunately, there’s no way to force nvprof to flush the data it collected +to disk, so for CUDA profiling one has to use this context manager to annotate +nvprof traces and wait for the process to exit before inspecting them. +Then, either NVIDIA Visual Profiler (nvvp) can be used to visualize the timeline, or +torch.autograd.profiler.load_nvprof() can load the results for inspection +e.g. in Python REPL.

    +
    +
    Parameters
    +
      +
    • enabled (bool, optional, default=True) – Setting enabled=False makes this context manager a no-op. +Default: True.

    • +
    • record_shapes (bool, optional, default=False) – If record_shapes=True, the nvtx range wrapping +each autograd op will append information about the sizes of Tensor arguments received +by that op, in the following format: +[[arg0.size(0), arg0.size(1), ...], [arg1.size(0), arg1.size(1), ...], ...] +Non-tensor arguments will be represented by []. +Arguments will be listed in the order they are received by the backend op. +Please note that this order may not match the order in which those arguments were passed +on the Python side. Also note that shape recording may increase the overhead of nvtx range creation.

    • +
    +
    +
    +

    Example

    +
    >>> with torch.cuda.profiler.profile():
    +...     model(x) # Warmup CUDA memory allocator and profiler
    +...     with torch.autograd.profiler.emit_nvtx():
    +...         model(x)
    +
    +
    +

    Forward-backward correlation

    +

    When viewing a profile created using emit_nvtx in the Nvidia Visual Profiler, +correlating each backward-pass op with the corresponding forward-pass op can be difficult. +To ease this task, emit_nvtx appends sequence number information to the ranges it +generates.

    +

    During the forward pass, each function range is decorated with seq=<N>. seq is a running +counter, incremented each time a new backward Function object is created and stashed for backward. +Thus, the seq=<N> annotation associated with each forward function range tells you that +if a backward Function object is created by this forward function, +the backward object will receive sequence number N. +During the backward pass, the top-level range wrapping each C++ backward Function’s +apply() call is decorated with stashed seq=<M>. M is the sequence number that +the backward object was created with. By comparing stashed seq numbers in backward with seq +numbers in forward, you can track down which forward op created each backward Function.

    +

    Any functions executed during the backward pass are also decorated with seq=<N>. During +default backward (with create_graph=False) this information is irrelevant, and in fact, +N may simply be 0 for all such functions. Only the top-level ranges associated with +backward Function objects’ apply() methods are useful, as a way to correlate these Function +objects with the earlier forward pass.

    +

    Double-backward

    +

    If, on the other hand, a backward pass with create_graph=True is underway (in other words, +if you are setting up for a double-backward), each function’s execution during backward +is given a nonzero, useful seq=<N>. Those functions may themselves create Function objects +to be executed later during double-backward, just as the original functions in the forward pass did. +The relationship between backward and double-backward is conceptually the same as the relationship +between forward and backward: The functions still emit current-sequence-number-tagged ranges, +the Function objects they create still stash those sequence numbers, and during the eventual +double-backward, the Function objects’ apply() ranges are still tagged with stashed seq +numbers, which can be compared to seq numbers from the backward pass.

    +
    + +
    +
    +torch.autograd.profiler.load_nvprof(path)[source]
    +

    Opens an nvprof trace file and parses autograd annotations.

    +
    +
    Parameters
    +

    path (str) – path to nvprof trace

    +
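A usage sketch (the trace file name matches the nvprof command shown earlier):

>>> events = torch.autograd.profiler.load_nvprof("trace_name.prof")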
    +
    +
    + +
    +
    +

    Anomaly detection

    +
    +
    +class torch.autograd.detect_anomaly[source]
    +

Context-manager that enables anomaly detection for the autograd engine.

    +

This does two things:

• Running the forward pass with detection enabled will allow the backward pass to print the traceback of the forward operation that created the failing backward function.

• Any backward computation that generates a “nan” value will raise an error.

    +

    Example

    +
    >>> import torch
    +>>> from torch import autograd
    +>>> class MyFunc(autograd.Function):
    +...     @staticmethod
    +...     def forward(ctx, inp):
    +...         return inp.clone()
    +...     @staticmethod
    +...     def backward(ctx, gO):
    +...         # Error during the backward pass
    +...         raise RuntimeError("Some error in backward")
    +...         return gO.clone()
    +>>> def run_fn(a):
    +...     out = MyFunc.apply(a)
    +...     return out.sum()
    +>>> inp = torch.rand(10, 10, requires_grad=True)
    +>>> out = run_fn(inp)
    +>>> out.backward()
    +    Traceback (most recent call last):
    +      File "<stdin>", line 1, in <module>
    +      File "/your/pytorch/install/torch/tensor.py", line 93, in backward
    +        torch.autograd.backward(self, gradient, retain_graph, create_graph)
    +      File "/your/pytorch/install/torch/autograd/__init__.py", line 90, in backward
    +        allow_unreachable=True)  # allow_unreachable flag
    +      File "/your/pytorch/install/torch/autograd/function.py", line 76, in apply
    +        return self._forward_cls.backward(self, *args)
    +      File "<stdin>", line 8, in backward
    +    RuntimeError: Some error in backward
    +>>> with autograd.detect_anomaly():
    +...     inp = torch.rand(10, 10, requires_grad=True)
    +...     out = run_fn(inp)
    +...     out.backward()
    +    Traceback of forward call that caused the error:
    +      File "tmp.py", line 53, in <module>
    +        out = run_fn(inp)
    +      File "tmp.py", line 44, in run_fn
    +        out = MyFunc.apply(a)
    +    Traceback (most recent call last):
    +      File "<stdin>", line 4, in <module>
    +      File "/your/pytorch/install/torch/tensor.py", line 93, in backward
    +        torch.autograd.backward(self, gradient, retain_graph, create_graph)
    +      File "/your/pytorch/install/torch/autograd/__init__.py", line 90, in backward
    +        allow_unreachable=True)  # allow_unreachable flag
    +      File "/your/pytorch/install/torch/autograd/function.py", line 76, in apply
    +        return self._forward_cls.backward(self, *args)
    +      File "<stdin>", line 8, in backward
    +    RuntimeError: Some error in backward
    +
    +
    +
    + +
    +
    +class torch.autograd.set_detect_anomaly(mode)[source]
    +

    Context-manager that sets the anomaly detection for the autograd engine on or off.

    +

    set_detect_anomaly will enable or disable the autograd anomaly detection +based on its argument mode. +It can be used as a context-manager or as a function.

    +

    See detect_anomaly above for details of the anomaly detection behaviour.

    +
    +
    Parameters
    +

    mode (bool) – Flag whether to enable anomaly detection (True), +or disable (False).

    +
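A short sketch of both usage styles:

>>> torch.autograd.set_detect_anomaly(True)        # enabled globally, as a function call
+>>> with torch.autograd.set_detect_anomaly(False):
+...     pass  # anomaly detection temporarily disabled inside this block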
    +
    +
    + +
    +
\ No newline at end of file
diff --git a/docs/stable/bottleneck.html b/docs/stable/bottleneck.html
new file mode 100644
index 000000000000..43f62e9360b9
--- /dev/null
+++ b/docs/stable/bottleneck.html
@@ -0,0 +1,583 @@
+torch.utils.bottleneck — PyTorch master documentation

    torch.utils.bottleneck

    +

    torch.utils.bottleneck is a tool that can be used as an initial step for +debugging bottlenecks in your program. It summarizes runs of your script with +the Python profiler and PyTorch’s autograd profiler.

    +

    Run it on the command line with

    +
    python -m torch.utils.bottleneck /path/to/source/script.py [args]
    +
    +
    +

    where [args] are any number of arguments to script.py, or run +python -m torch.utils.bottleneck -h for more usage instructions.

    +
    +

    Warning

    +

    Because your script will be profiled, please ensure that it exits in a +finite amount of time.

    +
    +
    +

    Warning

    +

Due to the asynchronous nature of CUDA kernels, when running against CUDA code, the cProfile output and CPU-mode autograd profilers may not show correct timings: the reported CPU time covers only the time used to launch the kernels and does not include the time a kernel spent executing on the GPU unless the operation does a synchronize. Ops that do synchronize appear to be extremely expensive under regular CPU-mode profilers. In these cases where timings are incorrect, the CUDA-mode autograd profiler may be helpful.

    +
    +
    +

    Note

    +

    To decide which (CPU-only-mode or CUDA-mode) autograd profiler output to +look at, you should first check if your script is CPU-bound +(“CPU total time is much greater than CUDA total time”). +If it is CPU-bound, looking at the results of the CPU-mode autograd +profiler will help. If on the other hand your script spends most of its +time executing on the GPU, then it makes sense to start +looking for responsible CUDA operators in the output of the CUDA-mode +autograd profiler.

    +

    Of course the reality is much more complicated and your script might not be +in one of those two extremes depending on the part of the model you’re +evaluating. If the profiler outputs don’t help, you could try looking at +the result of torch.autograd.profiler.emit_nvtx() with nvprof. +However, please take into account that the NVTX overhead is very high and +often gives a heavily skewed timeline.

    +
    +
    +

    Warning

    +

    If you are profiling CUDA code, the first profiler that bottleneck runs +(cProfile) will include the CUDA startup time (CUDA buffer allocation cost) +in its time reporting. This should not matter if your bottlenecks result +in code much slower than the CUDA startup time.

    +
    +

    For more complicated uses of the profilers (like in a multi-GPU case), +please see https://docs.python.org/3/library/profile.html +or torch.autograd.profiler.profile() for more information.

    +
\ No newline at end of file
diff --git a/docs/stable/checkpoint.html b/docs/stable/checkpoint.html
new file mode 100644
index 000000000000..f9e9639bea7e
--- /dev/null
+++ b/docs/stable/checkpoint.html
@@ -0,0 +1,637 @@
+torch.utils.checkpoint — PyTorch master documentation

    torch.utils.checkpoint

    +
    +

    Note

    +

Checkpointing is implemented by rerunning a forward-pass segment for each checkpointed segment during backward. This can cause persistent states like the RNG state to be advanced further than they would be without checkpointing. By default, checkpointing includes logic to juggle the RNG state such that checkpointed passes making use of RNG (through dropout for example) have deterministic output as compared to non-checkpointed passes. The logic to stash and restore RNG states can incur a moderate performance hit depending on the runtime of checkpointed operations. If deterministic output compared to non-checkpointed passes is not required, supply preserve_rng_state=False to checkpoint or checkpoint_sequential to omit stashing and restoring the RNG state during each checkpoint.

    +

    The stashing logic saves and restores the RNG state for the current device +and the device of all cuda Tensor arguments to the run_fn. +However, the logic has no way to anticipate if the user will move +Tensors to a new device within the run_fn itself. Therefore, if you move +Tensors to a new device (“new” meaning not belonging to the set of +[current device + devices of Tensor arguments]) within run_fn, deterministic +output compared to non-checkpointed passes is never guaranteed.

    +
    +
    +
    +torch.utils.checkpoint.checkpoint(function, *args, **kwargs)[source]
    +

    Checkpoint a model or part of the model

    +

    Checkpointing works by trading compute for memory. Rather than storing all +intermediate activations of the entire computation graph for computing +backward, the checkpointed part does not save intermediate activations, +and instead recomputes them in backward pass. It can be applied on any part +of a model.

    +

Specifically, in the forward pass, function will run in torch.no_grad() manner, i.e., not storing the intermediate activations. Instead, the forward pass saves the inputs tuple and the function parameter. In the backwards pass, the saved inputs and function are retrieved, and the forward pass is computed on function again, now tracking the intermediate activations, and then the gradients are calculated using these activation values.

    +
    +

    Warning

    +

    Checkpointing doesn’t work with torch.autograd.grad(), but only +with torch.autograd.backward().

    +
    +
    +

    Warning

    +

If the function invocation during the backward pass does anything different from the invocation during the forward pass, e.g., due to some global variable, the checkpointed version won’t be equivalent, and unfortunately this can’t be detected.

    +
    +
    +
    Parameters
    +
      +
    • function – describes what to run in the forward pass of the model or +part of the model. It should also know how to handle the inputs +passed as the tuple. For example, in LSTM, if user passes +(activation, hidden), function should correctly use the +first input as activation and the second input as hidden

    • +
• preserve_rng_state (bool, optional, default=True) – If False, omit stashing and restoring the RNG state during each checkpoint.

    • +
    • args – tuple containing inputs to the function

    • +
    +
    +
    Returns
    +

    Output of running function on *args

    +
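A minimal usage sketch (the module and input below are our own illustration):

>>> import torch.nn as nn
+>>> from torch.utils.checkpoint import checkpoint
+>>> block = nn.Sequential(nn.Linear(10, 10), nn.ReLU())
+>>> inp = torch.randn(2, 10, requires_grad=True)
+>>> out = checkpoint(block, inp)  # activations inside block are recomputed during backward
+>>> out.sum().backward()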
    +
    +
    + +
    +
    +torch.utils.checkpoint.checkpoint_sequential(functions, segments, *inputs, **kwargs)[source]
    +

    A helper function for checkpointing sequential models.

    +

    Sequential models execute a list of modules/functions in order +(sequentially). Therefore, we can divide such a model in various segments +and checkpoint each segment. All segments except the last will run in +torch.no_grad() manner, i.e., not storing the intermediate +activations. The inputs of each checkpointed segment will be saved for +re-running the segment in the backward pass.

    +

    See checkpoint() on how checkpointing works.

    +
    +

    Warning

    +

    Checkpointing doesn’t work with torch.autograd.grad(), but only +with torch.autograd.backward().

    +
    +
    +
    Parameters
    +
      +
    • functions – A torch.nn.Sequential or the list of modules or +functions (comprising the model) to run sequentially.

    • +
    • segments – Number of chunks to create in the model

    • +
    • inputs – tuple of Tensors that are inputs to functions

    • +
• preserve_rng_state (bool, optional, default=True) – If False, omit stashing and restoring the RNG state during each checkpoint.

    • +
    +
    +
    Returns
    +

    Output of running functions sequentially on *inputs

    +
    +
    +

    Example

    +
>>> model = nn.Sequential(...)
+>>> chunks = 2  # e.g., split the model into two checkpointed segments
+>>> input_var = checkpoint_sequential(model, chunks, input_var)
    +
    +
    +
    + +
\ No newline at end of file
diff --git a/docs/stable/community/contribution_guide.html b/docs/stable/community/contribution_guide.html
new file mode 100644
index 000000000000..875a657e4908
--- /dev/null
+++ b/docs/stable/community/contribution_guide.html
@@ -0,0 +1,906 @@
+PyTorch Contribution Guide — PyTorch master documentation

    PyTorch Contribution Guide

    +

PyTorch is a GPU-accelerated Python tensor computation package for building deep neural networks, built on a tape-based autograd system.

    +
    +

    The PyTorch Contribution Process

    +

    The PyTorch organization is governed by PyTorch +Governance.

    +

    The PyTorch development process involves a healthy amount of open +discussions between the core development team and the community.

    +

PyTorch operates similarly to most open source projects on GitHub. However, if you’ve never contributed to an open source project before, here is the basic process.

    +
      +
    • Figure out what you’re going to work on. The majority of open +source contributions come from people scratching their own itches. +However, if you don’t know what you want to work on, or are just +looking to get more acquainted with the project, here are some tips +for how to find appropriate tasks:

      +
        +
      • Look through the issue +tracker and see if +there are any issues you know how to fix. Issues that are +confirmed by other contributors tend to be better to investigate. +We also maintain some labels for issues which are likely to be +good for new people, e.g., bootcamp and 1hr, although +these labels are less well maintained.

      • +
      • Join us on Slack and let us know you’re interested in getting to +know PyTorch. We’re very happy to help out researchers and +partners get up to speed with the codebase.

      • +
      +
    • +
    • Figure out the scope of your change and reach out for design +comments on a GitHub issue if it’s large. The majority of pull +requests are small; in that case, no need to let us know about what +you want to do, just get cracking. But if the change is going to be +large, it’s usually a good idea to get some design comments about it +first.

      +
        +
      • If you don’t know how big a change is going to be, we can help you +figure it out! Just post about it on issues or Slack.

      • +
• Some feature additions are very standardized; for example, lots of people add new operators or optimizers to PyTorch. Design discussion in these cases boils down mostly to, “Do we want this operator/optimizer?” Giving evidence for its utility, e.g., usage in peer-reviewed papers, or existence in other frameworks, helps a bit when making this case. Adding operators / algorithms from recently-released research is generally not accepted, unless there is overwhelming evidence that this newly published work has ground-breaking results and will eventually become a standard in the field. If you are not sure where your method falls, open an issue first before implementing a PR.

        +
        +
      • +
      • Core changes and refactors can be quite difficult to coordinate, +as the pace of development on PyTorch master is quite fast. +Definitely reach out about fundamental or cross-cutting changes; +we can often give guidance about how to stage such changes into +more easily reviewable pieces.

      • +
      +
    • +
    • Code it out!

      +
        +
      • See the technical guide for advice for working with PyTorch in a +technical form.

      • +
      +
    • +
    • Open a pull request.

      +
        +
      • If you are not ready for the pull request to be reviewed, tag it +with [WIP]. We will ignore it when doing review passes. If you are +working on a complex change, it’s good to start things off as WIP, +because you will need to spend time looking at CI results to see +if things worked out or not.

      • +
      • Find an appropriate reviewer for your change. We have some folks +who regularly go through the PR queue and try to review +everything, but if you happen to know who the maintainer for a +given subsystem affected by your patch is, feel free to include +them directly on the pull request. You can learn more about this +structure at PyTorch Subsystem Ownership.

      • +
      +
    • +
    • Iterate on the pull request until it’s accepted!

      +
        +
      • We’ll try our best to minimize the number of review roundtrips and +block PRs only when there are major issues. For the most common +issues in pull requests, take a look at Common Mistakes.

      • +
      • Once a pull request is accepted and CI is passing, there is +nothing else you need to do; we will merge the PR for you.

      • +
      +
    • +
    +
    +
    +

    Getting Started

    +
    +

    Proposing new features

    +

    New feature ideas are best discussed on a specific issue. Please include +as much information as you can, any accompanying data, and your proposed +solution. The PyTorch team and community frequently reviews new issues +and comments where they think they can help. If you feel confident in +your solution, go ahead and implement it.

    +
    +
    +

    Reporting Issues

    +

    If you’ve identified an issue, first search through the list of +existing issues on the +repo. If you are unable to find a similar issue, then create a new one. +Supply as much information you can to reproduce the problematic +behavior. Also, include any additional insights like the behavior you +expect.

    +
    +
    +

    Implementing Features or Fixing Bugs

    +

    If you want to fix a specific issue, it’s best to comment on the +individual issue with your intent. However, we do not lock or assign +issues except in cases where we have worked with the developer before. +It’s best to strike up a conversation on the issue and discuss your +proposed solution. The PyTorch team can provide guidance that saves you +time.

    +

    Issues that are labeled first-new-issue, low, or medium priority provide +the best entrance point are great places to start.

    +
    +
    +

    Adding Tutorials

    +

    A great deal of the tutorials on pytorch.org +come from the community itself and we welcome additional contributions. +To learn more about how to contribute a new tutorial you can learn more +here: PyTorch.org Tutorial Contribution Guide on +Github

    +
    +
    +

    Improving Documentation & Tutorials

    +

    We aim to produce high quality documentation and tutorials. On rare +occasions that content includes typos or bugs. If you find something you +can fix, send us a pull request for consideration.

    +

    Take a look at the Documentation section to learn how our system +works.

    +
    +
    +

    Participating in online discussions

    +

    You can find active discussions happening on the PyTorch Discussion +forum.

    +
    +
    +

    Submitting pull requests to fix open issues

    +

    You can view a list of all open issues +here. Commenting on an +issue is a great way to get the attention of the team. From here you can +share your ideas and how you plan to resolve the issue.

    +

    For more challenging issues, the team will provide feedback and +direction for how to best solve the issue.

    +

    If you’re not able to fix the issue itself, commenting and sharing +whether you can reproduce the issue can be useful for helping the team +identify problem areas.

    +
    +
    +

    Reviewing open pull requests

    +

We appreciate your help reviewing and commenting on pull requests. Our team strives to keep the number of open pull requests at a manageable size; we respond quickly when we need more information, and we merge PRs that we think are useful. However, due to the high level of interest, additional eyes on pull requests are appreciated.

    +
    +
    +

    Improving code readability

    +

Improving code readability helps everyone. It is often better to submit a small number of pull requests that touch a few files versus a large pull request that touches many files. Starting a discussion in the PyTorch forum here or on an issue related to your improvement is the best way to get started.

    +
    +
    +

    Adding test cases to make the codebase more robust

    +

    Additional test coverage is appreciated.

    +
    +
    +

    Promoting PyTorch

    +

    Your use of PyTorch in your projects, research papers, write ups, blogs, +or general discussions around the internet helps to raise awareness for +PyTorch and our growing community. Please reach out to +pytorch-marketing@fb.com +for marketing support.

    +
    +
    +

    Triaging issues

    +

    If you feel that an issue could benefit from a particular tag or level +of complexity comment on the issue and share your opinion. If an you +feel an issue isn’t categorized properly comment and let the team know.

    +
    +
    +
    +

    About open source development

    +

    If this is your first time contributing to an open source project, some +aspects of the development process may seem unusual to you.

    +
      +
    • There is no way to “claim” issues. People often want to “claim” +an issue when they decide to work on it, to ensure that there isn’t +wasted work when someone else ends up working on it. This doesn’t +really work too well in open source, since someone may decide to work +on something, and end up not having time to do it. Feel free to give +information in an advisory fashion, but at the end of the day, we +will take running code and rough consensus.

    • +
    • There is a high bar for new functionality that is added. Unlike +in a corporate environment, where the person who wrote code +implicitly “owns” it and can be expected to take care of it in the +beginning of its lifetime, once a pull request is merged into an open +source project, it immediately becomes the collective responsibility +of all maintainers on the project. When we merge code, we are saying +that we, the maintainers, are able to review subsequent changes and +make a bugfix to the code. This naturally leads to a higher standard +of contribution.

    • +
    +
    +
    +

    Common Mistakes To Avoid

    +
      +
    • Did you add tests? (Or if the change is hard to test, did you +describe how you tested your change?)

      +
        +
      • We have a few motivations for why we ask for tests:

        +
          +
        1. to help us tell if we break it later

        2. +
        3. to help us tell if the patch is correct in the first place +(yes, we did review it, but as Knuth says, “beware of the +following code, for I have not run it, merely proven it +correct”)

        4. +
        +
      • +
• When is it OK not to add a test? Sometimes a change can’t be conveniently tested, or the change is so obviously correct (and unlikely to be broken) that it’s OK not to test it. On the contrary, if a change seems likely (or is known to be likely) to be accidentally broken, it’s important to put in the time to work out a testing strategy.

      • +
      +
    • +
    • Is your PR too long?

      +
        +
      • It’s easier for us to review and merge small PRs. Difficulty of +reviewing a PR scales nonlinearly with its size.

      • +
      • When is it OK to submit a large PR? It helps a lot if there was a +corresponding design discussion in an issue, with sign off from +the people who are going to review your diff. We can also help +give advice about how to split up a large change into individually +shippable parts. Similarly, it helps if there is a complete +description of the contents of the PR: it’s easier to review code +if we know what’s inside!

      • +
      +
    • +
    • Comments for subtle things? In cases where behavior of your code +is nuanced, please include extra comments and documentation to allow +us to better understand the intention of your code.

    • +
    • Did you add a hack? Sometimes a hack is the right answer. But +usually we will have to discuss it.

    • +
    • Do you want to touch a very core component? In order to prevent +major regressions, pull requests that touch core components receive +extra scrutiny. Make sure you’ve discussed your changes with the team +before undertaking major changes.

    • +
    • Want to add a new feature? If you want to add new features, +comment your intention on the related issue. Our team tries to +comment on and provide feedback to the community. It’s better to have +an open discussion with the team and the rest of the community prior +to building new features. This helps us stay aware of what you’re +working on and increases the chance that it’ll be merged.

    • +
    • Did you touch unrelated code to the PR? To aid in code review, +please only include files in your pull request that are directly +related to your changes.

    • +
    +

    Frequently asked questions

    +
      +
• How can I contribute as a reviewer? There is a lot of value when community developers reproduce issues, try out new functionality, or otherwise help us identify or troubleshoot issues. Commenting on tasks or pull requests with your environment details is helpful and appreciated.

    • +
• CI tests failed, what does it mean? Maybe you need to merge with master or rebase with latest changes. Pushing your changes should re-trigger CI tests. If the failures persist, you’ll want to trace through the error messages and resolve the related issues.

    • +
• What are the most high risk changes? Anything that touches build configuration is a risky area. Please avoid changing these unless you’ve had a discussion with the team beforehand.

    • +
    • Hey, a commit showed up on my branch, what’s up with that? +Sometimes another community member will provide a patch or fix to +your pull request or branch. This is often needed for getting CI tests +to pass.

    • +
    +
    +
    +

    On Documentation

    +
    +

    Python Docs

    +

    PyTorch documentation is generated from python source using +Sphinx. Generated HTML is +copied to the docs folder in the master branch of +pytorch.github.io, +and is served via GitHub pages.

    + +
    +
    +

    C++ Docs

    +

    For C++ code we use Doxygen to generate the content files. The C++ docs +are built on a special server and the resulting files are copied to the +https://github.com/pytorch/cppdocs repo, and are served from GitHub +pages.

    + +
    +
    +
    +

    Tutorials

    +

PyTorch tutorials are documents used to help understand using PyTorch to accomplish specific tasks or to understand more holistic concepts. Tutorials are built using Sphinx-Gallery from executable Python source files, or from restructured-text (rst) files.

    + +
    +

    Tutorials Build Overview

    +

For tutorials, pull requests trigger a rebuild of the entire site using CircleCI to test the effects of the change. This build is sharded into 9 worker builds and takes around 40 minutes total. At the same time, we do a Netlify build using make html-noplot, which builds the site without rendering the notebook output into pages, for quick review.

    +

    After a PR is accepted, the site is rebuilt and deployed from CircleCI.

    +
    +
    +

    Contributing a new Tutorial

    +

    PyTorch.org Tutorial Contribution +Guide

    +
    +
    +
\ No newline at end of file
diff --git a/docs/stable/community/governance.html b/docs/stable/community/governance.html
new file mode 100644
index 000000000000..15c0863ae6bc
--- /dev/null
+++ b/docs/stable/community/governance.html
@@ -0,0 +1,684 @@
+PyTorch Governance — PyTorch master documentation

    PyTorch Governance

    +
    +

    Governance Philosophy and Guiding Tenets

    +

    PyTorch adopts a governance structure with a small set of maintainers +driving the overall project direction with a strong bias towards +PyTorch’s design philosophy where design and code contributions are +valued. Beyond the core maintainers, there is also a slightly broader +set of core developers that have the ability to directly merge pull +requests and own various parts of the core code base.

    +

    Beyond the maintainers and core devs, the community is encouraged to +contribute, file issues, make proposals, review pull requests and be +present in the community. Given contributions and willingness to +invest, anyone can be provided write access or ownership of parts of +the codebase.

    +

    Based on this governance structure, the project has the following core +operating tenets by which decisions are made and overall culture is +derived:

    +
      +
    1. Code contributions matter much more than corporate sponsorship +and independent developers are highly valued.

    2. +
    3. Project influence is gained through contributions (whether PRs, +forum answers, code reviews or otherwise)

    4. +
    +
    +
    +

    Key people and their functions

    +
    +

    Project Maintainers

    +

    Project maintainers provide leadership and direction for the PyTorch +project. Specifics include:

    +
      +
    • Articulate a cohesive long-term vision for the project

    • +
    • Possess a deep understanding of the PyTorch code base

    • +
    • Negotiate and resolve contentious issues in ways acceptable to all +parties involved

    • +
    +

    PyTorch Maintainers:

    + +
    +
    +

    Core Developers

    +

    The PyTorch project is developed by a team of core developers. You can +find the list of core developers at PyTorch Governance | Persons of +Interest.

    +

    While membership is determined by presence in the “PyTorch core” team in +the “PyTorch” +organization on +GitHub, contribution takes many forms:

    +
      +
    • committing changes to the repository;

    • +
    • reviewing pull requests by others;

    • +
    • triaging bug reports on the issue tracker;

    • +
    • discussing topics on official PyTorch communication channels.

    • +
    +
    +
    +

    Moderators

    +

    There is a group of people, some of which are not core developers, +responsible for ensuring that discussions on official communication +channels adhere to the Code of Conduct. They take action in view of +violations and help to support a healthy community. You can find the +list of moderators here.

    +
    +
    +
    +

    Decision Making

    +
    +

    Uncontroversial Changes

    +

    Primary work happens through bug tracker issues and pull requests on +GitHub. Core developers should avoid pushing their changes directly to +the PyTorch repository, instead relying on pull requests. Approving a +pull request by a core developer allows it to be merged without further +process. Core Developers and Project Maintainers ultimately approve +these changes.

    +

    Notifying relevant experts about a bug tracker issue or a pull request +is important. Reviews from experts in the given interest area are +strongly preferred, especially on pull request approvals. Failure to do +so might end up with the change being reverted by the relevant expert.

    +
    +
    +

    Controversial decision process

    +

    Substantial changes in a given interest area require a GitHub issue to +be opened for discussion. This includes:

    +
      +
    • Any semantic or syntactic change to the framework.

    • +
    • Backwards-incompatible changes to the Python or Cpp API.

    • +
    • Additions to the core framework, including substantial new +functionality within an existing library.

    • +
    • Removing core features

    • +
    +

    Project Maintainers ultimately approve these changes.

    +
    +
    +
    +

    FAQ

    +

Q: What if I would like to own (or partly own) a part of the project such as a domain API (e.g., Torch Vision)? This is absolutely possible. The first step is to start contributing to the existing project area and contributing to its health and success. In addition to this, you can make a proposal through a GitHub issue for new functionality or changes to improve the project area.

    +

    Q: What if I am a company looking to use PyTorch internally for +development, can I be granted or purchase a board seat to drive the +project direction? No, the PyTorch project is strictly driven by the +maintainer-driven project philosophy and does not have a board or +vehicle to take financial contributions relating to gaining influence +over technical direction.

    +

    Q: Does the PyTorch project support grants or ways to support +independent developers using or contributing to the project? No, not +at this point. We are however looking at ways to better support the +community of independent developers around PyTorch. If you have +suggestions or inputs, please reach out on the PyTorch forums to +discuss.

    +

    Q: How do I contribute code to the project? If the change is +relatively minor, a pull request on GitHub can be opened up immediately +for review and merge by the project committers. For larger changes, +please open an issue to make a proposal to discuss prior. Please also +see the PyTorch Contributor +Guide for contribution +guidelines.

    +

    Q: Can I become a committer on the project? Unfortunately, the +current commit process to PyTorch involves an interaction with Facebook +infrastructure that can only be triggered by Facebook employees. We are +however looking at ways to expand the committer base to individuals +outside of Facebook and will provide an update when the tooling exists +to allow this.

    +

    Q: What if i would like to deliver a PyTorch tutorial at a conference +or otherwise? Do I need to be ‘officially’ a committer to do this? No, +we encourage community members to showcase their work wherever and +whenever they can. Please reach out to +pytorch-marketing@fb.com +for marketing support.

    +
    +
\ No newline at end of file
diff --git a/docs/stable/community/persons_of_interest.html b/docs/stable/community/persons_of_interest.html
new file mode 100644
index 000000000000..69e2c9722c51
--- /dev/null
+++ b/docs/stable/community/persons_of_interest.html
@@ -0,0 +1,699 @@
+PyTorch Governance | Persons of Interest — PyTorch master documentation

    PyTorch Governance | Persons of Interest

    +
    +

    General Maintainers

    + +
    +
    +

    Module-level maintainers

    +
    +

    JIT

    + +
    +
    +

    Distributed

    + +
    +
    +

    Autograd Engine

    + +
    +
    +

    Multiprocessing and DataLoaders

    + +
    +
    +

    CUDA

    + +
    +
    +

    C++

    + +
    +
    +

    Build + CI

    +
      +
    • Will Feng (yf225)

    • +
    • Edward Yang (ezyang)

    • +
    • Jesse Hellemn (pjh5)

    • +
    • Soumith Chintala (soumith)

    • +
    • (sunsetting) Orion Reblitz-Richardson +(orionr)

    • +
    +
    +
    +

    Distributions & RNG

    + +
    +
    +

    C10

    + +
    +
    +

    ONNX <-> PyTorch

    + +
    +
    +

    torch.nn

    + +
    +
    +

    CPU Performance / SIMD

    + +
    +
    +

    AMD/ROCm/HIP

    + +
    +
    +

    Windows

    + +
    +
    +

    MKLDNN

    + +
    +
    +

    XLA

    + +
    +
    +

    PPC

    + +
    +
    +
\ No newline at end of file
diff --git a/docs/stable/cpp_extension.html b/docs/stable/cpp_extension.html
new file mode 100644
index 000000000000..10ea2fae8417
--- /dev/null
+++ b/docs/stable/cpp_extension.html
@@ -0,0 +1,770 @@
+torch.utils.cpp_extension — PyTorch master documentation

    torch.utils.cpp_extension

    +
    +
    +torch.utils.cpp_extension.CppExtension(name, sources, *args, **kwargs)[source]
    +

    Creates a setuptools.Extension for C++.

    +

    Convenience method that creates a setuptools.Extension with the +bare minimum (but often sufficient) arguments to build a C++ extension.

    +

    All arguments are forwarded to the setuptools.Extension +constructor.

    +

    Example

    +
    >>> from setuptools import setup
    +>>> from torch.utils.cpp_extension import BuildExtension, CppExtension
    +>>> setup(
    +        name='extension',
    +        ext_modules=[
    +            CppExtension(
    +                name='extension',
    +                sources=['extension.cpp'],
    +                extra_compile_args=['-g']),
    +        ],
    +        cmdclass={
    +            'build_ext': BuildExtension
    +        })
    +
    +
    +
    + +
    +
    +torch.utils.cpp_extension.CUDAExtension(name, sources, *args, **kwargs)[source]
    +

    Creates a setuptools.Extension for CUDA/C++.

    +

    Convenience method that creates a setuptools.Extension with the +bare minimum (but often sufficient) arguments to build a CUDA/C++ +extension. This includes the CUDA include path, library path and runtime +library.

    +

    All arguments are forwarded to the setuptools.Extension +constructor.

    +

    Example

    +
    >>> from setuptools import setup
    +>>> from torch.utils.cpp_extension import BuildExtension, CUDAExtension
    +>>> setup(
    +        name='cuda_extension',
    +        ext_modules=[
    +            CUDAExtension(
    +                    name='cuda_extension',
    +                    sources=['extension.cpp', 'extension_kernel.cu'],
    +                    extra_compile_args={'cxx': ['-g'],
    +                                        'nvcc': ['-O2']})
    +        ],
    +        cmdclass={
    +            'build_ext': BuildExtension
    +        })
    +
    +
    +
    + +
    +
    +torch.utils.cpp_extension.BuildExtension(*args, **kwargs)[source]
    +

A custom setuptools build extension.

    +

    This setuptools.build_ext subclass takes care of passing the +minimum required compiler flags (e.g. -std=c++11) as well as mixed +C++/CUDA compilation (and support for CUDA files in general).

    +

    When using BuildExtension, it is allowed to supply a dictionary +for extra_compile_args (rather than the usual list) that maps from +languages (cxx or cuda) to a list of additional compiler flags to +supply to the compiler. This makes it possible to supply different flags to +the C++ and CUDA compiler during mixed compilation.

    +
    + +
    +
    +torch.utils.cpp_extension.load(name, sources, extra_cflags=None, extra_cuda_cflags=None, extra_ldflags=None, extra_include_paths=None, build_directory=None, verbose=False, with_cuda=None, is_python_module=True)[source]
    +

    Loads a PyTorch C++ extension just-in-time (JIT).

    +

    To load an extension, a Ninja build file is emitted, which is used to +compile the given sources into a dynamic library. This library is +subsequently loaded into the current Python process as a module and +returned from this function, ready for use.

    +

    By default, the directory to which the build file is emitted and the +resulting library compiled to is <tmp>/torch_extensions/<name>, where +<tmp> is the temporary folder on the current platform and <name> +the name of the extension. This location can be overridden in two ways. +First, if the TORCH_EXTENSIONS_DIR environment variable is set, it +replaces <tmp>/torch_extensions and all extensions will be compiled +into subfolders of this directory. Second, if the build_directory +argument to this function is supplied, it overrides the entire path, i.e. +the library will be compiled into that folder directly.

    +

    To compile the sources, the default system compiler (c++) is used, +which can be overridden by setting the CXX environment variable. To pass +additional arguments to the compilation process, extra_cflags or +extra_ldflags can be provided. For example, to compile your extension +with optimizations, pass extra_cflags=['-O3']. You can also use +extra_cflags to pass further include directories.

    +

    CUDA support with mixed compilation is provided. Simply pass CUDA source +files (.cu or .cuh) along with other sources. Such files will be +detected and compiled with nvcc rather than the C++ compiler. This includes +passing the CUDA lib64 directory as a library directory, and linking +cudart. You can pass additional flags to nvcc via +extra_cuda_cflags, just like with extra_cflags for C++. Various +heuristics for finding the CUDA install directory are used, which usually +work fine. If not, setting the CUDA_HOME environment variable is the +safest option.

    +
    +
    Parameters
    +
      +
    • name – The name of the extension to build. This MUST be the same as the +name of the pybind11 module!

    • +
    • sources – A list of relative or absolute paths to C++ source files.

    • +
    • extra_cflags – optional list of compiler flags to forward to the build.

    • +
    • extra_cuda_cflags – optional list of compiler flags to forward to nvcc +when building CUDA sources.

    • +
    • extra_ldflags – optional list of linker flags to forward to the build.

    • +
    • extra_include_paths – optional list of include directories to forward +to the build.

    • +
    • build_directory – optional path to use as build workspace.

    • +
    • verbose – If True, turns on verbose logging of load steps.

    • +
• with_cuda – Determines whether CUDA headers and libraries are added to the build. If set to None (default), this value is automatically determined based on the existence of .cu or .cuh in sources. Set it to True to force CUDA headers and libraries to be included.

    • +
    • is_python_module – If True (default), imports the produced shared +library as a Python module. If False, loads it into the process +as a plain dynamic library.

    • +
    +
    +
    Returns
    +

    If is_python_module is True, returns the loaded PyTorch +extension as a Python module. If is_python_module is False +returns nothing (the shared library is loaded into the process as a side +effect).

    +
    +
    +

    Example

    +
    >>> from torch.utils.cpp_extension import load
    +>>> module = load(
    +        name='extension',
    +        sources=['extension.cpp', 'extension_kernel.cu'],
    +        extra_cflags=['-O2'],
    +        verbose=True)
    +
    +
    +
    + +
    +
    +torch.utils.cpp_extension.load_inline(name, cpp_sources, cuda_sources=None, functions=None, extra_cflags=None, extra_cuda_cflags=None, extra_ldflags=None, extra_include_paths=None, build_directory=None, verbose=False, with_cuda=None, is_python_module=True)[source]
    +

    Loads a PyTorch C++ extension just-in-time (JIT) from string sources.

    +

    This function behaves exactly like load(), but takes its sources as +strings rather than filenames. These strings are stored to files in the +build directory, after which the behavior of load_inline() is +identical to load().

    +

    See the +tests +for good examples of using this function.

    +

    Sources may omit two required parts of a typical non-inline C++ extension: +the necessary header includes, as well as the (pybind11) binding code. More +precisely, strings passed to cpp_sources are first concatenated into a +single .cpp file. This file is then prepended with #include +<torch/extension.h>.

    +

    Furthermore, if the functions argument is supplied, bindings will be +automatically generated for each function specified. functions can +either be a list of function names, or a dictionary mapping from function +names to docstrings. If a list is given, the name of each function is used +as its docstring.

    +

    The sources in cuda_sources are concatenated into a separate .cu +file and prepended with torch/types.h, cuda.h and +cuda_runtime.h includes. The .cpp and .cu files are compiled +separately, but ultimately linked into a single library. Note that no +bindings are generated for functions in cuda_sources per se. To bind +to a CUDA kernel, you must create a C++ function that calls it, and either +declare or define this C++ function in one of the cpp_sources (and +include its name in functions).

    +

    See load() for a description of arguments omitted below.

    +
    +
    Parameters
    +
      +
    • cpp_sources – A string, or list of strings, containing C++ source code.

    • +
    • cuda_sources – A string, or list of strings, containing CUDA source code.

    • +
    • functions – A list of function names for which to generate function +bindings. If a dictionary is given, it should map function names to +docstrings (which are otherwise just the function names).

    • +
• with_cuda – Determines whether CUDA headers and libraries are added to the build. If set to None (default), this value is automatically determined based on whether cuda_sources is provided. Set it to True to force CUDA headers and libraries to be included.

    • +
    +
    +
    +

    Example

    +
    >>> from torch.utils.cpp_extension import load_inline
    +>>> source = '''
    +at::Tensor sin_add(at::Tensor x, at::Tensor y) {
    +  return x.sin() + y.sin();
    +}
    +'''
    +>>> module = load_inline(name='inline_extension',
    +                         cpp_sources=[source],
    +                         functions=['sin_add'])
    +
    +
    +
    + +
    +
    +torch.utils.cpp_extension.include_paths(cuda=False)[source]
    +

    Get the include paths required to build a C++ or CUDA extension.

    +
    +
    Parameters
    +

    cuda – If True, includes CUDA-specific include paths.

    +
    +
    Returns
    +

    A list of include path strings.

    +
    +
    +
    + +
    +
    +torch.utils.cpp_extension.check_compiler_abi_compatibility(compiler)[source]
    +

    Verifies that the given compiler is ABI-compatible with PyTorch.

    +
    +
    Parameters
    +

    compiler (str) – The compiler executable name to check (e.g. g++). +Must be executable in a shell process.

    +
    +
    Returns
    +

    False if the compiler is (likely) ABI-incompatible with PyTorch, +else True.

    +
    +
    +
    + +
    +
    +torch.utils.cpp_extension.verify_ninja_availability()[source]
    +

    Returns True if the ninja build system is +available on the system.

    +
    + +
\ No newline at end of file
diff --git a/docs/stable/cuda.html b/docs/stable/cuda.html
new file mode 100644
index 000000000000..b391c6b65eed
--- /dev/null
+++ b/docs/stable/cuda.html
@@ -0,0 +1,1453 @@
+ torch.cuda — PyTorch master documentation

    torch.cuda

    +

This package adds support for CUDA tensor types, which implement the same functions as CPU tensors but utilize GPUs for computation.

    +

    It is lazily initialized, so you can always import it, and use +is_available() to determine if your system supports CUDA.

    +

    CUDA semantics has more details about working with CUDA.

    +
    +
    +torch.cuda.current_blas_handle()[source]
    +

Returns a cublasHandle_t pointer to the current cuBLAS handle.

    +
    + +
    +
    +torch.cuda.current_device()[source]
    +

    Returns the index of a currently selected device.

    +
    + +
    +
    +torch.cuda.current_stream(device=None)[source]
    +

    Returns the currently selected Stream for a given device.

    +
    +
    Parameters
    +

    device (torch.device or int, optional) – selected device. Returns +the currently selected Stream for the current device, given +by current_device(), if device is None +(default).

    +
    +
    +
    + +
    +
    +torch.cuda.default_stream(device=None)[source]
    +

    Returns the default Stream for a given device.

    +
    +
    Parameters
    +

    device (torch.device or int, optional) – selected device. Returns +the default Stream for the current device, given by +current_device(), if device is None +(default).

    +
    +
    +
    + +
    +
    +class torch.cuda.device(device)[source]
    +

    Context-manager that changes the selected device.

    +
    +
    Parameters
    +

    device (torch.device or int) – device index to select. It’s a no-op if +this argument is a negative integer or None.

    +
    +
    +
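A minimal usage sketch (assumes at least two visible GPUs; variable names are illustrative):

import torch

with torch.cuda.device(1):
    a = torch.tensor([1., 2.]).cuda()   # allocated on GPU 1
# outside the context, the previously selected device is active again
b = torch.tensor([1., 2.]).cuda()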
    + +
    +
    +torch.cuda.device_count()[source]
    +

    Returns the number of GPUs available.

    +
    + +
    +
    +class torch.cuda.device_of(obj)[source]
    +

    Context-manager that changes the current device to that of given object.

    +

    You can use both tensors and storages as arguments. If a given object is +not allocated on a GPU, this is a no-op.

    +
    +
    Parameters
    +

    obj (Tensor or Storage) – object allocated on the selected device.

    +
    +
    +
    + +
    +
    +torch.cuda.empty_cache()[source]
    +

Releases all unoccupied cached memory currently held by the caching allocator so that it can be used by other GPU applications and is visible in nvidia-smi.

    +
    +

    Note

    +

    empty_cache() doesn’t increase the amount of GPU +memory available for PyTorch. See Memory management for +more details about GPU memory management.

    +
    +
    + +
    +
    +torch.cuda.get_device_capability(device=None)[source]
    +

    Gets the cuda capability of a device.

    +
    +
    Parameters
    +

    device (torch.device or int, optional) – device for which to return the +device capability. This function is a no-op if this argument is +a negative integer. It uses the current device, given by +current_device(), if device is None +(default).

    +
    +
    Returns
    +

    the major and minor cuda capability of the device

    +
    +
    Return type
    +

    tuple(int, int)

    +
    +
    +
    + +
    +
    +torch.cuda.get_device_name(device=None)[source]
    +

    Gets the name of a device.

    +
    +
    Parameters
    +

    device (torch.device or int, optional) – device for which to return the +name. This function is a no-op if this argument is a negative +integer. It uses the current device, given by current_device(), +if device is None (default).

    +
    +
    +
    + +
    +
    +torch.cuda.init()[source]
    +

Initialize PyTorch’s CUDA state. You may need to call this explicitly if you are interacting with PyTorch via its C API, as Python bindings for CUDA functionality will not be available until this initialization takes place. Ordinary users should not need this, as all of PyTorch’s CUDA methods automatically initialize CUDA state on demand.

    +

    Does nothing if the CUDA state is already initialized.

    +
    + +
    +
    +torch.cuda.ipc_collect()[source]
    +

    Force collects GPU memory after it has been released by CUDA IPC.

    +
    +

    Note

    +

Checks if any sent CUDA tensors could be cleaned from memory. Force-closes the shared memory file used for reference counting if there are no active counters. Useful when the producer process has stopped actively sending tensors and you want to release unused memory.

    +
    +
    + +
    +
    +torch.cuda.is_available()[source]
    +

    Returns a bool indicating if CUDA is currently available.

    +
    + +
    +
    +torch.cuda.max_memory_allocated(device=None)[source]
    +

    Returns the maximum GPU memory occupied by tensors in bytes for a given +device.

    +

    By default, this returns the peak allocated memory since the beginning of +this program. reset_max_memory_allocated() can be used to +reset the starting point in tracking this metric. For example, these two +functions can measure the peak allocated memory usage of each iteration in a +training loop.

    +
    +
    Parameters
    +

    device (torch.device or int, optional) – selected device. Returns +statistic for the current device, given by current_device(), +if device is None (default).

    +
    +
    +
    +

    Note

    +

    See Memory management for more details about GPU memory +management.

    +
    +
    + +
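A sketch of the per-iteration measurement pattern described above (assumes CUDA is available; train_step stands in for arbitrary work):

import torch

def train_step():
    x = torch.randn(1024, 1024, device='cuda')
    return x @ x

for i in range(3):
    torch.cuda.reset_max_memory_allocated()
    train_step()
    # peak tensor memory (in bytes) of this iteration only
    print(i, torch.cuda.max_memory_allocated())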
    +
    +torch.cuda.max_memory_cached(device=None)[source]
    +

    Returns the maximum GPU memory managed by the caching allocator in bytes +for a given device.

    +

    By default, this returns the peak cached memory since the beginning of this +program. reset_max_memory_cached() can be used to reset +the starting point in tracking this metric. For example, these two functions +can measure the peak cached memory amount of each iteration in a training +loop.

    +
    +
    Parameters
    +

    device (torch.device or int, optional) – selected device. Returns +statistic for the current device, given by current_device(), +if device is None (default).

    +
    +
    +
    +

    Note

    +

    See Memory management for more details about GPU memory +management.

    +
    +
    + +
    +
    +torch.cuda.memory_allocated(device=None)[source]
    +

    Returns the current GPU memory occupied by tensors in bytes for a given +device.

    +
    +
    Parameters
    +

    device (torch.device or int, optional) – selected device. Returns +statistic for the current device, given by current_device(), +if device is None (default).

    +
    +
    +
    +

    Note

    +

    This is likely less than the amount shown in nvidia-smi since some +unused memory can be held by the caching allocator and some context +needs to be created on GPU. See Memory management for more +details about GPU memory management.

    +
    +
    + +
    +
    +torch.cuda.memory_cached(device=None)[source]
    +

    Returns the current GPU memory managed by the caching allocator in bytes +for a given device.

    +
    +
    Parameters
    +

    device (torch.device or int, optional) – selected device. Returns +statistic for the current device, given by current_device(), +if device is None (default).

    +
    +
    +
    +

    Note

    +

    See Memory management for more details about GPU memory +management.

    +
    +
    + +
    +
    +torch.cuda.reset_max_memory_allocated(device=None)[source]
    +

    Resets the starting point in tracking maximum GPU memory occupied by +tensors for a given device.

    +

    See max_memory_allocated() for details.

    +
    +
    Parameters
    +

    device (torch.device or int, optional) – selected device. Returns +statistic for the current device, given by current_device(), +if device is None (default).

    +
    +
    +
    +

    Note

    +

    See Memory management for more details about GPU memory +management.

    +
    +
    + +
    +
    +torch.cuda.reset_max_memory_cached(device=None)[source]
    +

    Resets the starting point in tracking maximum GPU memory managed by the +caching allocator for a given device.

    +

    See max_memory_cached() for details.

    +
    +
    Parameters
    +

    device (torch.device or int, optional) – selected device. Returns +statistic for the current device, given by current_device(), +if device is None (default).

    +
    +
    +
    +

    Note

    +

    See Memory management for more details about GPU memory +management.

    +
    +
    + +
    +
    +torch.cuda.set_device(device)[source]
    +

    Sets the current device.

    +

Usage of this function is discouraged in favor of device. In most cases it’s better to use the CUDA_VISIBLE_DEVICES environment variable.

    +
    +
    Parameters
    +

    device (torch.device or int) – selected device. This function is a no-op +if this argument is negative.

    +
    +
    +
    + +
    +
    +torch.cuda.stream(stream)[source]
    +

    Context-manager that selects a given stream.

    +

    All CUDA kernels queued within its context will be enqueued on a selected +stream.

    +
    +
    Parameters
    +

    stream (Stream) – selected stream. This manager is a no-op if it’s +None.

    +
    +
    +
    +

    Note

    +

    Streams are per-device. If the selected stream is not on the +current device, this function will also change the current device to +match the stream.

    +
    +
    + +
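A sketch of using a side stream (assumes CUDA is available; correctness of the final read relies on the explicit wait):

import torch

s = torch.cuda.Stream()
x = torch.randn(1000, 1000, device='cuda')
with torch.cuda.stream(s):
    y = x * 2                               # enqueued on stream s
torch.cuda.current_stream().wait_stream(s)  # order the default stream after s
z = y + 1                                   # safe to consume y now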
    +
    +torch.cuda.synchronize(device=None)[source]
    +

    Waits for all kernels in all streams on a CUDA device to complete.

    +
    +
    Parameters
    +

    device (torch.device or int, optional) – device for which to synchronize. +It uses the current device, given by current_device(), +if device is None (default).

    +
    +
    +
    + +
    +

    Random Number Generator

    +
    +
    +torch.cuda.get_rng_state(device='cuda')[source]
    +

    Returns the random number generator state of the specified GPU as a ByteTensor.

    +
    +
    Parameters
    +

    device (torch.device or int, optional) – The device to return the RNG state of. +Default: 'cuda' (i.e., torch.device('cuda'), the current CUDA device).

    +
    +
    +
    +

    Warning

    +

    This function eagerly initializes CUDA.

    +
    +
    + +
    +
    +torch.cuda.get_rng_state_all()[source]
    +

    Returns a tuple of ByteTensor representing the random number states of all devices.

    +
    + +
    +
    +torch.cuda.set_rng_state(new_state, device='cuda')[source]
    +

    Sets the random number generator state of the specified GPU.

    +
    +
    Parameters
    +
      +
    • new_state (torch.ByteTensor) – The desired state

    • +
    • device (torch.device or int, optional) – The device to set the RNG state. +Default: 'cuda' (i.e., torch.device('cuda'), the current CUDA device).

    • +
    +
    +
    +
    + +
    +
    +torch.cuda.set_rng_state_all(new_states)[source]
    +

    Sets the random number generator state of all devices.

    +
    +
    Parameters
    +

new_states (tuple of torch.ByteTensor) – The desired state for each device

    +
    +
    +
    + +
    +
    +torch.cuda.manual_seed(seed)[source]
    +

    Sets the seed for generating random numbers for the current GPU. +It’s safe to call this function if CUDA is not available; in that +case, it is silently ignored.

    +
    +
    Parameters
    +

    seed (int) – The desired seed.

    +
    +
    +
    +

    Warning

    +

    If you are working with a multi-GPU model, this function is insufficient +to get determinism. To seed all GPUs, use manual_seed_all().

    +
    +
    + +
    +
    +torch.cuda.manual_seed_all(seed)[source]
    +

    Sets the seed for generating random numbers on all GPUs. +It’s safe to call this function if CUDA is not available; in that +case, it is silently ignored.

    +
    +
    Parameters
    +

    seed (int) – The desired seed.

    +
    +
    +
    + +
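A short sketch tying the two together (safe to run even without CUDA, since the CUDA call is silently ignored in that case):

import torch

torch.manual_seed(0)           # seeds the CPU RNG
torch.cuda.manual_seed_all(0)  # seeds every GPU, per the warning above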
    +
    +torch.cuda.seed()[source]
    +

    Sets the seed for generating random numbers to a random number for the current GPU. +It’s safe to call this function if CUDA is not available; in that +case, it is silently ignored.

    +
    +

    Warning

    +

    If you are working with a multi-GPU model, this function will only initialize +the seed on one GPU. To initialize all GPUs, use seed_all().

    +
    +
    + +
    +
    +torch.cuda.seed_all()[source]
    +

    Sets the seed for generating random numbers to a random number on all GPUs. +It’s safe to call this function if CUDA is not available; in that +case, it is silently ignored.

    +
    + +
    +
    +torch.cuda.initial_seed()[source]
    +

    Returns the current random seed of the current GPU.

    +
    +

    Warning

    +

    This function eagerly initializes CUDA.

    +
    +
    + +
    +
    +

    Communication collectives

    +
    +
    +torch.cuda.comm.broadcast(tensor, devices)[source]
    +

    Broadcasts a tensor to a number of GPUs.

    +
    +
    Parameters
    +
      +
    • tensor (Tensor) – tensor to broadcast.

    • +
    • devices (Iterable) – an iterable of devices among which to broadcast. +Note that it should be like (src, dst1, dst2, …), the first element +of which is the source device to broadcast from.

    • +
    +
    +
    Returns
    +

    A tuple containing copies of the tensor, placed on devices +corresponding to indices from devices.

    +
    +
    +
    + +
    +
    +torch.cuda.comm.broadcast_coalesced(tensors, devices, buffer_size=10485760)[source]
    +

Broadcasts a sequence of tensors to the specified GPUs. Small tensors are first coalesced into a buffer to reduce the number of synchronizations.

    +
    +
    Parameters
    +
      +
    • tensors (sequence) – tensors to broadcast.

    • +
    • devices (Iterable) – an iterable of devices among which to broadcast. +Note that it should be like (src, dst1, dst2, …), the first element +of which is the source device to broadcast from.

    • +
    • buffer_size (int) – maximum size of the buffer used for coalescing

    • +
    +
    +
    Returns
    +

    A tuple containing copies of the tensor, placed on devices +corresponding to indices from devices.

    +
    +
    +
    + +
    +
    +torch.cuda.comm.reduce_add(inputs, destination=None)[source]
    +

    Sums tensors from multiple GPUs.

    +

    All inputs should have matching shapes.

    +
    +
    Parameters
    +
      +
    • inputs (Iterable[Tensor]) – an iterable of tensors to add.

    • +
    • destination (int, optional) – a device on which the output will be +placed (default: current device).

    • +
    +
    +
    Returns
    +

    A tensor containing an elementwise sum of all inputs, placed on the +destination device.

    +
    +
    +
    + +
    +
    +torch.cuda.comm.scatter(tensor, devices, chunk_sizes=None, dim=0, streams=None)[source]
    +

    Scatters tensor across multiple GPUs.

    +
    +
    Parameters
    +
      +
    • tensor (Tensor) – tensor to scatter.

    • +
    • devices (Iterable[int]) – iterable of ints, specifying among which +devices the tensor should be scattered.

    • +
    • chunk_sizes (Iterable[int], optional) – sizes of chunks to be placed on +each device. It should match devices in length and sum to +tensor.size(dim). If not specified, the tensor will be divided +into equal chunks.

    • +
    • dim (int, optional) – A dimension along which to chunk the tensor.

    • +
    +
    +
    Returns
    +

    A tuple containing chunks of the tensor, spread across given +devices.

    +
    +
    +
    + +
    +
    +torch.cuda.comm.gather(tensors, dim=0, destination=None)[source]
    +

    Gathers tensors from multiple GPUs.

    +

Tensor sizes in all dimensions other than dim have to match.

    +
    +
    Parameters
    +
      +
    • tensors (Iterable[Tensor]) – iterable of tensors to gather.

    • +
    • dim (int) – a dimension along which the tensors will be concatenated.

    • +
    • destination (int, optional) – output device (-1 means CPU, default: +current device)

    • +
    +
    +
    Returns
    +

    A tensor located on destination device, that is a result of +concatenating tensors along dim.

    +
    +
    +
    + +
    +
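A sketch combining scatter and gather (assumes at least two GPUs; the tensor is illustrative):

import torch
import torch.cuda.comm as comm

t = torch.arange(8.)
chunks = comm.scatter(t, devices=[0, 1])         # two chunks, one per GPU
out = comm.gather(chunks, dim=0, destination=0)  # re-concatenated on GPU 0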
    +

    Streams and events

    +
    +
    +class torch.cuda.Stream[source]
    +

    Wrapper around a CUDA stream.

    +

    A CUDA stream is a linear sequence of execution that belongs to a specific +device, independent from other streams. See CUDA semantics for +details.

    +
    +
    Parameters
    +
      +
    • device (torch.device or int, optional) – a device on which to allocate +the stream. If device is None (default) or a negative +integer, this will use the current device.

    • +
    • priority (int, optional) – priority of the stream. Lower numbers +represent higher priorities.

    • +
    +
    +
    +
    +
    +query()[source]
    +

    Checks if all the work submitted has been completed.

    +
    +
    Returns
    +

    A boolean indicating if all kernels in this stream are completed.

    +
    +
    +
    + +
    +
    +record_event(event=None)[source]
    +

    Records an event.

    +
    +
    Parameters
    +

    event (Event, optional) – event to record. If not given, a new one +will be allocated.

    +
    +
    Returns
    +

    Recorded event.

    +
    +
    +
    + +
    +
    +synchronize()[source]
    +

    Wait for all the kernels in this stream to complete.

    +
    +

    Note

    +

This is a wrapper around cudaStreamSynchronize(): see the CUDA documentation for more info.

    +
    +
    + +
    +
    +wait_event(event)[source]
    +

    Makes all future work submitted to the stream wait for an event.

    +
    +
    Parameters
    +

    event (Event) – an event to wait for.

    +
    +
    +
    +

    Note

    +

This is a wrapper around cudaStreamWaitEvent(): see the CUDA documentation for more info.

    +

    This function returns without waiting for event: only future +operations are affected.

    +
    +
    + +
    +
    +wait_stream(stream)[source]
    +

    Synchronizes with another stream.

    +

    All future work submitted to this stream will wait until all kernels +submitted to a given stream at the time of call complete.

    +
    +
    Parameters
    +

    stream (Stream) – a stream to synchronize.

    +
    +
    +
    +

    Note

    +

    This function returns without waiting for currently enqueued +kernels in stream: only future operations are affected.

    +
    +
    + +
    + +
    +
    +class torch.cuda.Event[source]
    +

    Wrapper around a CUDA event.

    +

    CUDA events are synchronization markers that can be used to monitor the +device’s progress, to accurately measure timing, and to synchronize CUDA +streams.

    +

    The underlying CUDA events are lazily initialized when the event is first +recorded or exported to another process. After creation, only streams on the +same device may record the event. However, streams on any device can wait on +the event.

    +
    +
    Parameters
    +
      +
    • enable_timing (bool, optional) – indicates if the event should measure time +(default: False)

    • +
    • blocking (bool, optional) – if True, wait() will be blocking (default: False)

    • +
• interprocess (bool, optional) – if True, the event can be shared between processes (default: False)

    • +
    +
    +
    +
    +
    +elapsed_time(end_event)[source]
    +

    Returns the time elapsed in milliseconds after the event was +recorded and before the end_event was recorded.

    +
    + +
    +
    +classmethod from_ipc_handle(device, handle)[source]
    +

    Reconstruct an event from an IPC handle on the given device.

    +
    + +
    +
    +ipc_handle()[source]
    +

    Returns an IPC handle of this event. If not recorded yet, the event +will use the current device.

    +
    + +
    +
    +query()[source]
    +

    Checks if all work currently captured by event has completed.

    +
    +
    Returns
    +

    A boolean indicating if all work currently captured by event has +completed.

    +
    +
    +
    + +
    +
    +record(stream=None)[source]
    +

    Records the event in a given stream.

    +

    Uses torch.cuda.current_stream() if no stream is specified. The +stream’s device must match the event’s device.

    +
    + +
    +
    +synchronize()[source]
    +

    Waits for the event to complete.

    +

    Waits until the completion of all work currently captured in this event. +This prevents the CPU thread from proceeding until the event completes.

    +
    +
    +

    Note

    +

This is a wrapper around cudaEventSynchronize(): see the CUDA documentation for more info.

    +
    +
    +
    + +
    +
    +wait(stream=None)[source]
    +

    Makes all future work submitted to the given stream wait for this +event.

    +

    Use torch.cuda.current_stream() if no stream is specified.

    +
    + +
    + +
    +
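A timing sketch using two events (assumes CUDA is available; note the synchronize before reading the elapsed time):

import torch

start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)

x = torch.randn(4096, 4096, device='cuda')
start.record()
y = x @ x
end.record()
torch.cuda.synchronize()   # wait until both events have completed
print('elapsed ms:', start.elapsed_time(end))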
    +

    Memory management

    +
    +
    +torch.cuda.empty_cache()[source]
    +

Releases all unoccupied cached memory currently held by the caching allocator so that it can be used by other GPU applications and is visible in nvidia-smi.

    +
    +

    Note

    +

    empty_cache() doesn’t increase the amount of GPU +memory available for PyTorch. See Memory management for +more details about GPU memory management.

    +
    +
    + +
    +
    +torch.cuda.memory_allocated(device=None)[source]
    +

    Returns the current GPU memory occupied by tensors in bytes for a given +device.

    +
    +
    Parameters
    +

    device (torch.device or int, optional) – selected device. Returns +statistic for the current device, given by current_device(), +if device is None (default).

    +
    +
    +
    +

    Note

    +

    This is likely less than the amount shown in nvidia-smi since some +unused memory can be held by the caching allocator and some context +needs to be created on GPU. See Memory management for more +details about GPU memory management.

    +
    +
    + +
    +
    +torch.cuda.max_memory_allocated(device=None)[source]
    +

    Returns the maximum GPU memory occupied by tensors in bytes for a given +device.

    +

    By default, this returns the peak allocated memory since the beginning of +this program. reset_max_memory_allocated() can be used to +reset the starting point in tracking this metric. For example, these two +functions can measure the peak allocated memory usage of each iteration in a +training loop.

    +
    +
    Parameters
    +

    device (torch.device or int, optional) – selected device. Returns +statistic for the current device, given by current_device(), +if device is None (default).

    +
    +
    +
    +

    Note

    +

    See Memory management for more details about GPU memory +management.

    +
    +
    + +
    +
    +torch.cuda.reset_max_memory_allocated(device=None)[source]
    +

    Resets the starting point in tracking maximum GPU memory occupied by +tensors for a given device.

    +

    See max_memory_allocated() for details.

    +
    +
    Parameters
    +

    device (torch.device or int, optional) – selected device. Returns +statistic for the current device, given by current_device(), +if device is None (default).

    +
    +
    +
    +

    Note

    +

    See Memory management for more details about GPU memory +management.

    +
    +
    + +
    +
    +torch.cuda.memory_cached(device=None)[source]
    +

    Returns the current GPU memory managed by the caching allocator in bytes +for a given device.

    +
    +
    Parameters
    +

    device (torch.device or int, optional) – selected device. Returns +statistic for the current device, given by current_device(), +if device is None (default).

    +
    +
    +
    +

    Note

    +

    See Memory management for more details about GPU memory +management.

    +
    +
    + +
    +
    +torch.cuda.max_memory_cached(device=None)[source]
    +

    Returns the maximum GPU memory managed by the caching allocator in bytes +for a given device.

    +

    By default, this returns the peak cached memory since the beginning of this +program. reset_max_memory_cached() can be used to reset +the starting point in tracking this metric. For example, these two functions +can measure the peak cached memory amount of each iteration in a training +loop.

    +
    +
    Parameters
    +

    device (torch.device or int, optional) – selected device. Returns +statistic for the current device, given by current_device(), +if device is None (default).

    +
    +
    +
    +

    Note

    +

    See Memory management for more details about GPU memory +management.

    +
    +
    + +
    +
    +torch.cuda.reset_max_memory_cached(device=None)[source]
    +

    Resets the starting point in tracking maximum GPU memory managed by the +caching allocator for a given device.

    +

    See max_memory_cached() for details.

    +
    +
    Parameters
    +

    device (torch.device or int, optional) – selected device. Returns +statistic for the current device, given by current_device(), +if device is None (default).

    +
    +
    +
    +

    Note

    +

    See Memory management for more details about GPU memory +management.

    +
    +
    + +
    +
    +

    NVIDIA Tools Extension (NVTX)

    +
    +
    +torch.cuda.nvtx.mark(msg)[source]
    +

    Describe an instantaneous event that occurred at some point.

    +
    +
    Parameters
    +

    msg (string) – ASCII message to associate with the event.

    +
    +
    +
    + +
    +
    +torch.cuda.nvtx.range_push(msg)[source]
    +

Pushes a range onto a stack of nested range spans. Returns the zero-based depth of the range that is started.

    +
    +
    Parameters
    +

    msg (string) – ASCII message to associate with range

    +
    +
    +
    + +
    +
    +torch.cuda.nvtx.range_pop()[source]
    +

    Pops a range off of a stack of nested range spans. Returns the +zero-based depth of the range that is ended.

    +
    + +
    +
\ No newline at end of file
diff --git a/docs/stable/cuda_deterministic.html b/docs/stable/cuda_deterministic.html
new file mode 100644
index 000000000000..e90b7900154e
--- /dev/null
+++ b/docs/stable/cuda_deterministic.html
@@ -0,0 +1,521 @@
+ <no title> — PyTorch master documentation

    Note

    +

    When using the CUDA backend, this operation may induce nondeterministic +behaviour that is not easily switched off. +Please see the notes on Reproducibility for background.

    +
\ No newline at end of file
diff --git a/docs/stable/cuda_deterministic_backward.html b/docs/stable/cuda_deterministic_backward.html
new file mode 100644
index 000000000000..7c47dc840b6d
--- /dev/null
+++ b/docs/stable/cuda_deterministic_backward.html
@@ -0,0 +1,521 @@
+ <no title> — PyTorch master documentation

    Note

    +

When using the CUDA backend, this operation may induce nondeterministic behaviour in its backward pass that is not easily switched off. Please see the notes on Reproducibility for background.

    +
\ No newline at end of file
diff --git a/docs/stable/cudnn_deterministic.html b/docs/stable/cudnn_deterministic.html
new file mode 100644
index 000000000000..90891abdb3e5
--- /dev/null
+++ b/docs/stable/cudnn_deterministic.html
@@ -0,0 +1,524 @@
+ <no title> — PyTorch master documentation

    Note

    +

    In some circumstances when using the CUDA backend with CuDNN, this operator +may select a nondeterministic algorithm to increase performance. If this is +undesirable, you can try to make the operation deterministic (potentially at +a performance cost) by setting torch.backends.cudnn.deterministic = +True. +Please see the notes on Reproducibility for background.

    +
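A sketch of the setting the note refers to (disabling benchmark mode as well is a common companion step from the Reproducibility notes):

import torch

torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False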
\ No newline at end of file
diff --git a/docs/stable/cudnn_persistent_rnn.html b/docs/stable/cudnn_persistent_rnn.html
new file mode 100644
index 000000000000..c9a9920225fd
--- /dev/null
+++ b/docs/stable/cudnn_persistent_rnn.html
@@ -0,0 +1,525 @@
+ <no title> — PyTorch master documentation

    Note

    +

If the following conditions are satisfied: 1) cudnn is enabled, 2) input data is on the GPU, 3) input data has dtype torch.float16, 4) a V100 GPU is used, and 5) input data is not in PackedSequence format, then a persistent algorithm can be selected to improve performance.

    +
\ No newline at end of file
diff --git a/docs/stable/data.html b/docs/stable/data.html
new file mode 100644
index 000000000000..fcda05deba1a
--- /dev/null
+++ b/docs/stable/data.html
@@ -0,0 +1,1304 @@
+ torch.utils.data — PyTorch master documentation

    torch.utils.data

    +

At the heart of PyTorch data loading utility is the torch.utils.data.DataLoader class. It represents a Python iterable over a dataset, with support for

• map-style and iterable-style datasets,
• customizing data loading order,
• automatic batching,
• single- and multi-process data loading,
• automatic memory pinning.

    These options are configured by the constructor arguments of a +DataLoader, which has signature:

    +
    DataLoader(dataset, batch_size=1, shuffle=False, sampler=None,
    +           batch_sampler=None, num_workers=0, collate_fn=None,
    +           pin_memory=False, drop_last=False, timeout=0,
    +           worker_init_fn=None)
    +
    +
    +

The sections below describe in detail the effects and usage of these options.

    +
    +

    Dataset Types

    +

    The most important argument of DataLoader +constructor is dataset, which indicates a dataset object to load data +from. PyTorch supports two different types of datasets:

    + +
    +

    Map-style datasets

    +

    A map-style dataset is one that implements the __getitem__() and +__len__() protocols, and represents a map from (possibly non-integral) +indices/keys to data samples.

    +

    For example, such a dataset, when accessed with dataset[idx], could read +the idx-th image and its corresponding label from a folder on the disk.

    +

    See Dataset for more details.

    +
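A minimal sketch of such a dataset (class and variable names are illustrative):

import torch
from torch.utils.data import Dataset

class PairDataset(Dataset):
    def __init__(self, images, labels):
        self.images, self.labels = images, labels

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        return self.images[idx], self.labels[idx]

ds = PairDataset(torch.randn(10, 3, 8, 8), torch.arange(10))
image, label = ds[0]    # dataset[idx] returns the idx-th sample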
    +
    +

    Iterable-style datasets

    +

An iterable-style dataset is an instance of a subclass of IterableDataset that implements the __iter__() protocol, and represents an iterable over data samples. This type of dataset is particularly suitable for cases where random reads are expensive or even improbable, and where the batch size depends on the fetched data.

    +

For example, such a dataset, when iter(dataset) is called, could return a stream of data read from a database, a remote server, or even logs generated in real time.

    +

    See IterableDataset for more details.

    +
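A minimal sketch of an iterable-style dataset (names are illustrative; a real one might wrap a database cursor or a socket):

import torch
from torch.utils.data import IterableDataset, DataLoader

class RangeStream(IterableDataset):
    def __init__(self, start, end):
        self.start, self.end = start, end

    def __iter__(self):
        return iter(range(self.start, self.end))

print(list(DataLoader(RangeStream(0, 5), batch_size=2)))
# [tensor([0, 1]), tensor([2, 3]), tensor([4])]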
    +

    Note

    +

When using an IterableDataset with multi-process data loading, the same dataset object is replicated on each worker process, and thus the replicas must be configured differently to avoid duplicated data. See the IterableDataset documentation for how to achieve this.

    +
    +
    +
    +
    +

    Data Loading Order and Sampler

    +

    For iterable-style datasets, data loading order +is entirely controlled by the user-defined iterable. This allows easier +implementations of chunk-reading and dynamic batch size (e.g., by yielding a +batched sample at each time).

    +

The rest of this section concerns the case with map-style datasets. torch.utils.data.Sampler classes are used to specify the sequence of indices/keys used in data loading. They represent iterable objects over the indices of datasets. E.g., in the common case with stochastic gradient descent (SGD), a Sampler could randomly permute a list of indices and yield each one at a time, or yield a small number of them for mini-batch SGD.

    +

    A sequential or shuffled sampler will be automatically constructed based on the shuffle argument to a DataLoader. +Alternatively, users may use the sampler argument to specify a +custom Sampler object that at each time yields +the next index/key to fetch.

    +

    A custom Sampler that yields a list of batch +indices at a time can be passed as the batch_sampler argument. +Automatic batching can also be enabled via batch_size and +drop_last arguments. See +the next section for more details +on this.

    +
    +

    Note

    +

    Neither sampler nor batch_sampler is compatible with +iterable-style datasets, since such datasets have no notion of a key or an +index.

    +
    +
    +
    +

    Loading Batched and Non-Batched Data

    +

    DataLoader supports automatically collating +individual fetched data samples into batches via arguments +batch_size, drop_last, and batch_sampler.

    +
    +

    Automatic batching (default)

    +

    This is the most common case, and corresponds to fetching a minibatch of +data and collating them into batched samples, i.e., containing Tensors with +one dimension being the batch dimension (usually the first).

    +

    When batch_size (default 1) is not None, the data loader yields +batched samples instead of individual samples. batch_size and +drop_last arguments are used to specify how the data loader obtains +batches of dataset keys. For map-style datasets, users can alternatively +specify batch_sampler, which yields a list of keys at a time.

    +
    +

    Note

    +

The batch_size and drop_last arguments are essentially used to construct a batch_sampler from sampler. For map-style datasets, the sampler is either provided by the user or constructed based on the shuffle argument. For iterable-style datasets, the sampler is a dummy infinite one. See this section for more details on samplers.

    +
    +
    +

    Note

    +

    When fetching from +iterable-style datasets with +multi-processing, the drop_last +argument drops the last non-full batch of each worker’s dataset replica.

    +
    +

    After fetching a list of samples using the indices from sampler, the function +passed as the collate_fn argument is used to collate lists of samples +into batches.

    +

    In this case, loading from a map-style dataset is roughly equivalent with:

    +
    for indices in batch_sampler:
    +    yield collate_fn([dataset[i] for i in indices])
    +
    +
    +

    and loading from an iterable-style dataset is roughly equivalent with:

    +
    dataset_iter = iter(dataset)
    +for indices in batch_sampler:
    +    yield collate_fn([next(dataset_iter) for _ in indices])
    +
    +
    +

A custom collate_fn can be used to customize collation, e.g., padding sequential data to the max length of a batch. See this section for more about collate_fn.

    +
    +
    +

    Disable automatic batching

    +

In certain cases, users may want to handle batching manually in dataset code, or simply load individual samples. For example, it could be cheaper to directly load batched data (e.g., bulk reads from a database or reading continuous chunks of memory), or the batch size may be data dependent, or the program may be designed to work on individual samples. Under these scenarios, it’s likely better to not use automatic batching (where collate_fn is used to collate the samples), but let the data loader directly return each member of the dataset object.

    +

    When both batch_size and batch_sampler are None, automatic +batching is disabled. Each sample obtained from the dataset is +processed with the function passed as the collate_fn argument.

    +

    When automatic batching is disabled, the default collate_fn simply +converts NumPy arrays into PyTorch Tensors, and keeps everything else untouched.

    +

    In this case, loading from a map-style dataset is roughly equivalent with:

    +
    for index in sampler:
    +    yield collate_fn(dataset[index])
    +
    +
    +

    and loading from an iterable-style dataset is roughly equivalent with:

    +
    for data in iter(dataset):
    +    yield collate_fn(data)
    +
    +
    +

See this section for more about collate_fn.

    +
    +
    +

    Working with collate_fn

    +

    The use of collate_fn is slightly different when automatic batching is +enabled or disabled.

    +

When automatic batching is disabled, collate_fn is called with each individual data sample, and the output is yielded from the data loader iterator. In this case, the default collate_fn simply converts NumPy arrays into PyTorch Tensors.

    +

    When automatic batching is enabled, collate_fn is called with a list +of data samples at each time. It is expected to collate the input samples into +a batch for yielding from the data loader iterator. The rest of this section +describes behavior of the default collate_fn in this case.

    +

    For instance, if each data sample consists of a 3-channel image and an integral +class label, i.e., each element of the dataset returns a tuple +(image, class_index), the default collate_fn collates a list of +such tuples into a single tuple of a batched image tensor and a batched class +label Tensor. In particular, the default collate_fn has the following +properties:

    +
      +
    • It always prepends a new dimension as the batch dimension.

    • +
    • It automatically converts NumPy arrays and Python numerical values into +PyTorch Tensors.

    • +
    • It preserves the data structure, e.g., if each sample is a dictionary, it +outputs a dictionary with the same set of keys but batched Tensors as values +(or lists if the values can not be converted into Tensors). Same +for list s, tuple s, namedtuple s, etc.

    • +
    +

    Users may use customized collate_fn to achieve custom batching, e.g., +collating along a dimension other than the first, padding sequences of +various lengths, or adding support for custom data types.

    +
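A sketch of a custom collate_fn that pads variable-length sequences to the batch maximum (pad_sequence stacks them along a new batch dimension; names are illustrative):

import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader

def pad_collate(batch):
    return pad_sequence(batch, batch_first=True, padding_value=0)

seqs = [torch.ones(n) for n in (2, 3, 5)]   # a list is a valid map-style dataset
loader = DataLoader(seqs, batch_size=3, collate_fn=pad_collate)
print(next(iter(loader)).shape)             # torch.Size([3, 5])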
    +
    +
    +

    Single- and Multi-process Data Loading

    +

    A DataLoader uses single-process data loading by +default.

    +

Within a Python process, the Global Interpreter Lock (GIL) prevents truly parallelizing Python code across threads. To avoid blocking computation code with data loading, PyTorch provides an easy switch to perform multi-process data loading by simply setting the argument num_workers to a positive integer.

    +
    +

    Single-process data loading (default)

    +

In this mode, data fetching is done in the same process in which a DataLoader is initialized. Therefore, data loading may block computing. However, this mode may be preferred when the resource(s) used for sharing data among processes (e.g., shared memory, file descriptors) are limited, or when the entire dataset is small and can be loaded entirely in memory. Additionally, single-process loading often shows more readable error traces and is thus useful for debugging.

    +
    +
    +

    Multi-process data loading

    +

    Setting the argument num_workers as a positive integer will +turn on multi-process data loading with the specified number of loader worker +processes.

    +

In this mode, each time an iterator of a DataLoader is created (e.g., when you call enumerate(dataloader)), num_workers worker processes are created. At this point, the dataset, collate_fn, and worker_init_fn are passed to each worker, where they are used to initialize and fetch data. This means that dataset access, together with its internal IO and transforms (including collate_fn), runs in the worker process.

    +

torch.utils.data.get_worker_info() returns various useful information in a worker process (including the worker id, dataset replica, initial seed, etc.), and returns None in the main process. Users may use this function in dataset code and/or worker_init_fn to individually configure each dataset replica, and to determine whether the code is running in a worker process. For example, this can be particularly helpful in sharding the dataset.

    +

    For map-style datasets, the main process generates the indices using +sampler and sends them to the workers. So any shuffle randomization is +done in the main process which guides loading by assigning indices to load.

    +

    For iterable-style datasets, since each worker process gets a replica of the +dataset object, naive multi-process loading will often result in +duplicated data. Using torch.utils.data.get_worker_info() and/or +worker_init_fn, users may configure each replica independently. (See +IterableDataset documentations for how to achieve +this. ) For similar reasons, in multi-process loading, the drop_last +argument drops the last non-full batch of each worker’s iterable-style dataset +replica.

    +

    Workers are shut down once the end of the iteration is reached, or when the +iterator becomes garbage collected.

    +
    +

    Warning

    +

    It is generally not recommended to return CUDA tensors in multi-process +loading because of many subtleties in using CUDA and sharing CUDA tensors in +multiprocessing (see CUDA in multiprocessing). Instead, we recommend +using automatic memory pinning (i.e., setting +pin_memory=True), which enables fast data transfer to CUDA-enabled +GPUs.

    +
    +
    +

    Platform-specific behaviors

    +

    Since workers rely on Python multiprocessing, worker launch behavior is +different on Windows compared to Unix.

    +
      +
    • On Unix, fork() is the default multiprocessing start method. +Using fork(), child workers typically can access the dataset and +Python argument functions directly through the cloned address space.

    • +
    • On Windows, spawn() is the default multiprocessing start method. +Using spawn(), another interpreter is launched which runs your main script, +followed by the internal worker function that receives the dataset, +collate_fn and other arguments through pickle serialization.

    • +
    +

    This separate serialization means that you should take two steps to ensure you +are compatible with Windows while using multi-process data loading:

    +
      +
    • Wrap most of your main script’s code within an if __name__ == '__main__': block, to make sure it doesn’t run again (most likely generating errors) when each worker process is launched. You can place your dataset and DataLoader instance creation logic here, as it doesn’t need to be re-executed in workers.

    • +
    • Make sure that any custom collate_fn, worker_init_fn or dataset code is declared as a top-level definition, outside of the __main__ check. This ensures that they are available in worker processes. (This is needed since functions are pickled as references only, not as bytecode.) A minimal sketch of this structure follows the list.

    • +
    +
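    Here is a minimal sketch of a Windows-compatible script layout (MyDataset and my_collate are illustrative names):

    import torch
    from torch.utils.data import Dataset, DataLoader

    class MyDataset(Dataset):  # top-level definition, picklable by reference
        def __getitem__(self, index):
            return torch.tensor([float(index)])

        def __len__(self):
            return 100

    def my_collate(batch):  # also defined at the top level
        return torch.stack(batch, 0)

    if __name__ == '__main__':
        # Guarded so it does not re-run when worker processes import the script.
        loader = DataLoader(MyDataset(), batch_size=10, num_workers=2,
                            collate_fn=my_collate)
        for batch in loader:
            pass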
    +
    +

    Randomness in multi-process data loading

    +

    By default, each worker will have its PyTorch seed set to base_seed + worker_id, where base_seed is a long generated by the main process using its RNG (thereby consuming an RNG state). However, seeds for other libraries (e.g., NumPy) may be duplicated upon initializing workers, causing each worker to return identical random numbers. (See this section in FAQ.)

    +

    In worker_init_fn, you may access the PyTorch seed set for each worker +with either torch.utils.data.get_worker_info().seed +or torch.initial_seed(), and use it to seed other libraries before data +loading.
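    For example, here is a sketch of a worker_init_fn that seeds NumPy from the per-worker PyTorch seed (NumPy seeds must fit in 32 bits, hence the modulo):

    import numpy as np
    import torch

    def worker_init_fn(worker_id):
        # torch.initial_seed() already differs per worker (base_seed + worker_id).
        np.random.seed(torch.initial_seed() % 2**32)

    # loader = DataLoader(dataset, num_workers=4, worker_init_fn=worker_init_fn)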

    +
    +
    +
    +
    +

    Memory Pinning

    +

    Host to GPU copies are much faster when they originate from pinned (page-locked) +memory. See Use pinned memory buffers for more details on when and how to use +pinned memory generally.

    +

    For data loading, passing pin_memory=True to a +DataLoader will automatically put the fetched data +Tensors in pinned memory, and thus enables faster data transfer to CUDA-enabled +GPUs.

    +

    The default memory pinning logic only recognizes Tensors and maps and iterables +containing Tensors. By default, if the pinning logic sees a batch that is a +custom type (which will occur if you have a collate_fn that returns a +custom batch type), or if each element of your batch is a custom type, the +pinning logic will not recognize them, and it will return that batch (or those +elements) without pinning the memory. To enable memory pinning for custom +batch or data type(s), define a pin_memory() method on your custom +type(s).

    +

    See the example below.

    +

    Example:

    +
    import torch
    from torch.utils.data import DataLoader, TensorDataset

    class SimpleCustomBatch:
        def __init__(self, data):
            transposed_data = list(zip(*data))
            self.inp = torch.stack(transposed_data[0], 0)
            self.tgt = torch.stack(transposed_data[1], 0)

        # custom memory pinning method on custom type
        def pin_memory(self):
            self.inp = self.inp.pin_memory()
            self.tgt = self.tgt.pin_memory()
            return self

    def collate_wrapper(batch):
        return SimpleCustomBatch(batch)

    inps = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
    tgts = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
    dataset = TensorDataset(inps, tgts)

    loader = DataLoader(dataset, batch_size=2, collate_fn=collate_wrapper,
                        pin_memory=True)

    for batch_ndx, sample in enumerate(loader):
        print(sample.inp.is_pinned())
        print(sample.tgt.is_pinned())
    +
    +
    +
    +
    +class torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None, num_workers=0, collate_fn=None, pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None, multiprocessing_context=None)[source]
    +

    Data loader. Combines a dataset and a sampler, and provides an iterable over +the given dataset.

    +

    The DataLoader supports both map-style and +iterable-style datasets with single- or multi-process loading, customizing +loading order and optional automatic batching (collation) and memory pinning.

    +

    See torch.utils.data documentation page for more details.

    +
    +
    Parameters
    +
      +
    • dataset (Dataset) – dataset from which to load the data.

    • +
    • batch_size (int, optional) – how many samples per batch to load +(default: 1).

    • +
    • shuffle (bool, optional) – set to True to have the data reshuffled +at every epoch (default: False).

    • +
    • sampler (Sampler, optional) – defines the strategy to draw samples from +the dataset. If specified, shuffle must be False.

    • +
    • batch_sampler (Sampler, optional) – like sampler, but returns a batch of +indices at a time. Mutually exclusive with batch_size, +shuffle, sampler, and drop_last.

    • +
    • num_workers (int, optional) – how many subprocesses to use for data +loading. 0 means that the data will be loaded in the main process. +(default: 0)

    • +
    • collate_fn (callable, optional) – merges a list of samples to form a +mini-batch of Tensor(s). Used when using batched loading from a +map-style dataset.

    • +
    • pin_memory (bool, optional) – If True, the data loader will copy Tensors +into CUDA pinned memory before returning them. If your data elements +are a custom type, or your collate_fn returns a batch that is a custom type, +see the example below.

    • +
    • drop_last (bool, optional) – set to True to drop the last incomplete batch, +if the dataset size is not divisible by the batch size. If False and +the size of dataset is not divisible by the batch size, then the last batch +will be smaller. (default: False)

    • +
    • timeout (numeric, optional) – if positive, the timeout value for collecting a batch +from workers. Should always be non-negative. (default: 0)

    • +
    • worker_init_fn (callable, optional) – If not None, this will be called on each +worker subprocess with the worker id (an int in [0, num_workers - 1]) as +input, after seeding and before data loading. (default: None)

    • +
    +
    +
    +
    +

    Warning

    +

    If the spawn start method is used, worker_init_fn cannot be an unpicklable object, e.g., a lambda function. See Multiprocessing best practices for more details related to multiprocessing in PyTorch.

    +
    +
    +

    Note

    +

    The len(dataloader) heuristic is based on the length of the sampler used. When dataset is an IterableDataset, an infinite sampler is used, whose __len__() is not implemented, because the actual length depends on both the iterable and the multi-process loading configuration. So one should not query this method unless working with a map-style dataset. See Dataset Types for more details on these two types of datasets.

    +
    +
    + +
    +
    +class torch.utils.data.Dataset[source]
    +

    An abstract class representing a Dataset.

    +

    All datasets that represent a map from keys to data samples should subclass it. All subclasses should override __getitem__(), supporting fetching a data sample for a given key. Subclasses could also optionally override __len__(), which is expected to return the size of the dataset by many Sampler implementations and the default options of DataLoader.
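    As an illustration, a toy map-style dataset might look like this:

    import torch
    from torch.utils.data import Dataset

    class SquaresDataset(Dataset):
        """Returns (x, x**2) pairs for integer keys in [0, n)."""

        def __init__(self, n):
            self.n = n

        def __getitem__(self, index):
            x = torch.tensor(float(index))
            return x, x ** 2

        def __len__(self):
            return self.n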

    +
    +

    Note

    +

    DataLoader by default constructs an index sampler that yields integral indices. To make it work with a map-style dataset with non-integral indices/keys, a custom sampler must be provided.

    +
    +
    + +
    +
    +class torch.utils.data.IterableDataset[source]
    +

    An iterable Dataset.

    +

    All datasets that represent an iterable of data samples should subclass it. This form of dataset is particularly useful when data come from a stream.

    +

    All subclasses should override __iter__(), which should return an iterator of samples in this dataset.

    +

    When a subclass is used with DataLoader, each item in the dataset will be yielded from the DataLoader iterator. When num_workers > 0, each worker process will have a different copy of the dataset object, so it is often desired to configure each copy independently to avoid having duplicate data returned from the workers. get_worker_info(), when called in a worker process, returns information about the worker. It can be used in either the dataset's __iter__() method or the DataLoader's worker_init_fn option to modify each copy's behavior.

    +

    Example 1: splitting workload across all workers in __iter__():

    +
    >>> import math
    >>> class MyIterableDataset(torch.utils.data.IterableDataset):
    ...     def __init__(self, start, end):
    ...         super(MyIterableDataset, self).__init__()
    ...         assert end > start, "this example code only works with end > start"
    ...         self.start = start
    ...         self.end = end
    ...
    ...     def __iter__(self):
    ...         worker_info = torch.utils.data.get_worker_info()
    ...         if worker_info is None:  # single-process data loading, return the full iterator
    ...             iter_start = self.start
    ...             iter_end = self.end
    ...         else:  # in a worker process
    ...             # split workload
    ...             per_worker = int(math.ceil((self.end - self.start) / float(worker_info.num_workers)))
    ...             worker_id = worker_info.id
    ...             iter_start = self.start + worker_id * per_worker
    ...             iter_end = min(iter_start + per_worker, self.end)
    ...         return iter(range(iter_start, iter_end))
    ...
    >>> # should give same set of data as range(3, 7), i.e., [3, 4, 5, 6].
    >>> ds = MyIterableDataset(start=3, end=7)

    >>> # Single-process loading
    >>> print(list(torch.utils.data.DataLoader(ds, num_workers=0)))
    [3, 4, 5, 6]

    >>> # Multi-process loading with two worker processes
    >>> # Worker 0 fetched [3, 4].  Worker 1 fetched [5, 6].
    >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2)))
    [3, 5, 4, 6]

    >>> # With even more workers
    >>> print(list(torch.utils.data.DataLoader(ds, num_workers=20)))
    [3, 4, 5, 6]
    +
    +
    +

    Example 2: splitting workload across all workers using worker_init_fn:

    +
    >>> import math
    >>> class MyIterableDataset(torch.utils.data.IterableDataset):
    ...     def __init__(self, start, end):
    ...         super(MyIterableDataset, self).__init__()
    ...         assert end > start, "this example code only works with end > start"
    ...         self.start = start
    ...         self.end = end
    ...
    ...     def __iter__(self):
    ...         return iter(range(self.start, self.end))
    ...
    >>> # should give same set of data as range(3, 7), i.e., [3, 4, 5, 6].
    >>> ds = MyIterableDataset(start=3, end=7)

    >>> # Single-process loading
    >>> print(list(torch.utils.data.DataLoader(ds, num_workers=0)))
    [3, 4, 5, 6]
    >>>
    >>> # Directly doing multi-process loading yields duplicate data
    >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2)))
    [3, 3, 4, 4, 5, 5, 6, 6]

    >>> # Define a `worker_init_fn` that configures each dataset copy differently
    >>> def worker_init_fn(worker_id):
    ...     worker_info = torch.utils.data.get_worker_info()
    ...     dataset = worker_info.dataset  # the dataset copy in this worker process
    ...     overall_start = dataset.start
    ...     overall_end = dataset.end
    ...     # configure the dataset to only process the split workload
    ...     per_worker = int(math.ceil((overall_end - overall_start) / float(worker_info.num_workers)))
    ...     worker_id = worker_info.id
    ...     dataset.start = overall_start + worker_id * per_worker
    ...     dataset.end = min(dataset.start + per_worker, overall_end)
    ...

    >>> # Multi-process loading with the custom `worker_init_fn`
    >>> # Worker 0 fetched [3, 4].  Worker 1 fetched [5, 6].
    >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2, worker_init_fn=worker_init_fn)))
    [3, 5, 4, 6]

    >>> # With even more workers
    >>> print(list(torch.utils.data.DataLoader(ds, num_workers=20, worker_init_fn=worker_init_fn)))
    [3, 4, 5, 6]
    +
    +
    +
    + +
    +
    +class torch.utils.data.TensorDataset(*tensors)[source]
    +

    Dataset wrapping tensors.

    +

    Each sample will be retrieved by indexing tensors along the first dimension.

    +
    +
    Parameters
    +

    *tensors (Tensor) – tensors that have the same size in the first dimension.

    +
    +
    +
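    A quick usage sketch:

    import torch
    from torch.utils.data import TensorDataset

    inputs = torch.randn(100, 3)            # 100 samples, 3 features each
    targets = torch.randint(0, 2, (100,))   # 100 matching labels
    ds = TensorDataset(inputs, targets)
    x, y = ds[0]                            # (inputs[0], targets[0])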
    + +
    +
    +class torch.utils.data.ConcatDataset(datasets)[source]
    +

    Dataset as a concatenation of multiple datasets.

    +

    This class is useful to assemble different existing datasets.

    +
    +
    Parameters
    +

    datasets (sequence) – List of datasets to be concatenated

    +
    +
    +
    + +
    +
    +class torch.utils.data.ChainDataset(datasets)[source]
    +

    Dataset for chaining multiple IterableDataset s.

    +

    This class is useful to assemble different existing dataset streams. The chaining operation is done on-the-fly, so concatenating large-scale datasets with this class will be efficient.

    +
    +
    Parameters
    +

    datasets (iterable of IterableDataset) – datasets to be chained together

    +
    +
    +
    + +
    +
    +class torch.utils.data.Subset(dataset, indices)[source]
    +

    Subset of a dataset at specified indices.

    +
    +
    Parameters
    +
      +
    • dataset (Dataset) – The whole Dataset

    • +
    • indices (sequence) – Indices in the whole set selected for subset

    • +
    +
    +
    +
    + +
    +
    +torch.utils.data.get_worker_info()[source]
    +

    Returns the information about the current +DataLoader iterator worker process.

    +

    When called in a worker, this returns an object guaranteed to have the +following attributes:

    +
      +
    • id: the current worker id.

    • +
    • num_workers: the total number of workers.

    • +
    • seed: the random seed set for the current worker. This value is determined by the main process RNG and the worker id. See DataLoader’s documentation for more details.

    • +
    • dataset: the copy of the dataset object in this process. Note +that this will be a different object in a different process than the one +in the main process.

    • +
    +

    When called in the main process, this returns None.

    +
    +

    Note

    +

    When used in a worker_init_fn passed over to +DataLoader, this method can be useful to +set up each worker process differently, for instance, using worker_id +to configure the dataset object to only read a specific fraction of a +sharded dataset, or use seed to seed other libraries used in dataset +code (e.g., NumPy).

    +
    +
    + +
    +
    +torch.utils.data.random_split(dataset, lengths)[source]
    +

    Randomly split a dataset into non-overlapping new datasets of given lengths.

    +
    +
    Parameters
    +
      +
    • dataset (Dataset) – Dataset to be split

    • +
    • lengths (sequence) – lengths of splits to be produced

    • +
    +
    +
    +
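    A quick usage sketch:

    import torch
    from torch.utils.data import TensorDataset, random_split

    ds = TensorDataset(torch.arange(10))
    train_ds, val_ds = random_split(ds, [7, 3])  # lengths must sum to len(ds)
    print(len(train_ds), len(val_ds))  # 7 3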
    + +
    +
    +class torch.utils.data.Sampler(data_source)[source]
    +

    Base class for all Samplers.

    +

    Every Sampler subclass has to provide an __iter__() method, providing a way to iterate over indices of dataset elements, and a __len__() method that returns the length of the returned iterator.

    +
    +

    Note

    +

    The __len__() method isn’t strictly required by +DataLoader, but is expected in any +calculation involving the length of a DataLoader.

    +
    +
    + +
    +
    +class torch.utils.data.SequentialSampler(data_source)[source]
    +

    Samples elements sequentially, always in the same order.

    +
    +
    Parameters
    +

    data_source (Dataset) – dataset to sample from

    +
    +
    +
    + +
    +
    +class torch.utils.data.RandomSampler(data_source, replacement=False, num_samples=None)[source]
    +

    Samples elements randomly. If without replacement, then sample from a shuffled dataset. If with replacement, then the user can specify num_samples to draw.

    +
    +
    Parameters
    +
      +
    • data_source (Dataset) – dataset to sample from

    • +
    • replacement (bool) – samples are drawn with replacement if True (default: False)

    • +
    • num_samples (int) – number of samples to draw (default: len(dataset)). This argument is supposed to be specified only when replacement is True.

    • +
    +
    +
    +
    + +
    +
    +class torch.utils.data.SubsetRandomSampler(indices)[source]
    +

    Samples elements randomly from a given list of indices, without replacement.

    +
    +
    Parameters
    +

    indices (sequence) – a sequence of indices

    +
    +
    +
    + +
    +
    +class torch.utils.data.WeightedRandomSampler(weights, num_samples, replacement=True)[source]
    +

    Samples elements from [0,..,len(weights)-1] with given probabilities (weights).

    +
    +
    Parameters
    +
      +
    • weights (sequence) – a sequence of weights, which do not necessarily sum to one

    • +
    • num_samples (int) – number of samples to draw

    • +
    • replacement (bool) – if True, samples are drawn with replacement. +If not, they are drawn without replacement, which means that when a +sample index is drawn for a row, it cannot be drawn again for that row.

    • +
    +
    +
    +

    Example

    +
    >>> list(WeightedRandomSampler([0.1, 0.9, 0.4, 0.7, 3.0, 0.6], 5, replacement=True))
    +[0, 0, 0, 1, 0]
    +>>> list(WeightedRandomSampler([0.9, 0.4, 0.05, 0.2, 0.3, 0.1], 5, replacement=False))
    +[0, 1, 4, 3, 2]
    +
    +
    +
    + +
    +
    +class torch.utils.data.BatchSampler(sampler, batch_size, drop_last)[source]
    +

    Wraps another sampler to yield a mini-batch of indices.

    +
    +
    Parameters
    +
      +
    • sampler (Sampler) – Base sampler.

    • +
    • batch_size (int) – Size of mini-batch.

    • +
    • drop_last (bool) – If True, the sampler will drop the last batch if +its size would be less than batch_size

    • +
    +
    +
    +

    Example

    +
    >>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=False))
    +[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
    +>>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=True))
    +[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
    +
    +
    +
    + +
    +
    +class torch.utils.data.distributed.DistributedSampler(dataset, num_replicas=None, rank=None, shuffle=True)[source]
    +

    Sampler that restricts data loading to a subset of the dataset.

    +

    It is especially useful in conjunction with torch.nn.parallel.DistributedDataParallel. In such a case, each process can pass a DistributedSampler instance as a DataLoader sampler, and load a subset of the original dataset that is exclusive to it.

    +
    +

    Note

    +

    Dataset is assumed to be of constant size.

    +
    +
    +
    Parameters
    +
      +
    • dataset – Dataset used for sampling.

    • +
    • num_replicas (optional) – Number of processes participating in +distributed training.

    • +
    • rank (optional) – Rank of the current process within num_replicas.

    • +
    • shuffle (optional) – If True (default), the sampler will shuffle the indices.

    • +
    +
    +
    +
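    A minimal sketch of the typical pattern, assuming the process group has already been initialized in each training process and dataset is your map-style dataset:

    from torch.utils.data import DataLoader
    from torch.utils.data.distributed import DistributedSampler

    sampler = DistributedSampler(dataset)  # infers num_replicas/rank from the group
    loader = DataLoader(dataset, batch_size=32, shuffle=False, sampler=sampler)
    for epoch in range(10):
        sampler.set_epoch(epoch)  # varies the shuffling across epochs
        for batch in loader:
            ...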
    \ No newline at end of file
    diff --git a/docs/stable/distributed.html b/docs/stable/distributed.html
    new file mode 100644
    index 000000000000..9c3bce6a8ff0
    --- /dev/null
    +++ b/docs/stable/distributed.html
    @@ -0,0 +1,1520 @@

    Distributed communication package - torch.distributed

    +
    +

    Backends

    +

    torch.distributed supports three backends, each with +different capabilities. The table below shows which functions are available +for use with CPU / CUDA tensors. +MPI supports CUDA only if the implementation used to build PyTorch supports it.

    Backend        gloo            mpi             nccl
    Device         CPU     GPU     CPU     GPU     CPU     GPU
    send            ✓       ✘       ✓       ?       ✘       ✘
    recv            ✓       ✘       ✓       ?       ✘       ✘
    broadcast       ✓       ✓       ✓       ?       ✘       ✓
    all_reduce      ✓       ✓       ✓       ?       ✘       ✓
    reduce          ✓       ✘       ✓       ?       ✘       ✓
    all_gather      ✓       ✘       ✓       ?       ✘       ✓
    gather          ✓       ✘       ✓       ?       ✘       ✘
    scatter         ✓       ✘       ✓       ?       ✘       ✘
    barrier         ✓       ✘       ✓       ?       ✘       ✓

    (“?” in the mpi GPU column: CUDA tensors are supported only if the MPI implementation used to build PyTorch supports them.)
    +

    Backends that come with PyTorch

    +

    PyTorch distributed currently only supports Linux. By default, the Gloo and NCCL backends are built and included in PyTorch distributed (NCCL only when building with CUDA). MPI is an optional backend that can only be included if you build PyTorch from source (e.g., by building PyTorch on a host that has MPI installed).

    +
    +
    +

    Which backend to use?

    +

    In the past, we were often asked: “which backend should I use?”.

    +
      +
    • Rule of thumb

      +
        +
      • Use the NCCL backend for distributed GPU training

      • +
      • Use the Gloo backend for distributed CPU training.

      • +
      +
    • +
    • GPU hosts with InfiniBand interconnect

      +
        +
      • Use NCCL, since it’s the only backend that currently supports +InfiniBand and GPUDirect.

      • +
      +
    • +
    • GPU hosts with Ethernet interconnect

      +
        +
      • Use NCCL, since it currently provides the best distributed GPU +training performance, especially for multiprocess single-node or +multi-node distributed training. If you encounter any problem with +NCCL, use Gloo as the fallback option. (Note that Gloo currently +runs slower than NCCL for GPUs.)

      • +
      +
    • +
    • CPU hosts with InfiniBand interconnect

      +
        +
    • If your InfiniBand network has IP over IB enabled, use Gloo; otherwise, use MPI instead. We are planning on adding InfiniBand support for Gloo in upcoming releases.

      • +
      +
    • +
    • CPU hosts with Ethernet interconnect

      +
        +
      • Use Gloo, unless you have specific reasons to use MPI.

      • +
      +
    • +
    +
    +
    +

    Common environment variables

    +
    +

    Choosing the network interface to use

    +

    By default, both the NCCL and Gloo backends will try to find the right network interface to use. +If the automatically detected interface is not correct, you can override it using the following +environment variables (applicable to the respective backend):

    +
      +
    • NCCL_SOCKET_IFNAME, for example export NCCL_SOCKET_IFNAME=eth0

    • +
    • GLOO_SOCKET_IFNAME, for example export GLOO_SOCKET_IFNAME=eth0

    • +
    +

    If you’re using the Gloo backend, you can specify multiple interfaces by separating +them by a comma, like this: export GLOO_SOCKET_IFNAME=eth0,eth1,eth2,eth3. +The backend will dispatch operations in a round-robin fashion across these interfaces. +It is imperative that all processes specify the same number of interfaces in this variable.

    +
    +
    +

    Other NCCL environment variables

    +

    NCCL has also provided a number of environment variables for fine-tuning purposes.

    +

    Commonly used ones include the following for debugging purposes:

    +
      +
    • export NCCL_DEBUG=INFO

    • +
    • export NCCL_DEBUG_SUBSYS=ALL

    • +
    +

    For the full list of NCCL environment variables, please refer to +NVIDIA NCCL’s official documentation

    +
    +
    +
    +
    +

    Basics

    +

    The torch.distributed package provides PyTorch support and communication primitives +for multiprocess parallelism across several computation nodes running on one or more +machines. The class torch.nn.parallel.DistributedDataParallel() builds on this +functionality to provide synchronous distributed training as a wrapper around any +PyTorch model. This differs from the kinds of parallelism provided by +Multiprocessing package - torch.multiprocessing and torch.nn.DataParallel() in that it supports +multiple network-connected machines and in that the user must explicitly launch a separate +copy of the main training script for each process.

    +

    In the single-machine synchronous case, torch.distributed or the +torch.nn.parallel.DistributedDataParallel() wrapper may still have advantages over other +approaches to data-parallelism, including torch.nn.DataParallel():

    +
      +
    • Each process maintains its own optimizer and performs a complete optimization step with each +iteration. While this may appear redundant, since the gradients have already been gathered +together and averaged across processes and are thus the same for every process, this means +that no parameter broadcast step is needed, reducing time spent transferring tensors between +nodes.

    • +
    • Each process contains an independent Python interpreter, eliminating the extra interpreter +overhead and “GIL-thrashing” that comes from driving several execution threads, model +replicas, or GPUs from a single Python process. This is especially important for models that +make heavy use of the Python runtime, including models with recurrent layers or many small +components.

    • +
    +
    +
    +

    Initialization

    +

    The package needs to be initialized using the torch.distributed.init_process_group() +function before calling any other methods. This blocks until all processes have +joined.

    +
    +
    +torch.distributed.init_process_group(backend, init_method=None, timeout=datetime.timedelta(0, 1800), world_size=-1, rank=-1, store=None, group_name='')[source]
    +

    Initializes the default distributed process group, and this will also +initialize the distributed package.

    +
    +
    There are two main ways to initialize a process group:

    1. Specify store, rank, and world_size explicitly.

    2. Specify init_method (a URL string) which indicates where/how to discover peers. Optionally specify rank and world_size, or encode all required parameters in the URL and omit them.

    If neither is specified, init_method is assumed to be “env://”.

    +
    +
    +
    +
    Parameters
    +
      +
    • backend (str or Backend) – The backend to use. Depending on +build-time configurations, valid values include mpi, gloo, +and nccl. This field should be given as a lowercase string +(e.g., "gloo"), which can also be accessed via +Backend attributes (e.g., Backend.GLOO). If using +multiple processes per machine with nccl backend, each process +must have exclusive access to every GPU it uses, as sharing GPUs +between processes can result in deadlocks.

    • +
    • init_method (str, optional) – URL specifying how to initialize the +process group. Default is “env://” if no +init_method or store is specified. +Mutually exclusive with store.

    • +
    • world_size (int, optional) – Number of processes participating in +the job. Required if store is specified.

    • +
    • rank (int, optional) – Rank of the current process. +Required if store is specified.

    • +
    • store (Store, optional) – Key/value store accessible to all workers, used +to exchange connection/address information. +Mutually exclusive with init_method.

    • +
    • timeout (timedelta, optional) – Timeout for operations executed against +the process group. Default value equals 30 minutes. +This is only applicable for the gloo backend.

    • +
    • group_name (str, optional, deprecated) – Group name.

    • +
    +
    +
    +

    To enable backend == Backend.MPI, PyTorch needs to be built from source on a system that supports MPI. The same applies to NCCL as well.

    +
    + +
    +
    +class torch.distributed.Backend[source]
    +

    An enum-like class of available backends: GLOO, NCCL, and MPI.

    +

    The values of this class are lowercase strings, e.g., "gloo". They can +be accessed as attributes, e.g., Backend.NCCL.

    +

    This class can be directly called to parse the string, e.g., +Backend(backend_str) will check if backend_str is valid, and +return the parsed lowercase string if so. It also accepts uppercase strings, +e.g., Backend("GLOO") returns "gloo".

    +
    +

    Note

    +

    The entry Backend.UNDEFINED is present but only used as +initial value of some fields. Users should neither use it directly +nor assume its existence.

    +
    +
    + +
    +
    +torch.distributed.get_backend(group=<object object>)[source]
    +

    Returns the backend of the given process group.

    +
    +
    Parameters
    +

    group (ProcessGroup, optional) – The process group to work on. The +default is the general main process group. If another specific group +is specified, the calling process must be part of group.

    +
    +
    Returns
    +

    The backend of the given process group as a lower case string.

    +
    +
    +
    + +
    +
    +torch.distributed.get_rank(group=<object object>)[source]
    +

    Returns the rank of the current process in the given process group

    +

    Rank is a unique identifier assigned to each process within a distributed process group. They are always consecutive integers ranging from 0 to world_size - 1.

    +
    +
    Parameters
    +

    group (ProcessGroup, optional) – The process group to work on

    +
    +
    Returns
    +

    The rank within the process group, or -1 if not part of the group

    +
    +
    +
    + +
    +
    +torch.distributed.get_world_size(group=<object object>)[source]
    +

    Returns the number of processes in the current process group

    +
    +
    Parameters
    +

    group (ProcessGroup, optional) – The process group to work on

    +
    +
    Returns
    +

    The world size of the process group, or -1 if not part of the group

    +
    +
    +
    + +
    +
    +torch.distributed.is_initialized()[source]
    +

    Checks whether the default process group has been initialized.

    +
    + +
    +
    +torch.distributed.is_mpi_available()[source]
    +

    Checks if the MPI backend is available.

    +
    + +
    +
    +torch.distributed.is_nccl_available()[source]
    +

    Checks if the NCCL backend is available.

    +
    + +
    +

    Currently three initialization methods are supported:

    +
    +

    TCP initialization

    +

    There are two ways to initialize using TCP, both requiring a network address +reachable from all processes and a desired world_size. The first way +requires specifying an address that belongs to the rank 0 process. This +initialization method requires that all processes have manually specified ranks.

    +

    Note that multicast address is not supported anymore in the latest distributed +package. group_name is deprecated as well.

    +
    import torch.distributed as dist
    +
    +# Use address of one of the machines
    +dist.init_process_group(backend, init_method='tcp://10.1.1.20:23456',
    +                        rank=args.rank, world_size=4)
    +
    +
    +
    +
    +

    Shared file-system initialization

    +

    Another initialization method makes use of a file system that is shared and +visible from all machines in a group, along with a desired world_size. The URL should start +with file:// and contain a path to a non-existent file (in an existing +directory) on a shared file system. File-system initialization will automatically +create that file if it doesn’t exist, but will not delete the file. Therefore, it +is your responsibility to make sure that the file is cleaned up before the next +init_process_group() call on the same file path/name.

    +

    Note that automatic rank assignment is not supported anymore in the latest +distributed package and group_name is deprecated as well.

    +
    +

    Warning

    +

    This method assumes that the file system supports locking using fcntl - most +local systems and NFS support it.

    +
    +
    +

    Warning

    +

    This method will always create the file and try its best to clean up and remove the file at the end of the program. In other words, each initialization with the file init method needs a brand new empty file in order to succeed. If the same file used by the previous initialization (which happened not to get cleaned up) is used again, this is unexpected behavior and can often cause deadlocks and failures. Therefore, even though this method will try its best to clean up the file, if the auto-delete happens to be unsuccessful, it is your responsibility to ensure that the file is removed at the end of the training, to prevent the same file from being reused the next time. This is especially important if you plan to call init_process_group() multiple times on the same file name. In other words, if the file is not removed/cleaned up and you call init_process_group() again on that file, failures are expected. The rule of thumb is to make sure that the file is non-existent or empty every time init_process_group() is called.

    +
    +
    import torch.distributed as dist
    +
    +# rank should always be specified
    +dist.init_process_group(backend, init_method='file:///mnt/nfs/sharedfile',
    +                        world_size=4, rank=args.rank)
    +
    +
    +
    +
    +

    Environment variable initialization

    +

    This method will read the configuration from environment variables, allowing +one to fully customize how the information is obtained. The variables to be set +are:

    +
      +
    • MASTER_PORT - required; has to be a free port on machine with rank 0

    • +
    • MASTER_ADDR - required (except for rank 0); address of rank 0 node

    • +
    • WORLD_SIZE - required; can be set either here, or in a call to init function

    • +
    • RANK - required; can be set either here, or in a call to init function

    • +
    +

    The machine with rank 0 will be used to set up all connections.

    +

    This is the default method, meaning that init_method does not have to be specified (or +can be env://).
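    Here is a sketch of what each process might run (the environment variables would normally be exported by your launcher; args.rank follows the convention of the earlier examples):

    import os
    import torch.distributed as dist

    os.environ['MASTER_ADDR'] = '10.1.1.20'   # address of the rank 0 machine
    os.environ['MASTER_PORT'] = '23456'       # free port on the rank 0 machine

    dist.init_process_group(backend, init_method='env://',
                            world_size=4, rank=args.rank)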

    +
    +
    +
    +

    Groups

    +

    By default, collectives operate on the default group (also called the world) and require all processes to enter the distributed function call. However, some workloads can benefit from more fine-grained communication. This is where distributed groups come into play. The new_group() function can be used to create new groups with arbitrary subsets of all processes. It returns an opaque group handle that can be given as a group argument to all collectives (collectives are distributed functions to exchange information in certain well-known programming patterns).
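    For example, a sketch that creates a subgroup of ranks 0 and 1 and all-reduces only within it (every process, member or not, must make the new_group() call):

    import torch
    import torch.distributed as dist

    group = dist.new_group(ranks=[0, 1])  # collective call across all processes
    if dist.get_rank() in (0, 1):
        t = torch.ones(1)
        dist.all_reduce(t, group=group)   # sums only across ranks 0 and 1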

    +
    +
    +torch.distributed.new_group(ranks=None, timeout=datetime.timedelta(0, 1800), backend=None)[source]
    +

    Creates a new distributed group.

    +

    This function requires that all processes in the main group (i.e. all +processes that are part of the distributed job) enter this function, even +if they are not going to be members of the group. Additionally, groups +should be created in the same order in all processes.

    +
    +
    Parameters
    +
      +
    • ranks (list[int]) – List of ranks of group members.

    • +
    • timeout (timedelta, optional) – Timeout for operations executed against +the process group. Default value equals 30 minutes. +This is only applicable for the gloo backend.

    • +
    • backend (str or Backend, optional) – The backend to use. Depending on +build-time configurations, valid values are gloo and nccl. +By default uses the same backend as the global group. This field +should be given as a lowercase string (e.g., "gloo"), which can +also be accessed via Backend attributes (e.g., +Backend.GLOO).

    • +
    +
    +
    Returns
    +

    A handle of distributed group that can be given to collective calls.

    +
    +
    +
    + +
    +
    +

    Point-to-point communication

    +
    +
    +torch.distributed.send(tensor, dst, group=<object object>, tag=0)[source]
    +

    Sends a tensor synchronously.

    +
    +
    Parameters
    +
      +
    • tensor (Tensor) – Tensor to send.

    • +
    • dst (int) – Destination rank.

    • +
    • group (ProcessGroup, optional) – The process group to work on

    • +
    • tag (int, optional) – Tag to match send with remote recv

    • +
    +
    +
    +
    + +
    +
    +torch.distributed.recv(tensor, src=None, group=<object object>, tag=0)[source]
    +

    Receives a tensor synchronously.

    +
    +
    Parameters
    +
      +
    • tensor (Tensor) – Tensor to fill with received data.

    • +
    • src (int, optional) – Source rank. Will receive from any +process if unspecified.

    • +
    • group (ProcessGroup, optional) – The process group to work on

    • +
    • tag (int, optional) – Tag to match recv with remote send

    • +
    +
    +
    Returns
    +

    Sender rank, or -1 if not part of the group

    +
    +
    +
    + +

    isend() and irecv() +return distributed request objects when used. In general, the type of this object is unspecified +as they should never be created manually, but they are guaranteed to support two methods:

    +
      +
    • is_completed() - returns True if the operation has finished

    • +
    • wait() - will block the process until the operation is finished. +is_completed() is guaranteed to return True once it returns.

    • +
    +
    +
    +torch.distributed.isend(tensor, dst, group=<object object>, tag=0)[source]
    +

    Sends a tensor asynchronously.

    +
    +
    Parameters
    +
      +
    • tensor (Tensor) – Tensor to send.

    • +
    • dst (int) – Destination rank.

    • +
    • group (ProcessGroup, optional) – The process group to work on

    • +
    • tag (int, optional) – Tag to match send with remote recv

    • +
    +
    +
    Returns
    +

    A distributed request object. +None, if not part of the group

    +
    +
    +
    + +
    +
    +torch.distributed.irecv(tensor, src, group=<object object>, tag=0)[source]
    +

    Receives a tensor asynchronously.

    +
    +
    Parameters
    +
      +
    • tensor (Tensor) – Tensor to fill with received data.

    • +
    • src (int) – Source rank.

    • +
    • group (ProcessGroup, optional) – The process group to work on

    • +
    • tag (int, optional) – Tag to match recv with remote send

    • +
    +
    +
    Returns
    +

    A distributed request object. +None, if not part of the group

    +
    +
    +
    + +
    +
    +

    Synchronous and asynchronous collective operations

    +

    Every collective operation function supports the following two kinds of operations:

    +

    Synchronous operation - the default mode, when async_op is set to False. When the function returns, it is guaranteed that the collective operation is performed (but not necessarily completed if it’s a CUDA op, since all CUDA ops are asynchronous), and any further function calls depending on the data of the collective operation can be made. In synchronous mode, the collective function does not return anything.

    +

    Asynchronous operation - when async_op is set to True. The collective operation function returns a distributed request object. In general, you don’t need to create it manually, and it is guaranteed to support two methods (see the sketch after this list):

    +
      +
    • is_completed() - returns True if the operation has finished

    • +
    • wait() - will block the process until the operation is finished.

    • +
    +
    +
    +
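    A sketch of the asynchronous pattern, assuming an initialized process group:

    import torch
    import torch.distributed as dist

    t = torch.ones(1)
    work = dist.all_reduce(t, async_op=True)  # returns a request object
    # ... do other work while the collective is in flight ...
    work.wait()                               # block until it has finished
    assert work.is_completed()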

    Collective functions

    +
    +
    +torch.distributed.broadcast(tensor, src, group=<object object>, async_op=False)[source]
    +

    Broadcasts the tensor to the whole group.

    +

    tensor must have the same number of elements in all processes +participating in the collective.

    +
    +
    Parameters
    +
      +
    • tensor (Tensor) – Data to be sent if src is the rank of current +process, and tensor to be used to save received data otherwise.

    • +
    • src (int) – Source rank.

    • +
    • group (ProcessGroup, optional) – The process group to work on

    • +
    • async_op (bool, optional) – Whether this op should be an async op

    • +
    +
    +
    Returns
    +

    Async work handle, if async_op is set to True. +None, if not async_op or if not part of the group

    +
    +
    +
    + +
    +
    +torch.distributed.all_reduce(tensor, op=ReduceOp.SUM, group=<object object>, async_op=False)[source]
    +

    Reduces the tensor data across all machines in such a way that all get +the final result.

    +

    After the call, tensor is going to be bitwise identical in all processes.

    +
    +
    Parameters
    +
      +
    • tensor (Tensor) – Input and output of the collective. The function +operates in-place.

    • +
    • op (optional) – One of the values from +torch.distributed.ReduceOp +enum. Specifies an operation used for element-wise reductions.

    • +
    • group (ProcessGroup, optional) – The process group to work on

    • +
    • async_op (bool, optional) – Whether this op should be an async op

    • +
    +
    +
    Returns
    +

    Async work handle, if async_op is set to True. +None, if not async_op or if not part of the group

    +
    +
    +
    + +
    +
    +torch.distributed.reduce(tensor, dst, op=ReduceOp.SUM, group=<object object>, async_op=False)[source]
    +

    Reduces the tensor data across all machines.

    +

    Only the process with rank dst is going to receive the final result.

    +
    +
    Parameters
    +
      +
    • tensor (Tensor) – Input and output of the collective. The function +operates in-place.

    • +
    • dst (int) – Destination rank

    • +
    • op (optional) – One of the values from +torch.distributed.ReduceOp +enum. Specifies an operation used for element-wise reductions.

    • +
    • group (ProcessGroup, optional) – The process group to work on

    • +
    • async_op (bool, optional) – Whether this op should be an async op

    • +
    +
    +
    Returns
    +

    Async work handle, if async_op is set to True. +None, if not async_op or if not part of the group

    +
    +
    +
    + +
    +
    +torch.distributed.all_gather(tensor_list, tensor, group=<object object>, async_op=False)[source]
    +

    Gathers tensors from the whole group in a list.

    +
    +
    Parameters
    +
      +
    • tensor_list (list[Tensor]) – Output list. It should contain +correctly-sized tensors to be used for output of the collective.

    • +
    • tensor (Tensor) – Tensor to be broadcast from current process.

    • +
    • group (ProcessGroup, optional) – The process group to work on

    • +
    • async_op (bool, optional) – Whether this op should be an async op

    • +
    +
    +
    Returns
    +

    Async work handle, if async_op is set to True. +None, if not async_op or if not part of the group

    +
    +
    +
    + +
    +
    +torch.distributed.gather(tensor, gather_list, dst, group=<object object>, async_op=False)[source]
    +

    Gathers a list of tensors in a single process.

    +
    +
    Parameters
    +
      +
    • tensor (Tensor) – Input tensor.

    • +
    • gather_list (list[Tensor]) – List of appropriately-sized tensors to +use for received data. Required only in the receiving process.

    • +
    • dst (int) – Destination rank. Required in all processes except the one that is receiving the data.

    • +
    • group (ProcessGroup, optional) – The process group to work on

    • +
    • async_op (bool, optional) – Whether this op should be an async op

    • +
    +
    +
    Returns
    +

    Async work handle, if async_op is set to True. +None, if not async_op or if not part of the group

    +
    +
    +
    + +
    +
    +torch.distributed.scatter(tensor, scatter_list, src, group=<object object>, async_op=False)[source]
    +

    Scatters a list of tensors to all processes in a group.

    +

    Each process will receive exactly one tensor and store its data in the +tensor argument.

    +
    +
    Parameters
    +
      +
    • tensor (Tensor) – Output tensor.

    • +
    • scatter_list (list[Tensor]) – List of tensors to scatter. Required only +in the process that is sending the data.

    • +
    • src (int) – Source rank. Required in all processes except the one that +is sending the data.

    • +
    • group (ProcessGroup, optional) – The process group to work on

    • +
    • async_op (bool, optional) – Whether this op should be an async op

    • +
    +
    +
    Returns
    +

    Async work handle, if async_op is set to True. +None, if not async_op or if not part of the group

    +
    +
    +
    + +
    +
    +torch.distributed.barrier(group=<object object>, async_op=False)[source]
    +

    Synchronizes all processes.

    +

    This collective blocks processes until the whole group enters this function, if async_op is False, or until wait() is called on the async work handle.

    +
    +
    Parameters
    +
      +
    • group (ProcessGroup, optional) – The process group to work on

    • +
    • async_op (bool, optional) – Whether this op should be an async op

    • +
    +
    +
    Returns
    +

    Async work handle, if async_op is set to True. +None, if not async_op or if not part of the group

    +
    +
    +
    + +
    +
    +class torch.distributed.ReduceOp
    +

    An enum-like class of available reduce operations: SUM, PRODUCT, +MIN, and MAX.

    +

    The values of this class can be accessed as attributes, e.g., ReduceOp.SUM. +They are used in specifying strategies for reduction collectives, e.g., +reduce(), all_reduce_multigpu(), etc.

    +

    Members:

    +
    +

    SUM

    +

    PRODUCT

    +

    MIN

    +

    MAX

    +
    +
    + +
    +
    +class torch.distributed.reduce_op[source]
    +

    Deprecated enum-like class for reduction operations: SUM, PRODUCT, +MIN, and MAX.

    +

    Using ReduceOp is recommended instead.

    +
    + +
    +
    +

    Multi-GPU collective functions

    +

    If you have more than one GPU on each node, when using the NCCL and Gloo backends, broadcast_multigpu(), all_reduce_multigpu(), reduce_multigpu() and all_gather_multigpu() support distributed collective operations among multiple GPUs within each node. These functions can potentially improve the overall distributed training performance and are easily used by passing a list of tensors. Each Tensor in the passed tensor list needs to be on a separate GPU device of the host where the function is called. Note that the length of the tensor list needs to be identical among all the distributed processes. Also note that currently the multi-GPU collective functions are only supported by the NCCL backend.

    +

    For example, suppose that the system we use for distributed training has 2 nodes, each of which has 8 GPUs. On each of the 16 GPUs, there is a tensor that we would like to all-reduce. The following code can serve as a reference:

    +

    Code running on Node 0

    +
    import torch
    +import torch.distributed as dist
    +
    +dist.init_process_group(backend="nccl",
    +                        init_method="file:///distributed_test",
    +                        world_size=2,
    +                        rank=0)
    +tensor_list = []
    +for dev_idx in range(torch.cuda.device_count()):
    +    tensor_list.append(torch.FloatTensor([1]).cuda(dev_idx))
    +
    +dist.all_reduce_multigpu(tensor_list)
    +
    +
    +

    Code running on Node 1

    +
    import torch
    +import torch.distributed as dist
    +
    +dist.init_process_group(backend="nccl",
    +                        init_method="file:///distributed_test",
    +                        world_size=2,
    +                        rank=1)
    +tensor_list = []
    +for dev_idx in range(torch.cuda.device_count()):
    +    tensor_list.append(torch.FloatTensor([1]).cuda(dev_idx))
    +
    +dist.all_reduce_multigpu(tensor_list)
    +
    +
    +

    After the call, all 16 tensors on the two nodes will have the all-reduced value +of 16

    +
    +
    +torch.distributed.broadcast_multigpu(tensor_list, src, group=<object object>, async_op=False, src_tensor=0)[source]
    +

    Broadcasts the tensor to the whole group with multiple GPU tensors +per node.

    +

    tensor must have the same number of elements in all the GPUs from all processes participating in the collective. Each tensor in the list must be on a different GPU.

    +

    Only the nccl and gloo backends are currently supported. Tensors should only be GPU tensors.

    +
    +
    Parameters
    +
      +
    • tensor_list (List[Tensor]) – Tensors that participate in the collective +operation. If src is the rank, then the specified src_tensor +element of tensor_list (tensor_list[src_tensor]) will be +broadcast to all other tensors (on different GPUs) in the src process +and all tensors in tensor_list of other non-src processes. +You also need to make sure that len(tensor_list) is the same +for all the distributed processes calling this function.

    • +
    • src (int) – Source rank.

    • +
    • group (ProcessGroup, optional) – The process group to work on

    • +
    • async_op (bool, optional) – Whether this op should be an async op

    • +
    • src_tensor (int, optional) – Source tensor rank within tensor_list

    • +
    +
    +
    Returns
    +

    Async work handle, if async_op is set to True. +None, if not async_op or if not part of the group

    +
    +
    +
    + +
    +
    +torch.distributed.all_reduce_multigpu(tensor_list, op=ReduceOp.SUM, group=<object object>, async_op=False)[source]
    +

    Reduces the tensor data across all machines in such a way that all get the final result. This function reduces a number of tensors on every node, with each tensor residing on a different GPU. Therefore, the input tensors in the tensor list need to be GPU tensors. Also, each tensor in the tensor list needs to reside on a different GPU.

    +

    After the call, all tensors in tensor_list are going to be bitwise identical in all processes.

    +

    Only the nccl and gloo backends are currently supported. Tensors should only be GPU tensors.

    +
    +
    Parameters
    +
      +
    • tensor_list (List[Tensor]) – List of input and output tensors of the collective. The function operates in-place and requires each tensor to be a GPU tensor on a different GPU. You also need to make sure that len(tensor_list) is the same for all the distributed processes calling this function.

    • +
    • op (optional) – One of the values from +torch.distributed.ReduceOp +enum. Specifies an operation used for element-wise reductions.

    • +
    • group (ProcessGroup, optional) – The process group to work on

    • +
    • async_op (bool, optional) – Whether this op should be an async op

    • +
    +
    +
    Returns
    +

    Async work handle, if async_op is set to True. +None, if not async_op or if not part of the group

    +
    +
    +
    + +
    +
    +torch.distributed.reduce_multigpu(tensor_list, dst, op=ReduceOp.SUM, group=<object object>, async_op=False, dst_tensor=0)[source]
    +

    Reduces the tensor data on multiple GPUs across all machines. Each tensor in tensor_list should reside on a separate GPU.

    +

    Only the GPU of tensor_list[dst_tensor] on the process with rank dst +is going to receive the final result.

    +

    Only the nccl backend is currently supported. Tensors should only be GPU tensors.

    +
    +
    Parameters
    +
      +
    • tensor_list (List[Tensor]) – Input and output GPU tensors of the +collective. The function operates in-place. +You also need to make sure that len(tensor_list) is the same for +all the distributed processes calling this function.

    • +
    • dst (int) – Destination rank

    • +
    • op (optional) – One of the values from +torch.distributed.ReduceOp +enum. Specifies an operation used for element-wise reductions.

    • +
    • group (ProcessGroup, optional) – The process group to work on

    • +
    • async_op (bool, optional) – Whether this op should be an async op

    • +
    • dst_tensor (int, optional) – Destination tensor rank within +tensor_list

    • +
    +
    +
    Returns
    +

    Async work handle, if async_op is set to True. +None, otherwise

    +
    +
    +
    + +
    +
    +torch.distributed.all_gather_multigpu(output_tensor_lists, input_tensor_list, group=<object object>, async_op=False)[source]
    +

    Gathers tensors from the whole group in a list. Each tensor in tensor_list should reside on a separate GPU.

    +

    Only the nccl backend is currently supported. Tensors should only be GPU tensors.

    +
    +
    Parameters
    +
      +
    • output_tensor_lists (List[List[Tensor]]) –

      Output lists. It should +contain correctly-sized tensors on each GPU to be used for output +of the collective, e.g. output_tensor_lists[i] contains the +all_gather result that resides on the GPU of +input_tensor_list[i].

      +

    Note that each element of output_tensor_lists has the size of world_size * len(input_tensor_list), since the function all gathers the result from every single GPU in the group. To interpret each element of output_tensor_lists[i], note that input_tensor_list[j] of rank k will appear in output_tensor_lists[i][k * world_size + j]

      +

      Also note that len(output_tensor_lists), and the size of each +element in output_tensor_lists (each element is a list, +therefore len(output_tensor_lists[i])) need to be the same +for all the distributed processes calling this function.

      +

    • +
    • input_tensor_list (List[Tensor]) – List of tensors(on different GPUs) to +be broadcast from current process. +Note that len(input_tensor_list) needs to be the same for +all the distributed processes calling this function.

    • +
    • group (ProcessGroup, optional) – The process group to work on

    • +
    • async_op (bool, optional) – Whether this op should be an async op

    • +
    +
    +
    Returns
    +

    Async work handle, if async_op is set to True. +None, if not async_op or if not part of the group

    +
    +
    +
    + +
    +
    +

    Launch utility

    +

    The torch.distributed package also provides a launch utility in +torch.distributed.launch. This helper utility can be used to launch +multiple processes per node for distributed training. This utility also supports +both python2 and python3.
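    For illustration, an invocation on the first of two 8-GPU nodes might look like the following (your_training_script.py is a placeholder; see python -m torch.distributed.launch --help for the authoritative flags):

    python -m torch.distributed.launch --nproc_per_node=8 --nnodes=2 \
        --node_rank=0 --master_addr="10.1.1.20" --master_port=23456 \
        your_training_script.py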

    +
    +
    +

    Spawn utility

    +

The torch.multiprocessing package also provides a spawn function in torch.multiprocessing.spawn(). This helper function can be used to spawn multiple processes. It works by passing in the function that you want to run and spawning N processes to run it. This can be used for multiprocess distributed training as well.
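A minimal sketch of how spawn is typically used (the worker body is an illustrative placeholder):

import torch.multiprocessing as mp

def worker(rank, world_size):
    # Each spawned process receives its index as the first argument.
    print("worker {} of {} started".format(rank, world_size))

if __name__ == "__main__":
    mp.spawn(worker, args=(4,), nprocs=4)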

    +

For a reference on how to use it, please refer to the PyTorch ImageNet example implementation

    +

    Note that this function requires Python 3.4 or higher.

    +
    +
\ No newline at end of file
diff --git a/docs/stable/distributions.html b/docs/stable/distributions.html
new file mode 100644
index 000000000000..3d7e7e2e1a52
--- /dev/null
+++ b/docs/stable/distributions.html
@@ -0,0 +1,3791 @@

    Probability distributions - torch.distributions

    +

    The distributions package contains parameterizable probability distributions +and sampling functions. This allows the construction of stochastic computation +graphs and stochastic gradient estimators for optimization. This package +generally follows the design of the TensorFlow Distributions package.

    +

    It is not possible to directly backpropagate through random samples. However, +there are two main methods for creating surrogate functions that can be +backpropagated through. These are the score function estimator/likelihood ratio +estimator/REINFORCE and the pathwise derivative estimator. REINFORCE is commonly +seen as the basis for policy gradient methods in reinforcement learning, and the +pathwise derivative estimator is commonly seen in the reparameterization trick +in variational autoencoders. Whilst the score function only requires the value +of samples \(f(x)\), the pathwise derivative requires the derivative +\(f'(x)\). The next sections discuss these two in a reinforcement learning +example. For more details see +Gradient Estimation Using Stochastic Computation Graphs .

    +
    +

    Score function

    +

    When the probability density function is differentiable with respect to its +parameters, we only need sample() and +log_prob() to implement REINFORCE:

    +
    +\[\Delta\theta = \alpha r \frac{\partial\log p(a|\pi^\theta(s))}{\partial\theta}\]
    +

    where \(\theta\) are the parameters, \(\alpha\) is the learning rate, +\(r\) is the reward and \(p(a|\pi^\theta(s))\) is the probability of +taking action \(a\) in state \(s\) given policy \(\pi^\theta\).

    +

    In practice we would sample an action from the output of a network, apply this +action in an environment, and then use log_prob to construct an equivalent +loss function. Note that we use a negative because optimizers use gradient +descent, whilst the rule above assumes gradient ascent. With a categorical +policy, the code for implementing REINFORCE would be as follows:

    +
    probs = policy_network(state)
    +# Note that this is equivalent to what used to be called multinomial
    +m = Categorical(probs)
    +action = m.sample()
    +next_state, reward = env.step(action)
    +loss = -m.log_prob(action) * reward
    +loss.backward()
    +
    +
    +
    +
    +

    Pathwise derivative

    +

    The other way to implement these stochastic/policy gradients would be to use the +reparameterization trick from the +rsample() method, where the +parameterized random variable can be constructed via a parameterized +deterministic function of a parameter-free random variable. The reparameterized +sample therefore becomes differentiable. The code for implementing the pathwise +derivative would be as follows:

    +
    params = policy_network(state)
    +m = Normal(*params)
    +# Any distribution with .has_rsample == True could work based on the application
    +action = m.rsample()
    +next_state, reward = env.step(action)  # Assuming that reward is differentiable
    +loss = -reward
    +loss.backward()
    +
    +
    +
    +
    +

    Distribution

    +
    +
    +class torch.distributions.distribution.Distribution(batch_shape=torch.Size([]), event_shape=torch.Size([]), validate_args=None)[source]
    +

    Bases: object

    +

    Distribution is the abstract base class for probability distributions.

    +
    +
    +property arg_constraints
    +

    Returns a dictionary from argument names to +Constraint objects that +should be satisfied by each argument of this distribution. Args that +are not tensors need not appear in this dict.

    +
    + +
    +
    +property batch_shape
    +

    Returns the shape over which parameters are batched.

    +
    + +
    +
    +cdf(value)[source]
    +

    Returns the cumulative density/mass function evaluated at +value.

    +
    +
    Parameters
    +

    value (Tensor) –

    +
    +
    +
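As an illustration (together with icdf() below, for a standard Normal; values are approximate):

>>> d = Normal(torch.tensor(0.0), torch.tensor(1.0))
>>> d.cdf(torch.tensor(1.0))  # P(X <= 1)
tensor(0.8413)
>>> d.icdf(d.cdf(torch.tensor(1.0)))  # round-trips
tensor(1.)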
    + +
    +
    +entropy()[source]
    +

    Returns entropy of distribution, batched over batch_shape.

    +
    +
    Returns
    +

    Tensor of shape batch_shape.

    +
    +
    +
    + +
    +
    +enumerate_support(expand=True)[source]
    +

    Returns tensor containing all values supported by a discrete +distribution. The result will enumerate over dimension 0, so the shape +of the result will be (cardinality,) + batch_shape + event_shape +(where event_shape = () for univariate distributions).

    +

Note that this enumerates over all batched tensors in lock-step [[0, 0], [1, 1], …]. With expand=False, enumeration happens along dim 0, but with the remaining batch dimensions being singleton dimensions, [[0], [1], …].

    +

    To iterate over the full Cartesian product use +itertools.product(m.enumerate_support()).

    +
    +
    Parameters
    +

    expand (bool) – whether to expand the support over the +batch dims to match the distribution’s batch_shape.

    +
    +
    Returns
    +

    Tensor iterating over dimension 0.
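As an illustration with a batched Bernoulli (batch_shape is (2,)):

>>> b = Bernoulli(torch.tensor([0.1, 0.9]))
>>> b.enumerate_support()
tensor([[0., 0.],
        [1., 1.]])
>>> b.enumerate_support(expand=False)
tensor([[0.],
        [1.]])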

    +
    +
    +
    + +
    +
    +property event_shape
    +

    Returns the shape of a single sample (without batching).

    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +

Returns a new distribution instance (or populates an existing instance provided by a derived class) with batch dimensions expanded to batch_shape. This method calls expand on the distribution’s parameters. As such, this does not allocate new memory for the expanded distribution instance. Additionally, this does not repeat any args checking or parameter broadcasting in __init__, when an instance is first created.

    +
    +
    Parameters
    +
      +
    • batch_shape (torch.Size) – the desired expanded size.

    • +
    • _instance – new instance provided by subclasses that +need to override .expand.

    • +
    +
    +
    Returns
    +

New distribution instance with batch dimensions expanded to batch_shape.
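As an illustration:

>>> d = Normal(torch.tensor(0.0), torch.tensor(1.0))
>>> d2 = d.expand(torch.Size([3, 2]))
>>> d2.batch_shape
torch.Size([3, 2])
>>> d2.sample().shape
torch.Size([3, 2])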

    +
    +
    +
    + +
    +
    +icdf(value)[source]
    +

    Returns the inverse cumulative density/mass function evaluated at +value.

    +
    +
    Parameters
    +

    value (Tensor) –

    +
    +
    +
    + +
    +
    +log_prob(value)[source]
    +

    Returns the log of the probability density/mass function evaluated at +value.

    +
    +
    Parameters
    +

    value (Tensor) –

    +
    +
    +
    + +
    +
    +property mean
    +

    Returns the mean of the distribution.

    +
    + +
    +
    +perplexity()[source]
    +

    Returns perplexity of distribution, batched over batch_shape.

    +
    +
    Returns
    +

    Tensor of shape batch_shape.

    +
    +
    +
    + +
    +
    +rsample(sample_shape=torch.Size([]))[source]
    +

    Generates a sample_shape shaped reparameterized sample or sample_shape +shaped batch of reparameterized samples if the distribution parameters +are batched.
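As an illustration of why this matters for gradients (a sketch with a Normal, whose reparameterized sample is loc + scale * eps):

>>> mu = torch.tensor(0.0, requires_grad=True)
>>> d = Normal(mu, torch.tensor(1.0))
>>> x = d.rsample()
>>> x.backward()  # gradient flows to mu through the sample
>>> mu.grad
tensor(1.)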

    +
    + +
    +
    +sample(sample_shape=torch.Size([]))[source]
    +

    Generates a sample_shape shaped sample or sample_shape shaped batch of +samples if the distribution parameters are batched.

    +
    + +
    +
    +sample_n(n)[source]
    +

    Generates n samples or n batches of samples if the distribution +parameters are batched.

    +
    + +
    +
    +property stddev
    +

    Returns the standard deviation of the distribution.

    +
    + +
    +
    +property support
    +

    Returns a Constraint object +representing this distribution’s support.

    +
    + +
    +
    +property variance
    +

    Returns the variance of the distribution.

    +
    + +
    + +
    +
    +

    ExponentialFamily

    +
    +
    +class torch.distributions.exp_family.ExponentialFamily(batch_shape=torch.Size([]), event_shape=torch.Size([]), validate_args=None)[source]
    +

    Bases: torch.distributions.distribution.Distribution

    +

ExponentialFamily is the abstract base class for probability distributions belonging to an exponential family, whose probability mass/density function has the form defined below

    +
    +\[p_{F}(x; \theta) = \exp(\langle t(x), \theta\rangle - F(\theta) + k(x))\]
    +

    where \(\theta\) denotes the natural parameters, \(t(x)\) denotes the sufficient statistic, +\(F(\theta)\) is the log normalizer function for a given family and \(k(x)\) is the carrier +measure.

    +
    +

    Note

    +

    This class is an intermediary between the Distribution class and distributions which belong +to an exponential family mainly to check the correctness of the .entropy() and analytic KL +divergence methods. We use this class to compute the entropy and KL divergence using the AD +framework and Bregman divergences (courtesy of: Frank Nielsen and Richard Nock, Entropies and +Cross-entropies of Exponential Families).

    +
    +
    +
    +entropy()[source]
    +

    Method to compute the entropy using Bregman divergence of the log normalizer.

    +
    + +
    + +
    +
    +

    Bernoulli

    +
    +
    +class torch.distributions.bernoulli.Bernoulli(probs=None, logits=None, validate_args=None)[source]
    +

    Bases: torch.distributions.exp_family.ExponentialFamily

    +

    Creates a Bernoulli distribution parameterized by probs +or logits (but not both).

    +

    Samples are binary (0 or 1). They take the value 1 with probability p +and 0 with probability 1 - p.

    +

    Example:

    +
    >>> m = Bernoulli(torch.tensor([0.3]))
    +>>> m.sample()  # 30% chance 1; 70% chance 0
    +tensor([ 0.])
    +
    +
    +
    +
    Parameters
    +
      +
    • probs (Number, Tensor) – the probability of sampling 1

    • +
    • logits (Number, Tensor) – the log-odds of sampling 1

    • +
    +
    +
    +
    +
    +arg_constraints = {'logits': Real(), 'probs': Interval(lower_bound=0.0, upper_bound=1.0)}
    +
    + +
    +
    +entropy()[source]
    +
    + +
    +
    +enumerate_support(expand=True)[source]
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +has_enumerate_support = True
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +logits[source]
    +
    + +
    +
    +property mean
    +
    + +
    +
    +property param_shape
    +
    + +
    +
    +probs[source]
    +
    + +
    +
    +sample(sample_shape=torch.Size([]))[source]
    +
    + +
    +
    +support = Boolean()
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    Beta

    +
    +
    +class torch.distributions.beta.Beta(concentration1, concentration0, validate_args=None)[source]
    +

    Bases: torch.distributions.exp_family.ExponentialFamily

    +

    Beta distribution parameterized by concentration1 and concentration0.

    +

    Example:

    +
    >>> m = Beta(torch.tensor([0.5]), torch.tensor([0.5]))
    +>>> m.sample()  # Beta distributed with concentration concentration1 and concentration0
    +tensor([ 0.1046])
    +
    +
    +
    +
    Parameters
    +
      +
    • concentration1 (float or Tensor) – 1st concentration parameter of the distribution +(often referred to as alpha)

    • +
    • concentration0 (float or Tensor) – 2nd concentration parameter of the distribution +(often referred to as beta)

    • +
    +
    +
    +
    +
    +arg_constraints = {'concentration0': GreaterThan(lower_bound=0.0), 'concentration1': GreaterThan(lower_bound=0.0)}
    +
    + +
    +
    +property concentration0
    +
    + +
    +
    +property concentration1
    +
    + +
    +
    +entropy()[source]
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +has_rsample = True
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +property mean
    +
    + +
    +
    +rsample(sample_shape=())[source]
    +
    + +
    +
    +support = Interval(lower_bound=0.0, upper_bound=1.0)
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    Binomial

    +
    +
    +class torch.distributions.binomial.Binomial(total_count=1, probs=None, logits=None, validate_args=None)[source]
    +

    Bases: torch.distributions.distribution.Distribution

    +

    Creates a Binomial distribution parameterized by total_count and +either probs or logits (but not both). total_count must be +broadcastable with probs/logits.

    +

    Example:

    +
    >>> m = Binomial(100, torch.tensor([0 , .2, .8, 1]))
    +>>> x = m.sample()
    +tensor([   0.,   22.,   71.,  100.])
    +
    +>>> m = Binomial(torch.tensor([[5.], [10.]]), torch.tensor([0.5, 0.8]))
    +>>> x = m.sample()
    +tensor([[ 4.,  5.],
    +        [ 7.,  6.]])
    +
    +
    +
    +
    Parameters
    +
      +
    • total_count (int or Tensor) – number of Bernoulli trials

    • +
    • probs (Tensor) – Event probabilities

    • +
    • logits (Tensor) – Event log-odds

    • +
    +
    +
    +
    +
    +arg_constraints = {'logits': Real(), 'probs': Interval(lower_bound=0.0, upper_bound=1.0), 'total_count': IntegerGreaterThan(lower_bound=0)}
    +
    + +
    +
    +enumerate_support(expand=True)[source]
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +has_enumerate_support = True
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +logits[source]
    +
    + +
    +
    +property mean
    +
    + +
    +
    +property param_shape
    +
    + +
    +
    +probs[source]
    +
    + +
    +
    +sample(sample_shape=torch.Size([]))[source]
    +
    + +
    +
    +property support
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    Categorical

    +
    +
    +class torch.distributions.categorical.Categorical(probs=None, logits=None, validate_args=None)[source]
    +

    Bases: torch.distributions.distribution.Distribution

    +

    Creates a categorical distribution parameterized by either probs or +logits (but not both).

    +
    +

    Note

    +

    It is equivalent to the distribution that torch.multinomial() +samples from.

    +
    +

    Samples are integers from \(\{0, \ldots, K-1\}\) where K is probs.size(-1).

    +

    If probs is 1D with length-K, each element is the relative +probability of sampling the class at that index.

    +

    If probs is 2D, it is treated as a batch of relative probability +vectors.

    +
    +

    Note

    +

    probs must be non-negative, finite and have a non-zero sum, +and it will be normalized to sum to 1.
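For example, unnormalized weights are accepted and rescaled:

>>> m = Categorical(torch.tensor([1.0, 2.0, 1.0]))
>>> m.probs
tensor([0.2500, 0.5000, 0.2500])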

    +
    +

    See also: torch.multinomial()

    +

    Example:

    +
    >>> m = Categorical(torch.tensor([ 0.25, 0.25, 0.25, 0.25 ]))
    +>>> m.sample()  # equal probability of 0, 1, 2, 3
    +tensor(3)
    +
    +
    +
    +
    Parameters
    +
      +
    • probs (Tensor) – event probabilities

    • +
    • logits (Tensor) – event log-odds

    • +
    +
    +
    +
    +
    +arg_constraints = {'logits': Real(), 'probs': Simplex()}
    +
    + +
    +
    +entropy()[source]
    +
    + +
    +
    +enumerate_support(expand=True)[source]
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +has_enumerate_support = True
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +logits[source]
    +
    + +
    +
    +property mean
    +
    + +
    +
    +property param_shape
    +
    + +
    +
    +probs[source]
    +
    + +
    +
    +sample(sample_shape=torch.Size([]))[source]
    +
    + +
    +
    +property support
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    Cauchy

    +
    +
    +class torch.distributions.cauchy.Cauchy(loc, scale, validate_args=None)[source]
    +

    Bases: torch.distributions.distribution.Distribution

    +

    Samples from a Cauchy (Lorentz) distribution. The distribution of the ratio of +independent normally distributed random variables with means 0 follows a +Cauchy distribution.

    +

    Example:

    +
    >>> m = Cauchy(torch.tensor([0.0]), torch.tensor([1.0]))
    +>>> m.sample()  # sample from a Cauchy distribution with loc=0 and scale=1
    +tensor([ 2.3214])
    +
    +
    +
    +
    Parameters
    +
      +
    • loc (float or Tensor) – mode or median of the distribution.

    • +
    • scale (float or Tensor) – half width at half maximum.

    • +
    +
    +
    +
    +
    +arg_constraints = {'loc': Real(), 'scale': GreaterThan(lower_bound=0.0)}
    +
    + +
    +
    +cdf(value)[source]
    +
    + +
    +
    +entropy()[source]
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +has_rsample = True
    +
    + +
    +
    +icdf(value)[source]
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +property mean
    +
    + +
    +
    +rsample(sample_shape=torch.Size([]))[source]
    +
    + +
    +
    +support = Real()
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    Chi2

    +
    +
    +class torch.distributions.chi2.Chi2(df, validate_args=None)[source]
    +

    Bases: torch.distributions.gamma.Gamma

    +

    Creates a Chi2 distribution parameterized by shape parameter df. +This is exactly equivalent to Gamma(alpha=0.5*df, beta=0.5)

    +

    Example:

    +
    >>> m = Chi2(torch.tensor([1.0]))
    +>>> m.sample()  # Chi2 distributed with shape df=1
    +tensor([ 0.1046])
    +
    +
    +
    +
    Parameters
    +

    df (float or Tensor) – shape parameter of the distribution

    +
    +
    +
    +
    +arg_constraints = {'df': GreaterThan(lower_bound=0.0)}
    +
    + +
    +
    +property df
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    + +
    +
    +

    Dirichlet

    +
    +
    +class torch.distributions.dirichlet.Dirichlet(concentration, validate_args=None)[source]
    +

    Bases: torch.distributions.exp_family.ExponentialFamily

    +

    Creates a Dirichlet distribution parameterized by concentration concentration.

    +

    Example:

    +
    >>> m = Dirichlet(torch.tensor([0.5, 0.5]))
+>>> m.sample()  # Dirichlet distributed with concentration concentration
    +tensor([ 0.1046,  0.8954])
    +
    +
    +
    +
    Parameters
    +

    concentration (Tensor) – concentration parameter of the distribution +(often referred to as alpha)

    +
    +
    +
    +
    +arg_constraints = {'concentration': GreaterThan(lower_bound=0.0)}
    +
    + +
    +
    +entropy()[source]
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +has_rsample = True
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +property mean
    +
    + +
    +
    +rsample(sample_shape=())[source]
    +
    + +
    +
    +support = Simplex()
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    Exponential

    +
    +
    +class torch.distributions.exponential.Exponential(rate, validate_args=None)[source]
    +

    Bases: torch.distributions.exp_family.ExponentialFamily

    +

Creates an Exponential distribution parameterized by rate.

    +

    Example:

    +
    >>> m = Exponential(torch.tensor([1.0]))
    +>>> m.sample()  # Exponential distributed with rate=1
    +tensor([ 0.1046])
    +
    +
    +
    +
    Parameters
    +

    rate (float or Tensor) – rate = 1 / scale of the distribution

    +
    +
    +
    +
    +arg_constraints = {'rate': GreaterThan(lower_bound=0.0)}
    +
    + +
    +
    +cdf(value)[source]
    +
    + +
    +
    +entropy()[source]
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +has_rsample = True
    +
    + +
    +
    +icdf(value)[source]
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +property mean
    +
    + +
    +
    +rsample(sample_shape=torch.Size([]))[source]
    +
    + +
    +
    +property stddev
    +
    + +
    +
    +support = GreaterThan(lower_bound=0.0)
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    FisherSnedecor

    +
    +
    +class torch.distributions.fishersnedecor.FisherSnedecor(df1, df2, validate_args=None)[source]
    +

    Bases: torch.distributions.distribution.Distribution

    +

    Creates a Fisher-Snedecor distribution parameterized by df1 and df2.

    +

    Example:

    +
    >>> m = FisherSnedecor(torch.tensor([1.0]), torch.tensor([2.0]))
    +>>> m.sample()  # Fisher-Snedecor-distributed with df1=1 and df2=2
    +tensor([ 0.2453])
    +
    +
    +
    +
    Parameters
    +
      +
    • df1 (float or Tensor) – degrees of freedom parameter 1

    • +
    • df2 (float or Tensor) – degrees of freedom parameter 2

    • +
    +
    +
    +
    +
    +arg_constraints = {'df1': GreaterThan(lower_bound=0.0), 'df2': GreaterThan(lower_bound=0.0)}
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +has_rsample = True
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +property mean
    +
    + +
    +
    +rsample(sample_shape=torch.Size([]))[source]
    +
    + +
    +
    +support = GreaterThan(lower_bound=0.0)
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    Gamma

    +
    +
    +class torch.distributions.gamma.Gamma(concentration, rate, validate_args=None)[source]
    +

    Bases: torch.distributions.exp_family.ExponentialFamily

    +

    Creates a Gamma distribution parameterized by shape concentration and rate.

    +

    Example:

    +
    >>> m = Gamma(torch.tensor([1.0]), torch.tensor([1.0]))
    +>>> m.sample()  # Gamma distributed with concentration=1 and rate=1
    +tensor([ 0.1046])
    +
    +
    +
    +
    Parameters
    +
      +
    • concentration (float or Tensor) – shape parameter of the distribution +(often referred to as alpha)

    • +
    • rate (float or Tensor) – rate = 1 / scale of the distribution +(often referred to as beta)

    • +
    +
    +
    +
    +
    +arg_constraints = {'concentration': GreaterThan(lower_bound=0.0), 'rate': GreaterThan(lower_bound=0.0)}
    +
    + +
    +
    +entropy()[source]
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +has_rsample = True
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +property mean
    +
    + +
    +
    +rsample(sample_shape=torch.Size([]))[source]
    +
    + +
    +
    +support = GreaterThan(lower_bound=0.0)
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    Geometric

    +
    +
    +class torch.distributions.geometric.Geometric(probs=None, logits=None, validate_args=None)[source]
    +

    Bases: torch.distributions.distribution.Distribution

    +

    Creates a Geometric distribution parameterized by probs, +where probs is the probability of success of Bernoulli trials. +It represents the probability that in \(k + 1\) Bernoulli trials, the +first \(k\) trials failed, before seeing a success.

    +

Samples are non-negative integers [0, \(\infty\)).

    +

    Example:

    +
    >>> m = Geometric(torch.tensor([0.3]))
    +>>> m.sample()  # underlying Bernoulli has 30% chance 1; 70% chance 0
    +tensor([ 2.])
    +
    +
    +
    +
    Parameters
    +
      +
    • probs (Number, Tensor) – the probability of sampling 1. Must be in range (0, 1]

    • +
    • logits (Number, Tensor) – the log-odds of sampling 1.

    • +
    +
    +
    +
    +
    +arg_constraints = {'logits': Real(), 'probs': Interval(lower_bound=0.0, upper_bound=1.0)}
    +
    + +
    +
    +entropy()[source]
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +logits[source]
    +
    + +
    +
    +property mean
    +
    + +
    +
    +probs[source]
    +
    + +
    +
    +sample(sample_shape=torch.Size([]))[source]
    +
    + +
    +
    +support = IntegerGreaterThan(lower_bound=0)
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    Gumbel

    +
    +
    +class torch.distributions.gumbel.Gumbel(loc, scale, validate_args=None)[source]
    +

    Bases: torch.distributions.transformed_distribution.TransformedDistribution

    +

    Samples from a Gumbel Distribution.

    +

    Examples:

    +
    >>> m = Gumbel(torch.tensor([1.0]), torch.tensor([2.0]))
    +>>> m.sample()  # sample from Gumbel distribution with loc=1, scale=2
    +tensor([ 1.0124])
    +
    +
    +
    +
    Parameters
    +
      +
    • loc (float or Tensor) – Location parameter of the distribution

    • +
    • scale (float or Tensor) – Scale parameter of the distribution

    • +
    +
    +
    +
    +
    +arg_constraints = {'loc': Real(), 'scale': GreaterThan(lower_bound=0.0)}
    +
    + +
    +
    +entropy()[source]
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +property mean
    +
    + +
    +
    +property stddev
    +
    + +
    +
    +support = Real()
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    HalfCauchy

    +
    +
    +class torch.distributions.half_cauchy.HalfCauchy(scale, validate_args=None)[source]
    +

    Bases: torch.distributions.transformed_distribution.TransformedDistribution

    +

Creates a half-Cauchy distribution parameterized by scale where:

    +
    X ~ Cauchy(0, scale)
    +Y = |X| ~ HalfCauchy(scale)
    +
    +
    +

    Example:

    +
    >>> m = HalfCauchy(torch.tensor([1.0]))
    +>>> m.sample()  # half-cauchy distributed with scale=1
    +tensor([ 2.3214])
    +
    +
    +
    +
    Parameters
    +

    scale (float or Tensor) – scale of the full Cauchy distribution

    +
    +
    +
    +
    +arg_constraints = {'scale': GreaterThan(lower_bound=0.0)}
    +
    + +
    +
    +cdf(value)[source]
    +
    + +
    +
    +entropy()[source]
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +has_rsample = True
    +
    + +
    +
    +icdf(prob)[source]
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +property mean
    +
    + +
    +
    +property scale
    +
    + +
    +
    +support = GreaterThan(lower_bound=0.0)
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    HalfNormal

    +
    +
    +class torch.distributions.half_normal.HalfNormal(scale, validate_args=None)[source]
    +

    Bases: torch.distributions.transformed_distribution.TransformedDistribution

    +

    Creates a half-normal distribution parameterized by scale where:

    +
    X ~ Normal(0, scale)
    +Y = |X| ~ HalfNormal(scale)
    +
    +
    +

    Example:

    +
    >>> m = HalfNormal(torch.tensor([1.0]))
    +>>> m.sample()  # half-normal distributed with scale=1
    +tensor([ 0.1046])
    +
    +
    +
    +
    Parameters
    +

    scale (float or Tensor) – scale of the full Normal distribution

    +
    +
    +
    +
    +arg_constraints = {'scale': GreaterThan(lower_bound=0.0)}
    +
    + +
    +
    +cdf(value)[source]
    +
    + +
    +
    +entropy()[source]
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +has_rsample = True
    +
    + +
    +
    +icdf(prob)[source]
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +property mean
    +
    + +
    +
    +property scale
    +
    + +
    +
    +support = GreaterThan(lower_bound=0.0)
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    Independent

    +
    +
    +class torch.distributions.independent.Independent(base_distribution, reinterpreted_batch_ndims, validate_args=None)[source]
    +

    Bases: torch.distributions.distribution.Distribution

    +

    Reinterprets some of the batch dims of a distribution as event dims.

    +

    This is mainly useful for changing the shape of the result of +log_prob(). For example to create a diagonal Normal distribution with +the same shape as a Multivariate Normal distribution (so they are +interchangeable), you can:

    +
    >>> loc = torch.zeros(3)
    +>>> scale = torch.ones(3)
    +>>> mvn = MultivariateNormal(loc, scale_tril=torch.diag(scale))
    +>>> [mvn.batch_shape, mvn.event_shape]
    +[torch.Size(()), torch.Size((3,))]
    +>>> normal = Normal(loc, scale)
    +>>> [normal.batch_shape, normal.event_shape]
    +[torch.Size((3,)), torch.Size(())]
    +>>> diagn = Independent(normal, 1)
    +>>> [diagn.batch_shape, diagn.event_shape]
    +[torch.Size(()), torch.Size((3,))]
    +
    +
    +
    +
    Parameters
    +
    +
    +
    +
    +
    +arg_constraints = {}
    +
    + +
    +
    +entropy()[source]
    +
    + +
    +
    +enumerate_support(expand=True)[source]
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +property has_enumerate_support
    +
    + +
    +
    +property has_rsample
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +property mean
    +
    + +
    +
    +rsample(sample_shape=torch.Size([]))[source]
    +
    + +
    +
    +sample(sample_shape=torch.Size([]))[source]
    +
    + +
    +
    +property support
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    Laplace

    +
    +
    +class torch.distributions.laplace.Laplace(loc, scale, validate_args=None)[source]
    +

    Bases: torch.distributions.distribution.Distribution

    +

Creates a Laplace distribution parameterized by loc and scale.

    +

    Example:

    +
    >>> m = Laplace(torch.tensor([0.0]), torch.tensor([1.0]))
    +>>> m.sample()  # Laplace distributed with loc=0, scale=1
    +tensor([ 0.1046])
    +
    +
    +
    +
    Parameters
    +
    +
    +
    +
    +
    +arg_constraints = {'loc': Real(), 'scale': GreaterThan(lower_bound=0.0)}
    +
    + +
    +
    +cdf(value)[source]
    +
    + +
    +
    +entropy()[source]
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +has_rsample = True
    +
    + +
    +
    +icdf(value)[source]
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +property mean
    +
    + +
    +
    +rsample(sample_shape=torch.Size([]))[source]
    +
    + +
    +
    +property stddev
    +
    + +
    +
    +support = Real()
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    LogNormal

    +
    +
    +class torch.distributions.log_normal.LogNormal(loc, scale, validate_args=None)[source]
    +

    Bases: torch.distributions.transformed_distribution.TransformedDistribution

    +

    Creates a log-normal distribution parameterized by +loc and scale where:

    +
    X ~ Normal(loc, scale)
    +Y = exp(X) ~ LogNormal(loc, scale)
    +
    +
    +

    Example:

    +
    >>> m = LogNormal(torch.tensor([0.0]), torch.tensor([1.0]))
    +>>> m.sample()  # log-normal distributed with mean=0 and stddev=1
    +tensor([ 0.1046])
    +
    +
    +
    +
    Parameters
    +
      +
    • loc (float or Tensor) – mean of log of distribution

    • +
    • scale (float or Tensor) – standard deviation of log of the distribution

    • +
    +
    +
    +
    +
    +arg_constraints = {'loc': Real(), 'scale': GreaterThan(lower_bound=0.0)}
    +
    + +
    +
    +entropy()[source]
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +has_rsample = True
    +
    + +
    +
    +property loc
    +
    + +
    +
    +property mean
    +
    + +
    +
    +property scale
    +
    + +
    +
    +support = GreaterThan(lower_bound=0.0)
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    LowRankMultivariateNormal

    +
    +
    +class torch.distributions.lowrank_multivariate_normal.LowRankMultivariateNormal(loc, cov_factor, cov_diag, validate_args=None)[source]
    +

    Bases: torch.distributions.distribution.Distribution

    +

    Creates a multivariate normal distribution with covariance matrix having a low-rank form +parameterized by cov_factor and cov_diag:

    +
    covariance_matrix = cov_factor @ cov_factor.T + cov_diag
    +
    +
    +

    Example

    +
>>> m = LowRankMultivariateNormal(torch.zeros(2), torch.tensor([[1.], [0.]]), torch.tensor([1., 1.]))
    +>>> m.sample()  # normally distributed with mean=`[0,0]`, cov_factor=`[1,0]`, cov_diag=`[1,1]`
    +tensor([-0.2102, -0.5429])
    +
    +
    +
    +
    Parameters
    +
      +
    • loc (Tensor) – mean of the distribution with shape batch_shape + event_shape

    • +
    • cov_factor (Tensor) – factor part of low-rank form of covariance matrix with shape +batch_shape + event_shape + (rank,)

    • +
    • cov_diag (Tensor) – diagonal part of low-rank form of covariance matrix with shape +batch_shape + event_shape

    • +
    +
    +
    +
    +

    Note

    +

    The computation for determinant and inverse of covariance matrix is avoided when +cov_factor.shape[1] << cov_factor.shape[0] thanks to Woodbury matrix identity and +matrix determinant lemma. +Thanks to these formulas, we just need to compute the determinant and inverse of +the small size “capacitance” matrix:

    +
    capacitance = I + cov_factor.T @ inv(cov_diag) @ cov_factor
    +
    +
    +
    +
    +
    +arg_constraints = {'cov_diag': GreaterThan(lower_bound=0.0), 'cov_factor': Real(), 'loc': Real()}
    +
    + +
    +
    +covariance_matrix[source]
    +
    + +
    +
    +entropy()[source]
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +has_rsample = True
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +property mean
    +
    + +
    +
    +precision_matrix[source]
    +
    + +
    +
    +rsample(sample_shape=torch.Size([]))[source]
    +
    + +
    +
    +scale_tril[source]
    +
    + +
    +
    +support = Real()
    +
    + +
    +
    +variance[source]
    +
    + +
    + +
    +
    +

    Multinomial

    +
    +
    +class torch.distributions.multinomial.Multinomial(total_count=1, probs=None, logits=None, validate_args=None)[source]
    +

    Bases: torch.distributions.distribution.Distribution

    +

    Creates a Multinomial distribution parameterized by total_count and +either probs or logits (but not both). The innermost dimension of +probs indexes over categories. All other dimensions index over batches.

    +

    Note that total_count need not be specified if only log_prob() is +called (see example below)

    +
    +

    Note

    +

    probs must be non-negative, finite and have a non-zero sum, +and it will be normalized to sum to 1.

    +
    +
      +
    • sample() requires a single shared total_count for all +parameters and samples.

    • +
    • log_prob() allows different total_count for each parameter and +sample.

    • +
    +

    Example:

    +
    >>> m = Multinomial(100, torch.tensor([ 1., 1., 1., 1.]))
    +>>> x = m.sample()  # equal probability of 0, 1, 2, 3
    +tensor([ 21.,  24.,  30.,  25.])
    +
    +>>> Multinomial(probs=torch.tensor([1., 1., 1., 1.])).log_prob(x)
    +tensor([-4.1338])
    +
    +
    +
    +
    Parameters
    +
      +
    • total_count (int) – number of trials

    • +
    • probs (Tensor) – event probabilities

    • +
    • logits (Tensor) – event log probabilities

    • +
    +
    +
    +
    +
    +arg_constraints = {'logits': Real(), 'probs': Simplex()}
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +property logits
    +
    + +
    +
    +property mean
    +
    + +
    +
    +property param_shape
    +
    + +
    +
    +property probs
    +
    + +
    +
    +sample(sample_shape=torch.Size([]))[source]
    +
    + +
    +
    +property support
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    MultivariateNormal

    +
    +
    +class torch.distributions.multivariate_normal.MultivariateNormal(loc, covariance_matrix=None, precision_matrix=None, scale_tril=None, validate_args=None)[source]
    +

    Bases: torch.distributions.distribution.Distribution

    +

    Creates a multivariate normal (also called Gaussian) distribution +parameterized by a mean vector and a covariance matrix.

    +

    The multivariate normal distribution can be parameterized either +in terms of a positive definite covariance matrix \(\mathbf{\Sigma}\) +or a positive definite precision matrix \(\mathbf{\Sigma}^{-1}\) +or a lower-triangular matrix \(\mathbf{L}\) with positive-valued +diagonal entries, such that +\(\mathbf{\Sigma} = \mathbf{L}\mathbf{L}^\top\). This triangular matrix +can be obtained via e.g. Cholesky decomposition of the covariance.

    +

    Example

    +
    >>> m = MultivariateNormal(torch.zeros(2), torch.eye(2))
    +>>> m.sample()  # normally distributed with mean=`[0,0]` and covariance_matrix=`I`
    +tensor([-0.2102, -0.5429])
    +
    +
    +
    +
    Parameters
    +
      +
    • loc (Tensor) – mean of the distribution

    • +
    • covariance_matrix (Tensor) – positive-definite covariance matrix

    • +
    • precision_matrix (Tensor) – positive-definite precision matrix

    • +
    • scale_tril (Tensor) – lower-triangular factor of covariance, with positive-valued diagonal

    • +
    +
    +
    +
    +

    Note

    +

    Only one of covariance_matrix or precision_matrix or +scale_tril can be specified.

    +

    Using scale_tril will be more efficient: all computations internally +are based on scale_tril. If covariance_matrix or +precision_matrix is passed instead, it is only used to compute +the corresponding lower triangular matrices using a Cholesky decomposition.
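As an illustration, the two parameterizations below are equivalent, but the second skips the internal Cholesky factorization (torch.cholesky is the factorization routine current at the time of these docs):

>>> cov = torch.tensor([[2.0, 0.3], [0.3, 1.0]])
>>> L = torch.cholesky(cov)
>>> m1 = MultivariateNormal(torch.zeros(2), covariance_matrix=cov)
>>> m2 = MultivariateNormal(torch.zeros(2), scale_tril=L)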

    +
    +
    +
    +arg_constraints = {'covariance_matrix': PositiveDefinite(), 'loc': RealVector(), 'precision_matrix': PositiveDefinite(), 'scale_tril': LowerCholesky()}
    +
    + +
    +
    +covariance_matrix[source]
    +
    + +
    +
    +entropy()[source]
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +has_rsample = True
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +property mean
    +
    + +
    +
    +precision_matrix[source]
    +
    + +
    +
    +rsample(sample_shape=torch.Size([]))[source]
    +
    + +
    +
    +scale_tril[source]
    +
    + +
    +
    +support = Real()
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    NegativeBinomial

    +
    +
    +class torch.distributions.negative_binomial.NegativeBinomial(total_count, probs=None, logits=None, validate_args=None)[source]
    +

    Bases: torch.distributions.distribution.Distribution

    +

    Creates a Negative Binomial distribution, i.e. distribution +of the number of independent identical Bernoulli trials +needed before total_count failures are achieved. The probability +of success of each Bernoulli trial is probs.

    +
    +
    Parameters
    +
      +
    • total_count (float or Tensor) – non-negative number of negative Bernoulli +trials to stop, although the distribution is still valid for real +valued count

    • +
    • probs (Tensor) – Event probabilities of success in the half open interval [0, 1)

    • +
    • logits (Tensor) – Event log-odds for probabilities of success

    • +
    +
    +
    +
    +
    +arg_constraints = {'logits': Real(), 'probs': HalfOpenInterval(lower_bound=0.0, upper_bound=1.0), 'total_count': GreaterThanEq(lower_bound=0)}
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +logits[source]
    +
    + +
    +
    +property mean
    +
    + +
    +
    +property param_shape
    +
    + +
    +
    +probs[source]
    +
    + +
    +
    +sample(sample_shape=torch.Size([]))[source]
    +
    + +
    +
    +support = IntegerGreaterThan(lower_bound=0)
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    Normal

    +
    +
    +class torch.distributions.normal.Normal(loc, scale, validate_args=None)[source]
    +

    Bases: torch.distributions.exp_family.ExponentialFamily

    +

    Creates a normal (also called Gaussian) distribution parameterized by +loc and scale.

    +

    Example:

    +
    >>> m = Normal(torch.tensor([0.0]), torch.tensor([1.0]))
    +>>> m.sample()  # normally distributed with loc=0 and scale=1
    +tensor([ 0.1046])
    +
    +
    +
    +
    Parameters
    +
      +
    • loc (float or Tensor) – mean of the distribution (often referred to as mu)

    • +
    • scale (float or Tensor) – standard deviation of the distribution +(often referred to as sigma)

    • +
    +
    +
    +
    +
    +arg_constraints = {'loc': Real(), 'scale': GreaterThan(lower_bound=0.0)}
    +
    + +
    +
    +cdf(value)[source]
    +
    + +
    +
    +entropy()[source]
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +has_rsample = True
    +
    + +
    +
    +icdf(value)[source]
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +property mean
    +
    + +
    +
    +rsample(sample_shape=torch.Size([]))[source]
    +
    + +
    +
    +sample(sample_shape=torch.Size([]))[source]
    +
    + +
    +
    +property stddev
    +
    + +
    +
    +support = Real()
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    OneHotCategorical

    +
    +
    +class torch.distributions.one_hot_categorical.OneHotCategorical(probs=None, logits=None, validate_args=None)[source]
    +

    Bases: torch.distributions.distribution.Distribution

    +

    Creates a one-hot categorical distribution parameterized by probs or +logits.

    +

    Samples are one-hot coded vectors of size probs.size(-1).

    +
    +

    Note

    +

    probs must be non-negative, finite and have a non-zero sum, +and it will be normalized to sum to 1.

    +
    +

    See also: torch.distributions.Categorical() for specifications of +probs and logits.

    +

    Example:

    +
    >>> m = OneHotCategorical(torch.tensor([ 0.25, 0.25, 0.25, 0.25 ]))
    +>>> m.sample()  # equal probability of 0, 1, 2, 3
    +tensor([ 0.,  0.,  0.,  1.])
    +
    +
    +
    +
    Parameters
    +
      +
    • probs (Tensor) – event probabilities

    • +
    • logits (Tensor) – event log probabilities

    • +
    +
    +
    +
    +
    +arg_constraints = {'logits': Real(), 'probs': Simplex()}
    +
    + +
    +
    +entropy()[source]
    +
    + +
    +
    +enumerate_support(expand=True)[source]
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +has_enumerate_support = True
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +property logits
    +
    + +
    +
    +property mean
    +
    + +
    +
    +property param_shape
    +
    + +
    +
    +property probs
    +
    + +
    +
    +sample(sample_shape=torch.Size([]))[source]
    +
    + +
    +
    +support = Simplex()
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    Pareto

    +
    +
    +class torch.distributions.pareto.Pareto(scale, alpha, validate_args=None)[source]
    +

    Bases: torch.distributions.transformed_distribution.TransformedDistribution

    +

    Samples from a Pareto Type 1 distribution.

    +

    Example:

    +
    >>> m = Pareto(torch.tensor([1.0]), torch.tensor([1.0]))
    +>>> m.sample()  # sample from a Pareto distribution with scale=1 and alpha=1
    +tensor([ 1.5623])
    +
    +
    +
    +
    Parameters
    +
      +
    • scale (float or Tensor) – Scale parameter of the distribution

    • +
    • alpha (float or Tensor) – Shape parameter of the distribution

    • +
    +
    +
    +
    +
    +arg_constraints = {'alpha': GreaterThan(lower_bound=0.0), 'scale': GreaterThan(lower_bound=0.0)}
    +
    + +
    +
    +entropy()[source]
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +property mean
    +
    + +
    +
    +property support
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    Poisson

    +
    +
    +class torch.distributions.poisson.Poisson(rate, validate_args=None)[source]
    +

    Bases: torch.distributions.exp_family.ExponentialFamily

    +

    Creates a Poisson distribution parameterized by rate, the rate parameter.

    +

    Samples are nonnegative integers, with a pmf given by

    +
+\[\mathrm{rate}^k \frac{e^{-\mathrm{rate}}}{k!}\]
    +

    Example:

    +
    >>> m = Poisson(torch.tensor([4]))
    +>>> m.sample()
    +tensor([ 3.])
    +
    +
    +
    +
    Parameters
    +

    rate (Number, Tensor) – the rate parameter

    +
    +
    +
    +
    +arg_constraints = {'rate': GreaterThan(lower_bound=0.0)}
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +property mean
    +
    + +
    +
    +sample(sample_shape=torch.Size([]))[source]
    +
    + +
    +
    +support = IntegerGreaterThan(lower_bound=0)
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    RelaxedBernoulli

    +
    +
    +class torch.distributions.relaxed_bernoulli.RelaxedBernoulli(temperature, probs=None, logits=None, validate_args=None)[source]
    +

    Bases: torch.distributions.transformed_distribution.TransformedDistribution

    +

Creates a RelaxedBernoulli distribution, parametrized by temperature, and either probs or logits (but not both). This is a relaxed version of the Bernoulli distribution, so the values are in (0, 1), and it has reparametrizable samples.

    +

    Example:

    +
    >>> m = RelaxedBernoulli(torch.tensor([2.2]),
    +                         torch.tensor([0.1, 0.2, 0.3, 0.99]))
    +>>> m.sample()
    +tensor([ 0.2951,  0.3442,  0.8918,  0.9021])
    +
    +
    +
    +
    Parameters
    +
      +
    • temperature (Tensor) – relaxation temperature

    • +
    • probs (Number, Tensor) – the probability of sampling 1

    • +
    • logits (Number, Tensor) – the log-odds of sampling 1

    • +
    +
    +
    +
    +
    +arg_constraints = {'logits': Real(), 'probs': Interval(lower_bound=0.0, upper_bound=1.0)}
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +has_rsample = True
    +
    + +
    +
    +property logits
    +
    + +
    +
    +property probs
    +
    + +
    +
    +support = Interval(lower_bound=0.0, upper_bound=1.0)
    +
    + +
    +
    +property temperature
    +
    + +
    + +
    +
    +

    LogitRelaxedBernoulli

    +
    +
    +class torch.distributions.relaxed_bernoulli.LogitRelaxedBernoulli(temperature, probs=None, logits=None, validate_args=None)[source]
    +

    Bases: torch.distributions.distribution.Distribution

    +

    Creates a LogitRelaxedBernoulli distribution parameterized by probs +or logits (but not both), which is the logit of a RelaxedBernoulli +distribution.

    +

    Samples are logits of values in (0, 1). See [1] for more details.

    +
    +
    Parameters
    +
      +
    • temperature (Tensor) – relaxation temperature

    • +
    • probs (Number, Tensor) – the probability of sampling 1

    • +
    • logits (Number, Tensor) – the log-odds of sampling 1

    • +
    +
    +
    +

    [1] The Concrete Distribution: A Continuous Relaxation of Discrete Random +Variables (Maddison et al, 2017)

    +

    [2] Categorical Reparametrization with Gumbel-Softmax +(Jang et al, 2017)

    +
    +
    +arg_constraints = {'logits': Real(), 'probs': Interval(lower_bound=0.0, upper_bound=1.0)}
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +logits[source]
    +
    + +
    +
    +property param_shape
    +
    + +
    +
    +probs[source]
    +
    + +
    +
    +rsample(sample_shape=torch.Size([]))[source]
    +
    + +
    +
    +support = Real()
    +
    + +
    + +
    +
    +

    RelaxedOneHotCategorical

    +
    +
    +class torch.distributions.relaxed_categorical.RelaxedOneHotCategorical(temperature, probs=None, logits=None, validate_args=None)[source]
    +

    Bases: torch.distributions.transformed_distribution.TransformedDistribution

    +

Creates a RelaxedOneHotCategorical distribution parametrized by temperature, and either probs or logits. This is a relaxed version of the OneHotCategorical distribution, so its samples are on the simplex and are reparametrizable.

    +

    Example:

    +
    >>> m = RelaxedOneHotCategorical(torch.tensor([2.2]),
    +                                 torch.tensor([0.1, 0.2, 0.3, 0.4]))
    +>>> m.sample()
    +tensor([ 0.1294,  0.2324,  0.3859,  0.2523])
    +
    +
    +
    +
    Parameters
    +
      +
    • temperature (Tensor) – relaxation temperature

    • +
    • probs (Tensor) – event probabilities

    • +
    • logits (Tensor) – the log probability of each event.

    • +
    +
    +
    +
    +
    +arg_constraints = {'logits': Real(), 'probs': Simplex()}
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +has_rsample = True
    +
    + +
    +
    +property logits
    +
    + +
    +
    +property probs
    +
    + +
    +
    +support = Simplex()
    +
    + +
    +
    +property temperature
    +
    + +
    + +
    +
    +

    StudentT

    +
    +
    +class torch.distributions.studentT.StudentT(df, loc=0.0, scale=1.0, validate_args=None)[source]
    +

    Bases: torch.distributions.distribution.Distribution

    +

Creates a Student’s t-distribution parameterized by degrees of freedom df, mean loc and scale scale.

    +

    Example:

    +
    >>> m = StudentT(torch.tensor([2.0]))
    +>>> m.sample()  # Student's t-distributed with degrees of freedom=2
    +tensor([ 0.1046])
    +
    +
    +
    +
    Parameters
    +
    +
    +
    +
    +
    +arg_constraints = {'df': GreaterThan(lower_bound=0.0), 'loc': Real(), 'scale': GreaterThan(lower_bound=0.0)}
    +
    + +
    +
    +entropy()[source]
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +has_rsample = True
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +property mean
    +
    + +
    +
    +rsample(sample_shape=torch.Size([]))[source]
    +
    + +
    +
    +support = Real()
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    TransformedDistribution

    +
    +
    +class torch.distributions.transformed_distribution.TransformedDistribution(base_distribution, transforms, validate_args=None)[source]
    +

    Bases: torch.distributions.distribution.Distribution

    +

    Extension of the Distribution class, which applies a sequence of Transforms +to a base distribution. Let f be the composition of transforms applied:

    +
    X ~ BaseDistribution
    +Y = f(X) ~ TransformedDistribution(BaseDistribution, f)
    +log p(Y) = log p(X) + log |det (dX/dY)|
    +
    +
    +

    Note that the .event_shape of a TransformedDistribution is the +maximum shape of its base distribution and its transforms, since transforms +can introduce correlations among events.

    +

    An example for the usage of TransformedDistribution would be:

    +
    # Building a Logistic Distribution
    +# X ~ Uniform(0, 1)
    +# f = a + b * logit(X)
    +# Y ~ f(X) ~ Logistic(a, b)
    +base_distribution = Uniform(0, 1)
    +transforms = [SigmoidTransform().inv, AffineTransform(loc=a, scale=b)]
    +logistic = TransformedDistribution(base_distribution, transforms)
    +
    +
    +

    For more examples, please look at the implementations of +Gumbel, +HalfCauchy, +HalfNormal, +LogNormal, +Pareto, +Weibull, +RelaxedBernoulli and +RelaxedOneHotCategorical

    +
    +
    +arg_constraints = {}
    +
    + +
    +
    +cdf(value)[source]
    +

    Computes the cumulative distribution function by inverting the +transform(s) and computing the score of the base distribution.

    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +property has_rsample
    +
    + +
    +
    +icdf(value)[source]
    +

Computes the inverse cumulative distribution function using the transform(s) and the score of the base distribution.

    +
    + +
    +
    +log_prob(value)[source]
    +

    Scores the sample by inverting the transform(s) and computing the score +using the score of the base distribution and the log abs det jacobian.

    +
    + +
    +
    +rsample(sample_shape=torch.Size([]))[source]
    +

    Generates a sample_shape shaped reparameterized sample or sample_shape +shaped batch of reparameterized samples if the distribution parameters +are batched. Samples first from base distribution and applies +transform() for every transform in the list.

    +
    + +
    +
    +sample(sample_shape=torch.Size([]))[source]
    +

    Generates a sample_shape shaped sample or sample_shape shaped batch of +samples if the distribution parameters are batched. Samples first from +base distribution and applies transform() for every transform in the +list.

    +
    + +
    +
    +property support
    +
    + +
    + +
    +
    +

    Uniform

    +
    +
    +class torch.distributions.uniform.Uniform(low, high, validate_args=None)[source]
    +

    Bases: torch.distributions.distribution.Distribution

    +

    Generates uniformly distributed random samples from the half-open interval +[low, high).

    +

    Example:

    +
    >>> m = Uniform(torch.tensor([0.0]), torch.tensor([5.0]))
    +>>> m.sample()  # uniformly distributed in the range [0.0, 5.0)
    +tensor([ 2.3418])
    +
    +
    +
    +
    Parameters
    +
    +
    +
    +
    +
    +arg_constraints = {'high': Dependent(), 'low': Dependent()}
    +
    + +
    +
    +cdf(value)[source]
    +
    + +
    +
    +entropy()[source]
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +has_rsample = True
    +
    + +
    +
    +icdf(value)[source]
    +
    + +
    +
    +log_prob(value)[source]
    +
    + +
    +
    +property mean
    +
    + +
    +
    +rsample(sample_shape=torch.Size([]))[source]
    +
    + +
    +
    +property stddev
    +
    + +
    +
    +property support
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    Weibull

    +
    +
    +class torch.distributions.weibull.Weibull(scale, concentration, validate_args=None)[source]
    +

    Bases: torch.distributions.transformed_distribution.TransformedDistribution

    +

    Samples from a two-parameter Weibull distribution.

    +

    Example

    +
    >>> m = Weibull(torch.tensor([1.0]), torch.tensor([1.0]))
    +>>> m.sample()  # sample from a Weibull distribution with scale=1, concentration=1
    +tensor([ 0.4784])
    +
    +
    +
    +
    Parameters
    +
      +
    • scale (float or Tensor) – Scale parameter of distribution (lambda).

    • +
    • concentration (float or Tensor) – Concentration parameter of distribution (k/shape).

    • +
    +
    +
    +
    +
    +arg_constraints = {'concentration': GreaterThan(lower_bound=0.0), 'scale': GreaterThan(lower_bound=0.0)}
    +
    + +
    +
    +entropy()[source]
    +
    + +
    +
    +expand(batch_shape, _instance=None)[source]
    +
    + +
    +
    +property mean
    +
    + +
    +
    +support = GreaterThan(lower_bound=0.0)
    +
    + +
    +
    +property variance
    +
    + +
    + +
    +
    +

    KL Divergence

    +
    +
    +torch.distributions.kl.kl_divergence(p, q)[source]
    +

    Compute Kullback-Leibler divergence \(KL(p \| q)\) between two distributions.

    +
    +\[KL(p \| q) = \int p(x) \log\frac {p(x)} {q(x)} \,dx\]
    +
    +
    Parameters

    • p (Distribution) – A Distribution object.

    • q (Distribution) – A Distribution object.
    Returns
    +

    A batch of KL divergences of shape batch_shape.

    +
    +
    Return type
    +

    Tensor

    +
    +
    Raises
    +

    NotImplementedError – If the distribution types have not been registered via register_kl().

    +
    +
    +
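    A minimal usage sketch (assuming two Normal distributions, whose KL pair is registered; kl_divergence broadcasts over batch shapes):

    import torch
    from torch.distributions import Normal
    from torch.distributions.kl import kl_divergence

    p = Normal(torch.zeros(3), torch.ones(3))    # batch of 3 standard normals
    q = Normal(torch.ones(3), 2 * torch.ones(3))
    kl = kl_divergence(p, q)                     # shape (3,): one KL value per batch element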
    + +
    +
    +torch.distributions.kl.register_kl(type_p, type_q)[source]
    +

    Decorator to register a pairwise function with kl_divergence(). Usage:

    +
    @register_kl(Normal, Normal)
    +def kl_normal_normal(p, q):
    +    # insert implementation here
    +
    +
    +

    Lookup returns the most specific (type, type) match ordered by subclass. If the match is ambiguous, a RuntimeWarning is raised. For example, to resolve the ambiguous situation:

    +
    @register_kl(BaseP, DerivedQ)
    +def kl_version1(p, q): ...
    +@register_kl(DerivedP, BaseQ)
    +def kl_version2(p, q): ...
    +
    +
    +

    you should register a third most-specific implementation, e.g.:

    +
    register_kl(DerivedP, DerivedQ)(kl_version1)  # Break the tie.
    +
    +
    +
    +
    Parameters
    +
      +
    • type_p (type) – A subclass of Distribution.

    • +
    • type_q (type) – A subclass of Distribution.

    • +
    +
    +
    +
    + +
    +
    +

    Transforms

    +
    +
    +class torch.distributions.transforms.Transform(cache_size=0)[source]
    +

    Abstract class for invertible transformations with computable log det jacobians. They are primarily used in torch.distributions.TransformedDistribution.

    +

    Caching is useful for transforms whose inverses are either expensive or numerically unstable. Note that care must be taken with memoized values since the autograd graph may be reversed. For example, while the following works with or without caching:

    +
    y = t(x)
    +t.log_abs_det_jacobian(x, y).backward()  # x will receive gradients.
    +
    +
    +

    However, the following will error when caching due to dependency reversal:

    +
    y = t(x)
    +z = t.inv(y)
    +grad(z.sum(), [y])  # error because z is x
    +
    +
    +

    Derived classes should implement one or both of _call() or _inverse(). Derived classes that set bijective=True should also implement log_abs_det_jacobian().

    +
    +
    Parameters
    +

    cache_size (int) – Size of cache. If zero, no caching is done. If one, the latest single value is cached. Only 0 and 1 are supported.

    +
    +
    Variables
    +
      +
    • ~Transform.domain (Constraint) – The constraint representing valid inputs to this transform.

    • ~Transform.codomain (Constraint) – The constraint representing valid outputs to this transform, which are inputs to the inverse transform.

    • ~Transform.bijective (bool) – Whether this transform is bijective. A transform t is bijective iff t.inv(t(x)) == x and t(t.inv(y)) == y for every x in the domain and y in the codomain. Transforms that are not bijective should at least maintain the weaker pseudoinverse properties t(t.inv(t(x))) == t(x) and t.inv(t(t.inv(y))) == t.inv(y).

    • ~Transform.sign (int or Tensor) – For bijective univariate transforms, this should be +1 or -1 depending on whether the transform is monotone increasing or decreasing.

    • ~Transform.event_dim (int) – Number of dimensions that are correlated together in the transform event_shape. This should be 0 for pointwise transforms, 1 for transforms that act jointly on vectors, 2 for transforms that act jointly on matrices, etc.
    +
    +
    +
    +
    +property inv
    +

    Returns the inverse Transform of this transform. This should satisfy t.inv.inv is t.

    +
    + +
    +
    +property sign
    +

    Returns the sign of the determinant of the Jacobian, if applicable. In general this only makes sense for bijective transforms.

    +
    + +
    +
    +log_abs_det_jacobian(x, y)[source]
    +

    Computes the log det jacobian log |dy/dx| given input and output.

    +
    + +
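    As a quick illustration, a sketch using ExpTransform, whose log abs det jacobian is simply x:

    import torch
    from torch.distributions.transforms import ExpTransform

    t = ExpTransform()
    x = torch.randn(5)
    y = t(x)                             # y = exp(x)
    x_back = t.inv(y)                    # recover x via the inverse transform
    ladj = t.log_abs_det_jacobian(x, y)  # equals x, since log |d exp(x)/dx| = x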
    + +
    +
    +class torch.distributions.transforms.ComposeTransform(parts)[source]
    +

    Composes multiple transforms in a chain. The transforms being composed are responsible for caching.

    +
    +
    Parameters
    +

    parts (list of Transform) – A list of transforms to compose.

    +
    +
    +
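    For example, a sketch composing an exponential map with an affine rescaling:

    import torch
    from torch.distributions.transforms import (AffineTransform, ComposeTransform,
                                                ExpTransform)

    # y = 2 * exp(x) + 1, applied and inverted as a single chain
    t = ComposeTransform([ExpTransform(), AffineTransform(loc=1.0, scale=2.0)])
    y = t(torch.tensor([0.0, 1.0]))
    x = t.inv(y)  # inverts the whole chain in reverse order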
    + +
    +
    +class torch.distributions.transforms.ExpTransform(cache_size=0)[source]
    +

    Transform via the mapping \(y = \exp(x)\).

    +
    + +
    +
    +class torch.distributions.transforms.PowerTransform(exponent, cache_size=0)[source]
    +

    Transform via the mapping \(y = x^{\text{exponent}}\).

    +
    + +
    +
    +class torch.distributions.transforms.SigmoidTransform(cache_size=0)[source]
    +

    Transform via the mapping \(y = \frac{1}{1 + \exp(-x)}\) and \(x = \text{logit}(y)\).

    +
    + +
    +
    +class torch.distributions.transforms.AbsTransform(cache_size=0)[source]
    +

    Transform via the mapping \(y = |x|\).

    +
    + +
    +
    +class torch.distributions.transforms.AffineTransform(loc, scale, event_dim=0, cache_size=0)[source]
    +

    Transform via the pointwise affine mapping \(y = \text{loc} + \text{scale} \times x\).

    +
    +
    Parameters
    +
      +
    • loc (Tensor or float) – Location parameter.

    • +
    • scale (Tensor or float) – Scale parameter.

    • +
    • event_dim (int) – Optional size of event_shape. This should be zero for univariate random variables, 1 for distributions over vectors, 2 for distributions over matrices, etc.

    • +
    +
    +
    +
    + +
    +
    +class torch.distributions.transforms.SoftmaxTransform(cache_size=0)[source]
    +

    Transform from unconstrained space to the simplex via \(y = \exp(x)\) then normalizing.

    +

    This is not bijective and cannot be used for HMC. However, this acts mostly coordinate-wise (except for the final normalization), and thus is appropriate for coordinate-wise optimization algorithms.

    +
    + +
    +
    +class torch.distributions.transforms.StickBreakingTransform(cache_size=0)[source]
    +

    Transform from unconstrained space to the simplex of one additional dimension via a stick-breaking process.

    +

    This transform arises as an iterated sigmoid transform in a stick-breaking construction of the Dirichlet distribution: the first logit is transformed via sigmoid to the first probability and the probability of everything else, and then the process recurses.

    +

    This is bijective and appropriate for use in HMC; however it mixes coordinates together and is less appropriate for optimization.

    +
    + +
    +
    +class torch.distributions.transforms.LowerCholeskyTransform(cache_size=0)[source]
    +

    Transform from unconstrained matrices to lower-triangular matrices with nonnegative diagonal entries.

    +

    This is useful for parameterizing positive definite matrices in terms of their Cholesky factorization.

    +
    + +
    +
    +class torch.distributions.transforms.CatTransform(tseq, dim=0, lengths=None)[source]
    +

    Transform functor that applies a sequence of transforms tseq component-wise to each submatrix at dim, of length lengths[dim], in a way compatible with torch.cat().

    +
    +
    Example::

    x0 = torch.cat([torch.range(1, 10), torch.range(1, 10)], dim=0)
    x = torch.cat([x0, x0], dim=0)
    t0 = CatTransform([ExpTransform(), identity_transform], dim=0, lengths=[10, 10])
    t = CatTransform([t0, t0], dim=0, lengths=[20, 20])
    y = t(x)

    +
    +
    +
    + +
    +
    +class torch.distributions.transforms.StackTransform(tseq, dim=0)[source]
    +

    Transform functor that applies a sequence of transforms tseq component-wise to each submatrix at dim in a way compatible with torch.stack().

    +
    +
    Example::

    x = torch.stack([torch.range(1, 10), torch.range(1, 10)], dim=1)
    t = StackTransform([ExpTransform(), identity_transform], dim=1)
    y = t(x)

    +
    +
    +
    + +
    +
    +

    Constraints

    +

    The following constraints are implemented:

    +
      +
    • constraints.boolean

    • +
    • constraints.cat

    • +
    • constraints.dependent

    • +
    • constraints.greater_than(lower_bound)

    • +
    • constraints.integer_interval(lower_bound, upper_bound)

    • +
    • constraints.interval(lower_bound, upper_bound)

    • +
    • constraints.lower_cholesky

    • +
    • constraints.lower_triangular

    • +
    • constraints.nonnegative_integer

    • +
    • constraints.positive

    • +
    • constraints.positive_definite

    • +
    • constraints.positive_integer

    • +
    • constraints.real

    • +
    • constraints.real_vector

    • +
    • constraints.simplex

    • +
    • constraints.stack

    • +
    • constraints.unit_interval

    • +
    +
    +
    +class torch.distributions.constraints.Constraint[source]
    +

    Abstract base class for constraints.

    +

    A constraint object represents a region over which a variable is valid, e.g. within which a variable can be optimized.

    +
    +
    +check(value)[source]
    +

    Returns a byte tensor of sample_shape + batch_shape indicating whether each event in value satisfies this constraint.

    +
    + +
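    For instance, a sketch using the constraints.positive singleton:

    import torch
    from torch.distributions import constraints

    value = torch.tensor([-1.0, 0.5, 2.0])
    ok = constraints.positive.check(value)  # elementwise 0/1 mask: fails for -1.0, passes otherwise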
    + +
    +
    +torch.distributions.constraints.dependent_property
    +

    alias of torch.distributions.constraints._DependentProperty

    +
    + +
    +
    +torch.distributions.constraints.integer_interval
    +

    alias of torch.distributions.constraints._IntegerInterval

    +
    + +
    +
    +torch.distributions.constraints.greater_than
    +

    alias of torch.distributions.constraints._GreaterThan

    +
    + +
    +
    +torch.distributions.constraints.greater_than_eq
    +

    alias of torch.distributions.constraints._GreaterThanEq

    +
    + +
    +
    +torch.distributions.constraints.less_than
    +

    alias of torch.distributions.constraints._LessThan

    +
    + +
    +
    +torch.distributions.constraints.interval
    +

    alias of torch.distributions.constraints._Interval

    +
    + +
    +
    +torch.distributions.constraints.half_open_interval
    +

    alias of torch.distributions.constraints._HalfOpenInterval

    +
    + +
    +
    +torch.distributions.constraints.cat
    +

    alias of torch.distributions.constraints._Cat

    +
    + +
    +
    +torch.distributions.constraints.stack
    +

    alias of torch.distributions.constraints._Stack

    +
    + +
    +
    +

    Constraint Registry

    +

    PyTorch provides two global ConstraintRegistry objects that link Constraint objects to Transform objects. These objects both input constraints and return transforms, but they have different guarantees on bijectivity.

    +
      +
    1. biject_to(constraint) looks up a bijective Transform from constraints.real to the given constraint. The returned transform is guaranteed to have .bijective = True and should implement .log_abs_det_jacobian().

    2. transform_to(constraint) looks up a not-necessarily bijective Transform from constraints.real to the given constraint. The returned transform is not guaranteed to implement .log_abs_det_jacobian().
    +

    The transform_to() registry is useful for performing unconstrained optimization on constrained parameters of probability distributions, which are indicated by each distribution’s .arg_constraints dict. These transforms often overparameterize a space in order to avoid rotation; they are thus more suitable for coordinate-wise optimization algorithms like Adam:

    +
    loc = torch.zeros(100, requires_grad=True)
    +unconstrained = torch.zeros(100, requires_grad=True)
    +scale = transform_to(Normal.arg_constraints['scale'])(unconstrained)
    +loss = -Normal(loc, scale).log_prob(data).sum()
    +
    +
    +

    The biject_to() registry is useful for Hamiltonian Monte Carlo, where samples from a probability distribution with constrained .support are propagated in an unconstrained space, and algorithms are typically rotation invariant:

    +
    dist = Exponential(rate)
    +unconstrained = torch.zeros(100, requires_grad=True)
    +sample = biject_to(dist.support)(unconstrained)
    +potential_energy = -dist.log_prob(sample).sum()
    +
    +
    +
    +

    Note

    +

    An example where transform_to and biject_to differ is constraints.simplex: transform_to(constraints.simplex) returns a SoftmaxTransform that simply exponentiates and normalizes its inputs; this is a cheap and mostly coordinate-wise operation appropriate for algorithms like SVI. In contrast, biject_to(constraints.simplex) returns a StickBreakingTransform that bijects its input down to a one-fewer-dimensional space; this is a more expensive, less numerically stable transform, but it is needed for algorithms like HMC.

    +
    +
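    To make the contrast concrete, a sketch using both registries on the same unconstrained input:

    import torch
    from torch.distributions import biject_to, constraints, transform_to

    x = torch.randn(4)
    p1 = transform_to(constraints.simplex)(x)  # SoftmaxTransform: 4 -> 4 elements, sums to 1
    p2 = biject_to(constraints.simplex)(x)     # StickBreakingTransform: 4 -> 5 elements, sums to 1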

    The biject_to and transform_to objects can be extended by user-defined constraints and transforms using their .register() method, either as a function on singleton constraints:

    +
    transform_to.register(my_constraint, my_transform)
    +
    +
    +

    or as a decorator on parameterized constraints:

    +
    @transform_to.register(MyConstraintClass)
    +def my_factory(constraint):
    +    assert isinstance(constraint, MyConstraintClass)
    +    return MyTransform(constraint.param1, constraint.param2)
    +
    +
    +

    You can create your own registry by creating a new ConstraintRegistry object.

    +
    +
    +class torch.distributions.constraint_registry.ConstraintRegistry[source]
    +

    Registry to link constraints to transforms.

    +
    +
    +register(constraint, factory=None)[source]
    +

    Registers a Constraint subclass in this registry. Usage:

    +
    @my_registry.register(MyConstraintClass)
    +def construct_transform(constraint):
    +    assert isinstance(constraint, MyConstraintClass)
    +    return MyTransform(constraint.arg_constraints)
    +
    +
    +
    +
    Parameters
    +
      +
    • constraint (subclass of Constraint) – A subclass of Constraint, or a singleton object of the desired class.

    • +
    • factory (callable) – A callable that inputs a constraint object and returns a Transform object.

    • +
    +
    +
    +
    + +
    + +
    +
\ No newline at end of file
diff --git a/docs/stable/dlpack.html b/docs/stable/dlpack.html
new file mode 100644
index 000000000000..b4aa6e884aeb
--- /dev/null
+++ b/docs/stable/dlpack.html
(new page: torch.utils.dlpack — PyTorch master documentation)

    torch.utils.dlpack

    +
    +
    +torch.utils.dlpack.from_dlpack(dlpack) → Tensor
    +

    Decodes a DLPack to a tensor.

    +
    +
    Parameters
    +

    dlpack – a PyCapsule object with the dltensor

    +
    +
    +

    The tensor will share the memory with the object represented in the dlpack. Note that each dlpack can only be consumed once.

    +
    + +
    +
    +torch.utils.dlpack.to_dlpack(tensor) → PyCapsule
    +

    Returns a DLPack representing the tensor.

    +
    +
    Parameters
    +

    tensor – a tensor to be exported

    +
    +
    +

    The dlpack shares the tensor’s memory. Note that each dlpack can only be consumed once.

    +
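    A round-trip sketch (the capsule aliases the original storage, so writes through one tensor are visible through the other):

    import torch
    from torch.utils.dlpack import from_dlpack, to_dlpack

    t = torch.arange(4)
    capsule = to_dlpack(t)     # PyCapsule; may only be consumed once
    t2 = from_dlpack(capsule)  # shares memory with t
    t2[0] = 100                # also visible as t[0]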
\ No newline at end of file
diff --git a/docs/stable/genindex.html b/docs/stable/genindex.html
new file mode 100644
index 000000000000..b744665c20fe
--- /dev/null
+++ b/docs/stable/genindex.html
(new page: Index — PyTorch master documentation; generated index, navigation only)
\ No newline at end of file
diff --git a/docs/stable/hub.html b/docs/stable/hub.html
new file mode 100644
index 000000000000..7428938f2194
--- /dev/null
+++ b/docs/stable/hub.html
(new page: torch.hub — PyTorch master documentation)

    torch.hub

    +

    PyTorch Hub is a pre-trained model repository designed to facilitate research reproducibility.

    +
    +

    Publishing models

    +

    PyTorch Hub supports publishing pre-trained models (model definitions and pre-trained weights) to a GitHub repository by adding a simple hubconf.py file.

    +

    hubconf.py can have multiple entrypoints. Each entrypoint is defined as a Python function (for example, a pre-trained model you want to publish).

    +
    def entrypoint_name(*args, **kwargs):
    +    # args & kwargs are optional, for models which take positional/keyword arguments.
    +    ...
    +
    +
    +
    +

    How to implement an entrypoint?

    +

    Here is a code snippet that specifies an entrypoint for the resnet18 model, expanding the implementation in pytorch/vision/hubconf.py. In most cases, importing the right function in hubconf.py is sufficient; we use the expanded version here only as an example to show how it works. You can see the full script in the pytorch/vision repo

    +
    dependencies = ['torch']
    +from torchvision.models.resnet import resnet18 as _resnet18
    +
    +# resnet18 is the name of entrypoint
    +def resnet18(pretrained=False, **kwargs):
    +    """ # This docstring shows up in hub.help()
    +    Resnet18 model
    +    pretrained (bool): kwargs, load pretrained weights into the model
    +    """
    +    # Call the model, load pretrained weights
    +    model = _resnet18(pretrained=pretrained, **kwargs)
    +    return model
    +
    +
    +
      +
    • dependencies variable is a list of package names required to load the model. Note this might be slightly different from the dependencies required for training a model.

    • args and kwargs are passed along to the real callable function.

    • The docstring of the function works as a help message. It explains what the model does and what the allowed positional/keyword arguments are. It’s highly recommended to add a few examples here.

    • An entrypoint function can either return a model (nn.Module), or auxiliary tools to make the user workflow smoother, e.g. tokenizers.

    • Callables prefixed with an underscore are considered helper functions which won’t show up in torch.hub.list().

    • Pretrained weights can either be stored locally in the GitHub repo, or be loadable by torch.hub.load_state_dict_from_url(). If less than 2GB, it’s recommended to attach them to a project release and use the url from the release. In the example above torchvision.models.resnet.resnet18 handles pretrained; alternatively you can put the following logic in the entrypoint definition.

    • +
    +
    if pretrained:
    +    # For checkpoint saved in local github repo, e.g. <RELATIVE_PATH_TO_CHECKPOINT>=weights/save.pth
    +    dirname = os.path.dirname(__file__)
    +    checkpoint = os.path.join(dirname, <RELATIVE_PATH_TO_CHECKPOINT>)
    +    state_dict = torch.load(checkpoint)
    +    model.load_state_dict(state_dict)
    +
    +    # For checkpoint saved elsewhere
    +    checkpoint = 'https://download.pytorch.org/models/resnet18-5c106cde.pth'
    +    model.load_state_dict(torch.hub.load_state_dict_from_url(checkpoint, progress=False))
    +
    +
    +
    +
    +

    Important Notice

    +
      +
    • The published models should be at least in a branch/tag. It can’t be a random commit.

    • +
    +
    +
    +
    +

    Loading models from Hub

    +

    PyTorch Hub provides convenient APIs to explore all available models in hub through torch.hub.list(), show docstrings and examples through torch.hub.help(), and load the pre-trained models using torch.hub.load().

    +
    +
    +torch.hub.list(github, force_reload=False)[source]
    +

    List all entrypoints available in github hubconf.

    +
    +
    Parameters
    +
      +
    • github – Required, a string with format “repo_owner/repo_name[:tag_name]” with an optional tag/branch. The default branch is master if not specified. Example: ‘pytorch/vision[:hub]’

    • +
    • force_reload – Optional, whether to discard the existing cache and force a fresh download. Default is False.

    • +
    +
    +
    Returns
    +

    a list of available entrypoint names

    +
    +
    Return type
    +

    entrypoints

    +
    +
    +

    Example

    +
    >>> entrypoints = torch.hub.list('pytorch/vision', force_reload=True)
    +
    +
    +
    + +
    +
    +torch.hub.help(github, model, force_reload=False)[source]
    +

    Show the docstring of entrypoint model.

    +
    +
    Parameters
    +
      +
    • github – Required, a string with format <repo_owner/repo_name[:tag_name]> with an optional tag/branch. The default branch is master if not specified. Example: ‘pytorch/vision[:hub]’

    • +
    • model – Required, a string of entrypoint name defined in repo’s hubconf.py

    • +
    • force_reload – Optional, whether to discard the existing cache and force a fresh download. Default is False.

    • +
    +
    +
    +

    Example

    +
    >>> print(torch.hub.help('pytorch/vision', 'resnet18', force_reload=True))
    +
    +
    +
    + +
    +
    +torch.hub.load(github, model, *args, **kwargs)[source]
    +

    Load a model from a github repo, with pretrained weights.

    +
    +
    Parameters
    +
      +
    • github – Required, a string with format “repo_owner/repo_name[:tag_name]” with an optional tag/branch. The default branch is master if not specified. Example: ‘pytorch/vision[:hub]’

    • +
    • model – Required, a string of entrypoint name defined in repo’s hubconf.py

    • +
    • *args – Optional, the corresponding args for callable model.

    • +
    • force_reload – Optional, whether to force a fresh download of the github repo unconditionally. Default is False.

    • +
    • **kwargs – Optional, the corresponding kwargs for callable model.

    • +
    +
    +
    Returns
    +

    a single model with corresponding pretrained weights.

    +
    +
    +

    Example

    +
    >>> model = torch.hub.load('pytorch/vision', 'resnet50', pretrained=True)
    +
    +
    +
    + +
    +

    Running a loaded model:

    +

    Note that *args, **kwargs in torch.hub.load() are used to instantiate a model. After you load a model, how can you find out what you can do with it? A suggested workflow is (see the sketch after this list):

    +
      +
    • dir(model) to see all available methods of the model.

    • help(model.foo) to check what arguments model.foo takes to run.

    • +
    +
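    For example, a sketch reusing the ‘pytorch/vision’ / ‘resnet18’ names from the examples above (downloads the repo and weights on first use):

    import torch

    model = torch.hub.load('pytorch/vision', 'resnet18', pretrained=True)
    print(dir(model))    # discover the available methods
    help(model.forward)  # inspect the arguments model.forward takes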

    To help users explore without referring to documentation back and forth, we strongly recommend repo owners make function help messages clear and succinct. It’s also helpful to include a minimal working example.

    +
    +
    +

    Where are my downloaded models saved?

    +

    The locations are used in the order of

    +
      +
    • Calling hub.set_dir(<PATH_TO_HUB_DIR>)

    • +
    • $TORCH_HOME/hub, if environment variable TORCH_HOME is set.

    • +
    • $XDG_CACHE_HOME/torch/hub, if environment variable XDG_CACHE_HOME is set.

    • +
    • ~/.cache/torch/hub

    • +
    +
    +
    +torch.hub.set_dir(d)[source]
    +

    Optionally set hub_dir to a local dir to save downloaded models & weights.

    +

    If set_dir is not called, the default path is $TORCH_HOME/hub, where the environment variable $TORCH_HOME defaults to $XDG_CACHE_HOME/torch. $XDG_CACHE_HOME follows the X Desktop Group specification of the Linux filesystem layout, with a default value of ~/.cache if the environment variable is not set.

    +
    +
    Parameters
    +

    d – path to a local folder to save downloaded models & weights.

    +
    +
    +
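    A usage sketch (the path here is hypothetical):

    import torch

    torch.hub.set_dir('/tmp/torch_hub')  # downloaded repos and weights are cached under this dir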
    + +
    +
    +

    Caching logic

    +

    By default, we don’t clean up files after loading them. Hub uses the cache by default if it already exists in hub_dir.

    +

    Users can force a reload by calling hub.load(..., force_reload=True). This will delete the existing github folder and downloaded weights, and reinitialize a fresh download. This is useful when updates are published to the same branch, so users can keep up with the latest release.

    +
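    For example, a sketch reusing the repo and model names from above:

    import torch

    # discard the cached repo and weights, then download fresh copies
    model = torch.hub.load('pytorch/vision', 'resnet18', pretrained=True, force_reload=True)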
    +
    +

    Known limitations:

    +

    Torch hub works by importing the package as if it were installed. There are some side effects introduced by importing in Python. For example, you can see new items in the Python caches sys.modules and sys.path_importer_cache, which is normal Python behavior.

    +

    A known limitation worth mentioning here: users CANNOT load two different branches of the same repo in the same Python process. It’s just like installing two packages with the same name in Python, which is not good. The cache might join the party and give you surprises if you actually try that. Of course, it’s totally fine to load them in separate processes.

    +
    +
    +
\ No newline at end of file
diff --git a/docs/stable/index.html b/docs/stable/index.html
new file mode 100644
index 000000000000..5a640b76a31d
--- /dev/null
+++ b/docs/stable/index.html
(new page: PyTorch documentation — PyTorch master documentation)
\ No newline at end of file
diff --git a/docs/stable/jit.html b/docs/stable/jit.html
new file mode 100644
index 000000000000..424eeb69970d
--- /dev/null
+++ b/docs/stable/jit.html
(new page: TorchScript — PyTorch master documentation)

    TorchScript

    + +

    TorchScript is a way to create serializable and optimizable models from PyTorch code. Any TorchScript program can be saved from a Python process and loaded in a process where there is no Python dependency.

    +

    We provide tools to incrementally transition a model from a pure Python program to a TorchScript program that can be run independently from Python, such as in a standalone C++ program. This makes it possible to train models in PyTorch using familiar tools in Python and then export the model via TorchScript to a production environment where Python programs may be disadvantageous for performance and multi-threading reasons.

    +
    +

    Creating TorchScript Code

    +
    +
    +torch.jit.script(obj, optimize=None, _frames_up=0, _rcb=None)[source]
    +

    Scripting a function or nn.Module will inspect the source code, compile it as TorchScript code using the TorchScript compiler, and return a ScriptModule or torch._C.Function.

    +
    +
    Scripting a function

    The @torch.jit.script decorator will construct a torch._C.Function.

    +

    Example (scripting a function):

    +
    import torch
    +@torch.jit.script
    +def foo(x, y):
    +    if x.max() > y.max():
    +        r = x
    +    else:
    +        r = y
    +    return r
    +
    +
    +
    +
    Scripting an nn.Module

    Scripting an nn.Module by default will compile the forward method and recursively compile any methods, submodules, and functions called by forward. If an nn.Module only uses features supported in TorchScript, no changes to the original module code should be necessary.

    +

    Example (scripting a simple module with a Parameter):

    +
    import torch
    +
    +class MyModule(torch.nn.Module):
    +    def __init__(self, N, M):
    +        super(MyModule, self).__init__()
    +        # This parameter will be copied to the new ScriptModule
    +        self.weight = torch.nn.Parameter(torch.rand(N, M))
    +
    +        # When this submodule is used, it will be compiled
    +        self.linear = torch.nn.Linear(N, M)
    +
    +    def forward(self, input):
    +        output = self.weight.mv(input)
    +
    +        # This calls the `forward` method of the `nn.Linear` module, which will
    +        # cause the `self.linear` submodule to be compiled to a `ScriptModule` here
    +        output = self.linear(output)
    +        return output
    +
    +scripted_module = torch.jit.script(MyModule())
    +
    +
    +

    Example (scripting a module with traced submodules):

    +
    import torch
    +import torch.nn as nn
    +import torch.nn.functional as F
    +
    +class MyModule(nn.Module):
    +    def __init__(self):
    +        super(MyModule, self).__init__()
    +        # torch.jit.trace produces a ScriptModule's conv1 and conv2
    +        self.conv1 = torch.jit.trace(nn.Conv2d(1, 20, 5), torch.rand(1, 1, 16, 16))
    +        self.conv2 = torch.jit.trace(nn.Conv2d(20, 20, 5), torch.rand(1, 20, 16, 16))
    +
    +    def forward(self, input):
    +      input = F.relu(self.conv1(input))
    +      input = F.relu(self.conv2(input))
    +      return input
    +
    +scripted_module = torch.jit.script(MyModule())
    +
    +
    +

    To compile a method other than forward (and recursively compile anything it calls), add +the @torch.jit.export decorator to the method.

    +
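    A sketch of exporting an extra method (MyModule and some_entry_point are hypothetical names):

    import torch

    class MyModule(torch.nn.Module):
        @torch.jit.export
        def some_entry_point(self, x):
            return x + 10

        def forward(self, x):
            return x * 2

    scripted = torch.jit.script(MyModule())  # compiles forward and some_entry_point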
    +
    +
    + +
    +
    +torch.jit.trace(func, example_inputs, optimize=None, check_trace=True, check_inputs=None, check_tolerance=1e-05, _force_outplace=False, _module_class=None, _compilation_unit=<torch._C.CompilationUnit object>)[source]
    +

    Trace a function and return an executable ScriptModule or torch.jit._C.Function that will be optimized using just-in-time compilation.

    +
    +

    Warning

    +

    Tracing only correctly records functions and modules which are not data dependent (e.g., do not have conditionals on data in tensors) and do not have any untracked external dependencies (e.g., perform input/output or access global variables). If you trace such models, you may silently get incorrect results on subsequent invocations of the model. The tracer will try to emit warnings when doing something that may cause an incorrect trace to be produced.

    +
    +
    +
    Parameters
    +
      +
    • func (callable or torch.nn.Module) – a Python function or torch.nn.Module that will be run with example_inputs. Arguments and return values of func must be tensors or (possibly nested) tuples that contain tensors.

    • +
    • example_inputs (tuple) – a tuple of example inputs that will be passed to the function while tracing. The resulting trace can be run with inputs of different types and shapes assuming the traced operations support those types and shapes. example_inputs may also be a single Tensor, in which case it is automatically wrapped in a tuple.

    • +
    +
    +
    Keyword Arguments
    +
      +
    • check_trace (bool, optional) – check if the same inputs run through traced code produce the same outputs. Default: True. You might want to disable this if, for example, your network contains non-deterministic ops or if you are sure that the network is correct despite a checker failure.

    • check_inputs (list of tuples, optional) – A list of tuples of input arguments that should be used to check the trace against what is expected. Each tuple is equivalent to a set of input arguments that would be specified in example_inputs. For best results, pass in a set of checking inputs representative of the space of shapes and types of inputs you expect the network to see. If not specified, the original example_inputs are used for checking.

    • check_tolerance (float, optional) – Floating-point comparison tolerance to use in the checker procedure. This can be used to relax the checker strictness in the event that results diverge numerically for a known reason, such as operator fusion.

    • +
    +
    +
    Returns
    +

    if callable is nn.Module or forward() of nn.Module, trace returns a ScriptModule object with a single forward() method containing the traced code. The returned ScriptModule will have the same set of sub-modules and parameters as the original nn.Module. If callable is a standalone function, trace returns torch.jit._C.Function.

    +
    +
    +

    Example:

    +
    class Net(nn.Module):
    +    def __init__(self):
    +        super(Net, self).__init__()
    +        self.conv = nn.Conv2d(1, 1, 3)
    +
    +    def forward(self, x):
    +        return self.conv(x)
    +
    +    def weighted_kernel_sum(self, weight):
    +        return weight * self.conv.weight
    +
    +example_weight = torch.rand(1, 1, 3, 3)
    +example_forward_input = torch.rand(1, 1, 3, 3)
    +n = Net()
    +# the following two calls are equivalent
    +module = torch.jit.trace(n, example_forward_input)
    +module = torch.jit.trace(n.forward, example_forward_input)
    +
    +
    +
    + +
    +
    +class torch.jit.ScriptModule(optimize=None, _qualified_name=None, _compilation_unit=None, _cpp_module=None)[source]
    +

    The core data structure in TorchScript is the ScriptModule. It is an analogue of torch’s nn.Module and represents an entire model as a tree of submodules. Like normal modules, each individual module in a ScriptModule can have submodules, parameters, and methods. In nn.Modules methods are implemented as Python functions, but in ScriptModules methods are implemented as TorchScript functions, a statically-typed subset of Python that contains all of PyTorch’s built-in Tensor operations. This difference allows your ScriptModule’s code to run without the need for a Python interpreter.

    +

    ScriptModules can be created in two ways:

    +

    Tracing:

    +
    +

    Using torch.jit.trace and torch.jit.trace_module, you can turn an existing module or Python function into a TorchScript torch._C.Function or ScriptModule. You must provide example inputs, and we run the function, recording the operations performed on all the tensors.

    • The resulting recording of a standalone function produces torch._C.Function.

    • The resulting recording of an nn.Module or its forward function produces a ScriptModule. This module also contains any parameters that the original module had as well.

    +

    Example (tracing a function):

    +
    import torch
    +def foo(x, y):
    +    return 2 * x + y
    +traced_foo = torch.jit.trace(foo, (torch.rand(3), torch.rand(3)))
    +
    +
    +
    +

    Note

    +

    Tracing a standalone function will construct a torch._C.Function. Tracing an nn.Module’s forward will construct a ScriptModule.

    +
    +

    Example (tracing an existing module):

    +
    import torch
    +class Net(nn.Module):
    +    def __init__(self):
    +        super(Net, self).__init__()
    +        self.conv = nn.Conv2d(1, 1, 3)
    +
    +    def forward(self, x):
    +        return self.conv(x)
    +
    +    def weighted_kernel_sum(self, weight):
    +        return weight * self.conv.weight
    +
    +
    +n = Net()
    +example_weight = torch.rand(1, 1, 3, 3)
    +example_forward_input = torch.rand(1, 1, 3, 3)
    +
    +# all three trace calls below are equivalent
    +# and construct `ScriptModule` with a single `forward` method
    +module = torch.jit.trace(n.forward, example_forward_input) # produces ScriptModule with `forward`
    +module = torch.jit.trace(n, example_forward_input) # produces ScriptModule with `forward`
    +inputs = {'forward' : example_forward_input}
    +module = torch.jit.trace_module(n, inputs) # produces ScriptModule with `forward`
    +
    +inputs = {'forward' : example_forward_input, 'weighted_kernel_sum' : example_weight}
    +# trace_module produces `ScriptModule` with two methods:
    +# `forward` and `weighted_kernel_sum`
    +module = torch.jit.trace_module(n, inputs, True, True)
    +
    +
    +
    +

    Note

    +
      +
    • The first three trace/trace_module calls are equivalent and return a ScriptModule with a single forward method.

    • The last trace_module call produces a ScriptModule with two methods.

    Tracing only records operations done when the given function is run on the given tensors. Therefore, the returned ScriptModule will always run the same traced graph on any input. This has some important implications when your module is expected to run different sets of operations, depending on the input and/or the module state. For example,

    • Tracing will not record any control-flow like if-statements or loops. When this control-flow is constant across your module, this is fine and it often inlines the control-flow decisions. But sometimes the control-flow is actually part of the model itself. For instance, a recurrent network is a loop over the (possibly dynamic) length of an input sequence.

    • In the returned ScriptModule, operations that have different behaviors in training and eval modes will always behave as if it is in the mode it was in during tracing, no matter which mode the ScriptModule is in.

    In cases like these, tracing would not be appropriate and scripting is a better choice.

    +
    +
    +

    Scripting:

    +
    +

    You can write TorchScript code directly using Python syntax. You do this using the @torch.jit.script decorator for functions and modules. You can also call torch.jit.script directly with the function or module you wish to compile. On functions, the body of the function is compiled to TorchScript. If applied to an nn.Module, by default the forward method and any methods it calls are compiled, and all buffers and Parameters of the original module are copied to a new ScriptModule. You should not need to construct a ScriptModule manually. TorchScript itself is a subset of the Python language, so not all features in Python work, but we provide enough functionality to compute on tensors and do control-dependent operations.

    +
    +
    + +
    +
    +torch.jit.save(m, f, _extra_files=ExtraFilesMap{})[source]
    +

    Save an offline version of this module for use in a separate process. The saved module serializes all of the methods, submodules, parameters, and attributes of this module. It can be loaded into the C++ API using torch::jit::load(filename) or into the Python API with load.

    +

    To be able to save a module, it must not make any calls to native Python functions. This means that all submodules must be subclasses of torch.jit.ScriptModule as well.

    +
    +

    Danger

    +

    All modules, no matter their device, are always loaded onto the CPU during loading. This is different from load’s semantics and may change in the future.

    +
    +
    +
    Parameters
    +
      +
    • m – a ScriptModule to save

    • +
    • f – a file-like object (has to implement write and flush) or a string containing a file name

    • +
    • _extra_files – Map from filename to contents which will be stored as part of ‘f’

    • +
    +
    +
    +
    +

    Warning

    +

    If you are using Python 2, torch.save does NOT support StringIO.StringIO as a valid file-like object. This is because the write method should return the number of bytes written; StringIO.write() does not do this.

    +

    Please use something like io.BytesIO instead.

    +
    +

    Example:

    +
    import torch
    +import io
    +
    +
    +class MyModule(torch.nn.Module):
    +    def forward(self, x):
    +        return x + 10
    +
    +m = torch.jit.script(MyModule())
    +
    +# Save to file
    +torch.jit.save(m, 'scriptmodule.pt')
    +
    +# Save to io.BytesIO buffer
    +buffer = io.BytesIO()
    +torch.jit.save(m, buffer)
    +
    +# Save with extra files
    +extra_files = torch._C.ExtraFilesMap()
    +extra_files['foo.txt'] = 'bar'
    +torch.jit.save(m, 'scriptmodule.pt', _extra_files=extra_files)
    +
    +
    +
    + +
    +
    +torch.jit.load(f, map_location=None, _extra_files=ExtraFilesMap{})[source]
    +

    Load a ScriptModule previously saved with save.

    +

    All previously saved modules, no matter their device, are first loaded onto the CPU, and then moved to the devices they were saved from. If this fails (e.g. because the runtime system doesn’t have certain devices), an exception is raised. However, storages can be dynamically remapped to an alternative set of devices using the map_location argument. Compared to torch.load(), map_location in this function is simplified: it only accepts a string (e.g., ‘cpu’, ‘cuda:0’) or torch.device (e.g., torch.device(‘cpu’))

    +
    +
    Parameters
    +
      +
    • f – a file-like object (has to implement read, readline, tell, and seek), or a string containing a file name

    • +
    • map_location – can be a string (e.g., ‘cpu’, ‘cuda:0’) or a device (e.g., torch.device(‘cpu’))

    • +
    • _extra_files – map from filename to content. The extra filenames given in the map would be loaded and their content would be stored in the provided map.

    • +
    +
    +
    Returns
    +

    A ScriptModule object.

    +
    +
    +

    Example:

    +
    torch.jit.load('scriptmodule.pt')
    +
    +# Load ScriptModule from io.BytesIO object
    +with open('scriptmodule.pt', 'rb') as f:
    +    buffer = io.BytesIO(f.read())
    +
    +# Load all tensors to the original device
    +torch.jit.load(buffer)
    +
    +# Load all tensors onto CPU, using a device
    +torch.jit.load(buffer, map_location=torch.device('cpu'))
    +
    +# Load all tensors onto CPU, using a string
    +torch.jit.load(buffer, map_location='cpu')
    +
    +# Load with extra files.
    +files = {'metadata.json' : ''}
    +torch.jit.load('scriptmodule.pt', _extra_files = files)
    +print (files['metadata.json'])
    +
    +
    +
    + +
    +
    +

    Mixing Tracing and Scripting

    +

    In many cases either tracing or scripting is an easier approach for converting a model to TorchScript. Tracing and scripting can be composed to suit the particular requirements of a part of a model.

    +

    Scripted functions can call traced functions. This is particularly useful when you need to use control-flow around a simple feed-forward model. For instance, the beam search of a sequence-to-sequence model will typically be written in script but can call an encoder module generated using tracing.

    +

    Example:

    +
    import torch
    +
    +def foo(x, y):
    +    return 2 * x + y
    +traced_foo = torch.jit.trace(foo, (torch.rand(3), torch.rand(3)))
    +
    +@torch.jit.script
    +def bar(x):
    +    return traced_foo(x, x)
    +
    +
    +

    Traced functions can call script functions. This is useful when a small part of a model requires some control-flow even though most of the model is just a feed-forward network. Control-flow inside of a script function called by a traced function is preserved correctly:

    +

    Example:

    +
    import torch
    +
    +@torch.jit.script
    +def foo(x, y):
    +    if x.max() > y.max():
    +        r = x
    +    else:
    +        r = y
    +    return r
    +
    +
    +def bar(x, y, z):
    +    return foo(x, y) + z
    +
    +traced_bar = torch.jit.trace(bar, (torch.rand(3), torch.rand(3), torch.rand(3)))
    +
    +
    +

    This composition works for nn.Modules as well, where it can be used to generate a submodule using tracing that can be called from the methods of a script module:

    +

    Example:

    +
    import torch
    +import torchvision
    +
    +class MyScriptModule(torch.nn.Module):
    +    def __init__(self):
    +        super(MyScriptModule, self).__init__()
    +        self.means = torch.nn.Parameter(torch.tensor([103.939, 116.779, 123.68])
    +                                        .resize_(1, 3, 1, 1))
    +        self.resnet = torch.jit.trace(torchvision.models.resnet18(),
    +                                      torch.rand(1, 3, 224, 224))
    +
    +    def forward(self, input):
    +        return self.resnet(input - self.means)
    +
    +my_script_module = torch.jit.script(MyScriptModule())
    +
    +
    +
    +
    +

    TorchScript Language Reference

    +

    TorchScript is a statically typed subset of Python that can either be written directly (using the @torch.jit.script decorator) or generated automatically from Python code via tracing. When using tracing, code is automatically converted into this subset of Python by recording only the actual operators on tensors and simply executing and discarding the other surrounding Python code.

    +

    When writing TorchScript directly using the @torch.jit.script decorator, the programmer must use only the subset of Python supported in TorchScript. This section documents what is supported in TorchScript as if it were a language reference for a standalone language. Any features of Python not mentioned in this reference are not part of TorchScript.

    +

    As a subset of Python, any valid TorchScript function is also a valid Python function. This makes it possible to remove the @torch.jit.script decorator and debug the function using standard Python tools like pdb. The reverse is not true: there are many valid Python programs that are not valid TorchScript programs. Instead, TorchScript focuses specifically on the features of Python that are needed to represent neural network models in Torch.

    +
    +
    +PYTORCH_JIT=1
    +

    Setting the environment variable PYTORCH_JIT=0 will disable all script and tracing annotations. If there is a hard-to-debug error in one of your ScriptModules, you can use this flag to force everything to run using native Python. This allows the use of tools like pdb to debug code.

    +
    + +
    +

    Types

    +

    The largest difference between TorchScript and the full Python language is that TorchScript only supports a small set of types that are needed to express neural net models. In particular, TorchScript supports:

    Type                  Description
    Tensor                A PyTorch tensor of any dtype, dimension, or backend
    Tuple[T0, T1, ...]    A tuple containing subtypes T0, T1, etc. (e.g. Tuple[Tensor, Tensor])
    bool                  A boolean value
    int                   A scalar integer
    float                 A scalar floating point number
    List[T]               A list of which all members are type T
    Optional[T]           A value which is either None or type T
    Dict[K, V]            A dict with key type K and value type V. Only str, int, and float are allowed as key types.

    +

    Unlike Python, each variable in a TorchScript function must have a single static type. This makes it easier to optimize TorchScript functions.

    +

    Example (a type mismatch):

    +
    @torch.jit.script
    +def an_error(x):
    +    if x:
    +        r = torch.rand(1)
    +    else:
    +        r = 4
    +    return r # Type mismatch: r is set to type Tensor in the true branch
    +             # and type int in the false branch
    +
    +
    +
    +

    Default Types

    +

    By default, all parameters to a TorchScript function are assumed to be Tensor. To specify that an argument to a TorchScript function is another type, it is possible to use MyPy-style type annotations using the types listed above:

    +

    Example:

    +
    @torch.jit.script
    +def foo(x, tup):
    +    # type: (int, Tuple[Tensor, Tensor]) -> Tensor
    +    t0, t1 = tup
    +    return t0 + t1 + x
    +
    +print(foo(3, (torch.rand(3), torch.rand(3))))
    +
    +
    +
    +

    Note

    +

    It is also possible to annotate types with Python 3 type annotations. In our examples, we use comment-based annotations to ensure Python 2 compatibility as well.

    +
    +

    An empty list is assumed to be List[Tensor] and empty dicts Dict[str, Tensor]. To instantiate an empty list or dict of other types, use torch.jit.annotate.

    +

    Example:

    +
    import torch
    +from torch.jit import Tensor
    +from typing import List, Tuple
    +
    +class EmptyDataStructures(torch.jit.ScriptModule):
    +    def __init__(self):
    +        super(EmptyDataStructures, self).__init__()
    +
    +    @torch.jit.script_method
    +    def forward(self, x):
    +        # type: (Tensor) -> Tuple[List[Tuple[int, float]], Dict[str, int]]
    +
    +        # This annotates the list to be a `List[Tuple[int, float]]`
    +        my_list = torch.jit.annotate(List[Tuple[int, float]], [])
    +        for i in range(10):
    +            my_list.append((i, float(i)))
    +
    +        my_dict = torch.jit.annotate(Dict[str, int], {})
    +        return my_list, my_dict
    +
    +
    +
    +
    +

    Optional Type Refinement

    +

    TorchScript will refine the type of a variable of type Optional[T] when a comparison to None is made inside the conditional of an if-statement. The compiler can reason about multiple None checks that are combined with and, or, and not. Refinement will also occur for else blocks of if-statements that are not explicitly written.

    +

    The expression must be emitted within the conditional; assigning a None check to a variable and using it in the conditional will not refine types. An attribute like self.x will not be refined, but assigning self.x to a local variable first will work.

    +

    Example:

    +
    @torch.jit.script_method
    +def optional_unwrap(self, x, y):
    +  # type: (Optional[int], Optional[int]) -> int
    +  if x is None:
    +    x = 1
    +  x = x + 1
    +
    +  z = self.z
    +  if y is not None and z is not None:
    +    x = y + z
    +  return x
    +
    +
    +
    +
    +

    User Defined Types

    +

    Python classes can be used in TorchScript if they are annotated with @torch.jit.script, similar to how you would declare a TorchScript function:

    +
    @torch.jit.script
    +class Foo:
    +  def __init__(self, x, y):
    +    self.x = x
    +
    +  def aug_add_x(self, inc):
    +    self.x += inc
    +
    +
    +

    This subset is restricted:

    +
      +
    • All functions must be valid TorchScript functions (including __init__())

    • +
    • Classes must be new-style classes, as we use __new__() to construct them with pybind11

    • +
    • TorchScript classes are statically typed. Members are declared by assigning to self in the __init__() method

      +
      +

      For example, assigning outside of the __init__() method:

      +
      @torch.jit.script
      +class Foo:
      +  def assign_x(self):
      +    self.x = torch.rand(2, 3)
      +
      +
      +

      Will result in:

      +
      RuntimeError:
      +Tried to set nonexistent attribute: x. Did you forget to initialize it in __init__()?:
      +def assign_x(self):
      +  self.x = torch.rand(2, 3)
      +  ~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
      +
      +
      +
      +
    • +
    • No expressions except method definitions are allowed in the body of the class

    • +
    • No support for inheritance or any other polymorphism strategy, except for inheriting +from object to specify a new-style class

    • +
    +

    After a class is defined, it can be used in both TorchScript and Python interchangeably +like any other TorchScript type:

    +
    @torch.jit.script
    +class Pair:
    +  def __init__(self, first, second):
    +    self.first = first
    +    self.second = second
    +
    +@torch.jit.script
    +def sum_pair(p):
    +  # type: (Pair) -> Tensor
    +  return p.first + p.second
    +
    +p = Pair(torch.rand(2, 3), torch.rand(2, 3))
    +print(sum_pair(p))
    +
    +
    +
    +
    +
    +

    Expressions

    +

The following Python expressions are supported:

    +
    +

    Literals

    +
    +

True, False, None, 'string literals', "string literals", number literals: 3 (interpreted as int), 3.4 (interpreted as float)

    +
    +
    +
    List Construction
    +
    +

    [3, 4], [], [torch.rand(3), torch.rand(4)]

    +
    +

    Note

    +

An empty list is assumed to have type List[Tensor]. The types of other list literals are derived from the types of their members. To denote an empty list of another type, use torch.jit.annotate.

    +
    +
    +
    +
    +
    Tuple Construction
    +
    +

    (3, 4), (3,)

    +
    +
    +
    +
    Dict Construction
    +
    +

    {'hello': 3}, {}, {'a': torch.rand(3), 'b': torch.rand(4)}

    +
    +

    Note

    +

An empty dict is assumed to have type Dict[str, Tensor]. The types of other dict literals are derived from the types of their members. To denote an empty dict of another type, use torch.jit.annotate.

    +
    +
    +
    +
    +
    +

    Variables

    +
    +

    my_variable_name

    +
    +

    Note

    +

    See Variable Resolution for how variables are resolved.

    +
    +
    +
    +
    +

    Arithmetic Operators

    +
    +

    a + b

    +

    a - b

    +

    a * b

    +

    a / b

    +

    a ^ b

    +

    a @ b

    +
    +
    +
    +

    Comparison Operators

    +
    +

    a == b

    +

    a != b

    +

    a < b

    +

    a > b

    +

    a <= b

    +

    a >= b

    +
    +
    +
    +

    Logical Operators

    +
    +

    a and b

    +

    a or b

    +

    not b

    +
    +
    +
    +

    Subscripts

    +
    +

    t[0]

    +

    t[-1]

    +

    t[0:2]

    +

    t[1:]

    +

    t[:1]

    +

    t[:]

    +

    t[0, 1]

    +

    t[0, 1:2]

    +

    t[0, :1]

    +

    t[-1, 1:, 0]

    +

    t[1:, -1, 0]

    +

    t[i:j, i]

    +
    +
    +
    +

    Function Calls

    +
    +

    Calls to built-in functions: torch.rand(3, dtype=torch.int)

    +

    Calls to other script functions:

    +
    import torch
    +
    +@torch.jit.script
    +def foo(x):
    +  return x + 1
    +
    +@torch.jit.script
    +def bar(x):
    +  return foo(x)
    +
    +
    +
    +
    +
    +

    Method Calls

    +
    +

    Calls to methods of builtin types like tensor: x.mm(y)

    +

    When defining a Script method inside of a ScriptModule, the @script_method +annotation is used. Inside of these methods it is possible to call other methods +of this class or access methods on the submodules.

    +

Calling a submodule directly (e.g. self.resnet(input)) is equivalent to calling its forward method (e.g. self.resnet.forward(input)):

    +
import torch
+import torchvision
    +
    +class MyScriptModule(torch.jit.ScriptModule):
    +    def __init__(self):
    +        super(MyScriptModule, self).__init__()
    +        self.means = torch.nn.Parameter(torch.tensor([103.939, 116.779, 123.68])
    +                                        .resize_(1, 3, 1, 1))
    +        self.resnet = torch.jit.trace(torchvision.models.resnet18(),
    +                                      torch.rand(1, 3, 224, 224))
    +
    +    @torch.jit.script_method
    +    def helper(self, input):
    +      return self.resnet(input - self.means)
    +
    +    @torch.jit.script_method
    +    def forward(self, input):
    +        return self.helper(input)
    +
    +
    +
    +
    +
    +

    Ternary Expressions

    +
    +

    x if x > y else y

    +
    +
    +
    +

    Casts

    +
    +

    float(ten)

    +

    int(3.5)

    +

    bool(ten)

    +
    +
    +
    +

    Accessing Module Parameters

    +
    +

    self.my_parameter

    +

    self.my_submodule.my_parameter

    +
    +
    +
    +
    +

    Statements

    +

    TorchScript supports the following types of statements:

    +
    +
    Simple Assignments
    a = b
    +a += b # short-hand for a = a + b, does not operate in-place on a
    +a -= b
    +
    +
    +
    +
    Pattern Matching Assignments
    a, b = tuple_or_list
    +a, b, *c = a_tuple
    +
    +
    +
    +
    +

    Print Statements

    +
    +

    print("the result of an add:", a + b)

    +
    +

    If Statements

    +
    +
    if a < 4:
    +    r = -a
    +elif a < 3:
    +    r = a + a
    +else:
    +    r = 3 * a
    +
    +
    +
    +

In addition to bools; floats, ints, and Tensors can also be used in a conditional and will be implicitly cast to a boolean.

    +
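For instance, a minimal sketch (the function name is hypothetical) of a float used directly as a condition:

@torch.jit.script
def is_nonzero_flag(x):
    # type: (float) -> int
    if x:  # the float is implicitly cast to a boolean (nonzero means True)
        return 1
    return 0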

    While Loops

    +
    +
    a = 0
    +while a < 4:
    +    print(a)
    +    a += 1
    +
    +
    +
    +

    For loops with range

    +
    +
    x = 0
    +for i in range(10):
    +    x *= i
    +
    +
    +
    +

    For loops over tuples:

    +
    +
    tup = (3, torch.rand(4))
    +for x in tup:
    +    print(x)
    +
    +
    +
    +

    Note

    +

    for loops over tuples will unroll the loop, generating a body for +each member of the tuple. The body must type-check correctly for each member.

    +
    +
    +

    For loops over constant torch.nn.ModuleList

    +
    +
    class SubModule(torch.jit.ScriptModule):
    +    def __init__(self):
+        super(SubModule, self).__init__()
    +        self.weight = nn.Parameter(torch.randn(2))
    +
    +    @torch.jit.script_method
    +    def forward(self, input):
    +        return self.weight + input
    +
    +class MyModule(torch.jit.ScriptModule):
    +    __constants__ = ['mods']
    +
    +    def __init__(self):
    +        super(MyModule, self).__init__()
    +        self.mods = torch.nn.ModuleList([SubModule() for i in range(10)])
    +
    +    @torch.jit.script_method
    +    def forward(self, v):
    +        for module in self.mods:
+            v = module(v)
    +        return v
    +
    +
    +
    +

    Note

    +

To use a nn.ModuleList inside a @script_method it must be marked constant by adding the name of the attribute to the __constants__ list for the type. For loops over a nn.ModuleList will unroll the body of the loop at compile time, once for each member of the constant module list.

    +
    +
    +

    Break and Continue

    +
    +
    for i in range(5):
    +  if i == 1:
    +    continue
    +  if i == 3:
    +    break
    +  print(i)
    +
    +
    +
    +
    +
    Return

    return a, b

    +
    +

    Note

    +
    +
TorchScript allows returns in the following circumstances (a sketch follows the list):

1. At the end of a function
2. In an if-statement where both the <true> and <false> branches return
3. In an if-statement where the <true> branch returns and the <false> branch is empty (an early return)
    +
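A minimal sketch (the function name is hypothetical) exercising both an early return and a return at the end of a function:

@torch.jit.script
def clamp_negative(x):
    # type: (int) -> int
    if x < 0:
        return 0  # <true> returns and <false> is empty: an early return
    return x      # a return at the end of the function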
    +
    +
    +
    +
    +
    +
    +

    Variable Resolution

    +

    TorchScript supports a subset of Python’s variable resolution (i.e. scoping) +rules. Local variables behave the same as in Python, except for the restriction +that a variable must have the same type along all paths through a function. +If a variable has a different type on different sides of an if statement, it +is an error to use it after the end of the if statement.

    +

    Similarly, a variable is not allowed to be used if it is only defined along some +paths through the function.

    +

    Example:

    +
    @torch.jit.script
    +def foo(x):
    +    if x < 0:
    +        y = 4
    +    print(y) # Error: undefined value y
    +
    +
    +

    Non-local variables are resolved to Python values at compile time when the +function is defined. These values are then converted into TorchScript values using +the rules described in Use of Python Values.

    +
    +
    +

    Use of Python Values

    +

To make writing TorchScript more convenient, we allow script code to refer to Python values in the surrounding scope. For instance, any time there is a reference to torch, the TorchScript compiler is actually resolving it to the torch Python module when the function is declared. These Python values are not a first-class part of TorchScript. Instead they are de-sugared at compile time into the primitive types that TorchScript supports. This de-sugaring depends on the dynamic type of the Python value referenced when compilation occurs. This section describes the rules that are used when accessing Python values in TorchScript.

    +
    +

    Functions

    +
    +

    TorchScript can call Python functions. This functionality is very useful when +incrementally converting a model to TorchScript. The model can be moved function-by-function +to TorchScript, leaving calls to Python functions in place. This way you can incrementally +check the correctness of the model as you go.

    +

    Example:

    +
    def foo(x):
    +  print("I am called with {}".format(x))
    +  import pdb; pdb.set_trace()
    +  return x
    +
    +@torch.jit.script
+def bar(x):
    +  return foo(x + 1)
    +
    +
    +

    Attempting to call save on a ScriptModule that contains calls to Python +functions will fail. The intention is that this pathway is used for debugging +and the calls removed or turned into script functions before saving. If you +want to export a module with a Python function, add the @torch.jit.ignore +decorator to the function which will replace these function calls with an +exception when the model is saved:

    +
    class M(torch.jit.ScriptModule):
    +  def __init__(self):
    +    super(M, self).__init__()
    +
    +  @torch.jit.script_method
    +  def forward(self, x):
    +    self.ignored_code(x)
    +    return x + 2
    +
    +  @torch.jit.ignore
    +  def ignored_code(self, x):
    +    # non-TorchScript code
    +    import pdb; pdb.set_trace()
    +
    +m = M()
    +# Runs, makes upcall to Python to run `ignored_code`
    +m(torch.ones(2, 2))
    +
    +# Replaces all calls to `ignored_code` with a `raise`
    +m.save("m.pt")
    +loaded = torch.jit.load("m.pt")
    +
    +# This runs `ignored_code` after saving which will raise an Exception!
    +loaded(torch.ones(2, 2))
    +
    +
    +
    +
    +
    +

    Attribute Lookup On Python Modules

    +
    +

    TorchScript can lookup attributes on modules. Builtin functions like torch.add +are accessed this way. This allows TorchScript to call functions defined in +other modules.

    +
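As a minimal sketch (assuming only the builtin functions shown), both calls below are resolved by attribute lookup on the torch module:

import torch

@torch.jit.script
def add_then_neg(x, y):
    # type: (Tensor, Tensor) -> Tensor
    # torch.add and torch.neg are found by attribute lookup on the
    # torch Python module at compile time
    return torch.neg(torch.add(x, y))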
    +
    +
    +

    Python-defined Constants

    +
    +

    TorchScript also provides a way to use constants that are defined in Python. +These can be used to hard-code hyper-parameters into the function, or to +define universal constants. There are two ways of specifying that a Python +value should be treated as a constant.

    +
      +
1. Values looked up as attributes of a module are assumed to be constant. Example: math.pi (a minimal sketch follows the list).

2. Attributes of a ScriptModule can be marked constant by listing them as a member of the __constants__ property of the class:

      +

      Example:

      +
      class Foo(torch.jit.ScriptModule):
      +    __constants__ = ['a']
      +
      +    def __init__(self):
      +        super(Foo, self).__init__(False)
      +        self.a = 1 + 4
      +
+    @torch.jit.script_method
+    def forward(self, input):
+        return self.a + input
      +
      +
      +
    +
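For example, a minimal sketch of the first approach (the function name is hypothetical):

import math
import torch

@torch.jit.script
def circumference(r):
    # type: (float) -> float
    # math.pi is looked up as an attribute of the math module and is
    # treated as a constant at compile time
    return 2.0 * math.pi * r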

Supported constant Python values are:

    +
      +
    • int

    • +
    • float

    • +
    • bool

    • +
    • torch.device

    • +
    • torch.layout

    • +
    • torch.dtype

    • +
    • tuples containing supported types

    • +
    • torch.nn.ModuleList which can be used in a TorchScript for loop

    • +
    +
    +
    +
    +

    Module Attributes

    +

The torch.nn.Parameter wrapper and register_buffer can be used to assign tensors to a ScriptModule. In a similar vein, attributes of any type can be assigned on a ScriptModule by wrapping them with torch.jit.Attribute and specifying the type. All types available in TorchScript are supported. These attributes are mutable and are saved in a separate archive in the serialized model binary. Tensor attributes are semantically the same as buffers.

    +

    Example:

    +
    class Foo(torch.jit.ScriptModule):
    +  def __init__(self, a_dict):
    +    super(Foo, self).__init__(False)
    +    self.words = torch.jit.Attribute([], List[str])
    +    self.some_dict = torch.jit.Attribute(a_dict, Dict[str, int])
    +
    +  @torch.jit.script_method
    +  def forward(self, input):
    +    # type: (str) -> int
    +    self.words.append(input)
    +    return self.some_dict[input]
    +
    +
    +
    +
    +
    +

    Debugging

    +
    +

    Disable JIT for Debugging

    +
    +

If you want to disable all JIT modes (tracing and scripting) so you can debug your program in raw Python, set the PYTORCH_JIT environment variable to 0 to globally disable the JIT. Given an example script:

    +
    @torch.jit.script
    +def scripted_fn(x : torch.Tensor):
    +    for i in range(12):
    +        x = x + x
    +    return x
    +
    +
    +def fn(x):
    +    x = torch.neg(x)
    +    import pdb; pdb.set_trace()
    +    return scripted_fn(x)
    +
    +traced_fn = torch.jit.trace(fn, (torch.rand(4, 5),))
    +
    +traced_fn(torch.rand(3, 4))
    +
    +
    +

Debugging this script with PDB works except for when we invoke the @torch.jit.script function. We can globally disable the JIT, so that we can call the @torch.jit.script function as a normal Python function and not compile it. If the above script is called disable_jit_example.py, we can invoke it like so:

    +
    $ PYTORCH_JIT=0 python disable_jit_example.py
    +
    +
    +

    and we will be able to step into the @torch.jit.script function as a normal Python +function.

    +
    +
    +
    +

    Inspecting Code

    +
    +

    TorchScript provides a code pretty-printer for all ScriptModule instances. This +pretty-printer gives an interpretation of the script method’s code as valid +Python syntax. For example:

    +
    @torch.jit.script
    +def foo(len):
    +    # type: (int) -> torch.Tensor
    +    rv = torch.zeros(3, 4)
    +    for i in range(len):
    +        if i < 10:
    +            rv = rv - 1.0
    +        else:
    +            rv = rv + 1.0
+    return rv
    +
    +print(foo.code)
    +
    +
    +

    A ScriptModule with a single forward method will have an attribute +code, which you can use to inspect the ScriptModule’s code. +If the ScriptModule has more than one method, you will need to access +.code on the method itself and not the module. We can inspect the +code of a method named bar on a ScriptModule by accessing .bar.code.

    +

    The example script above produces the code:

    +
    def forward(self,
    +            len: int) -> Tensor:
    +    rv = torch.zeros([3, 4], dtype=None, layout=None, device=None)
    +    rv0 = rv
    +    for i in range(len):
    +        if torch.lt(i, 10):
    +            rv1 = torch.sub(rv0, 1., 1)
    +        else:
    +            rv1 = torch.add(rv0, 1., 1)
    +        rv0 = rv1
    +    return rv0
    +
    +
    +

    This is TorchScript’s compilation of the code for the forward method. +You can use this to ensure TorchScript (tracing or scripting) has captured +your model code correctly.

    +
    +
    +
    +

    Interpreting Graphs

    +
    +

TorchScript also has a representation at a lower level than the code pretty-printer, in the form of IR graphs.

    +

    TorchScript uses a static single assignment (SSA) intermediate representation +(IR) to represent computation. The instructions in this format consist of +ATen (the C++ backend of PyTorch) operators and other primitive operators, +including control flow operators for loops and conditionals. As an example:

    +
    @torch.jit.script
    +def foo(len):
    +  # type: (int) -> torch.Tensor
    +  rv = torch.zeros(3, 4)
    +  for i in range(len):
    +    if i < 10:
    +        rv = rv - 1.0
    +    else:
    +        rv = rv + 1.0
    +  return rv
    +
    +print(foo.graph)
    +
    +
    +

    .graph follows the same rules described in the Inspecting Code section +with regard to forward method lookup.

    +

    The example script above produces the graph:

    +
    graph(%len : int) {
    +  %15 : int = prim::Constant[value=1]()
    +  %9 : bool = prim::Constant[value=1]()
    +  %7 : Device = prim::Constant[value="cpu"]()
    +  %6 : int = prim::Constant[value=0]()
    +  %5 : int = prim::Constant[value=6]()
    +  %1 : int = prim::Constant[value=3]()
    +  %2 : int = prim::Constant[value=4]()
    +  %11 : int = prim::Constant[value=10]()
    +  %14 : float = prim::Constant[value=1]()
    +  %4 : int[] = prim::ListConstruct(%1, %2)
    +  %rv.1 : Tensor = aten::zeros(%4, %5, %6, %7)
    +  %rv : Tensor = prim::Loop(%len, %9, %rv.1)
    +    block0(%i : int, %13 : Tensor) {
    +      %12 : bool = aten::lt(%i, %11)
    +      %rv.4 : Tensor = prim::If(%12)
    +        block0() {
    +          %rv.2 : Tensor = aten::sub(%13, %14, %15)
    +          -> (%rv.2)
    +        }
    +        block1() {
    +          %rv.3 : Tensor = aten::add(%13, %14, %15)
    +          -> (%rv.3)
    +        }
    +      -> (%9, %rv.4)
    +    }
    +  return (%rv);
    +}
    +
    +
    +

Take the instruction %rv.1 : Tensor = aten::zeros(%4, %5, %6, %7) for example. %rv.1 : Tensor means we assign the output to a (unique) value named rv.1, and that value is of Tensor type, i.e. we do not know its concrete shape. aten::zeros is the operator (equivalent to torch.zeros) and the input list (%4, %5, %6, %7) specifies which values in scope should be passed as inputs. The schema for built-in functions like aten::zeros can be found at Builtin Functions.

    +

    Notice that operators can also have associated blocks, namely the +prim::Loop and prim::If operators. In the graph print-out, these +operators are formatted to reflect their equivalent source code forms +to facilitate easy debugging.

    +

    Graphs can be inspected as shown to confirm that the computation described +by a ScriptModule is correct, in both automated and manual fashion, as +described below.

    +
    +
    +
    +

    Tracing Edge Cases

    +
    +

    There are some edge cases that exist where the trace of a given Python +function/module will not be representative of the underlying code. These +cases can include:

    +
      +
    • Tracing of control flow that is dependent on inputs (e.g. tensor shapes)

    • +
    • Tracing of in-place operations of tensor views (e.g. indexing on the +left-hand side of an assignment)

    • +
    +

    Note that these cases may in fact be traceable in the future.

    +
    +
    +
    +

    Automatic Trace Checking

    +
    +

    One way to automatically catch many errors in traces is by using check_inputs +on the torch.jit.trace() API. check_inputs takes a list of tuples +of inputs that will be used to re-trace the computation and verify the +results. For example:

    +
    def loop_in_traced_fn(x):
    +    result = x[0]
    +    for i in range(x.size(0)):
    +        result = result * x[i]
    +    return result
    +
    +inputs = (torch.rand(3, 4, 5),)
    +check_inputs = [(torch.rand(4, 5, 6),), (torch.rand(2, 3, 4),)]
    +
    +traced = torch.jit.trace(loop_in_traced_fn, inputs, check_inputs=check_inputs)
    +
    +
    +
    +
This gives us the following diagnostic information:

    ERROR: Graphs differed across invocations! +Graph diff:

    +
      graph(%x : Tensor) {
    +    %1 : int = prim::Constant[value=0]()
    +    %2 : int = prim::Constant[value=0]()
    +    %result.1 : Tensor = aten::select(%x, %1, %2)
    +    %4 : int = prim::Constant[value=0]()
    +    %5 : int = prim::Constant[value=0]()
    +    %6 : Tensor = aten::select(%x, %4, %5)
    +    %result.2 : Tensor = aten::mul(%result.1, %6)
    +    %8 : int = prim::Constant[value=0]()
    +    %9 : int = prim::Constant[value=1]()
    +    %10 : Tensor = aten::select(%x, %8, %9)
    +-   %result : Tensor = aten::mul(%result.2, %10)
    ++   %result.3 : Tensor = aten::mul(%result.2, %10)
    +?          ++
    +    %12 : int = prim::Constant[value=0]()
    +    %13 : int = prim::Constant[value=2]()
    +    %14 : Tensor = aten::select(%x, %12, %13)
    ++   %result : Tensor = aten::mul(%result.3, %14)
    ++   %16 : int = prim::Constant[value=0]()
    ++   %17 : int = prim::Constant[value=3]()
    ++   %18 : Tensor = aten::select(%x, %16, %17)
    +-   %15 : Tensor = aten::mul(%result, %14)
    +?     ^                                 ^
    ++   %19 : Tensor = aten::mul(%result, %18)
    +?     ^                                 ^
    +-   return (%15);
    +?             ^
    ++   return (%19);
    +?             ^
    +  }
    +
    +
    +
    +
    +

    This message indicates to us that the computation differed between when +we first traced it and when we traced it with the check_inputs. Indeed, +the loop within the body of loop_in_traced_fn depends on the shape +of the input x, and thus when we try another x with a different +shape, the trace differs.

    +

    In this case, data-dependent control flow like this can be captured using +script instead:

    +
    def fn(x):
    +    result = x[0]
    +    for i in range(x.size(0)):
    +        result = result * x[i]
    +    return result
    +
    +inputs = (torch.rand(3, 4, 5),)
    +check_inputs = [(torch.rand(4, 5, 6),), (torch.rand(2, 3, 4),)]
    +
    +scripted_fn = torch.jit.script(fn)
    +print(scripted_fn.graph)
    +
    +for input_tuple in [inputs] + check_inputs:
    +    torch.testing.assert_allclose(fn(*input_tuple), scripted_fn(*input_tuple))
    +
    +
    +

    Which produces:

    +
    graph(%x : Tensor) {
    +  %5 : bool = prim::Constant[value=1]()
    +  %1 : int = prim::Constant[value=0]()
    +  %result.1 : Tensor = aten::select(%x, %1, %1)
    +  %4 : int = aten::size(%x, %1)
    +  %result : Tensor = prim::Loop(%4, %5, %result.1)
    +    block0(%i : int, %7 : Tensor) {
    +      %10 : Tensor = aten::select(%x, %1, %i)
    +      %result.2 : Tensor = aten::mul(%7, %10)
    +      -> (%5, %result.2)
    +    }
    +  return (%result);
    +}
    +
    +
    +
    +
    +
    +

    Tracer Warnings

    +
    +

    The tracer produces warnings for several problematic patterns in traced +computation. As an example, take a trace of a function that contains an +in-place assignment on a slice (a view) of a Tensor:

    +
    def fill_row_zero(x):
    +    x[0] = torch.rand(*x.shape[1:2])
    +    return x
    +
    +traced = torch.jit.trace(fill_row_zero, (torch.rand(3, 4),))
    +print(traced.graph)
    +
    +
    +

    Produces several warnings and a graph which simply returns the input:

    +
    fill_row_zero.py:4: TracerWarning: There are 2 live references to the data region being modified when tracing in-place operator copy_ (possibly due to an assignment). This might cause the trace to be incorrect, because all other views that also reference this data will not reflect this change in the trace! On the other hand, if all other views use the same memory chunk, but are disjoint (e.g. are outputs of torch.split), this might still be safe.
    +  x[0] = torch.rand(*x.shape[1:2])
    +fill_row_zero.py:6: TracerWarning: Output nr 1. of the traced function does not match the corresponding output of the Python function. Detailed error:
    +Not within tolerance rtol=1e-05 atol=1e-05 at input[0, 1] (0.09115803241729736 vs. 0.6782537698745728) and 3 other locations (33.00%)
    +  traced = torch.jit.trace(fill_row_zero, (torch.rand(3, 4),))
    +graph(%0 : Float(3, 4)) {
    +  return (%0);
    +}
    +
    +
    +

    We can fix this by modifying the code to not use the in-place update, but +rather build up the result tensor out-of-place with torch.cat:

    +
    def fill_row_zero(x):
+    x = torch.cat((torch.rand(1, *x.shape[1:2]), x[1:]), dim=0)
    +    return x
    +
    +traced = torch.jit.trace(fill_row_zero, (torch.rand(3, 4),))
    +print(traced.graph)
    +
    +
    +
    +
    +
    +
    +
    +

    Frequently Asked Questions

    +

    Q: I would like to train a model on GPU and do inference on CPU. What are the +best practices?

    +
    +

    First convert your model from GPU to CPU and then save it, like so:

    +
    cpu_model = gpu_model.cpu()
    +sample_input_cpu = sample_input_gpu.cpu()
+traced_cpu = torch.jit.trace(cpu_model, sample_input_cpu)
    +torch.jit.save(traced_cpu, "cpu.pth")
    +
+traced_gpu = torch.jit.trace(gpu_model, sample_input_gpu)
    +torch.jit.save(traced_gpu, "gpu.pth")
    +
    +# ... later, when using the model:
    +
    +if use_gpu:
    +  model = torch.jit.load("gpu.pth")
    +else:
    +  model = torch.jit.load("cpu.pth")
    +
    +model(input)
    +
    +
    +

    This is recommended because the tracer may witness tensor creation on a +specific device, so casting an already-loaded model may have unexpected +effects. Casting the model before saving it ensures that the tracer has +the correct device information.

    +
    +

    Q: How do I store attributes on a ScriptModule?

    +
    +

    Say we have a model like:

    +
    class Model(torch.jit.ScriptModule):
    +  def __init__(self):
    +    super(Model, self).__init__()
    +    self.x = 2
    +
    +  @torch.jit.script_method
    +  def forward(self):
    +    return self.x
    +
    +
    +

If Model is instantiated, it will result in a compilation error since the compiler doesn't know about x. There are four ways to inform the compiler of attributes on a ScriptModule (a sketch using the fourth follows the list):

    +

    1. nn.Parameter - values wrapped in nn.Parameter will work as they +do on nn.Modules

    +

    2. register_buffer - values wrapped in register_buffer will work as +they do on nn.Modules

    +

    3. __constants__ - adding a list called __constants__ at the +class definition level will mark the contained names as constants. Constants +are saved directly in the code of the model. See +Python-defined Constants.

    +

    4. torch.jit.Attribute - values wrapped in torch.jit.Attribute can +be any TorchScript type, be mutated and are saved outside of the code of +the model. See Module Attributes.

    +
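For example, a minimal sketch of the fourth approach applied to the Model above:

class Model(torch.jit.ScriptModule):
  def __init__(self):
    super(Model, self).__init__()
    # wrap the value and declare its type so the compiler knows about x
    self.x = torch.jit.Attribute(2, int)

  @torch.jit.script_method
  def forward(self):
    # type: () -> int
    return self.x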
    +

    Q: I would like to trace module’s method but I keep getting this error:

    +

    RuntimeError: Cannot insert a Tensor that requires grad as a constant. Consider making it a parameter or input, or detaching the gradient

    +
    +

This error usually means that the method you are tracing uses the module's parameters and that you are passing the module's method instead of the module instance (e.g. my_module_instance.forward vs. my_module_instance).

    +
    +
      +
    • Invoking trace with module’s method captures module parameters (which may require gradients) as constants.

    • +
    • On the other hand, invoking trace with module’s instance (e.g. my_module) creates a new module and correctly copies parameters into the new module, so they can accumulate gradients if required.

    • +
    +
    +

Since trace treats my_module_instance.forward as a standalone function, there is currently no way to trace methods other than forward that use the module's parameters. Version 1.1.1 adds a new API, trace_module, that allows users to trace any method in the module, including more than one method:

    +
    class Net(nn.Module):
    +    def __init__(self):
    +        super(Net, self).__init__()
    +        self.conv = nn.Conv2d(1, 1, 3)
    +
    +    def forward(self, x):
    +        return self.conv(x)
    +
    +    def weighted_kernel_sum(self, weight):
    +        return weight * self.conv.weight
    +
    +example_weight = torch.rand(1, 1, 3, 3)
    +example_forward_input = torch.rand(1, 1, 3, 3)
    +n = Net()
    +inputs = {'forward' : example_forward_input, 'weighted_kernel_sum' : example_weight}
    +module = torch.jit.trace_module(n, inputs)
    +
    +
    +
    +
    +

    Builtin Functions

    +

TorchScript supports a subset of the builtin tensor and neural network functions that PyTorch provides. Most methods on Tensor, as well as functions in the torch namespace, all functions in torch.nn.functional, and all modules from torch.nn, are supported in TorchScript, excluding those in the list below. For unsupported modules, we suggest using torch.jit.trace().

    +

    Unsupported torch.nn Modules

    +
    torch.nn.modules.adaptive.AdaptiveLogSoftmaxWithLoss
    +torch.nn.modules.normalization.CrossMapLRN2d
    +torch.nn.modules.fold.Fold
    +torch.nn.modules.fold.Unfold
    +torch.nn.modules.rnn.GRU
    +torch.nn.modules.rnn.RNN
    +
    +
    +
    +
    +
\ No newline at end of file
diff --git a/docs/stable/model_zoo.html b/docs/stable/model_zoo.html
new file mode 100644
index 000000000000..83564eba3b92
--- /dev/null
+++ b/docs/stable/model_zoo.html
@@ -0,0 +1,560 @@

torch.utils.model_zoo — PyTorch master documentation

    torch.utils.model_zoo

    +

    Moved to torch.hub.

    +
    +
    +torch.utils.model_zoo.load_url(url, model_dir=None, map_location=None, progress=True)
    +

    Loads the Torch serialized object at the given URL.

    +

    If the object is already present in model_dir, it’s deserialized and +returned. The filename part of the URL should follow the naming convention +filename-<sha256>.ext where <sha256> is the first eight or more +digits of the SHA256 hash of the contents of the file. The hash is used to +ensure unique names and to verify the contents of the file.

    +

The default value of model_dir is $TORCH_HOME/checkpoints, where the environment variable $TORCH_HOME defaults to $XDG_CACHE_HOME/torch. $XDG_CACHE_HOME follows the X Desktop Group (XDG) specification of the Linux filesystem layout, with a default value of ~/.cache if not set.

    +
    +
    Parameters
    +
      +
    • url (string) – URL of the object to download

    • +
    • model_dir (string, optional) – directory in which to save the object

    • +
    • map_location (optional) – a function or a dict specifying how to remap storage locations (see torch.load)

    • +
    • progress (bool, optional) – whether or not to display a progress bar to stderr

    • +
    +
    +
    +

    Example

    +
    >>> state_dict = torch.hub.load_state_dict_from_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth')
    +
    +
    +
\ No newline at end of file
diff --git a/docs/stable/multiprocessing.html b/docs/stable/multiprocessing.html
new file mode 100644
index 000000000000..c3bcdef61423
--- /dev/null
+++ b/docs/stable/multiprocessing.html
@@ -0,0 +1,769 @@

Multiprocessing package - torch.multiprocessing — PyTorch master documentation

    Multiprocessing package - torch.multiprocessing

    +

    torch.multiprocessing is a wrapper around the native multiprocessing +module. It registers custom reducers, that use shared memory to provide shared +views on the same data in different processes. Once the tensor/storage is moved +to shared_memory (see share_memory_()), it will be possible +to send it to other processes without making any copies.

    +

The API is 100% compatible with the original module - it's enough to change import multiprocessing to import torch.multiprocessing to have all tensors sent through queues, or shared via other mechanisms, moved to shared memory.

    +
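As a minimal sketch of this drop-in usage (the worker function and tensor shape are illustrative):

import torch
import torch.multiprocessing as mp

def worker(t):
    # t is backed by the same shared-memory storage as in the parent,
    # so this in-place update is visible to the parent process
    t.add_(1)

if __name__ == '__main__':
    t = torch.zeros(2)
    t.share_memory_()
    p = mp.Process(target=worker, args=(t,))
    p.start()
    p.join()
    print(t)  # tensor([1., 1.])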

Because of the similarity of APIs we do not document most of this package's contents, and we recommend referring to the excellent docs of the original module.

    +
    +

    Warning

    +

    If the main process exits abruptly (e.g. because of an incoming signal), +Python’s multiprocessing sometimes fails to clean up its children. +It’s a known caveat, so if you’re seeing any resource leaks after +interrupting the interpreter, it probably means that this has just happened +to you.

    +
    +
    +

    Strategy management

    +
    +
    +torch.multiprocessing.get_all_sharing_strategies()[source]
    +

    Returns a set of sharing strategies supported on a current system.

    +
    + +
    +
    +torch.multiprocessing.get_sharing_strategy()[source]
    +

    Returns the current strategy for sharing CPU tensors.

    +
    + +
    +
    +torch.multiprocessing.set_sharing_strategy(new_strategy)[source]
    +

    Sets the strategy for sharing CPU tensors.

    +
    +
    Parameters
    +

    new_strategy (str) – Name of the selected strategy. Should be one of +the values returned by get_all_sharing_strategies().

    +
    +
    +
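A minimal usage sketch (picking a strategy only if the system reports it as supported):

import torch.multiprocessing as mp

if 'file_system' in mp.get_all_sharing_strategies():
    mp.set_sharing_strategy('file_system')
print(mp.get_sharing_strategy())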
    + +
    +
    +

    Sharing CUDA tensors

    +

Sharing CUDA tensors between processes is supported only in Python 3, using the spawn or forkserver start methods. multiprocessing in Python 2 can only create subprocesses using fork, and that is not supported by the CUDA runtime.

    +

Unlike CPU tensors, the sending process is required to keep the original tensor as long as the receiving process retains a copy of the tensor. The refcounting is implemented under the hood, but it requires users to follow the best practices below.

    +
    +

    Warning

    +

If the consumer process dies abnormally due to a fatal signal, the shared tensor could be kept in memory forever as long as the sending process is running.

    +
    +
      +
    1. Release memory ASAP in the consumer.

    +
    ## Good
    +x = queue.get()
+# do something with x
    +del x
    +
    +
    +
    ## Bad
    +x = queue.get()
+# do something with x
+# do everything else (the producer has to keep x in memory)
    +
    +
    +

2. Keep the producer process running until all consumers exit. This prevents the situation where the producer releases memory that is still in use by a consumer.

    +
    ## producer
    +# send tensors, do something
    +event.wait()
    +
    +
    +
    ## consumer
    +# receive tensors and use them
    +event.set()
    +
    +
    +
      +
3. Don’t pass received tensors.
    +
    # not going to work
    +x = queue.get()
    +queue_2.put(x)
    +
    +
    +
    # you need to create a process-local copy
    +x = queue.get()
    +x_clone = x.clone()
    +queue_2.put(x_clone)
    +
    +
    +
    # putting and getting from the same queue in the same process will likely end up with segfault
    +queue.put(tensor)
    +x = queue.get()
    +
    +
    +
    +
    +

    Sharing strategies

    +

This section provides a brief overview of how the different sharing strategies work. Note that it applies only to CPU tensors - CUDA tensors will always use the CUDA API, as that's the only way they can be shared.

    +
    +

    File descriptor - file_descriptor

    +
    +

    Note

    +

This is the default strategy (except for macOS, where it's not supported).

    +
    +

    This strategy will use file descriptors as shared memory handles. Whenever a +storage is moved to shared memory, a file descriptor obtained from shm_open +is cached with the object, and when it’s going to be sent to other processes, +the file descriptor will be transferred (e.g. via UNIX sockets) to it. The +receiver will also cache the file descriptor and mmap it, to obtain a shared +view onto the storage data.

    +

Note that if many tensors are shared, this strategy will keep a large number of file descriptors open most of the time. If your system has low limits for the number of open file descriptors, and you can't raise them, you should use the file_system strategy.

    +
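To check whether this applies to you, one way is to inspect (and, within the hard limit, raise) the per-process open-file limit from Python; a minimal sketch (the 4096 target is arbitrary):

import resource

soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)  # current limits
if soft < 4096 and (hard == resource.RLIM_INFINITY or hard >= 4096):
    # raise the soft limit; the hard limit cannot be raised without privileges
    resource.setrlimit(resource.RLIMIT_NOFILE, (4096, hard))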
    +
    +

    File system - file_system

    +

This strategy will use file names given to shm_open to identify the shared memory regions. This has the benefit of not requiring the implementation to cache the file descriptors obtained from it, but at the same time it is prone to shared memory leaks. The file can't be deleted right after its creation, because other processes need to access it to open their views. If the processes fatally crash, or are killed, and don't call the storage destructors, the files will remain in the system. This is very serious, because they keep using up the memory until the system is restarted, or they're freed manually.

    +

    To counter the problem of shared memory file leaks, torch.multiprocessing +will spawn a daemon named torch_shm_manager that will isolate itself from +the current process group, and will keep track of all shared memory allocations. +Once all processes connected to it exit, it will wait a moment to ensure there +will be no new connections, and will iterate over all shared memory files +allocated by the group. If it finds that any of them still exist, they will be +deallocated. We’ve tested this method and it proved to be robust to various +failures. Still, if your system has high enough limits, and file_descriptor +is a supported strategy, we do not recommend switching to this one.

    +
    +
    +
    +

    Spawning subprocesses

    +
    +

    Note

    +

    Available for Python >= 3.4.

    +

    This depends on the spawn start method in Python’s +multiprocessing package.

    +
    +

    Spawning a number of subprocesses to perform some function can be done +by creating Process instances and calling join to wait for +their completion. This approach works fine when dealing with a single +subprocess but presents potential issues when dealing with multiple +processes.

    +

Namely, joining processes sequentially implies they will terminate sequentially. If they terminate out of order, and the first process does not terminate, the termination of the other processes will go unnoticed. Also, there are no native facilities for error propagation.

    +

The spawn function below addresses these concerns: it takes care of error propagation and out-of-order termination, and actively terminates processes upon detecting an error in one of them.

    +
    +
    +torch.multiprocessing.spawn(fn, args=(), nprocs=1, join=True, daemon=False)[source]
    +

    Spawns nprocs processes that run fn with args.

    +

If one of the processes exits with a non-zero exit status, the remaining processes are killed and an exception is raised with the cause of termination. If an exception was caught in a child process, it is forwarded and its traceback is included in the exception raised in the parent process.

    +
    +
    Parameters
    +
      +
    • fn (function) –

The function called as the entrypoint of the spawned process. It must be defined at the top level of a module so it can be pickled and spawned; this is a requirement imposed by multiprocessing.

      +

      The function is called as fn(i, *args), where i is +the process index and args is the passed through tuple +of arguments.

      +

    • +
    • args (tuple) – Arguments passed to fn.

    • +
    • nprocs (int) – Number of processes to spawn.

    • +
    • join (bool) – Perform a blocking join on all processes.

    • +
    • daemon (bool) – The spawned processes’ daemon flag. If set to True, +daemonic processes will be created.

    • +
    +
    +
    Returns
    +

    None if join is True, +SpawnContext if join is False

    +
    +
    +
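A minimal usage sketch (the run function is hypothetical):

import torch.multiprocessing as mp

def run(rank, message):
    # spawn calls fn(i, *args), so rank is the process index
    print(rank, message)

if __name__ == '__main__':
    mp.spawn(run, args=('hello',), nprocs=2)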
    + +
    +
    +class torch.multiprocessing.SpawnContext[source]
    +

    Returned by spawn() when called with join=False.

    +
    +
    +join(timeout=None)[source]
    +

    Tries to join one or more processes in this spawn context. +If one of them exited with a non-zero exit status, this function +kills the remaining processes and raises an exception with the cause +of the first process exiting.

    +

    Returns True if all processes have been joined successfully, +False if there are more processes that need to be joined.

    +
    +
    Parameters
    +

    timeout (float) – Wait this long before giving up on waiting.

    +
    +
    +
    + +
\ No newline at end of file
diff --git a/docs/stable/nn.functional.html b/docs/stable/nn.functional.html
new file mode 100644
index 000000000000..1eaf31f530c0
--- /dev/null
+++ b/docs/stable/nn.functional.html
@@ -0,0 +1,2986 @@

torch.nn.functional — PyTorch master documentation

    torch.nn.functional

    +
    +

    Convolution functions

    +
    +

    conv1d

    +
    +
    +torch.nn.functional.conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) → Tensor
    +

    Applies a 1D convolution over an input signal composed of several input +planes.

    +

    See Conv1d for details and output shape.

    +
    +

    Note

    +

    In some circumstances when using the CUDA backend with CuDNN, this operator +may select a nondeterministic algorithm to increase performance. If this is +undesirable, you can try to make the operation deterministic (potentially at +a performance cost) by setting torch.backends.cudnn.deterministic = +True. +Please see the notes on Reproducibility for background.

    +
    +
    +
    Parameters
    +
      +
    • input – input tensor of shape \((\text{minibatch} , \text{in\_channels} , iW)\)

    • +
    • weight – filters of shape \((\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kW)\)

    • +
    • bias – optional bias of shape \((\text{out\_channels})\). Default: None

    • +
    • stride – the stride of the convolving kernel. Can be a single number or +a one-element tuple (sW,). Default: 1

    • +
    • padding – implicit paddings on both sides of the input. Can be a +single number or a one-element tuple (padW,). Default: 0

    • +
    • dilation – the spacing between kernel elements. Can be a single number or +a one-element tuple (dW,). Default: 1

    • +
    • groups – split input into groups, \(\text{in\_channels}\) should be divisible by +the number of groups. Default: 1

    • +
    +
    +
    +

    Examples:

    +
    >>> filters = torch.randn(33, 16, 3)
    +>>> inputs = torch.randn(20, 16, 50)
    +>>> F.conv1d(inputs, filters)
    +
    +
    +
    + +
    +
    +

    conv2d

    +
    +
    +torch.nn.functional.conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) → Tensor
    +

    Applies a 2D convolution over an input image composed of several input +planes.

    +

    See Conv2d for details and output shape.

    +
    +

    Note

    +

    In some circumstances when using the CUDA backend with CuDNN, this operator +may select a nondeterministic algorithm to increase performance. If this is +undesirable, you can try to make the operation deterministic (potentially at +a performance cost) by setting torch.backends.cudnn.deterministic = +True. +Please see the notes on Reproducibility for background.

    +
    +
    +
    Parameters
    +
      +
    • input – input tensor of shape \((\text{minibatch} , \text{in\_channels} , iH , iW)\)

    • +
    • weight – filters of shape \((\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kH , kW)\)

    • +
    • bias – optional bias tensor of shape \((\text{out\_channels})\). Default: None

    • +
    • stride – the stride of the convolving kernel. Can be a single number or a +tuple (sH, sW). Default: 1

    • +
    • padding – implicit paddings on both sides of the input. Can be a +single number or a tuple (padH, padW). Default: 0

    • +
    • dilation – the spacing between kernel elements. Can be a single number or +a tuple (dH, dW). Default: 1

    • +
    • groups – split input into groups, \(\text{in\_channels}\) should be divisible by the +number of groups. Default: 1

    • +
    +
    +
    +

    Examples:

    +
    >>> # With square kernels and equal stride
    +>>> filters = torch.randn(8,4,3,3)
    +>>> inputs = torch.randn(1,4,5,5)
    +>>> F.conv2d(inputs, filters, padding=1)
    +
    +
    +
    + +
    +
    +

    conv3d

    +
    +
    +torch.nn.functional.conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) → Tensor
    +

    Applies a 3D convolution over an input image composed of several input +planes.

    +

    See Conv3d for details and output shape.

    +
    +

    Note

    +

    In some circumstances when using the CUDA backend with CuDNN, this operator +may select a nondeterministic algorithm to increase performance. If this is +undesirable, you can try to make the operation deterministic (potentially at +a performance cost) by setting torch.backends.cudnn.deterministic = +True. +Please see the notes on Reproducibility for background.

    +
    +
    +
    Parameters
    +
      +
    • input – input tensor of shape \((\text{minibatch} , \text{in\_channels} , iT , iH , iW)\)

    • +
    • weight – filters of shape \((\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kT , kH , kW)\)

    • +
    • bias – optional bias tensor of shape \((\text{out\_channels})\). Default: None

    • +
    • stride – the stride of the convolving kernel. Can be a single number or a +tuple (sT, sH, sW). Default: 1

    • +
    • padding – implicit paddings on both sides of the input. Can be a +single number or a tuple (padT, padH, padW). Default: 0

    • +
    • dilation – the spacing between kernel elements. Can be a single number or +a tuple (dT, dH, dW). Default: 1

    • +
    • groups – split input into groups, \(\text{in\_channels}\) should be divisible by +the number of groups. Default: 1

    • +
    +
    +
    +

    Examples:

    +
    >>> filters = torch.randn(33, 16, 3, 3, 3)
    +>>> inputs = torch.randn(20, 16, 50, 10, 20)
    +>>> F.conv3d(inputs, filters)
    +
    +
    +
    + +
    +
    +

    conv_transpose1d

    +
    +
    +torch.nn.functional.conv_transpose1d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) → Tensor
    +

    Applies a 1D transposed convolution operator over an input signal +composed of several input planes, sometimes also called “deconvolution”.

    +

    See ConvTranspose1d for details and output shape.

    +
    +

    Note

    +

    In some circumstances when using the CUDA backend with CuDNN, this operator +may select a nondeterministic algorithm to increase performance. If this is +undesirable, you can try to make the operation deterministic (potentially at +a performance cost) by setting torch.backends.cudnn.deterministic = +True. +Please see the notes on Reproducibility for background.

    +
    +
    +
    Parameters
    +
      +
    • input – input tensor of shape \((\text{minibatch} , \text{in\_channels} , iW)\)

    • +
    • weight – filters of shape \((\text{in\_channels} , \frac{\text{out\_channels}}{\text{groups}} , kW)\)

    • +
    • bias – optional bias of shape \((\text{out\_channels})\). Default: None

    • +
    • stride – the stride of the convolving kernel. Can be a single number or a +tuple (sW,). Default: 1

    • +
    • paddingdilation * (kernel_size - 1) - padding zero-padding will be added to both +sides of each dimension in the input. Can be a single number or a tuple +(padW,). Default: 0

    • +
    • output_padding – additional size added to one side of each dimension in the +output shape. Can be a single number or a tuple (out_padW). Default: 0

    • +
    • groups – split input into groups, \(\text{in\_channels}\) should be divisible by the +number of groups. Default: 1

    • +
    • dilation – the spacing between kernel elements. Can be a single number or +a tuple (dW,). Default: 1

    • +
    +
    +
    +

    Examples:

    +
    >>> inputs = torch.randn(20, 16, 50)
    +>>> weights = torch.randn(16, 33, 5)
    +>>> F.conv_transpose1d(inputs, weights)
    +
    +
    +
    + +
    +
    +

    conv_transpose2d

    +
    +
    +torch.nn.functional.conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) → Tensor
    +

    Applies a 2D transposed convolution operator over an input image +composed of several input planes, sometimes also called “deconvolution”.

    +

    See ConvTranspose2d for details and output shape.

    +
    +

    Note

    +

    In some circumstances when using the CUDA backend with CuDNN, this operator +may select a nondeterministic algorithm to increase performance. If this is +undesirable, you can try to make the operation deterministic (potentially at +a performance cost) by setting torch.backends.cudnn.deterministic = +True. +Please see the notes on Reproducibility for background.

    +
    +
    +
    Parameters
    +
      +
    • input – input tensor of shape \((\text{minibatch} , \text{in\_channels} , iH , iW)\)

    • +
    • weight – filters of shape \((\text{in\_channels} , \frac{\text{out\_channels}}{\text{groups}} , kH , kW)\)

    • +
    • bias – optional bias of shape \((\text{out\_channels})\). Default: None

    • +
    • stride – the stride of the convolving kernel. Can be a single number or a +tuple (sH, sW). Default: 1

    • +
    • paddingdilation * (kernel_size - 1) - padding zero-padding will be added to both +sides of each dimension in the input. Can be a single number or a tuple +(padH, padW). Default: 0

    • +
    • output_padding – additional size added to one side of each dimension in the +output shape. Can be a single number or a tuple (out_padH, out_padW). +Default: 0

    • +
    • groups – split input into groups, \(\text{in\_channels}\) should be divisible by the +number of groups. Default: 1

    • +
    • dilation – the spacing between kernel elements. Can be a single number or +a tuple (dH, dW). Default: 1

    • +
    +
    +
    +

    Examples:

    +
    >>> # With square kernels and equal stride
    +>>> inputs = torch.randn(1, 4, 5, 5)
    +>>> weights = torch.randn(4, 8, 3, 3)
    +>>> F.conv_transpose2d(inputs, weights, padding=1)
    +
    +
    +
    + +
    +
    +

    conv_transpose3d

    +
    +
    +torch.nn.functional.conv_transpose3d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) → Tensor
    +

    Applies a 3D transposed convolution operator over an input image +composed of several input planes, sometimes also called “deconvolution”

    +

    See ConvTranspose3d for details and output shape.

    +
    +

    Note

    +

    In some circumstances when using the CUDA backend with CuDNN, this operator +may select a nondeterministic algorithm to increase performance. If this is +undesirable, you can try to make the operation deterministic (potentially at +a performance cost) by setting torch.backends.cudnn.deterministic = +True. +Please see the notes on Reproducibility for background.

    +
    +
    +
    Parameters
    +
      +
    • input – input tensor of shape \((\text{minibatch} , \text{in\_channels} , iT , iH , iW)\)

    • +
    • weight – filters of shape \((\text{in\_channels} , \frac{\text{out\_channels}}{\text{groups}} , kT , kH , kW)\)

    • +
    • bias – optional bias of shape \((\text{out\_channels})\). Default: None

    • +
    • stride – the stride of the convolving kernel. Can be a single number or a +tuple (sT, sH, sW). Default: 1

    • +
    • paddingdilation * (kernel_size - 1) - padding zero-padding will be added to both +sides of each dimension in the input. Can be a single number or a tuple +(padT, padH, padW). Default: 0

    • +
    • output_padding – additional size added to one side of each dimension in the +output shape. Can be a single number or a tuple +(out_padT, out_padH, out_padW). Default: 0

    • +
    • groups – split input into groups, \(\text{in\_channels}\) should be divisible by the +number of groups. Default: 1

    • +
    • dilation – the spacing between kernel elements. Can be a single number or +a tuple (dT, dH, dW). Default: 1

    • +
    +
    +
    +

    Examples:

    +
    >>> inputs = torch.randn(20, 16, 50, 10, 20)
    +>>> weights = torch.randn(16, 33, 3, 3, 3)
    +>>> F.conv_transpose3d(inputs, weights)
    +
    +
    +
    + +
    +
    +

    unfold

    +
    +
    +torch.nn.functional.unfold(input, kernel_size, dilation=1, padding=0, stride=1)[source]
    +

Extracts sliding local blocks from a batched input tensor.

    +
    +

    Warning

    +

    Currently, only 4-D input tensors (batched image-like tensors) are +supported.

    +
    +
    +

    Warning

    +

    More than one element of the unfolded tensor may refer to a single +memory location. As a result, in-place operations (especially ones that +are vectorized) may result in incorrect behavior. If you need to write +to the tensor, please clone it first.

    +
    +

    See torch.nn.Unfold for details

    +
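A minimal sketch (shapes chosen only for illustration):

>>> import torch
>>> import torch.nn.functional as F
>>> inp = torch.randn(1, 3, 10, 12)
>>> blocks = F.unfold(inp, kernel_size=(4, 5))
>>> blocks.shape  # (N, C * 4 * 5, number of 4x5 blocks)
torch.Size([1, 60, 56])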
    + +
    +
    +

    fold

    torch.nn.functional.fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1)[source]

    Combines an array of sliding local blocks into a large containing tensor.

    Warning

    Currently, only 4-D output tensors (batched image-like tensors) are supported.

    See torch.nn.Fold for details.
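    To make the unfold/fold relationship concrete, here is a minimal sketch (tensor sizes chosen arbitrarily for illustration): unfold flattens each sliding block into a column, and fold scatters the columns back, summing where blocks overlap.

    >>> import torch
    >>> import torch.nn.functional as F
    >>> x = torch.randn(1, 3, 10, 12)
    >>> # each 3x3 block becomes one column: (N, C * 3 * 3, L) with L = 8 * 10
    >>> blocks = F.unfold(x, kernel_size=3)
    >>> blocks.shape
    torch.Size([1, 27, 80])
    >>> # fold sums overlapping blocks back into an image-shaped tensor,
    >>> # so the result generally differs from x at overlapped positions
    >>> y = F.fold(blocks, output_size=(10, 12), kernel_size=3)
    >>> y.shape
    torch.Size([1, 3, 10, 12])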

    Pooling functions


    avg_pool1d

    torch.nn.functional.avg_pool1d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True) → Tensor

    Applies a 1D average pooling over an input signal composed of several input planes.

    See AvgPool1d for details and output shape.

    Parameters

    • input – input tensor of shape \((\text{minibatch} , \text{in\_channels} , iW)\)

    • kernel_size – the size of the window. Can be a single number or a tuple (kW,)

    • stride – the stride of the window. Can be a single number or a tuple (sW,). Default: kernel_size

    • padding – implicit zero paddings on both sides of the input. Can be a single number or a tuple (padW,). Default: 0

    • ceil_mode – when True, will use ceil instead of floor to compute the output shape. Default: False

    • count_include_pad – when True, will include the zero-padding in the averaging calculation. Default: True

    Examples:

    >>> # pool of square window of size=3, stride=2
    >>> input = torch.tensor([[[1, 2, 3, 4, 5, 6, 7]]], dtype=torch.float32)
    >>> F.avg_pool1d(input, kernel_size=3, stride=2)
    tensor([[[ 2.,  4.,  6.]]])

    avg_pool2d

    torch.nn.functional.avg_pool2d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None) → Tensor

    Applies 2D average-pooling operation in \(kH \times kW\) regions by step size \(sH \times sW\) steps. The number of output features is equal to the number of input planes.

    See AvgPool2d for details and output shape.

    Parameters

    • input – input tensor \((\text{minibatch} , \text{in\_channels} , iH , iW)\)

    • kernel_size – size of the pooling region. Can be a single number or a tuple (kH, kW)

    • stride – stride of the pooling operation. Can be a single number or a tuple (sH, sW). Default: kernel_size

    • padding – implicit zero paddings on both sides of the input. Can be a single number or a tuple (padH, padW). Default: 0

    • ceil_mode – when True, will use ceil instead of floor in the formula to compute the output shape. Default: False

    • count_include_pad – when True, will include the zero-padding in the averaging calculation. Default: True

    • divisor_override – if specified, it will be used as divisor, otherwise size of the pooling region will be used. Default: None
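    As a small sketch of the arithmetic (input values chosen for easy hand-checking), a 2×2 average pool averages each non-overlapping 2×2 block:

    >>> input = torch.arange(16, dtype=torch.float32).reshape(1, 1, 4, 4)
    >>> F.avg_pool2d(input, kernel_size=2)
    tensor([[[[ 2.5000,  4.5000],
              [10.5000, 12.5000]]]])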

    avg_pool3d

    torch.nn.functional.avg_pool3d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None) → Tensor

    Applies 3D average-pooling operation in \(kT \times kH \times kW\) regions by step size \(sT \times sH \times sW\) steps. The number of output features is equal to \(\lfloor\frac{\text{input planes}}{sT}\rfloor\).

    See AvgPool3d for details and output shape.

    Parameters

    • input – input tensor \((\text{minibatch} , \text{in\_channels} , iT , iH , iW)\)

    • kernel_size – size of the pooling region. Can be a single number or a tuple (kT, kH, kW)

    • stride – stride of the pooling operation. Can be a single number or a tuple (sT, sH, sW). Default: kernel_size

    • padding – implicit zero paddings on both sides of the input. Can be a single number or a tuple (padT, padH, padW). Default: 0

    • ceil_mode – when True, will use ceil instead of floor in the formula to compute the output shape

    • count_include_pad – when True, will include the zero-padding in the averaging calculation

    • divisor_override – if specified, it will be used as divisor, otherwise size of the pooling region will be used. Default: None

    max_pool1d

    torch.nn.functional.max_pool1d(*args, **kwargs)

    Applies a 1D max pooling over an input signal composed of several input planes.

    See MaxPool1d for details.

    max_pool2d

    torch.nn.functional.max_pool2d(*args, **kwargs)

    Applies a 2D max pooling over an input signal composed of several input planes.

    See MaxPool2d for details.
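    A minimal sketch of the behaviour (illustrative values): a 2×2 max pool keeps the largest entry of each non-overlapping 2×2 block:

    >>> input = torch.arange(16, dtype=torch.float32).reshape(1, 1, 4, 4)
    >>> F.max_pool2d(input, kernel_size=2)
    tensor([[[[ 5.,  7.],
              [13., 15.]]]])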

    max_pool3d

    torch.nn.functional.max_pool3d(*args, **kwargs)

    Applies a 3D max pooling over an input signal composed of several input planes.

    See MaxPool3d for details.

    max_unpool1d

    torch.nn.functional.max_unpool1d(input, indices, kernel_size, stride=None, padding=0, output_size=None)[source]

    Computes a partial inverse of MaxPool1d.

    See MaxUnpool1d for details.

    max_unpool2d

    torch.nn.functional.max_unpool2d(input, indices, kernel_size, stride=None, padding=0, output_size=None)[source]

    Computes a partial inverse of MaxPool2d.

    See MaxUnpool2d for details.
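    A minimal round-trip sketch (illustrative values): pooling with return_indices=True records where each maximum came from, and max_unpool2d places the pooled values back at those positions, zero-filling the rest:

    >>> input = torch.arange(16, dtype=torch.float32).reshape(1, 1, 4, 4)
    >>> pooled, indices = F.max_pool2d(input, kernel_size=2, return_indices=True)
    >>> F.max_unpool2d(pooled, indices, kernel_size=2)
    tensor([[[[ 0.,  0.,  0.,  0.],
              [ 0.,  5.,  0.,  7.],
              [ 0.,  0.,  0.,  0.],
              [ 0., 13.,  0., 15.]]]])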

    max_unpool3d

    torch.nn.functional.max_unpool3d(input, indices, kernel_size, stride=None, padding=0, output_size=None)[source]

    Computes a partial inverse of MaxPool3d.

    See MaxUnpool3d for details.

    lp_pool1d

    torch.nn.functional.lp_pool1d(input, norm_type, kernel_size, stride=None, ceil_mode=False)[source]

    Applies a 1D power-average pooling over an input signal composed of several input planes. If the sum of all inputs to the power of p is zero, the gradient is set to zero as well.

    See LPPool1d for details.

    lp_pool2d

    torch.nn.functional.lp_pool2d(input, norm_type, kernel_size, stride=None, ceil_mode=False)[source]

    Applies a 2D power-average pooling over an input signal composed of several input planes. If the sum of all inputs to the power of p is zero, the gradient is set to zero as well.

    See LPPool2d for details.

    adaptive_max_pool1d

    torch.nn.functional.adaptive_max_pool1d(*args, **kwargs)

    Applies a 1D adaptive max pooling over an input signal composed of several input planes.

    See AdaptiveMaxPool1d for details and output shape.

    Parameters

    • output_size – the target output size (single integer)

    • return_indices – whether to return pooling indices. Default: False

    adaptive_max_pool2d

    torch.nn.functional.adaptive_max_pool2d(*args, **kwargs)

    Applies a 2D adaptive max pooling over an input signal composed of several input planes.

    See AdaptiveMaxPool2d for details and output shape.

    Parameters

    • output_size – the target output size (single integer or double-integer tuple)

    • return_indices – whether to return pooling indices. Default: False

    adaptive_max_pool3d

    torch.nn.functional.adaptive_max_pool3d(*args, **kwargs)

    Applies a 3D adaptive max pooling over an input signal composed of several input planes.

    See AdaptiveMaxPool3d for details and output shape.

    Parameters

    • output_size – the target output size (single integer or triple-integer tuple)

    • return_indices – whether to return pooling indices. Default: False

    adaptive_avg_pool1d

    torch.nn.functional.adaptive_avg_pool1d(input, output_size) → Tensor

    Applies a 1D adaptive average pooling over an input signal composed of several input planes.

    See AdaptiveAvgPool1d for details and output shape.

    Parameters

    output_size – the target output size (single integer)

    adaptive_avg_pool2d

    torch.nn.functional.adaptive_avg_pool2d(input, output_size)[source]

    Applies a 2D adaptive average pooling over an input signal composed of several input planes.

    See AdaptiveAvgPool2d for details and output shape.

    Parameters

    output_size – the target output size (single integer or double-integer tuple)
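    A quick sketch of the "adaptive" contract (sizes arbitrary): you specify the output size and the function derives the pooling windows, whatever the input size:

    >>> input = torch.randn(1, 64, 10, 9)
    >>> F.adaptive_avg_pool2d(input, (5, 7)).shape
    torch.Size([1, 64, 5, 7])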

    adaptive_avg_pool3d

    torch.nn.functional.adaptive_avg_pool3d(input, output_size)[source]

    Applies a 3D adaptive average pooling over an input signal composed of several input planes.

    See AdaptiveAvgPool3d for details and output shape.

    Parameters

    output_size – the target output size (single integer or triple-integer tuple)

    Non-linear activation functions

    threshold

    torch.nn.functional.threshold(input, threshold, value, inplace=False)[source]

    Thresholds each element of the input Tensor.

    See Threshold for more details.

    torch.nn.functional.threshold_(input, threshold, value) → Tensor

    In-place version of threshold().

    relu

    torch.nn.functional.relu(input, inplace=False) → Tensor[source]

    Applies the rectified linear unit function element-wise. See ReLU for more details.

    torch.nn.functional.relu_(input) → Tensor

    In-place version of relu().
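    A one-line example (input values arbitrary):

    >>> x = torch.tensor([-1.0, 0.0, 2.0])
    >>> F.relu(x)
    tensor([0., 0., 2.])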

    hardtanh

    torch.nn.functional.hardtanh(input, min_val=-1., max_val=1., inplace=False) → Tensor[source]

    Applies the HardTanh function element-wise. See Hardtanh for more details.

    torch.nn.functional.hardtanh_(input, min_val=-1., max_val=1.) → Tensor

    In-place version of hardtanh().

    relu6

    torch.nn.functional.relu6(input, inplace=False) → Tensor[source]

    Applies the element-wise function \(\text{ReLU6}(x) = \min(\max(0,x), 6)\).

    See ReLU6 for more details.

    elu

    torch.nn.functional.elu(input, alpha=1.0, inplace=False)[source]

    Applies element-wise, \(\text{ELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x) - 1))\).

    See ELU for more details.

    torch.nn.functional.elu_(input, alpha=1.) → Tensor

    In-place version of elu().

    selu

    torch.nn.functional.selu(input, inplace=False) → Tensor[source]

    Applies element-wise, \(\text{SELU}(x) = scale * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1)))\), with \(\alpha=1.6732632423543772848170429916717\) and \(scale=1.0507009873554804934193349852946\).

    See SELU for more details.

    celu

    torch.nn.functional.celu(input, alpha=1., inplace=False) → Tensor[source]

    Applies element-wise, \(\text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x/\alpha) - 1))\).

    See CELU for more details.

    leaky_relu

    torch.nn.functional.leaky_relu(input, negative_slope=0.01, inplace=False) → Tensor[source]

    Applies element-wise, \(\text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x)\)

    See LeakyReLU for more details.

    torch.nn.functional.leaky_relu_(input, negative_slope=0.01) → Tensor

    In-place version of leaky_relu().

    prelu

    torch.nn.functional.prelu(input, weight) → Tensor[source]

    Applies element-wise the function \(\text{PReLU}(x) = \max(0,x) + \text{weight} * \min(0,x)\) where weight is a learnable parameter.

    See PReLU for more details.

    rrelu

    torch.nn.functional.rrelu(input, lower=1./8, upper=1./3, training=False, inplace=False) → Tensor[source]

    Randomized leaky ReLU.

    See RReLU for more details.

    torch.nn.functional.rrelu_(input, lower=1./8, upper=1./3, training=False) → Tensor

    In-place version of rrelu().

    glu

    torch.nn.functional.glu(input, dim=-1) → Tensor[source]

    The gated linear unit. Computes:

    \[\text{GLU}(a, b) = a \otimes \sigma(b)\]

    where input is split in half along dim to form a and b, \(\sigma\) is the sigmoid function and \(\otimes\) is the element-wise product between matrices.

    See Language Modeling with Gated Convolutional Networks.

    Parameters

    • input (Tensor) – input tensor

    • dim (int) – dimension on which to split the input. Default: -1
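    A small sketch of the shape contract (sizes arbitrary): glu halves the chosen dimension, using one half as values and the other as gates:

    >>> x = torch.randn(4, 6)
    >>> F.glu(x, dim=-1).shape
    torch.Size([4, 3])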

    gelu

    torch.nn.functional.gelu(input) → Tensor[source]

    Applies element-wise the function \(\text{GeLU}(x) = x * \Phi(x)\)

    where \(\Phi(x)\) is the Cumulative Distribution Function for Gaussian Distribution.

    See Gaussian Error Linear Units (GELUs).

    logsigmoid

    torch.nn.functional.logsigmoid(input) → Tensor

    Applies element-wise \(\text{LogSigmoid}(x_i) = \log \left(\frac{1}{1 + \exp(-x_i)}\right)\)

    See LogSigmoid for more details.

    hardshrink

    torch.nn.functional.hardshrink(input, lambd=0.5) → Tensor[source]

    Applies the hard shrinkage function element-wise.

    See Hardshrink for more details.

    tanhshrink

    torch.nn.functional.tanhshrink(input) → Tensor[source]

    Applies element-wise, \(\text{Tanhshrink}(x) = x - \text{Tanh}(x)\)

    See Tanhshrink for more details.

    softsign

    torch.nn.functional.softsign(input) → Tensor[source]

    Applies element-wise, the function \(\text{SoftSign}(x) = \frac{x}{1 + |x|}\)

    See Softsign for more details.

    softplus

    torch.nn.functional.softplus(input, beta=1, threshold=20) → Tensor

    Applies element-wise, \(\text{Softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x))\). See Softplus for more details.

    softmin

    torch.nn.functional.softmin(input, dim=None, _stacklevel=3, dtype=None)[source]

    Applies a softmin function.

    Note that \(\text{Softmin}(x) = \text{Softmax}(-x)\). See softmax definition for mathematical formula.

    See Softmin for more details.

    Parameters

    • input (Tensor) – input

    • dim (int) – A dimension along which softmin will be computed (so every slice along dim will sum to 1).

    • dtype (torch.dtype, optional) – the desired data type of returned tensor. If specified, the input tensor is casted to dtype before the operation is performed. This is useful for preventing data type overflows. Default: None.

    softmax

    torch.nn.functional.softmax(input, dim=None, _stacklevel=3, dtype=None)[source]

    Applies a softmax function.

    Softmax is defined as:

    \(\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}\)

    It is applied to all slices along dim, and will re-scale them so that the elements lie in the range [0, 1] and sum to 1.

    See Softmax for more details.

    Parameters

    • input (Tensor) – input

    • dim (int) – A dimension along which softmax will be computed.

    • dtype (torch.dtype, optional) – the desired data type of returned tensor. If specified, the input tensor is casted to dtype before the operation is performed. This is useful for preventing data type overflows. Default: None.

    Note

    This function doesn’t work directly with NLLLoss, which expects the Log to be computed between the Softmax and itself. Use log_softmax instead (it’s faster and has better numerical properties).
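    A minimal sketch of the relationship described in the note (input values arbitrary):

    >>> x = torch.tensor([[1.0, 2.0, 3.0]])
    >>> p = F.softmax(x, dim=1)
    >>> p.sum(dim=1)
    tensor([1.])
    >>> # log_softmax computes log(softmax(x)) in a numerically stable way
    >>> torch.allclose(F.log_softmax(x, dim=1), p.log())
    True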

    softshrink

    torch.nn.functional.softshrink(input, lambd=0.5) → Tensor

    Applies the soft shrinkage function elementwise.

    See Softshrink for more details.

    gumbel_softmax

    torch.nn.functional.gumbel_softmax(logits, tau=1, hard=False, eps=1e-10, dim=-1)[source]

    Samples from the Gumbel-Softmax distribution (Link 1, Link 2) and optionally discretizes.

    Parameters

    • logits – […, num_features] unnormalized log probabilities

    • tau – non-negative scalar temperature

    • hard – if True, the returned samples will be discretized as one-hot vectors, but will be differentiated as if they were the soft samples in autograd

    • dim (int) – A dimension along which softmax will be computed. Default: -1.

    Returns

    Sampled tensor of same shape as logits from the Gumbel-Softmax distribution. If hard=True, the returned samples will be one-hot, otherwise they will be probability distributions that sum to 1 across dim.

    Note

    This function is here for legacy reasons and may be removed from nn.functional in the future.

    Note

    The main trick for hard is to do y_hard - y_soft.detach() + y_soft

    It achieves two things:

    • makes the output value exactly one-hot (since we add then subtract the y_soft value)

    • makes the gradient equal to the y_soft gradient (since we strip all other gradients)

    Examples:

    >>> logits = torch.randn(20, 32)
    >>> # Sample soft categorical using reparametrization trick:
    >>> F.gumbel_softmax(logits, tau=1, hard=False)
    >>> # Sample hard categorical using "Straight-through" trick:
    >>> F.gumbel_softmax(logits, tau=1, hard=True)

    log_softmax

    torch.nn.functional.log_softmax(input, dim=None, _stacklevel=3, dtype=None)[source]

    Applies a softmax followed by a logarithm.

    While mathematically equivalent to log(softmax(x)), doing these two operations separately is slower, and numerically unstable. This function uses an alternative formulation to compute the output and gradient correctly.

    See LogSoftmax for more details.

    Parameters

    • input (Tensor) – input

    • dim (int) – A dimension along which log_softmax will be computed.

    • dtype (torch.dtype, optional) – the desired data type of returned tensor. If specified, the input tensor is casted to dtype before the operation is performed. This is useful for preventing data type overflows. Default: None.

    tanh

    torch.nn.functional.tanh(input) → Tensor[source]

    Applies element-wise, \(\text{Tanh}(x) = \tanh(x) = \frac{\exp(x) - \exp(-x)}{\exp(x) + \exp(-x)}\)

    See Tanh for more details.

    sigmoid

    torch.nn.functional.sigmoid(input) → Tensor[source]

    Applies the element-wise function \(\text{Sigmoid}(x) = \frac{1}{1 + \exp(-x)}\)

    See Sigmoid for more details.

    Normalization functions

    batch_norm

    torch.nn.functional.batch_norm(input, running_mean, running_var, weight=None, bias=None, training=False, momentum=0.1, eps=1e-05)[source]

    Applies Batch Normalization for each channel across a batch of data.

    See BatchNorm1d, BatchNorm2d, BatchNorm3d for details.

    instance_norm

    torch.nn.functional.instance_norm(input, running_mean=None, running_var=None, weight=None, bias=None, use_input_stats=True, momentum=0.1, eps=1e-05)[source]

    Applies Instance Normalization for each channel in each data sample in a batch.

    See InstanceNorm1d, InstanceNorm2d, InstanceNorm3d for details.

    layer_norm

    torch.nn.functional.layer_norm(input, normalized_shape, weight=None, bias=None, eps=1e-05)[source]

    Applies Layer Normalization for last certain number of dimensions.

    See LayerNorm for details.

    local_response_norm

    torch.nn.functional.local_response_norm(input, size, alpha=0.0001, beta=0.75, k=1.0)[source]

    Applies local response normalization over an input signal composed of several input planes, where channels occupy the second dimension. Applies normalization across channels.

    See LocalResponseNorm for details.

    normalize

    torch.nn.functional.normalize(input, p=2, dim=1, eps=1e-12, out=None)[source]

    Performs \(L_p\) normalization of inputs over specified dimension.

    For a tensor input of sizes \((n_0, ..., n_{dim}, ..., n_k)\), each \(n_{dim}\)-element vector \(v\) along dimension dim is transformed as

    \[v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}.\]

    With the default arguments it uses the Euclidean norm over vectors along dimension \(1\) for normalization.

    Parameters

    • input – input tensor of any shape

    • p (float) – the exponent value in the norm formulation. Default: 2

    • dim (int) – the dimension to reduce. Default: 1

    • eps (float) – small value to avoid division by zero. Default: 1e-12

    • out (Tensor, optional) – the output tensor. If out is used, this operation won’t be differentiable.
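    A tiny numeric sketch (a 3-4-5 triangle, so the result is exact):

    >>> v = torch.tensor([[3.0, 4.0]])
    >>> F.normalize(v, p=2, dim=1)
    tensor([[0.6000, 0.8000]])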

    Linear functions

    linear

    torch.nn.functional.linear(input, weight, bias=None)[source]

    Applies a linear transformation to the incoming data: \(y = xA^T + b\).

    Shape:

    • Input: \((N, *, in\_features)\) where * means any number of additional dimensions

    • Weight: \((out\_features, in\_features)\)

    • Bias: \((out\_features)\)

    • Output: \((N, *, out\_features)\)
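    A minimal shape sketch matching the table above (sizes arbitrary):

    >>> x = torch.randn(2, 3)      # (N, in_features)
    >>> w = torch.randn(4, 3)      # (out_features, in_features)
    >>> b = torch.randn(4)
    >>> F.linear(x, w, b).shape    # y = x @ w.t() + b
    torch.Size([2, 4])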

    bilinear

    torch.nn.functional.bilinear(input1, input2, weight, bias=None)[source]

    Applies a bilinear transformation to the incoming data: \(y = x_1^T A x_2 + b\). See Bilinear for details.

    Dropout functions

    dropout

    torch.nn.functional.dropout(input, p=0.5, training=True, inplace=False)[source]

    During training, randomly zeroes some of the elements of the input tensor with probability p using samples from a Bernoulli distribution.

    See Dropout for details.

    Parameters

    • p – probability of an element to be zeroed. Default: 0.5

    • training – apply dropout if True. Default: True

    • inplace – If set to True, will do this operation in-place. Default: False
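    A short sketch of the scaling behaviour: surviving elements are scaled by \(\frac{1}{1-p}\) so the expected value is preserved. Which positions are zeroed is random, so the printed output below is illustrative only:

    >>> x = torch.ones(5)
    >>> F.dropout(x, p=0.5, training=True)
    tensor([2., 0., 2., 2., 0.])  # illustrative; zeroed positions vary per call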

    alpha_dropout

    torch.nn.functional.alpha_dropout(input, p=0.5, training=False, inplace=False)[source]

    Applies alpha dropout to the input.

    See AlphaDropout for details.

    dropout2d

    torch.nn.functional.dropout2d(input, p=0.5, training=True, inplace=False)[source]

    Randomly zero out entire channels of the input tensor (a channel is a 2D feature map, e.g., the \(j\)-th channel of the \(i\)-th sample in the batched input is a 2D tensor \(\text{input}[i, j]\)). Each channel will be zeroed out independently on every forward call with probability p using samples from a Bernoulli distribution.

    See Dropout2d for details.

    Parameters

    • p – probability of a channel to be zeroed. Default: 0.5

    • training – apply dropout if True. Default: True

    • inplace – If set to True, will do this operation in-place. Default: False

    dropout3d

    torch.nn.functional.dropout3d(input, p=0.5, training=True, inplace=False)[source]

    Randomly zero out entire channels of the input tensor (a channel is a 3D feature map, e.g., the \(j\)-th channel of the \(i\)-th sample in the batched input is a 3D tensor \(\text{input}[i, j]\)). Each channel will be zeroed out independently on every forward call with probability p using samples from a Bernoulli distribution.

    See Dropout3d for details.

    Parameters

    • p – probability of a channel to be zeroed. Default: 0.5

    • training – apply dropout if True. Default: True

    • inplace – If set to True, will do this operation in-place. Default: False

    Sparse functions

    embedding

    torch.nn.functional.embedding(input, weight, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False)[source]

    A simple lookup table that looks up embeddings in a fixed dictionary and size.

    This module is often used to retrieve word embeddings using indices. The input to the module is a list of indices, and the embedding matrix, and the output is the corresponding word embeddings.

    See torch.nn.Embedding for more details.

    Parameters

    • input (LongTensor) – Tensor containing indices into the embedding matrix

    • weight (Tensor) – The embedding matrix with number of rows equal to the maximum possible index + 1, and number of columns equal to the embedding size

    • padding_idx (int, optional) – If given, pads the output with the embedding vector at padding_idx (initialized to zeros) whenever it encounters the index.

    • max_norm (float, optional) – If given, each embedding vector with norm larger than max_norm is renormalized to have norm max_norm. Note: this will modify weight in-place.

    • norm_type (float, optional) – The p of the p-norm to compute for the max_norm option. Default 2.

    • scale_grad_by_freq (boolean, optional) – If given, this will scale gradients by the inverse of frequency of the words in the mini-batch. Default False.

    • sparse (bool, optional) – If True, gradient w.r.t. weight will be a sparse tensor. See Notes under torch.nn.Embedding for more details regarding sparse gradients.

    Shape:

    • Input: LongTensor of arbitrary shape containing the indices to extract

    • Weight: Embedding matrix of floating point type with shape (V, embedding_dim), where V = maximum index + 1 and embedding_dim = the embedding size

    • Output: (*, embedding_dim), where * is the input shape

    Examples:

    >>> # a batch of 2 samples of 4 indices each
    >>> input = torch.tensor([[1,2,4,5],[4,3,2,9]])
    >>> # an embedding matrix containing 10 tensors of size 3
    >>> embedding_matrix = torch.rand(10, 3)
    >>> F.embedding(input, embedding_matrix)
    tensor([[[ 0.8490,  0.9625,  0.6753],
             [ 0.9666,  0.7761,  0.6108],
             [ 0.6246,  0.9751,  0.3618],
             [ 0.4161,  0.2419,  0.7383]],

            [[ 0.6246,  0.9751,  0.3618],
             [ 0.0237,  0.7794,  0.0528],
             [ 0.9666,  0.7761,  0.6108],
             [ 0.3385,  0.8612,  0.1867]]])

    >>> # example with padding_idx
    >>> weights = torch.rand(10, 3)
    >>> weights[0, :].zero_()
    >>> embedding_matrix = weights
    >>> input = torch.tensor([[0,2,0,5]])
    >>> F.embedding(input, embedding_matrix, padding_idx=0)
    tensor([[[ 0.0000,  0.0000,  0.0000],
             [ 0.5609,  0.5384,  0.8720],
             [ 0.0000,  0.0000,  0.0000],
             [ 0.6262,  0.2438,  0.7471]]])

    embedding_bag

    torch.nn.functional.embedding_bag(input, weight, offsets=None, max_norm=None, norm_type=2, scale_grad_by_freq=False, mode='mean', sparse=False, per_sample_weights=None)[source]

    Computes sums, means or maxes of bags of embeddings, without instantiating the intermediate embeddings.

    See torch.nn.EmbeddingBag for more details.

    Note

    When using the CUDA backend, this operation may induce nondeterministic behaviour in its backward pass that is not easily switched off. Please see the notes on Reproducibility for background.

    Parameters

    • input (LongTensor) – Tensor containing bags of indices into the embedding matrix

    • weight (Tensor) – The embedding matrix with number of rows equal to the maximum possible index + 1, and number of columns equal to the embedding size

    • offsets (LongTensor, optional) – Only used when input is 1D. offsets determines the starting index position of each bag (sequence) in input.

    • max_norm (float, optional) – If given, each embedding vector with norm larger than max_norm is renormalized to have norm max_norm. Note: this will modify weight in-place.

    • norm_type (float, optional) – The p in the p-norm to compute for the max_norm option. Default 2.

    • scale_grad_by_freq (boolean, optional) – if given, this will scale gradients by the inverse of frequency of the words in the mini-batch. Default False. Note: this option is not supported when mode="max".

    • mode (string, optional) – "sum", "mean" or "max". Specifies the way to reduce the bag. Default: "mean"

    • sparse (bool, optional) – if True, gradient w.r.t. weight will be a sparse tensor. See Notes under torch.nn.Embedding for more details regarding sparse gradients. Note: this option is not supported when mode="max".

    • per_sample_weights (Tensor, optional) – a tensor of float / double weights, or None to indicate all weights should be taken to be 1. If specified, per_sample_weights must have exactly the same shape as input and is treated as having the same offsets, if those are not None.

    Shape:

    • input (LongTensor) and offsets (LongTensor, optional)

      • If input is 2D of shape (B, N), it will be treated as B bags (sequences) each of fixed length N, and this will return B values aggregated in a way depending on the mode. offsets is ignored and required to be None in this case.

      • If input is 1D of shape (N), it will be treated as a concatenation of multiple bags (sequences). offsets is required to be a 1D tensor containing the starting index positions of each bag in input. Therefore, for offsets of shape (B), input will be viewed as having B bags. Empty bags (i.e., having 0-length) will have returned vectors filled by zeros.

    • weight (Tensor): the learnable weights of the module of shape (num_embeddings, embedding_dim)

    • per_sample_weights (Tensor, optional). Has the same shape as input.

    • output: aggregated embedding values of shape (B, embedding_dim)

    Examples:

    >>> # an embedding matrix containing 10 tensors of size 3
    >>> embedding_matrix = torch.rand(10, 3)
    >>> # a batch of 2 samples of 4 indices each
    >>> input = torch.tensor([1,2,4,5,4,3,2,9])
    >>> offsets = torch.tensor([0,4])
    >>> F.embedding_bag(input, embedding_matrix, offsets)
    tensor([[ 0.3397,  0.3552,  0.5545],
            [ 0.5893,  0.4386,  0.5882]])

    one_hot

    torch.nn.functional.one_hot(tensor, num_classes=-1) → LongTensor

    Takes a LongTensor with index values of shape (*) and returns a tensor of shape (*, num_classes) that has zeros everywhere except where the index of the last dimension matches the corresponding value of the input tensor, in which case it will be 1.

    See also One-hot on Wikipedia.

    Parameters

    • tensor (LongTensor) – class values of any shape.

    • num_classes (int) – Total number of classes. If set to -1, the number of classes will be inferred as one greater than the largest class value in the input tensor.

    Returns

    LongTensor that has one more dimension with 1 values at the index of last dimension indicated by the input, and 0 everywhere else.

    Examples

    >>> F.one_hot(torch.arange(0, 5) % 3)
    tensor([[1, 0, 0],
            [0, 1, 0],
            [0, 0, 1],
            [1, 0, 0],
            [0, 1, 0]])
    >>> F.one_hot(torch.arange(0, 5) % 3, num_classes=5)
    tensor([[1, 0, 0, 0, 0],
            [0, 1, 0, 0, 0],
            [0, 0, 1, 0, 0],
            [1, 0, 0, 0, 0],
            [0, 1, 0, 0, 0]])
    >>> F.one_hot(torch.arange(0, 6).view(3,2) % 3)
    tensor([[[1, 0, 0],
             [0, 1, 0]],
            [[0, 0, 1],
             [1, 0, 0]],
            [[0, 1, 0],
             [0, 0, 1]]])

    Distance functions

    pairwise_distance

    torch.nn.functional.pairwise_distance(x1, x2, p=2.0, eps=1e-06, keepdim=False)[source]

    See torch.nn.PairwiseDistance for details.

    cosine_similarity

    torch.nn.functional.cosine_similarity(x1, x2, dim=1, eps=1e-8) → Tensor

    Returns cosine similarity between x1 and x2, computed along dim.

    \[\text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2 \cdot \Vert x_2 \Vert _2, \epsilon)}\]

    Parameters

    • x1 (Tensor) – First input.

    • x2 (Tensor) – Second input (of size matching x1).

    • dim (int, optional) – Dimension of vectors. Default: 1

    • eps (float, optional) – Small value to avoid division by zero. Default: 1e-8

    Shape:

    • Input: \((\ast_1, D, \ast_2)\) where D is at position dim.

    • Output: \((\ast_1, \ast_2)\) where 1 is at position dim.

    Example:

    >>> input1 = torch.randn(100, 128)
    >>> input2 = torch.randn(100, 128)
    >>> output = F.cosine_similarity(input1, input2)
    >>> print(output)

    pdist

    torch.nn.functional.pdist(input, p=2) → Tensor

    Computes the p-norm distance between every pair of row vectors in the input. This is identical to the upper triangular portion, excluding the diagonal, of torch.norm(input[:, None] - input, dim=2, p=p). This function will be faster if the rows are contiguous.

    If input has shape \(N \times M\) then the output will have shape \(\frac{1}{2} N (N - 1)\).

    This function is equivalent to scipy.spatial.distance.pdist(input, 'minkowski', p=p) if \(p \in (0, \infty)\). When \(p = 0\) it is equivalent to scipy.spatial.distance.pdist(input, 'hamming') * M. When \(p = \infty\), the closest scipy function is scipy.spatial.distance.pdist(xn, lambda x, y: np.abs(x - y).max()).

    Parameters

    • input – input tensor of shape \(N \times M\).

    • p – p value for the p-norm distance to calculate between each vector pair \(\in [0, \infty]\).
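    A tiny numeric sketch (collinear 3-4-5 points, so the distances are exact): the output lists the upper-triangular pairs (0,1), (0,2), (1,2) in row-major order:

    >>> x = torch.tensor([[0.0, 0.0], [3.0, 4.0], [6.0, 8.0]])
    >>> F.pdist(x)
    tensor([ 5., 10.,  5.])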

    Loss functions

    binary_cross_entropy

    torch.nn.functional.binary_cross_entropy(input, target, weight=None, size_average=None, reduce=None, reduction='mean')[source]

    Function that measures the Binary Cross Entropy between the target and the output.

    See BCELoss for details.

    Parameters

    • input – Tensor of arbitrary shape

    • target – Tensor of the same shape as input

    • weight (Tensor, optional) – a manual rescaling weight; if provided it’s repeated to match the input tensor shape

    • size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True

    • reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True

    • reduction (string, optional) – Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'

    Examples:

    >>> input = torch.randn((3, 2), requires_grad=True)
    >>> target = torch.rand((3, 2), requires_grad=False)
    >>> loss = F.binary_cross_entropy(F.sigmoid(input), target)
    >>> loss.backward()

    binary_cross_entropy_with_logits

    torch.nn.functional.binary_cross_entropy_with_logits(input, target, weight=None, size_average=None, reduce=None, reduction='mean', pos_weight=None)[source]

    Function that measures Binary Cross Entropy between target and output logits.

    See BCEWithLogitsLoss for details.

    Parameters

    • input – Tensor of arbitrary shape

    • target – Tensor of the same shape as input

    • weight (Tensor, optional) – a manual rescaling weight; if provided it’s repeated to match the input tensor shape

    • size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True

    • reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True

    • reduction (string, optional) – Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'

    • pos_weight (Tensor, optional) – a weight of positive examples. Must be a vector with length equal to the number of classes.

    Examples:

    >>> input = torch.randn(3, requires_grad=True)
    >>> target = torch.empty(3).random_(2)
    >>> loss = F.binary_cross_entropy_with_logits(input, target)
    >>> loss.backward()

    poisson_nll_loss

    torch.nn.functional.poisson_nll_loss(input, target, log_input=True, full=False, size_average=None, eps=1e-08, reduce=None, reduction='mean')[source]

    Poisson negative log likelihood loss.

    See PoissonNLLLoss for details.

    Parameters

    • input – expectation of underlying Poisson distribution.

    • target – random sample \(target \sim \text{Poisson}(input)\).

    • log_input – if True the loss is computed as \(\exp(\text{input}) - \text{target} * \text{input}\), if False then loss is \(\text{input} - \text{target} * \log(\text{input}+\text{eps})\). Default: True

    • full – whether to compute the full loss, i.e., to add the Stirling approximation term \(\text{target} * \log(\text{target}) - \text{target} + 0.5 * \log(2 * \pi * \text{target})\). Default: False

    • size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True

    • eps (float, optional) – Small value to avoid evaluation of \(\log(0)\) when log_input=False. Default: 1e-8

    • reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True

    • reduction (string, optional) – Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'

    cosine_embedding_loss

    torch.nn.functional.cosine_embedding_loss(input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean') → Tensor[source]

    See CosineEmbeddingLoss for details.

    cross_entropy

    torch.nn.functional.cross_entropy(input, target, weight=None, size_average=None, ignore_index=-100, reduce=None, reduction='mean')[source]

    This criterion combines log_softmax and nll_loss in a single function.

    See CrossEntropyLoss for details.

    Parameters

    • input (Tensor) – \((N, C)\) where C = number of classes or \((N, C, H, W)\) in case of 2D Loss, or \((N, C, d_1, d_2, ..., d_K)\) where \(K \geq 1\) in the case of K-dimensional loss.

    • target (Tensor) – \((N)\) where each value is \(0 \leq \text{targets}[i] \leq C-1\), or \((N, d_1, d_2, ..., d_K)\) where \(K \geq 1\) for K-dimensional loss.

    • weight (Tensor, optional) – a manual rescaling weight given to each class. If given, has to be a Tensor of size C

    • size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True

    • ignore_index (int, optional) – Specifies a target value that is ignored and does not contribute to the input gradient. When size_average is True, the loss is averaged over non-ignored targets. Default: -100

    • reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True

    • reduction (string, optional) – Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'

    Examples:

    >>> input = torch.randn(3, 5, requires_grad=True)
    >>> target = torch.randint(5, (3,), dtype=torch.int64)
    >>> loss = F.cross_entropy(input, target)
    >>> loss.backward()

    ctc_loss

    torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, blank=0, reduction='mean', zero_infinity=False)[source]

    The Connectionist Temporal Classification loss.

    See CTCLoss for details.

    Note

    In some circumstances when using the CUDA backend with CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is undesirable, you can try to make the operation deterministic (potentially at a performance cost) by setting torch.backends.cudnn.deterministic = True. Please see the notes on Reproducibility for background.

    Note

    When using the CUDA backend, this operation may induce nondeterministic behaviour in its backward pass that is not easily switched off. Please see the notes on Reproducibility for background.

    Parameters

    • log_probs – \((T, N, C)\) where C = number of characters in alphabet including blank, T = input length, and N = batch size. The logarithmized probabilities of the outputs (e.g. obtained with torch.nn.functional.log_softmax()).

    • targets – \((N, S)\) or (sum(target_lengths)). Targets cannot be blank. In the second form, the targets are assumed to be concatenated.

    • input_lengths – \((N)\). Lengths of the inputs (must each be \(\leq T\))

    • target_lengths – \((N)\). Lengths of the targets

    • blank (int, optional) – Blank label. Default \(0\).

    • reduction (string, optional) – Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the output losses will be divided by the target lengths and then the mean over the batch is taken, 'sum': the output will be summed. Default: 'mean'

    • zero_infinity (bool, optional) – Whether to zero infinite losses and the associated gradients. Default: False. Infinite losses mainly occur when the inputs are too short to be aligned to the targets.

    Example:

    >>> log_probs = torch.randn(50, 16, 20).log_softmax(2).detach().requires_grad_()
    >>> targets = torch.randint(1, 20, (16, 30), dtype=torch.long)
    >>> input_lengths = torch.full((16,), 50, dtype=torch.long)
    >>> target_lengths = torch.randint(10,30,(16,), dtype=torch.long)
    >>> loss = F.ctc_loss(log_probs, targets, input_lengths, target_lengths)
    >>> loss.backward()

    hinge_embedding_loss

    torch.nn.functional.hinge_embedding_loss(input, target, margin=1.0, size_average=None, reduce=None, reduction='mean') → Tensor[source]

    See HingeEmbeddingLoss for details.

    kl_div

    torch.nn.functional.kl_div(input, target, size_average=None, reduce=None, reduction='mean')[source]

    The Kullback-Leibler divergence loss.

    See KLDivLoss for details.

    Parameters

    • input – Tensor of arbitrary shape

    • target – Tensor of the same shape as input

    • size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True

    • reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True

    • reduction (string, optional) – Specifies the reduction to apply to the output: 'none' | 'batchmean' | 'sum' | 'mean'. 'none': no reduction will be applied. 'batchmean': the sum of the output will be divided by the batch size. 'sum': the output will be summed. 'mean': the output will be divided by the number of elements in the output. Default: 'mean'

    Note

    size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction.

    Note

    reduction = 'mean' doesn’t return the true KL divergence value; please use reduction = 'batchmean', which aligns with the KL math definition. In the next major release, 'mean' will be changed to behave the same as 'batchmean'.
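    A minimal usage sketch following the note above (shapes arbitrary): input holds log-probabilities and target holds probabilities:

    >>> p_log = F.log_softmax(torch.randn(3, 5), dim=1)
    >>> q = F.softmax(torch.randn(3, 5), dim=1)
    >>> loss = F.kl_div(p_log, q, reduction='batchmean')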

    l1_loss

    torch.nn.functional.l1_loss(input, target, size_average=None, reduce=None, reduction='mean') → Tensor[source]

    Function that takes the mean element-wise absolute value difference.

    See L1Loss for details.

    mse_loss

    torch.nn.functional.mse_loss(input, target, size_average=None, reduce=None, reduction='mean') → Tensor[source]

    Measures the element-wise mean squared error.

    See MSELoss for details.
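    A tiny numeric sketch (values chosen so the mean is easy to verify by hand):

    >>> input = torch.tensor([1.0, 2.0, 3.0])
    >>> target = torch.tensor([1.0, 0.0, 3.0])
    >>> F.mse_loss(input, target)  # (0 + 4 + 0) / 3
    tensor(1.3333)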

    margin_ranking_loss

    torch.nn.functional.margin_ranking_loss(input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean') → Tensor[source]

    See MarginRankingLoss for details.

    multilabel_margin_loss

    torch.nn.functional.multilabel_margin_loss(input, target, size_average=None, reduce=None, reduction='mean') → Tensor[source]

    See MultiLabelMarginLoss for details.

    multilabel_soft_margin_loss

    torch.nn.functional.multilabel_soft_margin_loss(input, target, weight=None, size_average=None) → Tensor[source]

    See MultiLabelSoftMarginLoss for details.

    multi_margin_loss

    torch.nn.functional.multi_margin_loss(input, target, p=1, margin=1.0, weight=None, size_average=None, reduce=None, reduction='mean') → Tensor[source]

    See MultiMarginLoss for details.

    nll_loss

    torch.nn.functional.nll_loss(input, target, weight=None, size_average=None, ignore_index=-100, reduce=None, reduction='mean')[source]

    The negative log likelihood loss.

    See NLLLoss for details.

    Parameters

    • input – \((N, C)\) where C = number of classes or \((N, C, H, W)\) in case of 2D Loss, or \((N, C, d_1, d_2, ..., d_K)\) where \(K \geq 1\) in the case of K-dimensional loss.

    • target – \((N)\) where each value is \(0 \leq \text{targets}[i] \leq C-1\), or \((N, d_1, d_2, ..., d_K)\) where \(K \geq 1\) for K-dimensional loss.

    • weight (Tensor, optional) – a manual rescaling weight given to each class. If given, has to be a Tensor of size C

    • size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True

    • ignore_index (int, optional) – Specifies a target value that is ignored and does not contribute to the input gradient. When size_average is True, the loss is averaged over non-ignored targets. Default: -100

    • reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True

    • reduction (string, optional) – Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'

    Example:

    >>> # input is of size N x C = 3 x 5
    >>> input = torch.randn(3, 5, requires_grad=True)
    >>> # each element in target has to have 0 <= value < C
    >>> target = torch.tensor([1, 0, 4])
    >>> output = F.nll_loss(F.log_softmax(input, dim=1), target)
    >>> output.backward()

    smooth_l1_loss

    torch.nn.functional.smooth_l1_loss(input, target, size_average=None, reduce=None, reduction='mean')[source]

    Function that uses a squared term if the absolute element-wise error falls below 1 and an L1 term otherwise.

    See SmoothL1Loss for details.

    soft_margin_loss

    torch.nn.functional.soft_margin_loss(input, target, size_average=None, reduce=None, reduction='mean') → Tensor[source]

    See SoftMarginLoss for details.

    triplet_margin_loss

    torch.nn.functional.triplet_margin_loss(anchor, positive, negative, margin=1.0, p=2, eps=1e-06, swap=False, size_average=None, reduce=None, reduction='mean')[source]

    See TripletMarginLoss for details.

    Vision functions

    pixel_shuffle

    torch.nn.functional.pixel_shuffle(input, upscale_factor) → Tensor

    Rearranges elements in a tensor of shape \((*, C \times r^2, H, W)\) to a tensor of shape \((*, C, H \times r, W \times r)\).

    See PixelShuffle for details.

    Parameters

    • input (Tensor) – the input tensor

    • upscale_factor (int) – factor to increase spatial resolution by

    Examples:

    >>> input = torch.randn(1, 9, 4, 4)
    >>> output = torch.nn.functional.pixel_shuffle(input, 3)
    >>> print(output.size())
    torch.Size([1, 1, 12, 12])

    pad

    +
    +
    +torch.nn.functional.pad(input, pad, mode='constant', value=0)[source]
    +

    Pads tensor.

    +
    +
    Padding size:

    The padding size by which to pad some dimensions of input +are described starting from the last dimension and moving forward. +\(\left\lfloor\frac{\text{len(pad)}}{2}\right\rfloor\) dimensions +of input will be padded. +For example, to pad only the last dimension of the input tensor, then +pad has the form +\((\text{padding\_left}, \text{padding\_right})\); +to pad the last 2 dimensions of the input tensor, then use +\((\text{padding\_left}, \text{padding\_right},\) +\(\text{padding\_top}, \text{padding\_bottom})\); +to pad the last 3 dimensions, use +\((\text{padding\_left}, \text{padding\_right},\) +\(\text{padding\_top}, \text{padding\_bottom}\) +\(\text{padding\_front}, \text{padding\_back})\).

    +
    +
    Padding mode:

    See torch.nn.ConstantPad2d, torch.nn.ReflectionPad2d, and +torch.nn.ReplicationPad2d for concrete examples on how each of the +padding modes works. Constant padding is implemented for arbitrary dimensions. +Replicate padding is implemented for padding the last 3 dimensions of 5D input +tensor, or the last 2 dimensions of 4D input tensor, or the last dimension of +3D input tensor. Reflect padding is only implemented for padding the last 2 +dimensions of 4D input tensor, or the last dimension of 3D input tensor.

    +
    +
    +
    +

    Note

    +

    When using the CUDA backend, this operation may induce nondeterministic behaviour in its backward pass that is not easily switched off. Please see the notes on Reproducibility for background.

    +
    +
    +
    Parameters
    +
      +
    • input (Tensor) – N-dimensional tensor

    • +
    • pad (tuple) – m-elements tuple, where +\(\frac{m}{2} \leq\) input dimensions and \(m\) is even.

    • +
    • mode – 'constant', 'reflect', 'replicate' or 'circular'. Default: 'constant'

    • +
    • value – fill value for 'constant' padding. Default: 0

    • +
    +
    +
    +

    Examples:

    +
    >>> t4d = torch.empty(3, 3, 4, 2)
    +>>> p1d = (1, 1) # pad last dim by 1 on each side
    +>>> out = F.pad(t4d, p1d, "constant", 0)  # effectively zero padding
    +>>> print(out.data.size())
    +torch.Size([3, 3, 4, 4])
    +>>> p2d = (1, 1, 2, 2) # pad last dim by (1, 1) and 2nd to last by (2, 2)
    +>>> out = F.pad(t4d, p2d, "constant", 0)
    +>>> print(out.data.size())
    +torch.Size([3, 3, 8, 4])
    +>>> t4d = torch.empty(3, 3, 4, 2)
    +>>> p3d = (0, 1, 2, 1, 3, 3) # pad by (0, 1), (2, 1), and (3, 3)
    +>>> out = F.pad(t4d, p3d, "constant", 0)
    +>>> print(out.data.size())
    +torch.Size([3, 9, 7, 3])
    +
    +
    +
    + +
    +
    +

    interpolate

    +
    +
    +torch.nn.functional.interpolate(input, size=None, scale_factor=None, mode='nearest', align_corners=None)[source]
    +

    Down/up samples the input to either the given size or the given +scale_factor

    +

    The algorithm used for interpolation is determined by mode.

    +

    Currently temporal, spatial and volumetric sampling are supported, i.e. +expected inputs are 3-D, 4-D or 5-D in shape.

    +

    The input dimensions are interpreted in the form: +mini-batch x channels x [optional depth] x [optional height] x width.

    +

    The modes available for resizing are: nearest, linear (3D-only), +bilinear, bicubic (4D-only), trilinear (5D-only), area

    +
    +
    Parameters
    +
      +
    • input (Tensor) – the input tensor

    • +
    • size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]) – output spatial size.

    • +
    • scale_factor (float or Tuple[float]) – multiplier for spatial size. Has to match input size if it is a tuple.

    • +
    • mode (str) – algorithm used for upsampling: +'nearest' | 'linear' | 'bilinear' | 'bicubic' | +'trilinear' | 'area'. Default: 'nearest'

    • +
    • align_corners (bool, optional) – Geometrically, we consider the pixels of the +input and output as squares rather than points. +If set to False, the input and output tensors are aligned by the +center points of their corner pixels. If set to True, the input and +output tensors are aligned by the corner points of their corner +pixels, and the interpolation uses edge value padding for out-of-boundary values. +This only has effect when mode is 'linear', +'bilinear', 'bicubic', or 'trilinear'. +Default: False

    • +
    +
    +
    +
    +

    Note

    +

    With mode='bicubic', it’s possible to cause overshoot, in other words it can produce +negative values or values greater than 255 for images. +Explicitly call result.clamp(min=0, max=255) if you want to reduce the overshoot +when displaying the image.

    +
    +
    +

    Warning

    +

    With align_corners = True, the linearly interpolating modes +(linear, bilinear, and trilinear) don’t proportionally align the +output and input pixels, and thus the output values can depend on the +input size. This was the default behavior for these modes up to version +0.3.1. Since then, the default behavior is align_corners = False. +See Upsample for concrete examples on how this +affects the outputs.

    +
    +
    +

    Note

    +

    When using the CUDA backend, this operation may induce nondeterministic behaviour in its backward pass that is not easily switched off. Please see the notes on Reproducibility for background.
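    A short usage sketch (shapes shown follow from the arguments; exactly one of size or scale_factor is given per call):

    >>> input = torch.randn(1, 3, 8, 8)
    >>> F.interpolate(input, scale_factor=2, mode='nearest').shape
    torch.Size([1, 3, 16, 16])
    >>> F.interpolate(input, size=(5, 7), mode='bilinear', align_corners=False).shape
    torch.Size([1, 3, 5, 7])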

    +
    +
    + +
    +
    +

    upsample

    +
    +
    +torch.nn.functional.upsample(input, size=None, scale_factor=None, mode='nearest', align_corners=None)[source]
    +

    Upsamples the input to either the given size or the given +scale_factor

    +
    +

    Warning

    +

    This function is deprecated in favor of torch.nn.functional.interpolate(). This is equivalent to nn.functional.interpolate(...).

    +
    +
    +

    Note

    +

    When using the CUDA backend, this operation may induce nondeterministic behaviour in its backward pass that is not easily switched off. Please see the notes on Reproducibility for background.

    +
    +

    The algorithm used for upsampling is determined by mode.

    +

    Currently temporal, spatial and volumetric upsampling are supported, i.e. +expected inputs are 3-D, 4-D or 5-D in shape.

    +

    The input dimensions are interpreted in the form: +mini-batch x channels x [optional depth] x [optional height] x width.

    +

    The modes available for upsampling are: nearest, linear (3D-only), +bilinear, bicubic (4D-only), trilinear (5D-only)

    +
    +
    Parameters
    +
      +
    • input (Tensor) – the input tensor

    • +
    • size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]) – output spatial size.

    • +
    • scale_factor (float or Tuple[float]) – multiplier for spatial size. Has to be an integer.

    • +
    • mode (string) – algorithm used for upsampling: +'nearest' | 'linear' | 'bilinear' | 'bicubic' | +'trilinear'. Default: 'nearest'

    • +
    • align_corners (bool, optional) – Geometrically, we consider the pixels of the +input and output as squares rather than points. +If set to False, the input and output tensors are aligned by the +center points of their corner pixels. If set to True, the input and +output tensors are aligned by the corner points of their corner +pixels, and the interpolation uses edge value padding for out-of-boundary values. +This only has effect when mode is 'linear', +'bilinear', 'bicubic' or 'trilinear'. +Default: False

    • +
    +
    +
    +
    +

    Note

    +

    With mode='bicubic', it’s possible to cause overshoot, in other words it can produce +negative values or values greater than 255 for images. +Explicitly call result.clamp(min=0, max=255) if you want to reduce the overshoot +when displaying the image.

    +
    +
    +

    Warning

    +

    With align_corners = True, the linearly interpolating modes +(linear, bilinear, and trilinear) don’t proportionally align the +output and input pixels, and thus the output values can depend on the +input size. This was the default behavior for these modes up to version +0.3.1. Since then, the default behavior is align_corners = False. +See Upsample for concrete examples on how this +affects the outputs.

    +
    +
    + +
    +
    +

    upsample_nearest

    +
    +
    +torch.nn.functional.upsample_nearest(input, size=None, scale_factor=None)[source]
    +

    Upsamples the input, using nearest neighbours’ pixel values.

    +
    +

    Warning

    +

    This function is deprecated in favor of torch.nn.functional.interpolate(). This is equivalent to nn.functional.interpolate(..., mode='nearest').

    +
    +

    Currently spatial and volumetric upsampling are supported (i.e. expected +inputs are 4 or 5 dimensional).

    +
    +
    Parameters
    +
      +
    • input (Tensor) – input

    • +
    • size (int or Tuple[int, int] or Tuple[int, int, int]) – output spatial size.

    • +
    • scale_factor (int) – multiplier for spatial size. Has to be an integer.

    • +
    +
    +
    +
    +

    Note

    +

    When using the CUDA backend, this operation may induce nondeterministic behaviour in its backward pass that is not easily switched off. Please see the notes on Reproducibility for background.

    +
    +
    + +
    +
    +

    upsample_bilinear

    +
    +
    +torch.nn.functional.upsample_bilinear(input, size=None, scale_factor=None)[source]
    +

    Upsamples the input, using bilinear upsampling.

    +
    +

    Warning

    +

    This function is deprecated in favor of torch.nn.functional.interpolate(). This is equivalent to nn.functional.interpolate(..., mode='bilinear', align_corners=True).

    +
    +

    Expected inputs are spatial (4 dimensional). Use upsample_trilinear for volumetric (5 dimensional) inputs.

    +
    +
    Parameters
    +
      +
    • input (Tensor) – input

    • +
    • size (int or Tuple[int, int]) – output spatial size.

    • +
    • scale_factor (int or Tuple[int, int]) – multiplier for spatial size

    • +
    +
    +
    +
    +

    Note

    +

    When using the CUDA backend, this operation may induce nondeterministic behaviour in its backward pass that is not easily switched off. Please see the notes on Reproducibility for background.

    +
    +
    + +
    +
    +

    grid_sample

    +
    +
    +torch.nn.functional.grid_sample(input, grid, mode='bilinear', padding_mode='zeros')[source]
    +

    Given an input and a flow-field grid, computes the +output using input values and pixel locations from grid.

    +

    Currently, only spatial (4-D) and volumetric (5-D) input are +supported.

    +

    In the spatial (4-D) case, for input with shape +\((N, C, H_\text{in}, W_\text{in})\) and grid with shape +\((N, H_\text{out}, W_\text{out}, 2)\), the output will have shape +\((N, C, H_\text{out}, W_\text{out})\).

    +

    For each output location output[n, :, h, w], the size-2 vector grid[n, h, w] specifies input pixel locations x and y, which are used to interpolate the output value output[n, :, h, w]. In the case of 5D inputs, grid[n, d, h, w] specifies the x, y, z pixel locations for interpolating output[n, :, d, h, w]. The mode argument specifies the nearest or bilinear interpolation method used to sample the input pixels.

    +

    grid specifies the sampling pixel locations normalized by the +input spatial dimensions. Therefore, it should have most values in +the range of [-1, 1]. For example, values x = -1, y = -1 is the +left-top pixel of input, and values x = 1, y = 1 is the +right-bottom pixel of input.

    +

    If grid has values outside the range of [-1, 1], the corresponding +outputs are handled as defined by padding_mode. Options are

    +
    +
      +
    • padding_mode="zeros": use 0 for out-of-bound grid locations,

    • +
    • padding_mode="border": use border values for out-of-bound grid locations,

    • +
    • padding_mode="reflection": use values at locations reflected by +the border for out-of-bound grid locations. For location far away +from the border, it will keep being reflected until becoming in bound, +e.g., (normalized) pixel location x = -3.5 reflects by border -1 +and becomes x' = 1.5, then reflects by border 1 and becomes +x'' = -0.5.

    • +
    +
    +
    +

    Note

    +

    This function is often used in building Spatial Transformer Networks.

    +
    +
    +

    Note

    +

    When using the CUDA backend, this operation may induce nondeterministic behaviour in its backward pass that is not easily switched off. Please see the notes on Reproducibility for background.

    +
    +
    +
    Parameters
    +
      +
    • input (Tensor) – input of shape \((N, C, H_\text{in}, W_\text{in})\) (4-D case) +or \((N, C, D_\text{in}, H_\text{in}, W_\text{in})\) (5-D case)

    • +
    • grid (Tensor) – flow-field of shape \((N, H_\text{out}, W_\text{out}, 2)\) (4-D case) +or \((N, D_\text{out}, H_\text{out}, W_\text{out}, 3)\) (5-D case)

    • +
    • mode (str) – interpolation mode to calculate output values +'bilinear' | 'nearest'. Default: 'bilinear'

    • +
    • padding_mode (str) – padding mode for outside grid values +'zeros' | 'border' | 'reflection'. Default: 'zeros'

    • +
    +
    +
    Returns
    +

    output Tensor

    +
    +
    Return type
    +

    output (Tensor)
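    As a sanity-check sketch (assuming import torch.nn.functional as F), sampling with the grid of an identity affine transform, generated by affine_grid() documented below, reproduces the input:

    >>> input = torch.arange(16, dtype=torch.float32).view(1, 1, 4, 4)
    >>> theta = torch.tensor([[[1.0, 0.0, 0.0],
    >>>                        [0.0, 1.0, 0.0]]])   # identity affine transform
    >>> grid = F.affine_grid(theta, torch.Size((1, 1, 4, 4)))
    >>> output = F.grid_sample(input, grid)
    >>> torch.allclose(output, input)
    True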

    +
    +
    +
    + +
    +
    +

    affine_grid

    +
    +
    +torch.nn.functional.affine_grid(theta, size)[source]
    +

    Generates a 2d flow field, given a batch of affine matrices theta. +Generally used in conjunction with grid_sample() to +implement Spatial Transformer Networks.

    +
    +
    Parameters
    +
      +
    • theta (Tensor) – input batch of affine matrices (\(N \times 2 \times 3\))

    • +
    • size (torch.Size) – the target output image size (\(N \times C \times H \times W\)). +Example: torch.Size((32, 3, 24, 24))

    • +
    +
    +
    Returns
    +

    output Tensor of size (\(N \times H \times W \times 2\))

    +
    +
    Return type
    +

    output (Tensor)
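    A minimal sketch showing the shape of the generated flow field for a single identity transform:

    >>> theta = torch.tensor([[[1.0, 0.0, 0.0],
    >>>                        [0.0, 1.0, 0.0]]])   # identity transform, N = 1
    >>> grid = F.affine_grid(theta, torch.Size((1, 3, 24, 24)))
    >>> grid.shape
    torch.Size([1, 24, 24, 2])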

    +
    +
    +
    + +
    +
    +
    +

    DataParallel functions (multi-GPU, distributed)

    +
    +

    data_parallel

    +
    +
    +torch.nn.parallel.data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None)[source]
    +

    Evaluates module(input) in parallel across the GPUs given in device_ids.

    +

    This is the functional version of the DataParallel module.

    +
    +
    Parameters
    +
      +
    • module (Module) – the module to evaluate in parallel

    • +
    • inputs (Tensor) – inputs to the module

    • +
    • device_ids (list of python:int or torch.device) – GPU ids on which to replicate module

    • +
    • output_device (list of python:int or torch.device) – GPU location of the output. Use -1 to indicate the CPU. (default: device_ids[0])

    • +
    +
    +
    Returns
    +

    a Tensor containing the result of module(input) located on +output_device
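    A hedged sketch (assuming import torch.nn as nn and at least two visible GPUs; module and inputs are illustrative placeholders): inputs are scattered along dim 0 and the result is gathered on device_ids[0]:

    >>> module = nn.Linear(10, 5).cuda()
    >>> inputs = torch.randn(8, 10).cuda()
    >>> output = torch.nn.parallel.data_parallel(module, inputs, device_ids=[0, 1])
    >>> output.size()
    torch.Size([8, 5])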

    +
    +
    +
    + +
    +
    +
    \ No newline at end of file
    diff --git a/docs/stable/nn.html b/docs/stable/nn.html
    new file mode 100644
    index 000000000000..1e2103305590
    --- /dev/null
    +++ b/docs/stable/nn.html
    @@ -0,0 +1,9793 @@

    torch.nn — PyTorch master documentation

    torch.nn

    +
    +

    Parameters

    +
    +
    +class torch.nn.Parameter[source]
    +

    A kind of Tensor that is to be considered a module parameter.

    +

    Parameters are Tensor subclasses that have a very special property when used with Modules: when they're assigned as Module attributes they are automatically added to the list of the module's parameters, and will appear e.g. in the parameters() iterator. Assigning a plain Tensor doesn't have such an effect. This is because one might want to cache some temporary state, like the last hidden state of the RNN, in the model. If there were no such class as Parameter, these temporaries would get registered too.

    +
    +
    Parameters
    +
    • data (Tensor) – parameter tensor.

    • requires_grad (bool, optional) – if the parameter requires gradient. Default: True
    +
    + +
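    A small sketch of this registration behaviour: the weight and bias of nn.Linear are created as Parameters, so they show up automatically:

    >>> import torch.nn as nn
    >>> lin = nn.Linear(3, 2)
    >>> [name for name, p in lin.named_parameters()]
    ['weight', 'bias']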
    +
    +

    Containers

    +
    +

    Module

    +
    +
    +class torch.nn.Module[source]
    +

    Base class for all neural network modules.

    +

    Your models should also subclass this class.

    +

    Modules can also contain other Modules, allowing to nest them in +a tree structure. You can assign the submodules as regular attributes:

    +
    import torch.nn as nn
    +import torch.nn.functional as F
    +
    +class Model(nn.Module):
    +    def __init__(self):
    +        super(Model, self).__init__()
    +        self.conv1 = nn.Conv2d(1, 20, 5)
    +        self.conv2 = nn.Conv2d(20, 20, 5)
    +
    +    def forward(self, x):
    +        x = F.relu(self.conv1(x))
    +        return F.relu(self.conv2(x))
    +
    +
    +

    Submodules assigned in this way will be registered, and will have their +parameters converted too when you call to(), etc.

    +
    +
    +add_module(name, module)[source]
    +

    Adds a child module to the current module.

    +

    The module can be accessed as an attribute using the given name.

    +
    +
    Parameters
    +
      +
    • name (string) – name of the child module. The child module can be +accessed from this module using the given name

    • +
    • module (Module) – child module to be added to the module.

    • +
    +
    +
    +
    + +
    +
    +apply(fn)[source]
    +

    Applies fn recursively to every submodule (as returned by .children()) +as well as self. Typical use includes initializing the parameters of a model +(see also torch-nn-init).

    +
    +
    Parameters
    +

    fn (Module -> None) – function to be applied to each submodule

    +
    +
    Returns
    +

    self

    +
    +
    Return type
    +

    Module

    +
    +
    +

    Example:

    +
    >>> def init_weights(m):
    +>>>     print(m)
    +>>>     if type(m) == nn.Linear:
    +>>>         m.weight.data.fill_(1.0)
    +>>>         print(m.weight)
    +>>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
    +>>> net.apply(init_weights)
    +Linear(in_features=2, out_features=2, bias=True)
    +Parameter containing:
    +tensor([[ 1.,  1.],
    +        [ 1.,  1.]])
    +Linear(in_features=2, out_features=2, bias=True)
    +Parameter containing:
    +tensor([[ 1.,  1.],
    +        [ 1.,  1.]])
    +Sequential(
    +  (0): Linear(in_features=2, out_features=2, bias=True)
    +  (1): Linear(in_features=2, out_features=2, bias=True)
    +)
    +Sequential(
    +  (0): Linear(in_features=2, out_features=2, bias=True)
    +  (1): Linear(in_features=2, out_features=2, bias=True)
    +)
    +
    +
    +
    + +
    +
    +buffers(recurse=True)[source]
    +

    Returns an iterator over module buffers.

    +
    +
    Parameters
    +

    recurse (bool) – if True, then yields buffers of this module +and all submodules. Otherwise, yields only buffers that +are direct members of this module.

    +
    +
    Yields
    +

    torch.Tensor – module buffer

    +
    +
    +

    Example:

    +
    >>> for buf in model.buffers():
    +>>>     print(type(buf.data), buf.size())
    +<class 'torch.FloatTensor'> (20L,)
    +<class 'torch.FloatTensor'> (20L, 1L, 5L, 5L)
    +
    +
    +
    + +
    +
    +children()[source]
    +

    Returns an iterator over immediate children modules.

    +
    +
    Yields
    +

    Module – a child module

    +
    +
    +
    + +
    +
    +cpu()[source]
    +

    Moves all model parameters and buffers to the CPU.

    +
    +
    Returns
    +

    self

    +
    +
    Return type
    +

    Module

    +
    +
    +
    + +
    +
    +cuda(device=None)[source]
    +

    Moves all model parameters and buffers to the GPU.

    +

    This also makes associated parameters and buffers different objects. So +it should be called before constructing optimizer if the module will +live on GPU while being optimized.

    +
    +
    Parameters
    +

    device (int, optional) – if specified, all parameters will be +copied to that device

    +
    +
    Returns
    +

    self

    +
    +
    Return type
    +

    Module

    +
    +
    +
    + +
    +
    +double()[source]
    +

    Casts all floating point parameters and buffers to double datatype.

    +
    +
    Returns
    +

    self

    +
    +
    Return type
    +

    Module

    +
    +
    +
    + +
    +
    +dump_patches = False
    +

    This allows better BC support for load_state_dict(). In state_dict(), the version number will be saved in the attribute _metadata of the returned state dict, and thus pickled. _metadata is a dictionary with keys that follow the naming convention of the state dict. See _load_from_state_dict on how to use this information in loading.

    +

    If new parameters/buffers are added/removed from a module, this number shall +be bumped, and the module’s _load_from_state_dict method can compare the +version number and do appropriate changes if the state dict is from before +the change.

    +
    + +
    +
    +eval()[source]
    +

    Sets the module in evaluation mode.

    +

    This has any effect only on certain modules. See documentations of +particular modules for details of their behaviors in training/evaluation +mode, if they are affected, e.g. Dropout, BatchNorm, +etc.

    +

    This is equivalent to self.train(False).

    +
    +
    Returns
    +

    self

    +
    +
    Return type
    +

    Module
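    A common hedged usage pattern (model and inputs are hypothetical placeholders): inference typically pairs eval() with torch.no_grad():

    >>> model.eval()                      # e.g. disables Dropout, uses BatchNorm running stats
    >>> with torch.no_grad():
    >>>     predictions = model(inputs)   # model and inputs are placeholders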

    +
    +
    +
    + +
    +
    +extra_repr()[source]
    +

    Set the extra representation of the module

    +

    To print customized extra information, you should reimplement +this method in your own modules. Both single-line and multi-line +strings are acceptable.

    +
    + +
    +
    +float()[source]
    +

    Casts all floating point parameters and buffers to float datatype.

    +
    +
    Returns
    +

    self

    +
    +
    Return type
    +

    Module

    +
    +
    +
    + +
    +
    +forward(*input)[source]
    +

    Defines the computation performed at every call.

    +

    Should be overridden by all subclasses.

    +
    +

    Note

    +

    Although the recipe for forward pass needs to be defined within +this function, one should call the Module instance afterwards +instead of this since the former takes care of running the +registered hooks while the latter silently ignores them.

    +
    +
    + +
    +
    +half()[source]
    +

    Casts all floating point parameters and buffers to half datatype.

    +
    +
    Returns
    +

    self

    +
    +
    Return type
    +

    Module

    +
    +
    +
    + +
    +
    +load_state_dict(state_dict, strict=True)[source]
    +

    Copies parameters and buffers from state_dict into +this module and its descendants. If strict is True, then +the keys of state_dict must exactly match the keys returned +by this module’s state_dict() function.

    +
    +
    Parameters
    +
      +
    • state_dict (dict) – a dict containing parameters and +persistent buffers.

    • +
    • strict (bool, optional) – whether to strictly enforce that the keys +in state_dict match the keys returned by this module’s +state_dict() function. Default: True

    • +
    +
    +
    Returns
    +

      +
    • missing_keys is a list of str containing the missing keys

    • +
    • unexpected_keys is a list of str containing the unexpected keys

    • +
    +

    +
    +
    Return type
    +

    NamedTuple with missing_keys and unexpected_keys fields
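    A minimal sketch of non-strict loading (model and checkpoint are hypothetical placeholders), inspecting the returned NamedTuple:

    >>> result = model.load_state_dict(checkpoint, strict=False)
    >>> result.missing_keys      # keys the module expected but the checkpoint lacks
    >>> result.unexpected_keys   # checkpoint keys the module does not recognize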

    +
    +
    +
    + +
    +
    +modules()[source]
    +

    Returns an iterator over all modules in the network.

    +
    +
    Yields
    +

    Module – a module in the network

    +
    +
    +
    +

    Note

    +

    Duplicate modules are returned only once. In the following +example, l will be returned only once.

    +
    +

    Example:

    +
    >>> l = nn.Linear(2, 2)
    +>>> net = nn.Sequential(l, l)
    +>>> for idx, m in enumerate(net.modules()):
    +        print(idx, '->', m)
    +
    +0 -> Sequential(
    +  (0): Linear(in_features=2, out_features=2, bias=True)
    +  (1): Linear(in_features=2, out_features=2, bias=True)
    +)
    +1 -> Linear(in_features=2, out_features=2, bias=True)
    +
    +
    +
    + +
    +
    +named_buffers(prefix='', recurse=True)[source]
    +

    Returns an iterator over module buffers, yielding both the +name of the buffer as well as the buffer itself.

    +
    +
    Parameters
    +
      +
    • prefix (str) – prefix to prepend to all buffer names.

    • +
    • recurse (bool) – if True, then yields buffers of this module +and all submodules. Otherwise, yields only buffers that +are direct members of this module.

    • +
    +
    +
    Yields
    +

    (string, torch.Tensor) – Tuple containing the name and buffer

    +
    +
    +

    Example:

    +
    >>> for name, buf in self.named_buffers():
    +>>>    if name in ['running_var']:
    +>>>        print(buf.size())
    +
    +
    +
    + +
    +
    +named_children()[source]
    +

    Returns an iterator over immediate children modules, yielding both +the name of the module as well as the module itself.

    +
    +
    Yields
    +

    (string, Module) – Tuple containing a name and child module

    +
    +
    +

    Example:

    +
    >>> for name, module in model.named_children():
    +>>>     if name in ['conv4', 'conv5']:
    +>>>         print(module)
    +
    +
    +
    + +
    +
    +named_modules(memo=None, prefix='')[source]
    +

    Returns an iterator over all modules in the network, yielding +both the name of the module as well as the module itself.

    +
    +
    Yields
    +

    (string, Module) – Tuple of name and module

    +
    +
    +
    +

    Note

    +

    Duplicate modules are returned only once. In the following +example, l will be returned only once.

    +
    +

    Example:

    +
    >>> l = nn.Linear(2, 2)
    +>>> net = nn.Sequential(l, l)
    +>>> for idx, m in enumerate(net.named_modules()):
    +        print(idx, '->', m)
    +
    +0 -> ('', Sequential(
    +  (0): Linear(in_features=2, out_features=2, bias=True)
    +  (1): Linear(in_features=2, out_features=2, bias=True)
    +))
    +1 -> ('0', Linear(in_features=2, out_features=2, bias=True))
    +
    +
    +
    + +
    +
    +named_parameters(prefix='', recurse=True)[source]
    +

    Returns an iterator over module parameters, yielding both the +name of the parameter as well as the parameter itself.

    +
    +
    Parameters
    +
      +
    • prefix (str) – prefix to prepend to all parameter names.

    • +
    • recurse (bool) – if True, then yields parameters of this module +and all submodules. Otherwise, yields only parameters that +are direct members of this module.

    • +
    +
    +
    Yields
    +

    (string, Parameter) – Tuple containing the name and parameter

    +
    +
    +

    Example:

    +
    >>> for name, param in self.named_parameters():
    +>>>    if name in ['bias']:
    +>>>        print(param.size())
    +
    +
    +
    + +
    +
    +parameters(recurse=True)[source]
    +

    Returns an iterator over module parameters.

    +

    This is typically passed to an optimizer.

    +
    +
    Parameters
    +

    recurse (bool) – if True, then yields parameters of this module +and all submodules. Otherwise, yields only parameters that +are direct members of this module.

    +
    +
    Yields
    +

    Parameter – module parameter

    +
    +
    +

    Example:

    +
    >>> for param in model.parameters():
    +>>>     print(type(param.data), param.size())
    +<class 'torch.FloatTensor'> (20L,)
    +<class 'torch.FloatTensor'> (20L, 1L, 5L, 5L)
    +
    +
    +
    + +
    +
    +register_backward_hook(hook)[source]
    +

    Registers a backward hook on the module.

    +

    The hook will be called every time the gradients with respect to module +inputs are computed. The hook should have the following signature:

    +
    hook(module, grad_input, grad_output) -> Tensor or None
    +
    +
    +

    The grad_input and grad_output may be tuples if the +module has multiple inputs or outputs. The hook should not modify its +arguments, but it can optionally return a new gradient with respect to +input that will be used in place of grad_input in subsequent +computations.

    +
    +
    Returns
    +

    a handle that can be used to remove the added hook by calling +handle.remove()

    +
    +
    Return type
    +

    torch.utils.hooks.RemovableHandle

    +
    +
    +
    +

    Warning

    +

    The current implementation will not have the presented behavior +for complex Module that perform many operations. +In some failure cases, grad_input and grad_output will only +contain the gradients for a subset of the inputs and outputs. +For such Module, you should use torch.Tensor.register_hook() +directly on a specific input or output to get the required gradients.

    +
    +
    + +
    +
    +register_buffer(name, tensor)[source]
    +

    Adds a persistent buffer to the module.

    +

    This is typically used to register a buffer that should not to be +considered a model parameter. For example, BatchNorm’s running_mean +is not a parameter, but is part of the persistent state.

    +

    Buffers can be accessed as attributes using given names.

    +
    +
    Parameters
    +
      +
    • name (string) – name of the buffer. The buffer can be accessed +from this module using the given name

    • +
    • tensor (Tensor) – buffer to be registered.

    • +
    +
    +
    +

    Example:

    +
    >>> self.register_buffer('running_mean', torch.zeros(num_features))
    +
    +
    +
    + +
    +
    +register_forward_hook(hook)[source]
    +

    Registers a forward hook on the module.

    +

    The hook will be called every time after forward() has computed an output. +It should have the following signature:

    +
    hook(module, input, output) -> None or modified output
    +
    +
    +

    The hook can modify the output. It can modify the input inplace but +it will not have effect on forward since this is called after +forward() is called.

    +
    +
    Returns
    +

    a handle that can be used to remove the added hook by calling +handle.remove()

    +
    +
    Return type
    +

    torch.utils.hooks.RemovableHandle
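    For illustration, a hedged sketch (assuming import torch and import torch.nn as nn) of a hook that just prints output shapes:

    >>> def print_shape_hook(module, input, output):
    >>>     print(module.__class__.__name__, '->', tuple(output.shape))
    >>> layer = nn.Linear(4, 2)
    >>> handle = layer.register_forward_hook(print_shape_hook)
    >>> _ = layer(torch.randn(3, 4))
    Linear -> (3, 2)
    >>> handle.remove()   # detach the hook again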

    +
    +
    +
    + +
    +
    +register_forward_pre_hook(hook)[source]
    +

    Registers a forward pre-hook on the module.

    +

    The hook will be called every time before forward() is invoked. +It should have the following signature:

    +
    hook(module, input) -> None or modified input
    +
    +
    +

    The hook can modify the input. The user can either return a tuple or a single modified value in the hook. We will wrap the value into a tuple if a single value is returned (unless that value is already a tuple).

    +
    +
    Returns
    +

    a handle that can be used to remove the added hook by calling +handle.remove()

    +
    +
    Return type
    +

    torch.utils.hooks.RemovableHandle

    +
    +
    +
    + +
    +
    +register_parameter(name, param)[source]
    +

    Adds a parameter to the module.

    +

    The parameter can be accessed as an attribute using given name.

    +
    +
    Parameters
    +
      +
    • name (string) – name of the parameter. The parameter can be accessed +from this module using the given name

    • +
    • param (Parameter) – parameter to be added to the module.

    • +
    +
    +
    +
    + +
    +
    +requires_grad_(requires_grad=True)[source]
    +

    Change if autograd should record operations on parameters in this +module.

    +

    This method sets the parameters’ requires_grad attributes +in-place.

    +

    This method is helpful for freezing part of the module for finetuning +or training parts of a model individually (e.g., GAN training).

    +
    +
    Parameters
    +

    requires_grad (bool) – whether autograd should record operations on +parameters in this module. Default: True.

    +
    +
    Returns
    +

    self

    +
    +
    Return type
    +

    Module
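    A hedged freezing sketch (backbone and head are hypothetical submodules of a larger model): only the head's parameters keep gradients, so only they are handed to the optimizer:

    >>> backbone.requires_grad_(False)   # freeze the pretrained backbone in-place
    >>> optimizer = torch.optim.SGD(head.parameters(), lr=0.01)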

    +
    +
    +
    + +
    +
    +state_dict(destination=None, prefix='', keep_vars=False)[source]
    +

    Returns a dictionary containing a whole state of the module.

    +

    Both parameters and persistent buffers (e.g. running averages) are +included. Keys are corresponding parameter and buffer names.

    +
    +
    Returns
    +

    a dictionary containing a whole state of the module

    +
    +
    Return type
    +

    dict

    +
    +
    +

    Example:

    +
    >>> module.state_dict().keys()
    +['bias', 'weight']
    +
    +
    +
    + +
    +
    +to(*args, **kwargs)[source]
    +

    Moves and/or casts the parameters and buffers.

    +

    This can be called as

    +
    +
    +to(device=None, dtype=None, non_blocking=False)[source]
    +
    + +
    +
    +to(dtype, non_blocking=False)[source]
    +
    + +
    +
    +to(tensor, non_blocking=False)[source]
    +
    + +

    Its signature is similar to torch.Tensor.to(), but only accepts floating point desired dtype s. In addition, this method will only cast the floating point parameters and buffers to dtype (if given). The integral parameters and buffers will be moved to device, if that is given, but with dtypes unchanged. When non_blocking is set, it tries to convert/move asynchronously with respect to the host if possible, e.g., moving CPU Tensors with pinned memory to CUDA devices.

    +

    See below for examples.

    +
    +

    Note

    +

    This method modifies the module in-place.

    +
    +
    +
    Parameters
    +
      +
    • device (torch.device) – the desired device of the parameters +and buffers in this module

    • +
    • dtype (torch.dtype) – the desired floating point type of +the floating point parameters and buffers in this module

    • +
    • tensor (torch.Tensor) – Tensor whose dtype and device are the desired +dtype and device for all parameters and buffers in this module

    • +
    +
    +
    Returns
    +

    self

    +
    +
    Return type
    +

    Module

    +
    +
    +

    Example:

    +
    >>> linear = nn.Linear(2, 2)
    +>>> linear.weight
    +Parameter containing:
    +tensor([[ 0.1913, -0.3420],
    +        [-0.5113, -0.2325]])
    +>>> linear.to(torch.double)
    +Linear(in_features=2, out_features=2, bias=True)
    +>>> linear.weight
    +Parameter containing:
    +tensor([[ 0.1913, -0.3420],
    +        [-0.5113, -0.2325]], dtype=torch.float64)
    +>>> gpu1 = torch.device("cuda:1")
    +>>> linear.to(gpu1, dtype=torch.half, non_blocking=True)
    +Linear(in_features=2, out_features=2, bias=True)
    +>>> linear.weight
    +Parameter containing:
    +tensor([[ 0.1914, -0.3420],
    +        [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')
    +>>> cpu = torch.device("cpu")
    +>>> linear.to(cpu)
    +Linear(in_features=2, out_features=2, bias=True)
    +>>> linear.weight
    +Parameter containing:
    +tensor([[ 0.1914, -0.3420],
    +        [-0.5112, -0.2324]], dtype=torch.float16)
    +
    +
    +
    + +
    +
    +train(mode=True)[source]
    +

    Sets the module in training mode.

    +

    This has any effect only on certain modules. See documentations of +particular modules for details of their behaviors in training/evaluation +mode, if they are affected, e.g. Dropout, BatchNorm, +etc.

    +
    +
    Parameters
    +

    mode (bool) – whether to set training mode (True) or evaluation +mode (False). Default: True.

    +
    +
    Returns
    +

    self

    +
    +
    Return type
    +

    Module

    +
    +
    +
    + +
    +
    +type(dst_type)[source]
    +

    Casts all parameters and buffers to dst_type.

    +
    +
    Parameters
    +

    dst_type (type or string) – the desired type

    +
    +
    Returns
    +

    self

    +
    +
    Return type
    +

    Module

    +
    +
    +
    + +
    +
    +zero_grad()[source]
    +

    Sets gradients of all model parameters to zero.
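    A hedged sketch of the usual place this is called in a training step (model and loader are hypothetical placeholders; F is torch.nn.functional):

    >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    >>> for input, target in loader:
    >>>     model.zero_grad()             # clear gradients accumulated by backward()
    >>>     loss = F.mse_loss(model(input), target)
    >>>     loss.backward()
    >>>     optimizer.step()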

    +
    + +
    + +
    +
    +

    Sequential

    +
    +
    +class torch.nn.Sequential(*args)[source]
    +

    A sequential container. +Modules will be added to it in the order they are passed in the constructor. +Alternatively, an ordered dict of modules can also be passed in.

    +

    To make it easier to understand, here is a small example:

    +
    # Example of using Sequential
    +model = nn.Sequential(
    +          nn.Conv2d(1,20,5),
    +          nn.ReLU(),
    +          nn.Conv2d(20,64,5),
    +          nn.ReLU()
    +        )
    +
    +# Example of using Sequential with OrderedDict
    +model = nn.Sequential(OrderedDict([
    +          ('conv1', nn.Conv2d(1,20,5)),
    +          ('relu1', nn.ReLU()),
    +          ('conv2', nn.Conv2d(20,64,5)),
    +          ('relu2', nn.ReLU())
    +        ]))
    +
    +
    +
    + +
    +
    +

    ModuleList

    +
    +
    +class torch.nn.ModuleList(modules=None)[source]
    +

    Holds submodules in a list.

    +

    ModuleList can be indexed like a regular Python list, but +modules it contains are properly registered, and will be visible by all +Module methods.

    +
    +
    Parameters
    +

    modules (iterable, optional) – an iterable of modules to add

    +
    +
    +

    Example:

    +
    class MyModule(nn.Module):
    +    def __init__(self):
    +        super(MyModule, self).__init__()
    +        self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(10)])
    +
    +    def forward(self, x):
    +        # ModuleList can act as an iterable, or be indexed using ints
    +        for i, l in enumerate(self.linears):
    +            x = self.linears[i // 2](x) + l(x)
    +        return x
    +
    +
    +
    +
    +append(module)[source]
    +

    Appends a given module to the end of the list.

    +
    +
    Parameters
    +

    module (nn.Module) – module to append

    +
    +
    +
    + +
    +
    +extend(modules)[source]
    +

    Appends modules from a Python iterable to the end of the list.

    +
    +
    Parameters
    +

    modules (iterable) – iterable of modules to append

    +
    +
    +
    + +
    +
    +insert(index, module)[source]
    +

    Insert a given module before a given index in the list.

    +
    +
    Parameters
    +
      +
    • index (int) – index to insert.

    • +
    • module (nn.Module) – module to insert

    • +
    +
    +
    +
    + +
    + +
    +
    +

    ModuleDict

    +
    +
    +class torch.nn.ModuleDict(modules=None)[source]
    +

    Holds submodules in a dictionary.

    +

    ModuleDict can be indexed like a regular Python dictionary, +but modules it contains are properly registered, and will be visible by all +Module methods.

    +

    ModuleDict is an ordered dictionary that respects

    +
      +
    • the order of insertion, and

    • +
    • in update(), the order of the merged OrderedDict +or another ModuleDict (the argument to update()).

    • +
    +

    Note that update() with other unordered mapping +types (e.g., Python’s plain dict) does not preserve the order of the +merged mapping.

    +
    +
    Parameters
    +

    modules (iterable, optional) – a mapping (dictionary) of (string: module) +or an iterable of key-value pairs of type (string, module)

    +
    +
    +

    Example:

    +
    class MyModule(nn.Module):
    +    def __init__(self):
    +        super(MyModule, self).__init__()
    +        self.choices = nn.ModuleDict({
    +                'conv': nn.Conv2d(10, 10, 3),
    +                'pool': nn.MaxPool2d(3)
    +        })
    +        self.activations = nn.ModuleDict([
    +                ['lrelu', nn.LeakyReLU()],
    +                ['prelu', nn.PReLU()]
    +        ])
    +
    +    def forward(self, x, choice, act):
    +        x = self.choices[choice](x)
    +        x = self.activations[act](x)
    +        return x
    +
    +
    +
    +
    +clear()[source]
    +

    Remove all items from the ModuleDict.

    +
    + +
    +
    +items()[source]
    +

    Return an iterable of the ModuleDict key/value pairs.

    +
    + +
    +
    +keys()[source]
    +

    Return an iterable of the ModuleDict keys.

    +
    + +
    +
    +pop(key)[source]
    +

    Remove key from the ModuleDict and return its module.

    +
    +
    Parameters
    +

    key (string) – key to pop from the ModuleDict

    +
    +
    +
    + +
    +
    +update(modules)[source]
    +

    Update the ModuleDict with the key-value pairs from a +mapping or an iterable, overwriting existing keys.

    +
    +

    Note

    +

    If modules is an OrderedDict, a ModuleDict, or +an iterable of key-value pairs, the order of new elements in it is preserved.

    +
    +
    +
    Parameters
    +

    modules (iterable) – a mapping (dictionary) from string to Module, +or an iterable of key-value pairs of type (string, Module)

    +
    +
    +
    + +
    +
    +values()[source]
    +

    Return an iterable of the ModuleDict values.

    +
    + +
    + +
    +
    +

    ParameterList

    +
    +
    +class torch.nn.ParameterList(parameters=None)[source]
    +

    Holds parameters in a list.

    +

    ParameterList can be indexed like a regular Python +list, but parameters it contains are properly registered, and will be +visible by all Module methods.

    +
    +
    Parameters
    +

    parameters (iterable, optional) – an iterable of Parameter to add

    +
    +
    +

    Example:

    +
    class MyModule(nn.Module):
    +    def __init__(self):
    +        super(MyModule, self).__init__()
    +        self.params = nn.ParameterList([nn.Parameter(torch.randn(10, 10)) for i in range(10)])
    +
    +    def forward(self, x):
    +        # ParameterList can act as an iterable, or be indexed using ints
    +        for i, p in enumerate(self.params):
    +            x = self.params[i // 2].mm(x) + p.mm(x)
    +        return x
    +
    +
    +
    +
    +append(parameter)[source]
    +

    Appends a given parameter at the end of the list.

    +
    +
    Parameters
    +

    parameter (nn.Parameter) – parameter to append

    +
    +
    +
    + +
    +
    +extend(parameters)[source]
    +

    Appends parameters from a Python iterable to the end of the list.

    +
    +
    Parameters
    +

    parameters (iterable) – iterable of parameters to append

    +
    +
    +
    + +
    + +
    +
    +

    ParameterDict

    +
    +
    +class torch.nn.ParameterDict(parameters=None)[source]
    +

    Holds parameters in a dictionary.

    +

    ParameterDict can be indexed like a regular Python dictionary, but parameters it +contains are properly registered, and will be visible by all Module methods.

    +

    ParameterDict is an ordered dictionary that respects

    +
    • the order of insertion, and

    • in update(), the order of the merged OrderedDict or another ParameterDict (the argument to update()).
    +

    Note that update() with other unordered mapping +types (e.g., Python’s plain dict) does not preserve the order of the +merged mapping.

    +
    +
    Parameters
    +

    parameters (iterable, optional) – a mapping (dictionary) of +(string : Parameter) or an iterable of key-value pairs +of type (string, Parameter)

    +
    +
    +

    Example:

    +
    class MyModule(nn.Module):
    +    def __init__(self):
    +        super(MyModule, self).__init__()
    +        self.params = nn.ParameterDict({
    +                'left': nn.Parameter(torch.randn(5, 10)),
    +                'right': nn.Parameter(torch.randn(5, 10))
    +        })
    +
    +    def forward(self, x, choice):
    +        x = self.params[choice].mm(x)
    +        return x
    +
    +
    +
    +
    +clear()[source]
    +

    Remove all items from the ParameterDict.

    +
    + +
    +
    +items()[source]
    +

    Return an iterable of the ParameterDict key/value pairs.

    +
    + +
    +
    +keys()[source]
    +

    Return an iterable of the ParameterDict keys.

    +
    + +
    +
    +pop(key)[source]
    +

    Remove key from the ParameterDict and return its parameter.

    +
    +
    Parameters
    +

    key (string) – key to pop from the ParameterDict

    +
    +
    +
    + +
    +
    +update(parameters)[source]
    +

    Update the ParameterDict with the key-value pairs from a +mapping or an iterable, overwriting existing keys.

    +
    +

    Note

    +

    If parameters is an OrderedDict, a ParameterDict, or +an iterable of key-value pairs, the order of new elements in it is preserved.

    +
    +
    +
    Parameters
    +

    parameters (iterable) – a mapping (dictionary) from string to +Parameter, or an iterable of +key-value pairs of type (string, Parameter)

    +
    +
    +
    + +
    +
    +values()[source]
    +

    Return an iterable of the ParameterDict values.

    +
    + +
    + +
    +
    +
    +

    Convolution layers

    +
    +

    Conv1d

    +
    +
    +class torch.nn.Conv1d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros')[source]
    +

    Applies a 1D convolution over an input signal composed of several input +planes.

    +

    In the simplest case, the output value of the layer with input size +\((N, C_{\text{in}}, L)\) and output \((N, C_{\text{out}}, L_{\text{out}})\) can be +precisely described as:

    +
    +\[\text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) + \sum_{k = 0}^{C_{in} - 1} \text{weight}(C_{\text{out}_j}, k) \star \text{input}(N_i, k)\]
    +

    where \(\star\) is the valid cross-correlation operator, +\(N\) is a batch size, \(C\) denotes a number of channels, +\(L\) is a length of signal sequence.

    +
      +
    • stride controls the stride for the cross-correlation, a single +number or a one-element tuple.

    • +
    • padding controls the amount of implicit zero-paddings on both sides +for padding number of points.

    • +
    • dilation controls the spacing between the kernel points; also +known as the à trous algorithm. It is harder to describe, but this link +has a nice visualization of what dilation does.

    • +
    • groups controls the connections between inputs and outputs. +in_channels and out_channels must both be divisible by +groups. For example,

      +
      +
        +
      • At groups=1, all inputs are convolved to all outputs.

      • +
      • At groups=2, the operation becomes equivalent to having two conv +layers side by side, each seeing half the input channels, +and producing half the output channels, and both subsequently +concatenated.

      • +
      • At groups= in_channels, each input channel is convolved with +its own set of filters, +of size +\(\left\lfloor\frac{out\_channels}{in\_channels}\right\rfloor\).

      • +
      +
      +
    • +
    +
    +

    Note

    +

    Depending of the size of your kernel, several (of the last) +columns of the input might be lost, because it is a valid +cross-correlation, and not a full cross-correlation. +It is up to the user to add proper padding.

    +
    +
    +

    Note

    +

    When groups == in_channels and out_channels == K * in_channels, +where K is a positive integer, this operation is also termed in +literature as depthwise convolution.

    +

    In other words, for an input of size \((N, C_{in}, L_{in})\), +a depthwise convolution with a depthwise multiplier K, can be constructed by arguments +\((C_\text{in}=C_{in}, C_\text{out}=C_{in} \times K, ..., \text{groups}=C_{in})\).

    +
    +
    +

    Note

    +

    In some circumstances when using the CUDA backend with CuDNN, this operator +may select a nondeterministic algorithm to increase performance. If this is +undesirable, you can try to make the operation deterministic (potentially at +a performance cost) by setting torch.backends.cudnn.deterministic = +True. +Please see the notes on Reproducibility for background.

    +
    +
    +
    Parameters
    +
      +
    • in_channels (int) – Number of channels in the input image

    • +
    • out_channels (int) – Number of channels produced by the convolution

    • +
    • kernel_size (int or tuple) – Size of the convolving kernel

    • +
    • stride (int or tuple, optional) – Stride of the convolution. Default: 1

    • +
    • padding (int or tuple, optional) – Zero-padding added to both sides of +the input. Default: 0

    • +
    • padding_mode (string, optional) – zeros

    • +
    • dilation (int or tuple, optional) – Spacing between kernel +elements. Default: 1

    • +
    • groups (int, optional) – Number of blocked connections from input +channels to output channels. Default: 1

    • +
    • bias (bool, optional) – If True, adds a learnable bias to the output. Default: True

    • +
    +
    +
    +
    +
    Shape:
      +
    • Input: \((N, C_{in}, L_{in})\)

    • +
    • Output: \((N, C_{out}, L_{out})\) where

      +
    +\[L_{out} = \left\lfloor\frac{L_{in} + 2 \times \text{padding} - \text{dilation} \times (\text{kernel\_size} - 1) - 1}{\text{stride}} + 1\right\rfloor\]
      +
    • +
    +
    +
    +
    +
    Variables
    +
      +
    • ~Conv1d.weight (Tensor) – the learnable weights of the module of shape +\((\text{out\_channels}, \frac{\text{in\_channels}}{\text{groups}}, \text{kernel\_size})\). +The values of these weights are sampled from +\(\mathcal{U}(-\sqrt{k}, \sqrt{k})\) where +\(k = \frac{1}{C_\text{in} * \text{kernel\_size}}\)

    • +
    • ~Conv1d.bias (Tensor) – the learnable bias of the module of shape +(out_channels). If bias is True, then the values of these weights are +sampled from \(\mathcal{U}(-\sqrt{k}, \sqrt{k})\) where +\(k = \frac{1}{C_\text{in} * \text{kernel\_size}}\)

    • +
    +
    +
    +

    Examples:

    +
    >>> m = nn.Conv1d(16, 33, 3, stride=2)
    +>>> input = torch.randn(20, 16, 50)
    +>>> output = m(input)
    +
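    As a quick check against the shape formula above (padding 0, dilation 1): \(L_{out} = \left\lfloor\frac{50 - 1 \times (3 - 1) - 1}{2} + 1\right\rfloor = 24\), so:

    >>> output.size()
    torch.Size([20, 33, 24])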
    +
    +
    + +
    +
    +

    Conv2d

    +
    +
    +class torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros')[source]
    +

    Applies a 2D convolution over an input signal composed of several input +planes.

    +

    In the simplest case, the output value of the layer with input size +\((N, C_{\text{in}}, H, W)\) and output \((N, C_{\text{out}}, H_{\text{out}}, W_{\text{out}})\) +can be precisely described as:

    +
    +\[\text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) + \sum_{k = 0}^{C_{\text{in}} - 1} \text{weight}(C_{\text{out}_j}, k) \star \text{input}(N_i, k)\]
    +

    where \(\star\) is the valid 2D cross-correlation operator, +\(N\) is a batch size, \(C\) denotes a number of channels, +\(H\) is a height of input planes in pixels, and \(W\) is +width in pixels.

    +
      +
    • stride controls the stride for the cross-correlation, a single +number or a tuple.

    • +
    • padding controls the amount of implicit zero-paddings on both +sides for padding number of points for each dimension.

    • +
    • dilation controls the spacing between the kernel points; also +known as the à trous algorithm. It is harder to describe, but this link +has a nice visualization of what dilation does.

    • +
    • groups controls the connections between inputs and outputs. +in_channels and out_channels must both be divisible by +groups. For example,

      +
      +
        +
      • At groups=1, all inputs are convolved to all outputs.

      • +
      • At groups=2, the operation becomes equivalent to having two conv +layers side by side, each seeing half the input channels, +and producing half the output channels, and both subsequently +concatenated.

      • +
      • At groups= in_channels, each input channel is convolved with +its own set of filters, of size: +\(\left\lfloor\frac{out\_channels}{in\_channels}\right\rfloor\).

      • +
      +
      +
    • +
    +

    The parameters kernel_size, stride, padding, dilation can either be:

    +
    +
      +
    • a single int – in which case the same value is used for the height and width dimension

    • +
    • a tuple of two ints – in which case, the first int is used for the height dimension, +and the second int for the width dimension

    • +
    +
    +
    +

    Note

    +

    Depending of the size of your kernel, several (of the last) +columns of the input might be lost, because it is a valid cross-correlation, +and not a full cross-correlation. +It is up to the user to add proper padding.

    +
    +
    +

    Note

    +

    When groups == in_channels and out_channels == K * in_channels, +where K is a positive integer, this operation is also termed in +literature as depthwise convolution.

    +

    In other words, for an input of size \((N, C_{in}, H_{in}, W_{in})\), +a depthwise convolution with a depthwise multiplier K, can be constructed by arguments +\((in\_channels=C_{in}, out\_channels=C_{in} \times K, ..., groups=C_{in})\).

    +
    +
    +

    Note

    +

    In some circumstances when using the CUDA backend with CuDNN, this operator +may select a nondeterministic algorithm to increase performance. If this is +undesirable, you can try to make the operation deterministic (potentially at +a performance cost) by setting torch.backends.cudnn.deterministic = +True. +Please see the notes on Reproducibility for background.

    +
    +
    +
    Parameters
    +
      +
    • in_channels (int) – Number of channels in the input image

    • +
    • out_channels (int) – Number of channels produced by the convolution

    • +
    • kernel_size (int or tuple) – Size of the convolving kernel

    • +
    • stride (int or tuple, optional) – Stride of the convolution. Default: 1

    • +
    • padding (int or tuple, optional) – Zero-padding added to both sides of the input. Default: 0

    • +
    • padding_mode (string, optional) – zeros

    • +
    • dilation (int or tuple, optional) – Spacing between kernel elements. Default: 1

    • +
    • groups (int, optional) – Number of blocked connections from input channels to output channels. Default: 1

    • +
    • bias (bool, optional) – If True, adds a learnable bias to the output. Default: True

    • +
    +
    +
    +
    +
    Shape:
      +
    • Input: \((N, C_{in}, H_{in}, W_{in})\)

    • +
    • Output: \((N, C_{out}, H_{out}, W_{out})\) where

      +
    +\[H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[0] - \text{dilation}[0] \times (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor\]
    +
    +\[W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[1] - \text{dilation}[1] \times (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor\]
      +
    • +
    +
    +
    +
    +
    Variables
    +
      +
    • ~Conv2d.weight (Tensor) – the learnable weights of the module of shape +\((\text{out\_channels}, \frac{\text{in\_channels}}{\text{groups}},\) +\(\text{kernel\_size[0]}, \text{kernel\_size[1]})\). +The values of these weights are sampled from +\(\mathcal{U}(-\sqrt{k}, \sqrt{k})\) where +\(k = \frac{1}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}\)

    • +
    • ~Conv2d.bias (Tensor) – the learnable bias of the module of shape (out_channels). If bias is True, +then the values of these weights are +sampled from \(\mathcal{U}(-\sqrt{k}, \sqrt{k})\) where +\(k = \frac{1}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}\)

    • +
    +
    +
    +

    Examples:

    +
    >>> # With square kernels and equal stride
    +>>> m = nn.Conv2d(16, 33, 3, stride=2)
    +>>> # non-square kernels and unequal stride and with padding
    +>>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
    +>>> # non-square kernels and unequal stride and with padding and dilation
    +>>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
    +>>> input = torch.randn(20, 16, 50, 100)
    +>>> output = m(input)
    +
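    For the last module above, the shape formulas give \(H_{out} = \left\lfloor\frac{50 + 8 - 6 - 1}{2} + 1\right\rfloor = 26\) and \(W_{out} = \left\lfloor\frac{100 + 4 - 4 - 1}{1} + 1\right\rfloor = 100\), which can be verified directly:

    >>> output.size()
    torch.Size([20, 33, 26, 100])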
    +
    +
    + +
    +
    +

    Conv3d

    +
    +
    +class torch.nn.Conv3d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros')[source]
    +

    Applies a 3D convolution over an input signal composed of several input +planes.

    +

    In the simplest case, the output value of the layer with input size \((N, C_{in}, D, H, W)\) +and output \((N, C_{out}, D_{out}, H_{out}, W_{out})\) can be precisely described as:

    +
    +\[out(N_i, C_{out_j}) = bias(C_{out_j}) + \sum_{k = 0}^{C_{in} - 1} weight(C_{out_j}, k) \star input(N_i, k)\]
    +

    where \(\star\) is the valid 3D cross-correlation operator

    • stride controls the stride for the cross-correlation.

    • padding controls the amount of implicit zero-padding on both sides for padding number of points for each dimension.

    • dilation controls the spacing between the kernel points; also known as the à trous algorithm. It is harder to describe, but this link has a nice visualization of what dilation does.

    • groups controls the connections between inputs and outputs. in_channels and out_channels must both be divisible by groups. For example,

      • At groups=1, all inputs are convolved to all outputs.

      • At groups=2, the operation becomes equivalent to having two conv layers side by side, each seeing half the input channels, and producing half the output channels, and both subsequently concatenated.

      • At groups= in_channels, each input channel is convolved with its own set of filters, of size \(\left\lfloor\frac{out\_channels}{in\_channels}\right\rfloor\).

    The parameters kernel_size, stride, padding, dilation can either be:

    • a single int – in which case the same value is used for the depth, height and width dimension

    • a tuple of three ints – in which case, the first int is used for the depth dimension, the second int for the height dimension and the third int for the width dimension

    Note

    Depending on the size of your kernel, several (of the last) columns of the input might be lost, because it is a valid cross-correlation, and not a full cross-correlation. It is up to the user to add proper padding.

    Note

    When groups == in_channels and out_channels == K * in_channels, where K is a positive integer, this operation is also termed in the literature as a depthwise convolution.

    In other words, for an input of size \((N, C_{in}, D_{in}, H_{in}, W_{in})\), a depthwise convolution with a depthwise multiplier K can be constructed by the arguments \((in\_channels=C_{in}, out\_channels=C_{in} \times K, ..., groups=C_{in})\).
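    For instance, a minimal sketch of a depthwise 3D convolution with multiplier K = 2 (the sizes here are illustrative, not from the text above):

    >>> # depthwise: groups == in_channels, out_channels == in_channels * K
    >>> m = nn.Conv3d(16, 32, 3, groups=16)
    >>> m(torch.randn(20, 16, 10, 50, 100)).size()
    torch.Size([20, 32, 8, 48, 98])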

    Note

    In some circumstances when using the CUDA backend with CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is undesirable, you can try to make the operation deterministic (potentially at a performance cost) by setting torch.backends.cudnn.deterministic = True. Please see the notes on Reproducibility for background.
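    For example, the setting named in this note is applied globally, before running the model:

    >>> import torch
    >>> torch.backends.cudnn.deterministic = True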
    Parameters
    • in_channels (int) – Number of channels in the input image

    • out_channels (int) – Number of channels produced by the convolution

    • kernel_size (int or tuple) – Size of the convolving kernel

    • stride (int or tuple, optional) – Stride of the convolution. Default: 1

    • padding (int or tuple, optional) – Zero-padding added to all three sides of the input. Default: 0

    • padding_mode (string, optional) – the padding mode to use. Default: 'zeros'

    • dilation (int or tuple, optional) – Spacing between kernel elements. Default: 1

    • groups (int, optional) – Number of blocked connections from input channels to output channels. Default: 1

    • bias (bool, optional) – If True, adds a learnable bias to the output. Default: True
    Shape:
    • Input: \((N, C_{in}, D_{in}, H_{in}, W_{in})\)

    • Output: \((N, C_{out}, D_{out}, H_{out}, W_{out})\) where

      \[D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] - \text{dilation}[0] \times (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor\]

      \[H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] - \text{dilation}[1] \times (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor\]

      \[W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] - \text{dilation}[2] \times (\text{kernel\_size}[2] - 1) - 1}{\text{stride}[2]} + 1\right\rfloor\]
    Variables
    • ~Conv3d.weight (Tensor) – the learnable weights of the module of shape \((\text{out\_channels}, \frac{\text{in\_channels}}{\text{groups}}, \text{kernel\_size[0]}, \text{kernel\_size[1]}, \text{kernel\_size[2]})\). The values of these weights are sampled from \(\mathcal{U}(-\sqrt{k}, \sqrt{k})\) where \(k = \frac{1}{C_\text{in} * \prod_{i=0}^{2}\text{kernel\_size}[i]}\)

    • ~Conv3d.bias (Tensor) – the learnable bias of the module of shape (out_channels). If bias is True, then the values of these weights are sampled from \(\mathcal{U}(-\sqrt{k}, \sqrt{k})\) where \(k = \frac{1}{C_\text{in} * \prod_{i=0}^{2}\text{kernel\_size}[i]}\)

    Examples:

    >>> # With square kernels and equal stride
    >>> m = nn.Conv3d(16, 33, 3, stride=2)
    >>> # non-square kernels and unequal stride and with padding
    >>> m = nn.Conv3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0))
    >>> input = torch.randn(20, 16, 10, 50, 100)
    >>> output = m(input)

    ConvTranspose1d

    class torch.nn.ConvTranspose1d(in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, groups=1, bias=True, dilation=1, padding_mode='zeros')[source]

    Applies a 1D transposed convolution operator over an input image composed of several input planes.

    This module can be seen as the gradient of Conv1d with respect to its input. It is also known as a fractionally-strided convolution or a deconvolution (although it is not an actual deconvolution operation).

    • stride controls the stride for the cross-correlation.

    • padding controls the amount of implicit zero-padding on both sides for dilation * (kernel_size - 1) - padding number of points. See note below for details.

    • output_padding controls the additional size added to one side of the output shape. See note below for details.

    • dilation controls the spacing between the kernel points; also known as the à trous algorithm. It is harder to describe, but this link has a nice visualization of what dilation does.

    • groups controls the connections between inputs and outputs. in_channels and out_channels must both be divisible by groups. For example,

      • At groups=1, all inputs are convolved to all outputs.

      • At groups=2, the operation becomes equivalent to having two conv layers side by side, each seeing half the input channels, and producing half the output channels, and both subsequently concatenated.

      • At groups= in_channels, each input channel is convolved with its own set of filters (of size \(\left\lfloor\frac{out\_channels}{in\_channels}\right\rfloor\)).

    Note

    Depending on the size of your kernel, several (of the last) columns of the input might be lost, because it is a valid cross-correlation, and not a full cross-correlation. It is up to the user to add proper padding.

    Note

    The padding argument effectively adds dilation * (kernel_size - 1) - padding amount of zero padding to both sides of the input. This is set so that when a Conv1d and a ConvTranspose1d are initialized with the same parameters, they are inverses of each other in regard to the input and output shapes. However, when stride > 1, Conv1d maps multiple input shapes to the same output shape. output_padding is provided to resolve this ambiguity by effectively increasing the calculated output shape on one side. Note that output_padding is only used to find the output shape, but does not actually add zero-padding to the output.
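    A minimal sketch of this shape relationship (the sizes are illustrative; compare the ConvTranspose2d example further below):

    >>> conv = nn.Conv1d(16, 16, 3, stride=2, padding=1)
    >>> tconv = nn.ConvTranspose1d(16, 16, 3, stride=2, padding=1)
    >>> input = torch.randn(1, 16, 12)
    >>> h = conv(input)
    >>> h.size()
    torch.Size([1, 16, 6])
    >>> tconv(h, output_size=input.size()).size()
    torch.Size([1, 16, 12])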

    Note

    In some circumstances when using the CUDA backend with CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is undesirable, you can try to make the operation deterministic (potentially at a performance cost) by setting torch.backends.cudnn.deterministic = True. Please see the notes on Reproducibility for background.
    Parameters
    • in_channels (int) – Number of channels in the input image

    • out_channels (int) – Number of channels produced by the convolution

    • kernel_size (int or tuple) – Size of the convolving kernel

    • stride (int or tuple, optional) – Stride of the convolution. Default: 1

    • padding (int or tuple, optional) – dilation * (kernel_size - 1) - padding zero-padding will be added to both sides of the input. Default: 0

    • output_padding (int or tuple, optional) – Additional size added to one side of the output shape. Default: 0

    • groups (int, optional) – Number of blocked connections from input channels to output channels. Default: 1

    • bias (bool, optional) – If True, adds a learnable bias to the output. Default: True

    • dilation (int or tuple, optional) – Spacing between kernel elements. Default: 1
    Shape:
    • Input: \((N, C_{in}, L_{in})\)

    • Output: \((N, C_{out}, L_{out})\) where

      \[L_{out} = (L_{in} - 1) \times \text{stride} - 2 \times \text{padding} + \text{dilation} \times (\text{kernel\_size} - 1) + \text{output\_padding} + 1\]
    Variables
    • ~ConvTranspose1d.weight (Tensor) – the learnable weights of the module of shape \((\text{in\_channels}, \frac{\text{out\_channels}}{\text{groups}}, \text{kernel\_size})\). The values of these weights are sampled from \(\mathcal{U}(-\sqrt{k}, \sqrt{k})\) where \(k = \frac{1}{C_\text{in} * \text{kernel\_size}}\)

    • ~ConvTranspose1d.bias (Tensor) – the learnable bias of the module of shape (out_channels). If bias is True, then the values of these weights are sampled from \(\mathcal{U}(-\sqrt{k}, \sqrt{k})\) where \(k = \frac{1}{C_\text{in} * \text{kernel\_size}}\)

    ConvTranspose2d

    class torch.nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, groups=1, bias=True, dilation=1, padding_mode='zeros')[source]

    Applies a 2D transposed convolution operator over an input image composed of several input planes.

    This module can be seen as the gradient of Conv2d with respect to its input. It is also known as a fractionally-strided convolution or a deconvolution (although it is not an actual deconvolution operation).

    • stride controls the stride for the cross-correlation.

    • padding controls the amount of implicit zero-padding on both sides for dilation * (kernel_size - 1) - padding number of points. See note below for details.

    • output_padding controls the additional size added to one side of the output shape. See note below for details.

    • dilation controls the spacing between the kernel points; also known as the à trous algorithm. It is harder to describe, but this link has a nice visualization of what dilation does.

    • groups controls the connections between inputs and outputs. in_channels and out_channels must both be divisible by groups. For example,

      • At groups=1, all inputs are convolved to all outputs.

      • At groups=2, the operation becomes equivalent to having two conv layers side by side, each seeing half the input channels, and producing half the output channels, and both subsequently concatenated.

      • At groups= in_channels, each input channel is convolved with its own set of filters (of size \(\left\lfloor\frac{out\_channels}{in\_channels}\right\rfloor\)).

    The parameters kernel_size, stride, padding, output_padding can either be:

    • a single int – in which case the same value is used for the height and width dimensions

    • a tuple of two ints – in which case, the first int is used for the height dimension, and the second int for the width dimension

    Note

    Depending on the size of your kernel, several (of the last) columns of the input might be lost, because it is a valid cross-correlation, and not a full cross-correlation. It is up to the user to add proper padding.

    Note

    The padding argument effectively adds dilation * (kernel_size - 1) - padding amount of zero padding to both sides of the input. This is set so that when a Conv2d and a ConvTranspose2d are initialized with the same parameters, they are inverses of each other in regard to the input and output shapes. However, when stride > 1, Conv2d maps multiple input shapes to the same output shape. output_padding is provided to resolve this ambiguity by effectively increasing the calculated output shape on one side. Note that output_padding is only used to find the output shape, but does not actually add zero-padding to the output.

    Note

    In some circumstances when using the CUDA backend with CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is undesirable, you can try to make the operation deterministic (potentially at a performance cost) by setting torch.backends.cudnn.deterministic = True. Please see the notes on Reproducibility for background.
    Parameters
    • in_channels (int) – Number of channels in the input image

    • out_channels (int) – Number of channels produced by the convolution

    • kernel_size (int or tuple) – Size of the convolving kernel

    • stride (int or tuple, optional) – Stride of the convolution. Default: 1

    • padding (int or tuple, optional) – dilation * (kernel_size - 1) - padding zero-padding will be added to both sides of each dimension in the input. Default: 0

    • output_padding (int or tuple, optional) – Additional size added to one side of each dimension in the output shape. Default: 0

    • groups (int, optional) – Number of blocked connections from input channels to output channels. Default: 1

    • bias (bool, optional) – If True, adds a learnable bias to the output. Default: True

    • dilation (int or tuple, optional) – Spacing between kernel elements. Default: 1
    Shape:
    • Input: \((N, C_{in}, H_{in}, W_{in})\)

    • Output: \((N, C_{out}, H_{out}, W_{out})\) where

      \[H_{out} = (H_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{dilation}[0] \times (\text{kernel\_size}[0] - 1) + \text{output\_padding}[0] + 1\]

      \[W_{out} = (W_{in} - 1) \times \text{stride}[1] - 2 \times \text{padding}[1] + \text{dilation}[1] \times (\text{kernel\_size}[1] - 1) + \text{output\_padding}[1] + 1\]
    Variables
    • ~ConvTranspose2d.weight (Tensor) – the learnable weights of the module of shape \((\text{in\_channels}, \frac{\text{out\_channels}}{\text{groups}}, \text{kernel\_size[0]}, \text{kernel\_size[1]})\). The values of these weights are sampled from \(\mathcal{U}(-\sqrt{k}, \sqrt{k})\) where \(k = \frac{1}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}\)

    • ~ConvTranspose2d.bias (Tensor) – the learnable bias of the module of shape (out_channels). If bias is True, then the values of these weights are sampled from \(\mathcal{U}(-\sqrt{k}, \sqrt{k})\) where \(k = \frac{1}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}\)

    Examples:

    >>> # With square kernels and equal stride
    >>> m = nn.ConvTranspose2d(16, 33, 3, stride=2)
    >>> # non-square kernels and unequal stride and with padding
    >>> m = nn.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
    >>> input = torch.randn(20, 16, 50, 100)
    >>> output = m(input)
    >>> # exact output size can be also specified as an argument
    >>> input = torch.randn(1, 16, 12, 12)
    >>> downsample = nn.Conv2d(16, 16, 3, stride=2, padding=1)
    >>> upsample = nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1)
    >>> h = downsample(input)
    >>> h.size()
    torch.Size([1, 16, 6, 6])
    >>> output = upsample(h, output_size=input.size())
    >>> output.size()
    torch.Size([1, 16, 12, 12])

    ConvTranspose3d

    class torch.nn.ConvTranspose3d(in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, groups=1, bias=True, dilation=1, padding_mode='zeros')[source]

    Applies a 3D transposed convolution operator over an input image composed of several input planes. The transposed convolution operator multiplies each input value element-wise by a learnable kernel, and sums over the outputs from all input feature planes.

    This module can be seen as the gradient of Conv3d with respect to its input. It is also known as a fractionally-strided convolution or a deconvolution (although it is not an actual deconvolution operation).

    • stride controls the stride for the cross-correlation.

    • padding controls the amount of implicit zero-padding on both sides for dilation * (kernel_size - 1) - padding number of points. See note below for details.

    • output_padding controls the additional size added to one side of the output shape. See note below for details.

    • dilation controls the spacing between the kernel points; also known as the à trous algorithm. It is harder to describe, but this link has a nice visualization of what dilation does.

    • groups controls the connections between inputs and outputs. in_channels and out_channels must both be divisible by groups. For example,

      • At groups=1, all inputs are convolved to all outputs.

      • At groups=2, the operation becomes equivalent to having two conv layers side by side, each seeing half the input channels, and producing half the output channels, and both subsequently concatenated.

      • At groups= in_channels, each input channel is convolved with its own set of filters (of size \(\left\lfloor\frac{out\_channels}{in\_channels}\right\rfloor\)).

    The parameters kernel_size, stride, padding, output_padding can either be:

    • a single int – in which case the same value is used for the depth, height and width dimensions

    • a tuple of three ints – in which case, the first int is used for the depth dimension, the second int for the height dimension and the third int for the width dimension

    Note

    Depending on the size of your kernel, several (of the last) columns of the input might be lost, because it is a valid cross-correlation, and not a full cross-correlation. It is up to the user to add proper padding.

    Note

    The padding argument effectively adds dilation * (kernel_size - 1) - padding amount of zero padding to both sides of the input. This is set so that when a Conv3d and a ConvTranspose3d are initialized with the same parameters, they are inverses of each other in regard to the input and output shapes. However, when stride > 1, Conv3d maps multiple input shapes to the same output shape. output_padding is provided to resolve this ambiguity by effectively increasing the calculated output shape on one side. Note that output_padding is only used to find the output shape, but does not actually add zero-padding to the output.

    Note

    In some circumstances when using the CUDA backend with CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is undesirable, you can try to make the operation deterministic (potentially at a performance cost) by setting torch.backends.cudnn.deterministic = True. Please see the notes on Reproducibility for background.
    Parameters
    • in_channels (int) – Number of channels in the input image

    • out_channels (int) – Number of channels produced by the convolution

    • kernel_size (int or tuple) – Size of the convolving kernel

    • stride (int or tuple, optional) – Stride of the convolution. Default: 1

    • padding (int or tuple, optional) – dilation * (kernel_size - 1) - padding zero-padding will be added to both sides of each dimension in the input. Default: 0

    • output_padding (int or tuple, optional) – Additional size added to one side of each dimension in the output shape. Default: 0

    • groups (int, optional) – Number of blocked connections from input channels to output channels. Default: 1

    • bias (bool, optional) – If True, adds a learnable bias to the output. Default: True

    • dilation (int or tuple, optional) – Spacing between kernel elements. Default: 1
    Shape:
    • Input: \((N, C_{in}, D_{in}, H_{in}, W_{in})\)

    • Output: \((N, C_{out}, D_{out}, H_{out}, W_{out})\) where

      \[D_{out} = (D_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{dilation}[0] \times (\text{kernel\_size}[0] - 1) + \text{output\_padding}[0] + 1\]

      \[H_{out} = (H_{in} - 1) \times \text{stride}[1] - 2 \times \text{padding}[1] + \text{dilation}[1] \times (\text{kernel\_size}[1] - 1) + \text{output\_padding}[1] + 1\]

      \[W_{out} = (W_{in} - 1) \times \text{stride}[2] - 2 \times \text{padding}[2] + \text{dilation}[2] \times (\text{kernel\_size}[2] - 1) + \text{output\_padding}[2] + 1\]
    Variables
    • ~ConvTranspose3d.weight (Tensor) – the learnable weights of the module of shape \((\text{in\_channels}, \frac{\text{out\_channels}}{\text{groups}}, \text{kernel\_size[0]}, \text{kernel\_size[1]}, \text{kernel\_size[2]})\). The values of these weights are sampled from \(\mathcal{U}(-\sqrt{k}, \sqrt{k})\) where \(k = \frac{1}{C_\text{in} * \prod_{i=0}^{2}\text{kernel\_size}[i]}\)

    • ~ConvTranspose3d.bias (Tensor) – the learnable bias of the module of shape (out_channels). If bias is True, then the values of these weights are sampled from \(\mathcal{U}(-\sqrt{k}, \sqrt{k})\) where \(k = \frac{1}{C_\text{in} * \prod_{i=0}^{2}\text{kernel\_size}[i]}\)

    Examples:

    >>> # With square kernels and equal stride
    >>> m = nn.ConvTranspose3d(16, 33, 3, stride=2)
    >>> # non-square kernels and unequal stride and with padding
    >>> m = nn.ConvTranspose3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(0, 4, 2))
    >>> input = torch.randn(20, 16, 10, 50, 100)
    >>> output = m(input)

    Unfold

    class torch.nn.Unfold(kernel_size, dilation=1, padding=0, stride=1)[source]

    Extracts sliding local blocks from a batched input tensor.

    Consider a batched input tensor of shape \((N, C, *)\), where \(N\) is the batch dimension, \(C\) is the channel dimension, and \(*\) represents arbitrary spatial dimensions. This operation flattens each sliding kernel_size-sized block within the spatial dimensions of input into a column (i.e., last dimension) of a 3-D output tensor of shape \((N, C \times \prod(\text{kernel\_size}), L)\), where \(C \times \prod(\text{kernel\_size})\) is the total number of values within each block (a block has \(\prod(\text{kernel\_size})\) spatial locations each containing a \(C\)-channeled vector), and \(L\) is the total number of such blocks:

    \[L = \prod_d \left\lfloor\frac{\text{spatial\_size}[d] + 2 \times \text{padding}[d] - \text{dilation}[d] \times (\text{kernel\_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor,\]

    where \(\text{spatial\_size}\) is formed by the spatial dimensions of input (\(*\) above), and \(d\) is over all spatial dimensions.

    Therefore, indexing output at the last dimension (column dimension) gives all values within a certain block.

    The padding, stride and dilation arguments specify how the sliding blocks are retrieved.

    • stride controls the stride for the sliding blocks.

    • padding controls the amount of implicit zero-padding on both sides for padding number of points for each dimension before reshaping.

    • dilation controls the spacing between the kernel points; also known as the à trous algorithm. It is harder to describe, but this link has a nice visualization of what dilation does.
    Parameters
    • kernel_size (int or tuple) – the size of the sliding blocks

    • stride (int or tuple, optional) – the stride of the sliding blocks in the input spatial dimensions. Default: 1

    • padding (int or tuple, optional) – implicit zero padding to be added on both sides of input. Default: 0

    • dilation (int or tuple, optional) – a parameter that controls the stride of elements within the neighborhood. Default: 1

    • If kernel_size, dilation, padding or stride is an int or a tuple of length 1, their values will be replicated across all spatial dimensions.

    • For the case of two input spatial dimensions this operation is sometimes called im2col.

    Note

    Fold calculates each combined value in the resulting large tensor by summing all values from all containing blocks. Unfold extracts the values in the local blocks by copying from the large tensor. So, if the blocks overlap, they are not inverses of each other.

    Warning

    Currently, only 4-D input tensors (batched image-like tensors) are supported.
    Shape:
    • Input: \((N, C, *)\)

    • Output: \((N, C \times \prod(\text{kernel\_size}), L)\) as described above

    Examples:

    >>> unfold = nn.Unfold(kernel_size=(2, 3))
    >>> input = torch.randn(2, 5, 3, 4)
    >>> output = unfold(input)
    >>> # each patch contains 30 values (2x3=6 vectors, each of 5 channels)
    >>> # 4 blocks (2x3 kernels) in total in the 3x4 input
    >>> output.size()
    torch.Size([2, 30, 4])

    >>> # Convolution is equivalent to Unfold + Matrix Multiplication + Fold (or view to output shape)
    >>> inp = torch.randn(1, 3, 10, 12)
    >>> w = torch.randn(2, 3, 4, 5)
    >>> inp_unf = torch.nn.functional.unfold(inp, (4, 5))
    >>> out_unf = inp_unf.transpose(1, 2).matmul(w.view(w.size(0), -1).t()).transpose(1, 2)
    >>> out = torch.nn.functional.fold(out_unf, (7, 8), (1, 1))
    >>> # or equivalently (and avoiding a copy),
    >>> # out = out_unf.view(1, 2, 7, 8)
    >>> (torch.nn.functional.conv2d(inp, w) - out).abs().max()
    tensor(1.9073e-06)

    Fold

    class torch.nn.Fold(output_size, kernel_size, dilation=1, padding=0, stride=1)[source]

    Combines an array of sliding local blocks into a large containing tensor.

    Consider a batched input tensor containing sliding local blocks, e.g., patches of images, of shape \((N, C \times \prod(\text{kernel\_size}), L)\), where \(N\) is the batch dimension, \(C \times \prod(\text{kernel\_size})\) is the number of values within a block (a block has \(\prod(\text{kernel\_size})\) spatial locations each containing a \(C\)-channeled vector), and \(L\) is the total number of blocks. (This is exactly the same specification as the output shape of Unfold.) This operation combines these local blocks into the large output tensor of shape \((N, C, \text{output\_size}[0], \text{output\_size}[1], \dots)\) by summing the overlapping values. Similar to Unfold, the arguments must satisfy

    \[L = \prod_d \left\lfloor\frac{\text{output\_size}[d] + 2 \times \text{padding}[d] - \text{dilation}[d] \times (\text{kernel\_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor,\]

    where \(d\) is over all spatial dimensions.

    • output_size describes the spatial shape of the large containing tensor of the sliding local blocks. It is useful to resolve the ambiguity when multiple input shapes map to the same number of sliding blocks, e.g., with stride > 1.

    The padding, stride and dilation arguments specify how the sliding blocks are retrieved.

    • stride controls the stride for the sliding blocks.

    • padding controls the amount of implicit zero-padding on both sides for padding number of points for each dimension before reshaping.

    • dilation controls the spacing between the kernel points; also known as the à trous algorithm. It is harder to describe, but this link has a nice visualization of what dilation does.
    Parameters
    • output_size (int or tuple) – the shape of the spatial dimensions of the output (i.e., output.sizes()[2:])

    • kernel_size (int or tuple) – the size of the sliding blocks

    • stride (int or tuple, optional) – the stride of the sliding blocks in the input spatial dimensions. Default: 1

    • padding (int or tuple, optional) – implicit zero padding to be added on both sides of input. Default: 0

    • dilation (int or tuple, optional) – a parameter that controls the stride of elements within the neighborhood. Default: 1

    • If output_size, kernel_size, dilation, padding or stride is an int or a tuple of length 1 then their values will be replicated across all spatial dimensions.

    • For the case of two output spatial dimensions this operation is sometimes called col2im.

    Note

    Fold calculates each combined value in the resulting large tensor by summing all values from all containing blocks. Unfold extracts the values in the local blocks by copying from the large tensor. So, if the blocks overlap, they are not inverses of each other.
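    A small sketch of this behaviour (illustrative sizes): folding the unfolded all-ones tensor yields, at each position, the number of overlapping blocks that cover it, rather than restoring the original ones:

    >>> x = torch.ones(1, 1, 3, 3)
    >>> unfold = nn.Unfold(kernel_size=(2, 2))
    >>> fold = nn.Fold(output_size=(3, 3), kernel_size=(2, 2))
    >>> fold(unfold(x))
    tensor([[[[1., 2., 1.],
              [2., 4., 2.],
              [1., 2., 1.]]]])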

    Warning

    Currently, only 4-D output tensors (batched image-like tensors) are supported.
    Shape:
    • Input: \((N, C \times \prod(\text{kernel\_size}), L)\)

    • Output: \((N, C, \text{output\_size}[0], \text{output\_size}[1], \dots)\) as described above

    Examples:

    >>> fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 2))
    >>> input = torch.randn(1, 3 * 2 * 2, 12)
    >>> output = fold(input)
    >>> output.size()
    torch.Size([1, 3, 4, 5])
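    As a check of the constraint above: with output_size=(4, 5), kernel_size=(2, 2), stride 1 and no padding or dilation, \(L = (4 - 2 + 1) \times (5 - 2 + 1) = 3 \times 4 = 12\), which is why the example input has 12 columns.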

    Pooling layers


    MaxPool1d

    class torch.nn.MaxPool1d(kernel_size, stride=None, padding=0, dilation=1, return_indices=False, ceil_mode=False)[source]

    Applies a 1D max pooling over an input signal composed of several input planes.

    In the simplest case, the output value of the layer with input size \((N, C, L)\) and output \((N, C, L_{out})\) can be precisely described as:

    \[out(N_i, C_j, k) = \max_{m=0, \ldots, \text{kernel\_size} - 1} input(N_i, C_j, stride \times k + m)\]

    If padding is non-zero, then the input is implicitly zero-padded on both sides for padding number of points. dilation controls the spacing between the kernel points. It is harder to describe, but this link has a nice visualization of what dilation does.
    Parameters
    • kernel_size – the size of the window to take a max over

    • stride – the stride of the window. Default value is kernel_size

    • padding – implicit zero padding to be added on both sides

    • dilation – a parameter that controls the stride of elements in the window

    • return_indices – if True, will return the max indices along with the outputs. Useful for torch.nn.MaxUnpool1d later

    • ceil_mode – when True, will use ceil instead of floor to compute the output shape
    Shape:
    • Input: \((N, C, L_{in})\)

    • Output: \((N, C, L_{out})\), where

      \[L_{out} = \left\lfloor \frac{L_{in} + 2 \times \text{padding} - \text{dilation} \times (\text{kernel\_size} - 1) - 1}{\text{stride}} + 1\right\rfloor\]

    Examples:

    >>> # pool of size=3, stride=2
    >>> m = nn.MaxPool1d(3, stride=2)
    >>> input = torch.randn(20, 16, 50)
    >>> output = m(input)
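    As a worked check of the shape formula above: \(L_{out} = \left\lfloor\frac{50 + 0 - 1 \times (3 - 1) - 1}{2} + 1\right\rfloor = 24\), so:

    >>> output.size()
    torch.Size([20, 16, 24])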

    MaxPool2d

    class torch.nn.MaxPool2d(kernel_size, stride=None, padding=0, dilation=1, return_indices=False, ceil_mode=False)[source]

    Applies a 2D max pooling over an input signal composed of several input planes.

    In the simplest case, the output value of the layer with input size \((N, C, H, W)\), output \((N, C, H_{out}, W_{out})\) and kernel_size \((kH, kW)\) can be precisely described as:

    \[\begin{aligned} out(N_i, C_j, h, w) ={} & \max_{m=0, \ldots, kH-1} \max_{n=0, \ldots, kW-1} \\ & \text{input}(N_i, C_j, \text{stride[0]} \times h + m, \text{stride[1]} \times w + n) \end{aligned}\]

    If padding is non-zero, then the input is implicitly zero-padded on both sides for padding number of points. dilation controls the spacing between the kernel points. It is harder to describe, but this link has a nice visualization of what dilation does.

    The parameters kernel_size, stride, padding, dilation can either be:

    • a single int – in which case the same value is used for the height and width dimension

    • a tuple of two ints – in which case, the first int is used for the height dimension, and the second int for the width dimension
    Parameters
    • kernel_size – the size of the window to take a max over

    • stride – the stride of the window. Default value is kernel_size

    • padding – implicit zero padding to be added on both sides

    • dilation – a parameter that controls the stride of elements in the window

    • return_indices – if True, will return the max indices along with the outputs. Useful for torch.nn.MaxUnpool2d later

    • ceil_mode – when True, will use ceil instead of floor to compute the output shape
    Shape:
    • Input: \((N, C, H_{in}, W_{in})\)

    • Output: \((N, C, H_{out}, W_{out})\), where

      \[H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding[0]} - \text{dilation[0]} \times (\text{kernel\_size[0]} - 1) - 1}{\text{stride[0]}} + 1\right\rfloor\]

      \[W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding[1]} - \text{dilation[1]} \times (\text{kernel\_size[1]} - 1) - 1}{\text{stride[1]}} + 1\right\rfloor\]

    Examples:

    >>> # pool of square window of size=3, stride=2
    >>> m = nn.MaxPool2d(3, stride=2)
    >>> # pool of non-square window
    >>> m = nn.MaxPool2d((3, 2), stride=(2, 1))
    >>> input = torch.randn(20, 16, 50, 32)
    >>> output = m(input)

    MaxPool3d

    class torch.nn.MaxPool3d(kernel_size, stride=None, padding=0, dilation=1, return_indices=False, ceil_mode=False)[source]

    Applies a 3D max pooling over an input signal composed of several input planes.

    In the simplest case, the output value of the layer with input size \((N, C, D, H, W)\), output \((N, C, D_{out}, H_{out}, W_{out})\) and kernel_size \((kD, kH, kW)\) can be precisely described as:

    \[\begin{aligned} \text{out}(N_i, C_j, d, h, w) ={} & \max_{k=0, \ldots, kD-1} \max_{m=0, \ldots, kH-1} \max_{n=0, \ldots, kW-1} \\ & \text{input}(N_i, C_j, \text{stride[0]} \times d + k, \text{stride[1]} \times h + m, \text{stride[2]} \times w + n) \end{aligned}\]

    If padding is non-zero, then the input is implicitly zero-padded on both sides for padding number of points. dilation controls the spacing between the kernel points. It is harder to describe, but this link has a nice visualization of what dilation does.

    The parameters kernel_size, stride, padding, dilation can either be:

    • a single int – in which case the same value is used for the depth, height and width dimension

    • a tuple of three ints – in which case, the first int is used for the depth dimension, the second int for the height dimension and the third int for the width dimension
    Parameters
    • kernel_size – the size of the window to take a max over

    • stride – the stride of the window. Default value is kernel_size

    • padding – implicit zero padding to be added on all three sides

    • dilation – a parameter that controls the stride of elements in the window

    • return_indices – if True, will return the max indices along with the outputs. Useful for torch.nn.MaxUnpool3d later

    • ceil_mode – when True, will use ceil instead of floor to compute the output shape
    Shape:
    • Input: \((N, C, D_{in}, H_{in}, W_{in})\)

    • Output: \((N, C, D_{out}, H_{out}, W_{out})\), where

      \[D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] - \text{dilation}[0] \times (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor\]

      \[H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] - \text{dilation}[1] \times (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor\]

      \[W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] - \text{dilation}[2] \times (\text{kernel\_size}[2] - 1) - 1}{\text{stride}[2]} + 1\right\rfloor\]

    Examples:

    >>> # pool of square window of size=3, stride=2
    >>> m = nn.MaxPool3d(3, stride=2)
    >>> # pool of non-square window
    >>> m = nn.MaxPool3d((3, 2, 2), stride=(2, 1, 2))
    >>> input = torch.randn(20, 16, 50, 44, 31)
    >>> output = m(input)

    MaxUnpool1d

    class torch.nn.MaxUnpool1d(kernel_size, stride=None, padding=0)[source]

    Computes a partial inverse of MaxPool1d.

    MaxPool1d is not fully invertible, since the non-maximal values are lost.

    MaxUnpool1d takes in as input the output of MaxPool1d including the indices of the maximal values and computes a partial inverse in which all non-maximal values are set to zero.

    Note

    MaxPool1d can map several input sizes to the same output sizes. Hence, the inversion process can get ambiguous. To accommodate this, you can provide the needed output size as an additional argument output_size in the forward call. See the Inputs and Example below.
    Parameters
    • kernel_size (int or tuple) – Size of the max pooling window.

    • stride (int or tuple) – Stride of the max pooling window. It is set to kernel_size by default.

    • padding (int or tuple) – Padding that was added to the input
    Inputs:
    • input: the input Tensor to invert

    • indices: the indices given out by MaxPool1d

    • output_size (optional): the targeted output size
    Shape:
    • Input: \((N, C, H_{in})\)

    • Output: \((N, C, H_{out})\), where

      \[H_{out} = (H_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{kernel\_size}[0]\]

      or as given by output_size in the call operator

    Example:

    >>> pool = nn.MaxPool1d(2, stride=2, return_indices=True)
    >>> unpool = nn.MaxUnpool1d(2, stride=2)
    >>> input = torch.tensor([[[1., 2, 3, 4, 5, 6, 7, 8]]])
    >>> output, indices = pool(input)
    >>> unpool(output, indices)
    tensor([[[ 0.,  2.,  0.,  4.,  0.,  6.,  0.,  8.]]])

    >>> # Example showcasing the use of output_size
    >>> input = torch.tensor([[[1., 2, 3, 4, 5, 6, 7, 8, 9]]])
    >>> output, indices = pool(input)
    >>> unpool(output, indices, output_size=input.size())
    tensor([[[ 0.,  2.,  0.,  4.,  0.,  6.,  0.,  8.,  0.]]])

    >>> unpool(output, indices)
    tensor([[[ 0.,  2.,  0.,  4.,  0.,  6.,  0.,  8.]]])

    MaxUnpool2d

    class torch.nn.MaxUnpool2d(kernel_size, stride=None, padding=0)[source]

    Computes a partial inverse of MaxPool2d.

    MaxPool2d is not fully invertible, since the non-maximal values are lost.

    MaxUnpool2d takes in as input the output of MaxPool2d including the indices of the maximal values and computes a partial inverse in which all non-maximal values are set to zero.

    Note

    MaxPool2d can map several input sizes to the same output sizes. Hence, the inversion process can get ambiguous. To accommodate this, you can provide the needed output size as an additional argument output_size in the forward call. See the Inputs and Example below.
    Parameters
    • kernel_size (int or tuple) – Size of the max pooling window.

    • stride (int or tuple) – Stride of the max pooling window. It is set to kernel_size by default.

    • padding (int or tuple) – Padding that was added to the input
    Inputs:
    • input: the input Tensor to invert

    • indices: the indices given out by MaxPool2d

    • output_size (optional): the targeted output size
    Shape:
    • Input: \((N, C, H_{in}, W_{in})\)

    • Output: \((N, C, H_{out}, W_{out})\), where

      \[H_{out} = (H_{in} - 1) \times \text{stride[0]} - 2 \times \text{padding[0]} + \text{kernel\_size[0]}\]

      \[W_{out} = (W_{in} - 1) \times \text{stride[1]} - 2 \times \text{padding[1]} + \text{kernel\_size[1]}\]

      or as given by output_size in the call operator

    Example:

    >>> pool = nn.MaxPool2d(2, stride=2, return_indices=True)
    >>> unpool = nn.MaxUnpool2d(2, stride=2)
    >>> input = torch.tensor([[[[ 1.,  2,  3,  4],
                                [ 5,  6,  7,  8],
                                [ 9, 10, 11, 12],
                                [13, 14, 15, 16]]]])
    >>> output, indices = pool(input)
    >>> unpool(output, indices)
    tensor([[[[  0.,   0.,   0.,   0.],
              [  0.,   6.,   0.,   8.],
              [  0.,   0.,   0.,   0.],
              [  0.,  14.,   0.,  16.]]]])

    >>> # specify a different output size than input size
    >>> unpool(output, indices, output_size=torch.Size([1, 1, 5, 5]))
    tensor([[[[  0.,   0.,   0.,   0.,   0.],
              [  6.,   0.,   8.,   0.,   0.],
              [  0.,   0.,   0.,  14.,   0.],
              [ 16.,   0.,   0.,   0.,   0.],
              [  0.,   0.,   0.,   0.,   0.]]]])

    MaxUnpool3d

    class torch.nn.MaxUnpool3d(kernel_size, stride=None, padding=0)[source]

    Computes a partial inverse of MaxPool3d.

    MaxPool3d is not fully invertible, since the non-maximal values are lost. MaxUnpool3d takes in as input the output of MaxPool3d including the indices of the maximal values and computes a partial inverse in which all non-maximal values are set to zero.

    Note

    MaxPool3d can map several input sizes to the same output sizes. Hence, the inversion process can get ambiguous. To accommodate this, you can provide the needed output size as an additional argument output_size in the forward call. See the Inputs section below.
    Parameters
    • kernel_size (int or tuple) – Size of the max pooling window.

    • stride (int or tuple) – Stride of the max pooling window. It is set to kernel_size by default.

    • padding (int or tuple) – Padding that was added to the input
    Inputs:
    • input: the input Tensor to invert

    • indices: the indices given out by MaxPool3d

    • output_size (optional): the targeted output size
    Shape:
    • Input: \((N, C, D_{in}, H_{in}, W_{in})\)

    • Output: \((N, C, D_{out}, H_{out}, W_{out})\), where

      \[D_{out} = (D_{in} - 1) \times \text{stride[0]} - 2 \times \text{padding[0]} + \text{kernel\_size[0]}\]

      \[H_{out} = (H_{in} - 1) \times \text{stride[1]} - 2 \times \text{padding[1]} + \text{kernel\_size[1]}\]

      \[W_{out} = (W_{in} - 1) \times \text{stride[2]} - 2 \times \text{padding[2]} + \text{kernel\_size[2]}\]

      or as given by output_size in the call operator

    Example:

    >>> # pool of square window of size=3, stride=2
    >>> pool = nn.MaxPool3d(3, stride=2, return_indices=True)
    >>> unpool = nn.MaxUnpool3d(3, stride=2)
    >>> output, indices = pool(torch.randn(20, 16, 51, 33, 15))
    >>> unpooled_output = unpool(output, indices)
    >>> unpooled_output.size()
    torch.Size([20, 16, 51, 33, 15])

    AvgPool1d

    class torch.nn.AvgPool1d(kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True)[source]

    Applies a 1D average pooling over an input signal composed of several input planes.

    In the simplest case, the output value of the layer with input size \((N, C, L)\), output \((N, C, L_{out})\) and kernel_size \(k\) can be precisely described as:

    \[\text{out}(N_i, C_j, l) = \frac{1}{k} \sum_{m=0}^{k-1} \text{input}(N_i, C_j, \text{stride} \times l + m)\]

    If padding is non-zero, then the input is implicitly zero-padded on both sides for padding number of points.

    The parameters kernel_size, stride, padding can each be an int or a one-element tuple.
    Parameters
    • kernel_size – the size of the window

    • stride – the stride of the window. Default value is kernel_size

    • padding – implicit zero padding to be added on both sides

    • ceil_mode – when True, will use ceil instead of floor to compute the output shape

    • count_include_pad – when True, will include the zero-padding in the averaging calculation
    Shape:
    • Input: \((N, C, L_{in})\)

    • Output: \((N, C, L_{out})\), where

      \[L_{out} = \left\lfloor \frac{L_{in} + 2 \times \text{padding} - \text{kernel\_size}}{\text{stride}} + 1\right\rfloor\]

    Examples:

    >>> # pool with window of size=3, stride=2
    >>> m = nn.AvgPool1d(3, stride=2)
    >>> m(torch.tensor([[[1., 2, 3, 4, 5, 6, 7]]]))
    tensor([[[ 2.,  4.,  6.]]])

    AvgPool2d

    class torch.nn.AvgPool2d(kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None)[source]

    Applies a 2D average pooling over an input signal composed of several input planes.

    In the simplest case, the output value of the layer with input size \((N, C, H, W)\), output \((N, C, H_{out}, W_{out})\) and kernel_size \((kH, kW)\) can be precisely described as:

    \[out(N_i, C_j, h, w) = \frac{1}{kH * kW} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1} input(N_i, C_j, stride[0] \times h + m, stride[1] \times w + n)\]

    If padding is non-zero, then the input is implicitly zero-padded on both sides for padding number of points.

    The parameters kernel_size, stride, padding can either be:

    • a single int – in which case the same value is used for the height and width dimension

    • a tuple of two ints – in which case, the first int is used for the height dimension, and the second int for the width dimension
    Parameters
    • kernel_size – the size of the window

    • stride – the stride of the window. Default value is kernel_size

    • padding – implicit zero padding to be added on both sides

    • ceil_mode – when True, will use ceil instead of floor to compute the output shape

    • count_include_pad – when True, will include the zero-padding in the averaging calculation

    • divisor_override – if specified, it will be used as the divisor, otherwise kernel_size will be used
    Shape:
    • Input: \((N, C, H_{in}, W_{in})\)

    • Output: \((N, C, H_{out}, W_{out})\), where

      \[H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[0] - \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor\]

      \[W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[1] - \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor\]

    Examples:

    >>> # pool of square window of size=3, stride=2
    >>> m = nn.AvgPool2d(3, stride=2)
    >>> # pool of non-square window
    >>> m = nn.AvgPool2d((3, 2), stride=(2, 1))
    >>> input = torch.randn(20, 16, 50, 32)
    >>> output = m(input)
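    A small sketch of divisor_override (illustrative values): overriding the divisor to 1 turns the average into a plain window sum:

    >>> m = nn.AvgPool2d(2, divisor_override=1)
    >>> m(torch.ones(1, 1, 2, 2))
    tensor([[[[4.]]]])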

    AvgPool3d

    class torch.nn.AvgPool3d(kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None)[source]

    Applies a 3D average pooling over an input signal composed of several input planes.

    In the simplest case, the output value of the layer with input size \((N, C, D, H, W)\), output \((N, C, D_{out}, H_{out}, W_{out})\) and kernel_size \((kD, kH, kW)\) can be precisely described as:

    \[\begin{aligned} \text{out}(N_i, C_j, d, h, w) ={} & \sum_{k=0}^{kD-1} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1} \\ & \frac{\text{input}(N_i, C_j, \text{stride}[0] \times d + k, \text{stride}[1] \times h + m, \text{stride}[2] \times w + n)}{kD \times kH \times kW} \end{aligned}\]

    If padding is non-zero, then the input is implicitly zero-padded on all three sides for padding number of points.

    The parameters kernel_size, stride can either be:

    • a single int – in which case the same value is used for the depth, height and width dimension

    • a tuple of three ints – in which case, the first int is used for the depth dimension, the second int for the height dimension and the third int for the width dimension
    Parameters
    • kernel_size – the size of the window

    • stride – the stride of the window. Default value is kernel_size

    • padding – implicit zero padding to be added on all three sides

    • ceil_mode – when True, will use ceil instead of floor to compute the output shape

    • count_include_pad – when True, will include the zero-padding in the averaging calculation

    • divisor_override – if specified, it will be used as the divisor, otherwise kernel_size will be used
    Shape:
    • Input: \((N, C, D_{in}, H_{in}, W_{in})\)

    • Output: \((N, C, D_{out}, H_{out}, W_{out})\), where

      \[D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] - \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor\]

      \[H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] - \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor\]

      \[W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] - \text{kernel\_size}[2]}{\text{stride}[2]} + 1\right\rfloor\]

    Examples:

    >>> # pool of square window of size=3, stride=2
    >>> m = nn.AvgPool3d(3, stride=2)
    >>> # pool of non-square window
    >>> m = nn.AvgPool3d((3, 2, 2), stride=(2, 1, 2))
    >>> input = torch.randn(20, 16, 50, 44, 31)
    >>> output = m(input)

    FractionalMaxPool2d

    class torch.nn.FractionalMaxPool2d(kernel_size, output_size=None, output_ratio=None, return_indices=False, _random_samples=None)[source]

    Applies a 2D fractional max pooling over an input signal composed of several input planes.

    Fractional MaxPooling is described in detail in the paper Fractional MaxPooling by Ben Graham.

    The max-pooling operation is applied in \(kH \times kW\) regions by a stochastic step size determined by the target output size. The number of output features is equal to the number of input planes.
    Parameters
    • kernel_size – the size of the window to take a max over. Can be a single number k (for a square kernel of k x k) or a tuple (kh, kw)

    • output_size – the target output size of the image of the form oH x oW. Can be a tuple (oH, oW) or a single number oH for a square image oH x oH

    • output_ratio – If one wants to have an output size as a ratio of the input size, this option can be given. This has to be a number or tuple in the range (0, 1)

    • return_indices – if True, will return the indices along with the outputs. Useful to pass to nn.MaxUnpool2d(). Default: False

    Examples

    >>> # pool of square window of size=3, and target output size 13x12
    >>> m = nn.FractionalMaxPool2d(3, output_size=(13, 12))
    >>> # pool of square window and target output size being half of input image size
    >>> m = nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5))
    >>> input = torch.randn(20, 16, 50, 32)
    >>> output = m(input)
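    As a quick check of output_ratio above: with output_ratio=(0.5, 0.5) on the 50x32 input, the target output size is 25x16, so:

    >>> output.size()
    torch.Size([20, 16, 25, 16])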

    LPPool1d

    class torch.nn.LPPool1d(norm_type, kernel_size, stride=None, ceil_mode=False)[source]

    Applies a 1D power-average pooling over an input signal composed of several input planes.

    On each window, the function computed is:

    \[f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}\]

    • At p = \(\infty\), one gets Max Pooling

    • At p = 1, one gets Sum Pooling (which is proportional to Average Pooling)
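    As a quick sketch of the p = 1 case (illustrative values): a power-1 pool simply sums each window, so

    >>> m = nn.LPPool1d(1, 3, stride=2)
    >>> m(torch.tensor([[[1., 2, 3, 4, 5, 6, 7]]]))
    tensor([[[ 6., 12., 18.]]])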

    Note

    If the sum to the power of p is zero, the gradient of this function is not defined. This implementation will set the gradient to zero in this case.
    Parameters
    • kernel_size – a single int, the size of the window

    • stride – a single int, the stride of the window. Default value is kernel_size

    • ceil_mode – when True, will use ceil instead of floor to compute the output shape
    Shape:
    • Input: \((N, C, L_{in})\)

    • Output: \((N, C, L_{out})\), where

      \[L_{out} = \left\lfloor\frac{L_{in} + 2 \times \text{padding} - \text{kernel\_size}}{\text{stride}} + 1\right\rfloor\]
    Examples:

    >>> # power-2 pool of window of length 3, with stride 2.
    >>> m = nn.LPPool1d(2, 3, stride=2)
    >>> input = torch.randn(20, 16, 50)
    >>> output = m(input)

    LPPool2d

    class torch.nn.LPPool2d(norm_type, kernel_size, stride=None, ceil_mode=False)[source]

    Applies a 2D power-average pooling over an input signal composed of several input planes.

    On each window, the function computed is:

    \[f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}\]

    • At p = \(\infty\), one gets Max Pooling

    • At p = 1, one gets Sum Pooling (which is proportional to average pooling)

    The parameters kernel_size, stride can either be:

    • a single int – in which case the same value is used for the height and width dimension

    • a tuple of two ints – in which case, the first int is used for the height dimension, and the second int for the width dimension

    Note

    +

    If the sum to the power of p is zero, the gradient of this function is +not defined. This implementation will set the gradient to zero in this case.

    +
    +
    +
    Parameters
    +
      +
    • kernel_size – the size of the window

    • +
    • stride – the stride of the window. Default value is kernel_size

    • +
    • ceil_mode – when True, will use ceil instead of floor to compute the output shape

    • +
    +
    +
    +
    +
    Shape:
      +
    • Input: \((N, C, H_{in}, W_{in})\)

    • +
    • Output: \((N, C, H_{out}, W_{out})\), where

      +
      +\[H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[0] - \text{dilation}[0] \times + (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor + +\]
      +
      +\[W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[1] - \text{dilation}[1] \times + (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor + +\]
      +
    • +
    +
    +
    +

    Examples:

    +
    >>> # power-2 pool of square window of size=3, stride=2
    +>>> m = nn.LPPool2d(2, 3, stride=2)
    +>>> # pool of non-square window of power 1.2
    +>>> m = nn.LPPool2d(1.2, (3, 2), stride=(2, 1))
    +>>> input = torch.randn(20, 16, 50, 32)
    +>>> output = m(input)
    +
    +
    +
    + +
    +
    +

    AdaptiveMaxPool1d

    class torch.nn.AdaptiveMaxPool1d(output_size, return_indices=False)[source]

    Applies a 1D adaptive max pooling over an input signal composed of several input planes.

    The output size is H, for any input size. The number of output features is equal to the number of input planes.

    Parameters

    • output_size – the target output size H

    • return_indices – if True, will return the indices along with the outputs. Useful to pass to nn.MaxUnpool1d. Default: False

    Examples

    >>> # target output size of 5
    >>> m = nn.AdaptiveMaxPool1d(5)
    >>> input = torch.randn(1, 64, 8)
    >>> output = m(input)

    AdaptiveMaxPool2d

    class torch.nn.AdaptiveMaxPool2d(output_size, return_indices=False)[source]

    Applies a 2D adaptive max pooling over an input signal composed of several input planes.

    The output is of size H x W, for any input size. The number of output features is equal to the number of input planes.

    Parameters

    • output_size – the target output size of the image of the form H x W. Can be a tuple (H, W) or a single H for a square image H x H. H and W can be either an int, or None, which means the size will be the same as that of the input.

    • return_indices – if True, will return the indices along with the outputs. Useful to pass to nn.MaxUnpool2d. Default: False

    Examples

    >>> # target output size of 5x7
    >>> m = nn.AdaptiveMaxPool2d((5,7))
    >>> input = torch.randn(1, 64, 8, 9)
    >>> output = m(input)
    >>> # target output size of 7x7 (square)
    >>> m = nn.AdaptiveMaxPool2d(7)
    >>> input = torch.randn(1, 64, 10, 9)
    >>> output = m(input)
    >>> # target output size of 10x7
    >>> m = nn.AdaptiveMaxPool2d((None, 7))
    >>> input = torch.randn(1, 64, 10, 9)
    >>> output = m(input)

    AdaptiveMaxPool3d

    class torch.nn.AdaptiveMaxPool3d(output_size, return_indices=False)[source]

    Applies a 3D adaptive max pooling over an input signal composed of several input planes.

    The output is of size D x H x W, for any input size. The number of output features is equal to the number of input planes.

    Parameters

    • output_size – the target output size of the image of the form D x H x W. Can be a tuple (D, H, W) or a single D for a cube D x D x D. D, H and W can be either an int, or None, which means the size will be the same as that of the input.

    • return_indices – if True, will return the indices along with the outputs. Useful to pass to nn.MaxUnpool3d. Default: False

    Examples

    >>> # target output size of 5x7x9
    >>> m = nn.AdaptiveMaxPool3d((5,7,9))
    >>> input = torch.randn(1, 64, 8, 9, 10)
    >>> output = m(input)
    >>> # target output size of 7x7x7 (cube)
    >>> m = nn.AdaptiveMaxPool3d(7)
    >>> input = torch.randn(1, 64, 10, 9, 8)
    >>> output = m(input)
    >>> # target output size of 7x9x8
    >>> m = nn.AdaptiveMaxPool3d((7, None, None))
    >>> input = torch.randn(1, 64, 10, 9, 8)
    >>> output = m(input)

    AdaptiveAvgPool1d

    class torch.nn.AdaptiveAvgPool1d(output_size)[source]

    Applies a 1D adaptive average pooling over an input signal composed of several input planes.

    The output size is H, for any input size. The number of output features is equal to the number of input planes.

    Parameters

    output_size – the target output size H

    Examples

    >>> # target output size of 5
    >>> m = nn.AdaptiveAvgPool1d(5)
    >>> input = torch.randn(1, 64, 8)
    >>> output = m(input)

    AdaptiveAvgPool2d

    class torch.nn.AdaptiveAvgPool2d(output_size)[source]

    Applies a 2D adaptive average pooling over an input signal composed of several input planes.

    The output is of size H x W, for any input size. The number of output features is equal to the number of input planes.

    Parameters

    output_size – the target output size of the image of the form H x W. Can be a tuple (H, W) or a single H for a square image H x H. H and W can be either an int, or None, which means the size will be the same as that of the input.

    Examples

    >>> # target output size of 5x7
    >>> m = nn.AdaptiveAvgPool2d((5,7))
    >>> input = torch.randn(1, 64, 8, 9)
    >>> output = m(input)
    >>> # target output size of 7x7 (square)
    >>> m = nn.AdaptiveAvgPool2d(7)
    >>> input = torch.randn(1, 64, 10, 9)
    >>> output = m(input)
    >>> # target output size of 10x7
    >>> m = nn.AdaptiveAvgPool2d((None, 7))
    >>> input = torch.randn(1, 64, 10, 9)
    >>> output = m(input)

    AdaptiveAvgPool3d

    class torch.nn.AdaptiveAvgPool3d(output_size)[source]

    Applies a 3D adaptive average pooling over an input signal composed of several input planes.

    The output is of size D x H x W, for any input size. The number of output features is equal to the number of input planes.

    Parameters

    output_size – the target output size of the form D x H x W. Can be a tuple (D, H, W) or a single number D for a cube D x D x D. D, H and W can be either an int, or None, which means the size will be the same as that of the input.

    Examples

    >>> # target output size of 5x7x9
    >>> m = nn.AdaptiveAvgPool3d((5,7,9))
    >>> input = torch.randn(1, 64, 8, 9, 10)
    >>> output = m(input)
    >>> # target output size of 7x7x7 (cube)
    >>> m = nn.AdaptiveAvgPool3d(7)
    >>> input = torch.randn(1, 64, 10, 9, 8)
    >>> output = m(input)
    >>> # target output size of 7x9x8
    >>> m = nn.AdaptiveAvgPool3d((7, None, None))
    >>> input = torch.randn(1, 64, 10, 9, 8)
    >>> output = m(input)

    Padding layers


    ReflectionPad1d

    class torch.nn.ReflectionPad1d(padding)[source]

    Pads the input tensor using the reflection of the input boundary.

    For N-dimensional padding, use torch.nn.functional.pad().

    Parameters

    padding (int, tuple) – the size of the padding. If it is an int, uses the same padding in all boundaries. If a 2-tuple, uses (\(\text{padding\_left}\), \(\text{padding\_right}\))

    Shape:
    • Input: \((N, C, W_{in})\)

    • Output: \((N, C, W_{out})\) where

      \(W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}\)

    Examples:

    >>> m = nn.ReflectionPad1d(2)
    >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
    >>> input
    tensor([[[0., 1., 2., 3.],
             [4., 5., 6., 7.]]])
    >>> m(input)
    tensor([[[2., 1., 0., 1., 2., 3., 2., 1.],
             [6., 5., 4., 5., 6., 7., 6., 5.]]])
    >>> # using different paddings for different sides
    >>> m = nn.ReflectionPad1d((3, 1))
    >>> m(input)
    tensor([[[3., 2., 1., 0., 1., 2., 3., 2.],
             [7., 6., 5., 4., 5., 6., 7., 6.]]])

    ReflectionPad2d

    class torch.nn.ReflectionPad2d(padding)[source]

    Pads the input tensor using the reflection of the input boundary.

    For N-dimensional padding, use torch.nn.functional.pad().

    Parameters

    padding (int, tuple) – the size of the padding. If it is an int, uses the same padding in all boundaries. If a 4-tuple, uses (\(\text{padding\_left}\), \(\text{padding\_right}\), \(\text{padding\_top}\), \(\text{padding\_bottom}\))

    Shape:
    • Input: \((N, C, H_{in}, W_{in})\)

    • Output: \((N, C, H_{out}, W_{out})\) where

      \(H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}\)

      \(W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}\)

    Examples:

    >>> m = nn.ReflectionPad2d(2)
    >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
    >>> input
    tensor([[[[0., 1., 2.],
              [3., 4., 5.],
              [6., 7., 8.]]]])
    >>> m(input)
    tensor([[[[8., 7., 6., 7., 8., 7., 6.],
              [5., 4., 3., 4., 5., 4., 3.],
              [2., 1., 0., 1., 2., 1., 0.],
              [5., 4., 3., 4., 5., 4., 3.],
              [8., 7., 6., 7., 8., 7., 6.],
              [5., 4., 3., 4., 5., 4., 3.],
              [2., 1., 0., 1., 2., 1., 0.]]]])
    >>> # using different paddings for different sides
    >>> m = nn.ReflectionPad2d((1, 1, 2, 0))
    >>> m(input)
    tensor([[[[7., 6., 7., 8., 7.],
              [4., 3., 4., 5., 4.],
              [1., 0., 1., 2., 1.],
              [4., 3., 4., 5., 4.],
              [7., 6., 7., 8., 7.]]]])

    ReplicationPad1d

    class torch.nn.ReplicationPad1d(padding)[source]

    Pads the input tensor using replication of the input boundary.

    For N-dimensional padding, use torch.nn.functional.pad().

    Parameters

    padding (int, tuple) – the size of the padding. If it is an int, uses the same padding in all boundaries. If a 2-tuple, uses (\(\text{padding\_left}\), \(\text{padding\_right}\))

    Shape:
    • Input: \((N, C, W_{in})\)

    • Output: \((N, C, W_{out})\) where

      \(W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}\)

    Examples:

    >>> m = nn.ReplicationPad1d(2)
    >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
    >>> input
    tensor([[[0., 1., 2., 3.],
             [4., 5., 6., 7.]]])
    >>> m(input)
    tensor([[[0., 0., 0., 1., 2., 3., 3., 3.],
             [4., 4., 4., 5., 6., 7., 7., 7.]]])
    >>> # using different paddings for different sides
    >>> m = nn.ReplicationPad1d((3, 1))
    >>> m(input)
    tensor([[[0., 0., 0., 0., 1., 2., 3., 3.],
             [4., 4., 4., 4., 5., 6., 7., 7.]]])

    ReplicationPad2d

    class torch.nn.ReplicationPad2d(padding)[source]

    Pads the input tensor using replication of the input boundary.

    For N-dimensional padding, use torch.nn.functional.pad().

    Parameters

    padding (int, tuple) – the size of the padding. If it is an int, uses the same padding in all boundaries. If a 4-tuple, uses (\(\text{padding\_left}\), \(\text{padding\_right}\), \(\text{padding\_top}\), \(\text{padding\_bottom}\))

    Shape:
    • Input: \((N, C, H_{in}, W_{in})\)

    • Output: \((N, C, H_{out}, W_{out})\) where

      \(H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}\)

      \(W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}\)

    Examples:

    >>> m = nn.ReplicationPad2d(2)
    >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
    >>> input
    tensor([[[[0., 1., 2.],
              [3., 4., 5.],
              [6., 7., 8.]]]])
    >>> m(input)
    tensor([[[[0., 0., 0., 1., 2., 2., 2.],
              [0., 0., 0., 1., 2., 2., 2.],
              [0., 0., 0., 1., 2., 2., 2.],
              [3., 3., 3., 4., 5., 5., 5.],
              [6., 6., 6., 7., 8., 8., 8.],
              [6., 6., 6., 7., 8., 8., 8.],
              [6., 6., 6., 7., 8., 8., 8.]]]])
    >>> # using different paddings for different sides
    >>> m = nn.ReplicationPad2d((1, 1, 2, 0))
    >>> m(input)
    tensor([[[[0., 0., 1., 2., 2.],
              [0., 0., 1., 2., 2.],
              [0., 0., 1., 2., 2.],
              [3., 3., 4., 5., 5.],
              [6., 6., 7., 8., 8.]]]])

    ReplicationPad3d

    class torch.nn.ReplicationPad3d(padding)[source]

    Pads the input tensor using replication of the input boundary.

    For N-dimensional padding, use torch.nn.functional.pad().

    Parameters

    padding (int, tuple) – the size of the padding. If it is an int, uses the same padding in all boundaries. If a 6-tuple, uses (\(\text{padding\_left}\), \(\text{padding\_right}\), \(\text{padding\_top}\), \(\text{padding\_bottom}\), \(\text{padding\_front}\), \(\text{padding\_back}\))

    Shape:
    • Input: \((N, C, D_{in}, H_{in}, W_{in})\)

    • Output: \((N, C, D_{out}, H_{out}, W_{out})\) where

      \(D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}\)

      \(H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}\)

      \(W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}\)

    Examples:

    >>> m = nn.ReplicationPad3d(3)
    >>> input = torch.randn(16, 3, 8, 320, 480)
    >>> output = m(input)
    >>> # using different paddings for different sides
    >>> m = nn.ReplicationPad3d((3, 3, 6, 6, 1, 1))
    >>> output = m(input)

    ZeroPad2d

    class torch.nn.ZeroPad2d(padding)[source]

    Pads the input tensor boundaries with zero.

    For N-dimensional padding, use torch.nn.functional.pad().

    Parameters

    padding (int, tuple) – the size of the padding. If it is an int, uses the same padding in all boundaries. If a 4-tuple, uses (\(\text{padding\_left}\), \(\text{padding\_right}\), \(\text{padding\_top}\), \(\text{padding\_bottom}\))

    Shape:
    • Input: \((N, C, H_{in}, W_{in})\)

    • Output: \((N, C, H_{out}, W_{out})\) where

      \(H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}\)

      \(W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}\)

    Examples:

    >>> m = nn.ZeroPad2d(2)
    >>> input = torch.randn(1, 1, 3, 3)
    >>> input
    tensor([[[[-0.1678, -0.4418,  1.9466],
              [ 0.9604, -0.4219, -0.5241],
              [-0.9162, -0.5436, -0.6446]]]])
    >>> m(input)
    tensor([[[[ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
              [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
              [ 0.0000,  0.0000, -0.1678, -0.4418,  1.9466,  0.0000,  0.0000],
              [ 0.0000,  0.0000,  0.9604, -0.4219, -0.5241,  0.0000,  0.0000],
              [ 0.0000,  0.0000, -0.9162, -0.5436, -0.6446,  0.0000,  0.0000],
              [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
              [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000]]]])
    >>> # using different paddings for different sides
    >>> m = nn.ZeroPad2d((1, 1, 2, 0))
    >>> m(input)
    tensor([[[[ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
              [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
              [ 0.0000, -0.1678, -0.4418,  1.9466,  0.0000],
              [ 0.0000,  0.9604, -0.4219, -0.5241,  0.0000],
              [ 0.0000, -0.9162, -0.5436, -0.6446,  0.0000]]]])

    ConstantPad1d

    class torch.nn.ConstantPad1d(padding, value)[source]

    Pads the input tensor boundaries with a constant value.

    For N-dimensional padding, use torch.nn.functional.pad().

    Parameters

    padding (int, tuple) – the size of the padding. If it is an int, uses the same padding in both boundaries. If a 2-tuple, uses (\(\text{padding\_left}\), \(\text{padding\_right}\))

    Shape:
    • Input: \((N, C, W_{in})\)

    • Output: \((N, C, W_{out})\) where

      \(W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}\)

    Examples:

    >>> m = nn.ConstantPad1d(2, 3.5)
    >>> input = torch.randn(1, 2, 4)
    >>> input
    tensor([[[-1.0491, -0.7152, -0.0749,  0.8530],
             [-1.3287,  1.8966,  0.1466, -0.2771]]])
    >>> m(input)
    tensor([[[ 3.5000,  3.5000, -1.0491, -0.7152, -0.0749,  0.8530,  3.5000,
               3.5000],
             [ 3.5000,  3.5000, -1.3287,  1.8966,  0.1466, -0.2771,  3.5000,
               3.5000]]])
    >>> m = nn.ConstantPad1d(2, 3.5)
    >>> input = torch.randn(1, 2, 3)
    >>> input
    tensor([[[ 1.6616,  1.4523, -1.1255],
             [-3.6372,  0.1182, -1.8652]]])
    >>> m(input)
    tensor([[[ 3.5000,  3.5000,  1.6616,  1.4523, -1.1255,  3.5000,  3.5000],
             [ 3.5000,  3.5000, -3.6372,  0.1182, -1.8652,  3.5000,  3.5000]]])
    >>> # using different paddings for different sides
    >>> m = nn.ConstantPad1d((3, 1), 3.5)
    >>> m(input)
    tensor([[[ 3.5000,  3.5000,  3.5000,  1.6616,  1.4523, -1.1255,  3.5000],
             [ 3.5000,  3.5000,  3.5000, -3.6372,  0.1182, -1.8652,  3.5000]]])

    ConstantPad2d

    class torch.nn.ConstantPad2d(padding, value)[source]

    Pads the input tensor boundaries with a constant value.

    For N-dimensional padding, use torch.nn.functional.pad().

    Parameters

    padding (int, tuple) – the size of the padding. If it is an int, uses the same padding in all boundaries. If a 4-tuple, uses (\(\text{padding\_left}\), \(\text{padding\_right}\), \(\text{padding\_top}\), \(\text{padding\_bottom}\))

    Shape:
    • Input: \((N, C, H_{in}, W_{in})\)

    • Output: \((N, C, H_{out}, W_{out})\) where

      \(H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}\)

      \(W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}\)

    Examples:

    >>> m = nn.ConstantPad2d(2, 3.5)
    >>> input = torch.randn(1, 2, 2)
    >>> input
    tensor([[[ 1.6585,  0.4320],
             [-0.8701, -0.4649]]])
    >>> m(input)
    tensor([[[ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
             [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
             [ 3.5000,  3.5000,  1.6585,  0.4320,  3.5000,  3.5000],
             [ 3.5000,  3.5000, -0.8701, -0.4649,  3.5000,  3.5000],
             [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
             [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000,  3.5000]]])
    >>> # using different paddings for different sides
    >>> m = nn.ConstantPad2d((3, 0, 2, 1), 3.5)
    >>> m(input)
    tensor([[[ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
             [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
             [ 3.5000,  3.5000,  3.5000,  1.6585,  0.4320],
             [ 3.5000,  3.5000,  3.5000, -0.8701, -0.4649],
             [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000]]])

    ConstantPad3d

    class torch.nn.ConstantPad3d(padding, value)[source]

    Pads the input tensor boundaries with a constant value.

    For N-dimensional padding, use torch.nn.functional.pad().

    Parameters

    padding (int, tuple) – the size of the padding. If it is an int, uses the same padding in all boundaries. If a 6-tuple, uses (\(\text{padding\_left}\), \(\text{padding\_right}\), \(\text{padding\_top}\), \(\text{padding\_bottom}\), \(\text{padding\_front}\), \(\text{padding\_back}\))

    Shape:
    • Input: \((N, C, D_{in}, H_{in}, W_{in})\)

    • Output: \((N, C, D_{out}, H_{out}, W_{out})\) where

      \(D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}\)

      \(H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}\)

      \(W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}\)

    Examples:

    >>> m = nn.ConstantPad3d(3, 3.5)
    >>> input = torch.randn(16, 3, 10, 20, 30)
    >>> output = m(input)
    >>> # using different paddings for different sides
    >>> m = nn.ConstantPad3d((3, 3, 6, 6, 0, 1), 3.5)
    >>> output = m(input)

    Non-linear activations (weighted sum, nonlinearity)


    ELU

    class torch.nn.ELU(alpha=1.0, inplace=False)[source]

    Applies the element-wise function:

    \[\text{ELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x) - 1))\]

    Parameters

    • alpha – the \(\alpha\) value for the ELU formulation. Default: 1.0

    • inplace – can optionally do the operation in-place. Default: False

    Shape:
    • Input: \((N, *)\) where * means any number of additional dimensions

    • Output: \((N, *)\), same shape as the input

    _images/ELU.png

    Examples:

    >>> m = nn.ELU()
    >>> input = torch.randn(2)
    >>> output = m(input)

    Hardshrink

    class torch.nn.Hardshrink(lambd=0.5)[source]

    Applies the hard shrinkage function element-wise:

    \[\text{HardShrink}(x) =
    \begin{cases}
    x, & \text{ if } x > \lambda \\
    x, & \text{ if } x < -\lambda \\
    0, & \text{ otherwise }
    \end{cases}\]

    Parameters

    lambd – the \(\lambda\) value for the Hardshrink formulation. Default: 0.5

    Shape:
    • Input: \((N, *)\) where * means any number of additional dimensions

    • Output: \((N, *)\), same shape as the input

    _images/Hardshrink.png

    Examples:

    >>> m = nn.Hardshrink()
    >>> input = torch.randn(2)
    >>> output = m(input)

    Hardtanh

    class torch.nn.Hardtanh(min_val=-1.0, max_val=1.0, inplace=False, min_value=None, max_value=None)[source]

    Applies the HardTanh function element-wise.

    HardTanh is defined as:

    \[\text{HardTanh}(x) =
    \begin{cases}
    1 & \text{ if } x > 1 \\
    -1 & \text{ if } x < -1 \\
    x & \text{ otherwise }
    \end{cases}\]

    The range of the linear region \([-1, 1]\) can be adjusted using min_val and max_val.

    Parameters

    • min_val – minimum value of the linear region range. Default: -1

    • max_val – maximum value of the linear region range. Default: 1

    • inplace – can optionally do the operation in-place. Default: False

    Keyword arguments min_value and max_value have been deprecated in favor of min_val and max_val.

    Shape:
    • Input: \((N, *)\) where * means any number of additional dimensions

    • Output: \((N, *)\), same shape as the input

    _images/Hardtanh.png

    Examples:

    >>> m = nn.Hardtanh(-2, 2)
    >>> input = torch.randn(2)
    >>> output = m(input)

    LeakyReLU

    class torch.nn.LeakyReLU(negative_slope=0.01, inplace=False)[source]

    Applies the element-wise function:

    \[\text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x)\]

    or

    \[\text{LeakyReLU}(x) =
    \begin{cases}
    x, & \text{ if } x \geq 0 \\
    \text{negative\_slope} \times x, & \text{ otherwise }
    \end{cases}\]

    Parameters

    • negative_slope – Controls the angle of the negative slope. Default: 1e-2

    • inplace – can optionally do the operation in-place. Default: False

    Shape:
    • Input: \((N, *)\) where * means any number of additional dimensions

    • Output: \((N, *)\), same shape as the input

    _images/LeakyReLU.png

    Examples:

    >>> m = nn.LeakyReLU(0.1)
    >>> input = torch.randn(2)
    >>> output = m(input)

    LogSigmoid

    class torch.nn.LogSigmoid[source]

    Applies the element-wise function:

    \[\text{LogSigmoid}(x) = \log\left(\frac{ 1 }{ 1 + \exp(-x)}\right)\]

    Shape:
    • Input: \((N, *)\) where * means any number of additional dimensions

    • Output: \((N, *)\), same shape as the input

    _images/LogSigmoid.png

    Examples:

    >>> m = nn.LogSigmoid()
    >>> input = torch.randn(2)
    >>> output = m(input)

    MultiheadAttention

    class torch.nn.MultiheadAttention(embed_dim, num_heads, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None)[source]

    Allows the model to jointly attend to information from different representation subspaces. See reference: Attention Is All You Need

    \[\text{MultiHead}(Q, K, V) = \text{Concat}(\text{head}_1, \dots, \text{head}_h)W^O
    \quad \text{where } \text{head}_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)\]

    Parameters

    • embed_dim – total dimension of the model.

    • num_heads – parallel attention heads.

    • dropout – a Dropout layer on attn_output_weights. Default: 0.0.

    • bias – add bias as module parameter. Default: True.

    • add_bias_kv – add bias to the key and value sequences at dim=0.

    • add_zero_attn – add a new batch of zeros to the key and value sequences at dim=1.

    • kdim – total number of features in key. Default: None.

    • vdim – total number of features in value. Default: None.

    Note: if kdim and vdim are None, they will be set to embed_dim, so that query, key, and value have the same number of features.

    Examples:

    >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
    >>> attn_output, attn_output_weights = multihead_attn(query, key, value)
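    For concreteness, a sketch with assumed sizes (L = S = 10, N = 2, E = 16, 4 heads; these values are illustrative, not from the reference entry), matching the Shape section of forward() below:

    >>> # Self-attention: query, key and value share the (seq, batch, embed) layout
    >>> multihead_attn = nn.MultiheadAttention(16, 4)
    >>> query = torch.randn(10, 2, 16)    # (L, N, E)
    >>> key = torch.randn(10, 2, 16)      # (S, N, E)
    >>> value = torch.randn(10, 2, 16)    # (S, N, E)
    >>> attn_output, attn_output_weights = multihead_attn(query, key, value)
    >>> attn_output.shape                 # (L, N, E)
    torch.Size([10, 2, 16])
    >>> attn_output_weights.shape         # (N, L, S)
    torch.Size([2, 10, 10])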
    forward(query, key, value, key_padding_mask=None, need_weights=True, attn_mask=None)[source]

    Parameters

    • query, key, value – map a query and a set of key-value pairs to an output. See “Attention Is All You Need” for more details.

    • key_padding_mask – if provided, specified padding elements in the key will be ignored by the attention. This is a binary mask. When the value is True, the corresponding value on the attention layer will be filled with -inf.

    • need_weights – output attn_output_weights.

    • attn_mask – mask that prevents attention to certain positions. This is an additive mask (i.e. the values will be added to the attention layer).

    Shape:
    • Inputs:

    • query: \((L, N, E)\) where L is the target sequence length, N is the batch size, E is the embedding dimension.

    • key: \((S, N, E)\), where S is the source sequence length, N is the batch size, E is the embedding dimension.

    • value: \((S, N, E)\) where S is the source sequence length, N is the batch size, E is the embedding dimension.

    • key_padding_mask: \((N, S)\), ByteTensor, where N is the batch size, S is the source sequence length.

    • attn_mask: \((L, S)\) where L is the target sequence length, S is the source sequence length.

    • Outputs:

    • attn_output: \((L, N, E)\) where L is the target sequence length, N is the batch size, E is the embedding dimension.

    • attn_output_weights: \((N, L, S)\) where N is the batch size, L is the target sequence length, S is the source sequence length.

    PReLU

    class torch.nn.PReLU(num_parameters=1, init=0.25)[source]

    Applies the element-wise function:

    \[\text{PReLU}(x) = \max(0,x) + a * \min(0,x)\]

    or

    \[\text{PReLU}(x) =
    \begin{cases}
    x, & \text{ if } x \geq 0 \\
    ax, & \text{ otherwise }
    \end{cases}\]

    Here \(a\) is a learnable parameter. When called without arguments, nn.PReLU() uses a single parameter \(a\) across all input channels. If called with nn.PReLU(nChannels), a separate \(a\) is used for each input channel.

    Note

    weight decay should not be used when learning \(a\) for good performance.

    Note

    Channel dim is the 2nd dim of input. When input has dims < 2, then there is no channel dim and the number of channels = 1.

    Parameters

    • num_parameters (int) – number of \(a\) to learn. Although it takes an int as input, only two values are legitimate: 1, or the number of channels of the input. Default: 1

    • init (float) – the initial value of \(a\). Default: 0.25

    Shape:
    • Input: \((N, *)\) where * means any number of additional dimensions

    • Output: \((N, *)\), same shape as the input

    Variables

    ~PReLU.weight (Tensor) – the learnable weights of shape (num_parameters).

    _images/PReLU.png

    Examples:

    >>> m = nn.PReLU()
    >>> input = torch.randn(2)
    >>> output = m(input)
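    A sketch of the per-channel case described above (the 4-channel sizes are illustrative assumptions):

    >>> # One learnable slope per channel; channel dim is dim 1 of the input
    >>> m = nn.PReLU(num_parameters=4)
    >>> input = torch.randn(6, 4, 8)   # (N, C, L) with C = 4 channels
    >>> output = m(input)
    >>> m.weight.shape                 # one a per channel, each initialized to 0.25
    torch.Size([4])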

    ReLU

    class torch.nn.ReLU(inplace=False)[source]

    Applies the rectified linear unit function element-wise:

    \(\text{ReLU}(x) = \max(0, x)\)

    Parameters

    inplace – can optionally do the operation in-place. Default: False

    Shape:
    • Input: \((N, *)\) where * means any number of additional dimensions

    • Output: \((N, *)\), same shape as the input

    _images/ReLU.png

    Examples:

    >>> m = nn.ReLU()
    >>> input = torch.randn(2)
    >>> output = m(input)

    An implementation of CReLU - https://arxiv.org/abs/1603.05201

    >>> m = nn.ReLU()
    >>> input = torch.randn(2).unsqueeze(0)
    >>> output = torch.cat((m(input), m(-input)))

    ReLU6

    class torch.nn.ReLU6(inplace=False)[source]

    Applies the element-wise function:

    \[\text{ReLU6}(x) = \min(\max(0,x), 6)\]

    Parameters

    inplace – can optionally do the operation in-place. Default: False

    Shape:
    • Input: \((N, *)\) where * means any number of additional dimensions

    • Output: \((N, *)\), same shape as the input

    _images/ReLU6.png

    Examples:

    >>> m = nn.ReLU6()
    >>> input = torch.randn(2)
    >>> output = m(input)

    RReLU

    class torch.nn.RReLU(lower=0.125, upper=0.3333333333333333, inplace=False)[source]

    Applies the randomized leaky rectified linear unit function, element-wise, as described in the paper:

    Empirical Evaluation of Rectified Activations in Convolutional Network.

    The function is defined as:

    \[\text{RReLU}(x) =
    \begin{cases}
    x & \text{ if } x \geq 0 \\
    ax & \text{ otherwise }
    \end{cases}\]

    where \(a\) is randomly sampled from a uniform distribution \(\mathcal{U}(\text{lower}, \text{upper})\).

    Parameters

    • lower – lower bound of the uniform distribution. Default: \(\frac{1}{8}\)

    • upper – upper bound of the uniform distribution. Default: \(\frac{1}{3}\)

    • inplace – can optionally do the operation in-place. Default: False

    Shape:
    • Input: \((N, *)\) where * means any number of additional dimensions

    • Output: \((N, *)\), same shape as the input

    Examples:

    >>> m = nn.RReLU(0.1, 0.3)
    >>> input = torch.randn(2)
    >>> output = m(input)

    SELU

    class torch.nn.SELU(inplace=False)[source]

    Applied element-wise, as:

    \[\text{SELU}(x) = \text{scale} * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1)))\]

    with \(\alpha = 1.6732632423543772848170429916717\) and \(\text{scale} = 1.0507009873554804934193349852946\).

    More details can be found in the paper Self-Normalizing Neural Networks.

    Parameters

    inplace (bool, optional) – can optionally do the operation in-place. Default: False

    Shape:
    • Input: \((N, *)\) where * means any number of additional dimensions

    • Output: \((N, *)\), same shape as the input

    _images/SELU.png

    Examples:

    >>> m = nn.SELU()
    >>> input = torch.randn(2)
    >>> output = m(input)

    CELU

    class torch.nn.CELU(alpha=1.0, inplace=False)[source]

    Applies the element-wise function:

    \[\text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x/\alpha) - 1))\]

    More details can be found in the paper Continuously Differentiable Exponential Linear Units.

    Parameters

    • alpha – the \(\alpha\) value for the CELU formulation. Default: 1.0

    • inplace – can optionally do the operation in-place. Default: False

    Shape:
    • Input: \((N, *)\) where * means any number of additional dimensions

    • Output: \((N, *)\), same shape as the input

    _images/CELU.png

    Examples:

    >>> m = nn.CELU()
    >>> input = torch.randn(2)
    >>> output = m(input)

    Sigmoid

    class torch.nn.Sigmoid[source]

    Applies the element-wise function:

    \[\text{Sigmoid}(x) = \frac{1}{1 + \exp(-x)}\]

    Shape:
    • Input: \((N, *)\) where * means any number of additional dimensions

    • Output: \((N, *)\), same shape as the input

    _images/Sigmoid.png

    Examples:

    >>> m = nn.Sigmoid()
    >>> input = torch.randn(2)
    >>> output = m(input)

    Softplus

    class torch.nn.Softplus(beta=1, threshold=20)[source]

    Applies the element-wise function:

    \[\text{Softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x))\]

    SoftPlus is a smooth approximation to the ReLU function and can be used to constrain the output of a machine to always be positive.

    For numerical stability the implementation reverts to the linear function for inputs above a certain value.

    Parameters

    • beta – the \(\beta\) value for the Softplus formulation. Default: 1

    • threshold – values above this revert to a linear function. Default: 20

    Shape:
    • Input: \((N, *)\) where * means any number of additional dimensions

    • Output: \((N, *)\), same shape as the input

    _images/Softplus.png

    Examples:

    >>> m = nn.Softplus()
    >>> input = torch.randn(2)
    >>> output = m(input)

    Softshrink

    class torch.nn.Softshrink(lambd=0.5)[source]

    Applies the soft shrinkage function element-wise:

    \[\text{SoftShrinkage}(x) =
    \begin{cases}
    x - \lambda, & \text{ if } x > \lambda \\
    x + \lambda, & \text{ if } x < -\lambda \\
    0, & \text{ otherwise }
    \end{cases}\]

    Parameters

    lambd – the \(\lambda\) value for the Softshrink formulation. Default: 0.5

    Shape:
    • Input: \((N, *)\) where * means any number of additional dimensions

    • Output: \((N, *)\), same shape as the input

    _images/Softshrink.png

    Examples:

    >>> m = nn.Softshrink()
    >>> input = torch.randn(2)
    >>> output = m(input)

    Softsign

    class torch.nn.Softsign[source]

    Applies the element-wise function:

    \[\text{SoftSign}(x) = \frac{x}{ 1 + |x|}\]

    Shape:
    • Input: \((N, *)\) where * means any number of additional dimensions

    • Output: \((N, *)\), same shape as the input

    _images/Softsign.png

    Examples:

    >>> m = nn.Softsign()
    >>> input = torch.randn(2)
    >>> output = m(input)

    Tanh

    class torch.nn.Tanh[source]

    Applies the element-wise function:

    \[\text{Tanh}(x) = \tanh(x) = \frac{e^x - e^{-x}}{e^x + e^{-x}}\]

    Shape:
    • Input: \((N, *)\) where * means any number of additional dimensions

    • Output: \((N, *)\), same shape as the input

    _images/Tanh.png

    Examples:

    >>> m = nn.Tanh()
    >>> input = torch.randn(2)
    >>> output = m(input)

    Tanhshrink

    class torch.nn.Tanhshrink[source]

    Applies the element-wise function:

    \[\text{Tanhshrink}(x) = x - \text{Tanh}(x)\]

    Shape:
    • Input: \((N, *)\) where * means any number of additional dimensions

    • Output: \((N, *)\), same shape as the input

    _images/Tanhshrink.png

    Examples:

    >>> m = nn.Tanhshrink()
    >>> input = torch.randn(2)
    >>> output = m(input)

    Threshold

    class torch.nn.Threshold(threshold, value, inplace=False)[source]

    Thresholds each element of the input Tensor.

    Threshold is defined as:

    \[y =
    \begin{cases}
    x, & \text{ if } x > \text{threshold} \\
    \text{value}, & \text{ otherwise }
    \end{cases}\]

    Parameters

    • threshold – The value to threshold at

    • value – The value to replace with

    • inplace – can optionally do the operation in-place. Default: False

    Shape:
    • Input: \((N, *)\) where * means any number of additional dimensions

    • Output: \((N, *)\), same shape as the input

    Examples:

    >>> m = nn.Threshold(0.1, 20)
    >>> input = torch.randn(2)
    >>> output = m(input)

    Non-linear activations (other)


    Softmin

    class torch.nn.Softmin(dim=None)[source]

    Applies the Softmin function to an n-dimensional input Tensor, rescaling the elements so that they lie in the range [0, 1] and sum to 1.

    Softmin is defined as:

    \[\text{Softmin}(x_{i}) = \frac{\exp(-x_i)}{\sum_j \exp(-x_j)}\]

    Shape:
    • Input: \((*)\) where * means any number of additional dimensions

    • Output: \((*)\), same shape as the input

    Parameters

    dim (int) – A dimension along which Softmin will be computed (so every slice along dim will sum to 1).

    Returns

    a Tensor of the same dimension and shape as the input, with values in the range [0, 1]

    Examples:

    >>> m = nn.Softmin(dim=1)
    >>> input = torch.randn(2, 3)
    >>> output = m(input)
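    A sketch of the normalization property, and of the identity Softmin(x) = Softmax(-x), which follows directly from the definition above:

    >>> # Each slice along dim sums to 1; negating the input recovers Softmax
    >>> m = nn.Softmin(dim=1)
    >>> input = torch.randn(2, 3)
    >>> output = m(input)
    >>> torch.allclose(output.sum(dim=1), torch.ones(2))
    True
    >>> torch.allclose(output, nn.Softmax(dim=1)(-input))
    True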

    Softmax

    class torch.nn.Softmax(dim=None)[source]

    Applies the Softmax function to an n-dimensional input Tensor, rescaling the elements so that they lie in the range [0, 1] and sum to 1.

    Softmax is defined as:

    \[\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}\]

    Shape:
    • Input: \((*)\) where * means any number of additional dimensions

    • Output: \((*)\), same shape as the input

    Returns

    a Tensor of the same dimension and shape as the input with values in the range [0, 1]

    Parameters

    dim (int) – A dimension along which Softmax will be computed (so every slice along dim will sum to 1).

    Note

    This module doesn’t work directly with NLLLoss, which expects the Log to be computed between the Softmax and itself. Use LogSoftmax instead (it’s faster and has better numerical properties).
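    A sketch of the pairing suggested in the note: LogSoftmax followed by NLLLoss matches CrossEntropyLoss applied to the raw scores.

    >>> # LogSoftmax + NLLLoss on raw scores is equivalent to CrossEntropyLoss
    >>> scores = torch.randn(4, 5)            # (batch, classes)
    >>> target = torch.randint(0, 5, (4,))
    >>> log_probs = nn.LogSoftmax(dim=1)(scores)
    >>> loss = nn.NLLLoss()(log_probs, target)
    >>> torch.allclose(loss, nn.CrossEntropyLoss()(scores, target))
    True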

    Examples:

    >>> m = nn.Softmax(dim=1)
    >>> input = torch.randn(2, 3)
    >>> output = m(input)

    Softmax2d

    class torch.nn.Softmax2d[source]

    Applies SoftMax over features to each spatial location.

    When given an image of Channels x Height x Width, it will apply Softmax to each location \((Channels, h_i, w_j)\)

    Shape:
    • Input: \((N, C, H, W)\)

    • Output: \((N, C, H, W)\) (same shape as input)

    Returns

    a Tensor of the same dimension and shape as the input with values in the range [0, 1]

    Examples:

    >>> m = nn.Softmax2d()
    >>> # you softmax over the 2nd dimension
    >>> input = torch.randn(2, 3, 12, 13)
    >>> output = m(input)

    LogSoftmax

    class torch.nn.LogSoftmax(dim=None)[source]

    Applies the \(\log(\text{Softmax}(x))\) function to an n-dimensional input Tensor. The LogSoftmax formulation can be simplified as:

    \[\text{LogSoftmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right)\]

    Shape:
    • Input: \((*)\) where * means any number of additional dimensions

    • Output: \((*)\), same shape as the input

    Parameters

    dim (int) – A dimension along which LogSoftmax will be computed.

    Returns

    a Tensor of the same dimension and shape as the input with values in the range [-inf, 0)

    Examples:

    >>> m = nn.LogSoftmax(dim=1)
    >>> input = torch.randn(2, 3)
    >>> output = m(input)

    AdaptiveLogSoftmaxWithLoss

    class torch.nn.AdaptiveLogSoftmaxWithLoss(in_features, n_classes, cutoffs, div_value=4.0, head_bias=False)[source]

    Efficient softmax approximation as described in Efficient softmax approximation for GPUs by Edouard Grave, Armand Joulin, Moustapha Cissé, David Grangier, and Hervé Jégou.

    Adaptive softmax is an approximate strategy for training models with large output spaces. It is most effective when the label distribution is highly imbalanced, for example in natural language modelling, where the word frequency distribution approximately follows Zipf’s law.

    Adaptive softmax partitions the labels into several clusters, according to their frequency. These clusters may each contain a different number of targets. Additionally, clusters containing less frequent labels assign lower-dimensional embeddings to those labels, which speeds up the computation. For each minibatch, only the clusters for which at least one target is present are evaluated.

    The idea is that the clusters which are accessed frequently (like the first one, containing the most frequent labels) should also be cheap to compute – that is, contain a small number of assigned labels.

    We highly recommend taking a look at the original paper for more details.

    • cutoffs should be an ordered Sequence of integers sorted in increasing order. It controls the number of clusters and the partitioning of targets into clusters. For example, setting cutoffs = [10, 100, 1000] means that the first 10 targets will be assigned to the ‘head’ of the adaptive softmax, targets 11, 12, …, 100 will be assigned to the first cluster, and targets 101, 102, …, 1000 will be assigned to the second cluster, while targets 1001, 1002, …, n_classes - 1 will be assigned to the last, third cluster (see the sketch after this list).

    • div_value is used to compute the size of each additional cluster, which is given as \(\left\lfloor\frac{in\_features}{div\_value^{idx}}\right\rfloor\), where \(idx\) is the cluster index (with clusters for less frequent words having larger indices, and indices starting from \(1\)).

    • head_bias if set to True, adds a bias term to the ‘head’ of the adaptive softmax. See paper for details. Set to False in the official implementation.
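    A usage sketch of the partitioning described above (the sizes and cutoffs below are illustrative assumptions, not values from the reference entry):

    >>> # 64 input features, 1000 classes, three clusters behind the head
    >>> asm = nn.AdaptiveLogSoftmaxWithLoss(in_features=64, n_classes=1000,
    ...                                     cutoffs=[10, 100, 500])
    >>> input = torch.randn(128, 64)
    >>> target = torch.randint(0, 1000, (128,))
    >>> out, loss = asm(input, target)   # NamedTuple with output and loss fields
    >>> out.shape, loss.shape            # per-example log-probs and a scalar loss
    (torch.Size([128]), torch.Size([]))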

    Warning

    Labels passed as inputs to this module should be sorted according to their frequency. This means that the most frequent label should be represented by the index 0, and the least frequent label should be represented by the index n_classes - 1.

    Note

    This module returns a NamedTuple with output and loss fields. See further documentation for details.

    Note

    To compute log-probabilities for all classes, the log_prob method can be used.

    Parameters

    • in_features (int) – Number of features in the input tensor

    • n_classes (int) – Number of classes in the dataset

    • cutoffs (Sequence) – Cutoffs used to assign targets to their buckets

    • div_value (float, optional) – value used as an exponent to compute sizes of the clusters. Default: 4.0

    • head_bias (bool, optional) – If True, adds a bias term to the ‘head’ of the adaptive softmax. Default: False

    Returns

    • output is a Tensor of size N containing computed target log probabilities for each example

    • loss is a Scalar representing the computed negative log likelihood loss

    Return type

    NamedTuple with output and loss fields

    Shape:
    • input: \((N, in\_features)\)

    • target: \((N)\) where each value satisfies \(0 <= target[i] <= n\_classes\)

    • output1: \((N)\)

    • output2: Scalar

    log_prob(input)[source]

    Computes log probabilities for all \(n\_classes\)

    Parameters

    input (Tensor) – a minibatch of examples

    Returns

    log-probabilities for each class \(c\) in range \(0 <= c <= n\_classes\), where \(n\_classes\) is a parameter passed to the AdaptiveLogSoftmaxWithLoss constructor.

    Shape:
    • Input: \((N, in\_features)\)

    • Output: \((N, n\_classes)\)

    predict(input)[source]

    This is equivalent to self.log_prob(input).argmax(dim=1), but is more efficient in some cases.

    Parameters

    input (Tensor) – a minibatch of examples

    Returns

    a class with the highest probability for each example

    Return type

    output (Tensor)

    Shape:
    • Input: \((N, in\_features)\)

    • Output: \((N)\)

    Normalization layers


    BatchNorm1d

    class torch.nn.BatchNorm1d(num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)[source]

    Applies Batch Normalization over a 2D or 3D input (a mini-batch of 1D inputs with optional additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.

    \[y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta\]

    The mean and standard-deviation are calculated per-dimension over the mini-batches and \(\gamma\) and \(\beta\) are learnable parameter vectors of size C (where C is the input size). By default, the elements of \(\gamma\) are set to 1 and the elements of \(\beta\) are set to 0.

    Also by default, during training this layer keeps running estimates of its computed mean and variance, which are then used for normalization during evaluation. The running estimates are kept with a default momentum of 0.1.

    If track_running_stats is set to False, this layer then does not keep running estimates, and batch statistics are instead used during evaluation time as well.

    Note

    This momentum argument is different from the one used in optimizer classes and from the conventional notion of momentum. Mathematically, the update rule for running statistics here is \(\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t\), where \(\hat{x}\) is the estimated statistic and \(x_t\) is the new observed value.
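    A sketch verifying the update rule in the note (with the default momentum of 0.1; running_mean starts at zero right after construction):

    >>> bn = nn.BatchNorm1d(3, momentum=0.1)
    >>> x = torch.randn(20, 3)
    >>> old_mean = bn.running_mean.clone()   # zeros after construction
    >>> _ = bn(x)                            # one training-mode forward pass
    >>> torch.allclose(bn.running_mean, 0.9 * old_mean + 0.1 * x.mean(dim=0))
    True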

    Because the Batch Normalization is done over the C dimension, computing statistics on (N, L) slices, it’s common terminology to call this Temporal Batch Normalization.

    Parameters

    • num_features – \(C\) from an expected input of size \((N, C, L)\) or \(L\) from input of size \((N, L)\)

    • eps – a value added to the denominator for numerical stability. Default: 1e-5

    • momentum – the value used for the running_mean and running_var computation. Can be set to None for cumulative moving average (i.e. simple average). Default: 0.1

    • affine – a boolean value that when set to True, this module has learnable affine parameters. Default: True

    • track_running_stats – a boolean value that when set to True, this module tracks the running mean and variance, and when set to False, this module does not track such statistics and always uses batch statistics in both training and eval modes. Default: True

    Shape:
    • Input: \((N, C)\) or \((N, C, L)\)

    • Output: \((N, C)\) or \((N, C, L)\) (same shape as input)

    Examples:

    >>> # With Learnable Parameters
    >>> m = nn.BatchNorm1d(100)
    >>> # Without Learnable Parameters
    >>> m = nn.BatchNorm1d(100, affine=False)
    >>> input = torch.randn(20, 100)
    >>> output = m(input)

    BatchNorm2d

    class torch.nn.BatchNorm2d(num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)[source]

    Applies Batch Normalization over a 4D input (a mini-batch of 2D inputs with additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.

    \[y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta\]

    The mean and standard-deviation are calculated per-dimension over the mini-batches and \(\gamma\) and \(\beta\) are learnable parameter vectors of size C (where C is the input size). By default, the elements of \(\gamma\) are set to 1 and the elements of \(\beta\) are set to 0.

    Also by default, during training this layer keeps running estimates of its computed mean and variance, which are then used for normalization during evaluation. The running estimates are kept with a default momentum of 0.1.

    If track_running_stats is set to False, this layer then does not keep running estimates, and batch statistics are instead used during evaluation time as well.

    Note

    This momentum argument is different from the one used in optimizer classes and from the conventional notion of momentum. Mathematically, the update rule for running statistics here is \(\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t\), where \(\hat{x}\) is the estimated statistic and \(x_t\) is the new observed value.

    Because the Batch Normalization is done over the C dimension, computing statistics on (N, H, W) slices, it’s common terminology to call this Spatial Batch Normalization.

    Parameters

    • num_features – \(C\) from an expected input of size \((N, C, H, W)\)

    • eps – a value added to the denominator for numerical stability. Default: 1e-5

    • momentum – the value used for the running_mean and running_var computation. Can be set to None for cumulative moving average (i.e. simple average). Default: 0.1

    • affine – a boolean value that when set to True, this module has learnable affine parameters. Default: True

    • track_running_stats – a boolean value that when set to True, this module tracks the running mean and variance, and when set to False, this module does not track such statistics and always uses batch statistics in both training and eval modes. Default: True

    Shape:
    • Input: \((N, C, H, W)\)

    • Output: \((N, C, H, W)\) (same shape as input)

    Examples:

    >>> # With Learnable Parameters
    >>> m = nn.BatchNorm2d(100)
    >>> # Without Learnable Parameters
    >>> m = nn.BatchNorm2d(100, affine=False)
    >>> input = torch.randn(20, 100, 35, 45)
    >>> output = m(input)

    BatchNorm3d

    +
    +
    +class torch.nn.BatchNorm3d(num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)[source]
    +

    Applies Batch Normalization over a 5D input (a mini-batch of 3D inputs +with additional channel dimension) as described in the paper +Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift .

    +
    +\[y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta\]
    +

    The mean and standard-deviation are calculated per-dimension over +the mini-batches and \(\gamma\) and \(\beta\) are learnable parameter vectors +of size C (where C is the input size). By default, the elements of \(\gamma\) are set +to 1 and the elements of \(\beta\) are set to 0.

    +

    Also by default, during training this layer keeps running estimates of its +computed mean and variance, which are then used for normalization during +evaluation. The running estimates are kept with a default momentum +of 0.1.

    +

    If track_running_stats is set to False, this layer then does not +keep running estimates, and batch statistics are instead used during +evaluation time as well.

    +
    +

    Note

    +

    This momentum argument is different from one used in optimizer +classes and the conventional notion of momentum. Mathematically, the +update rule for running statistics here is +\(\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t\), +where \(\hat{x}\) is the estimated statistic and \(x_t\) is the +new observed value.

    +
    +

Because Batch Normalization is done over the C dimension, computing statistics on (N, D, H, W) slices, it is common terminology to call this Volumetric Batch Normalization or Spatio-temporal Batch Normalization.

    +
    +
    Parameters
    +
      +
    • num_features\(C\) from an expected input of size +\((N, C, D, H, W)\)

    • +
    • eps – a value added to the denominator for numerical stability. +Default: 1e-5

    • +
    • momentum – the value used for the running_mean and running_var +computation. Can be set to None for cumulative moving average +(i.e. simple average). Default: 0.1

    • +
    • affine – a boolean value that when set to True, this module has +learnable affine parameters. Default: True

    • +
    • track_running_stats – a boolean value that when set to True, this +module tracks the running mean and variance, and when set to False, +this module does not track such statistics and always uses batch +statistics in both training and eval modes. Default: True

    • +
    +
    +
    +
    +
    Shape:
      +
    • Input: \((N, C, D, H, W)\)

    • +
    • Output: \((N, C, D, H, W)\) (same shape as input)

    • +
    +
    +
    +

    Examples:

    +
    >>> # With Learnable Parameters
    +>>> m = nn.BatchNorm3d(100)
    +>>> # Without Learnable Parameters
    +>>> m = nn.BatchNorm3d(100, affine=False)
    +>>> input = torch.randn(20, 100, 35, 45, 10)
    +>>> output = m(input)
    +
    +
    +
    + +
    +
    +

    GroupNorm

    +
    +
    +class torch.nn.GroupNorm(num_groups, num_channels, eps=1e-05, affine=True)[source]
    +

    Applies Group Normalization over a mini-batch of inputs as described in +the paper Group Normalization .

    +
    +\[y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta + +\]
    +

The input channels are separated into num_groups groups, each containing num_channels / num_groups channels. The mean and standard-deviation are calculated separately over each group. \(\gamma\) and \(\beta\) are learnable per-channel affine transform parameter vectors of size num_channels if affine is True.

    +

    This layer uses statistics computed from input data in both training and +evaluation modes.

    +
    +
    Parameters
    +
      +
    • num_groups (int) – number of groups to separate the channels into

    • +
    • num_channels (int) – number of channels expected in input

    • +
    • eps – a value added to the denominator for numerical stability. Default: 1e-5

    • +
    • affine – a boolean value that when set to True, this module +has learnable per-channel affine parameters initialized to ones (for weights) +and zeros (for biases). Default: True.

    • +
    +
    +
    +
    +
    Shape:
      +
    • Input: \((N, C, *)\) where \(C=\text{num\_channels}\)

    • +
    • Output: \((N, C, *)\) (same shape as input)

    • +
    +
    +
    +

    Examples:

    +
    >>> input = torch.randn(20, 6, 10, 10)
    +>>> # Separate 6 channels into 3 groups
    +>>> m = nn.GroupNorm(3, 6)
    +>>> # Separate 6 channels into 6 groups (equivalent with InstanceNorm)
    +>>> m = nn.GroupNorm(6, 6)
    +>>> # Put all 6 channels into a single group (equivalent with LayerNorm)
    +>>> m = nn.GroupNorm(1, 6)
    +>>> # Activating the module
    +>>> output = m(input)
    +
    +
    +
    + +
    +
    +
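As a quick numerical check of the equivalences mentioned in the comments above (a sketch that holds at default initialization, where \(\gamma = 1\) and \(\beta = 0\)):

>>> import torch
>>> from torch import nn
>>> x = torch.randn(20, 6, 10, 10)
>>> # One group per channel matches InstanceNorm2d (which has no affine parameters by default)
>>> torch.allclose(nn.GroupNorm(6, 6)(x), nn.InstanceNorm2d(6)(x), atol=1e-5)
True
>>> # A single group matches LayerNorm over the (C, H, W) dimensions
>>> torch.allclose(nn.GroupNorm(1, 6)(x), nn.LayerNorm([6, 10, 10])(x), atol=1e-5)
True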

    SyncBatchNorm

    +
    +
    +class torch.nn.SyncBatchNorm(num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True, process_group=None)[source]
    +

Applies Batch Normalization over an N-dimensional input (a mini-batch of [N-2]D inputs with an additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift .

    +
    +\[y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta\]
    +

The mean and standard-deviation are calculated per-dimension over all mini-batches of the same process group. \(\gamma\) and \(\beta\) are learnable parameter vectors of size C (where C is the input size). By default, the elements of \(\gamma\) are set to 1 and the elements of \(\beta\) are set to 0.

    +

    Also by default, during training this layer keeps running estimates of its +computed mean and variance, which are then used for normalization during +evaluation. The running estimates are kept with a default momentum +of 0.1.

    +

    If track_running_stats is set to False, this layer then does not +keep running estimates, and batch statistics are instead used during +evaluation time as well.

    +
    +

    Note

    +

This momentum argument is different from one used in optimizer classes and the conventional notion of momentum. Mathematically, the update rule for running statistics here is \(\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t\), where \(\hat{x}\) is the estimated statistic and \(x_t\) is the new observed value.

    +
    +

Because Batch Normalization is done over the C dimension, computing statistics on (N, +) slices, it is common terminology to call this Volumetric Batch Normalization or Spatio-temporal Batch Normalization.

    +

Currently SyncBatchNorm only supports DistributedDataParallel with a single GPU per process. Use torch.nn.SyncBatchNorm.convert_sync_batchnorm() to convert BatchNorm layers to SyncBatchNorm before wrapping the network with DDP.

    +
    +
    Parameters
    +
      +
    • num_features\(C\) from an expected input of size +\((N, C, +)\)

    • +
    • eps – a value added to the denominator for numerical stability. +Default: 1e-5

    • +
    • momentum – the value used for the running_mean and running_var +computation. Can be set to None for cumulative moving average +(i.e. simple average). Default: 0.1

    • +
    • affine – a boolean value that when set to True, this module has +learnable affine parameters. Default: True

    • +
    • track_running_stats – a boolean value that when set to True, this +module tracks the running mean and variance, and when set to False, +this module does not track such statistics and always uses batch +statistics in both training and eval modes. Default: True

    • +
    • process_group – synchronization of stats happen within each process group +individually. Default behavior is synchronization across the whole +world

    • +
    +
    +
    +
    +
    Shape:
      +
    • Input: \((N, C, +)\)

    • +
    • Output: \((N, C, +)\) (same shape as input)

    • +
    +
    +
    +

    Examples:

    +
    >>> # With Learnable Parameters
    +>>> m = nn.SyncBatchNorm(100)
    +>>> # creating process group (optional)
    +>>> # process_ids is a list of int identifying rank ids.
    +>>> process_group = torch.distributed.new_group(process_ids)
    +>>> # Without Learnable Parameters
+>>> m = nn.SyncBatchNorm(100, affine=False, process_group=process_group)
    +>>> input = torch.randn(20, 100, 35, 45, 10)
    +>>> output = m(input)
    +
    +>>> # network is nn.BatchNorm layer
    +>>> sync_bn_network = nn.SyncBatchNorm.convert_sync_batchnorm(network, process_group)
    +>>> # only single gpu per process is currently supported
    +>>> ddp_sync_bn_network = torch.nn.parallel.DistributedDataParallel(
    +>>>                         sync_bn_network,
    +>>>                         device_ids=[args.local_rank],
    +>>>                         output_device=args.local_rank)
    +
    +
    +
    +
    +classmethod convert_sync_batchnorm(module, process_group=None)[source]
    +

Helper function to convert all torch.nn.BatchNormND layers in the model to torch.nn.SyncBatchNorm layers.

    +
    +
    Parameters
    +
      +
    • module (nn.Module) – containing module

    • +
• process_group (optional) – process group to scope synchronization; default is the whole world

    • +
    +
    +
    +


    +
    +
    Returns
    +

The original module with its BatchNorm layers converted to torch.nn.SyncBatchNorm layers

    +
    +
    +

    Example:

    +
    >>> # Network with nn.BatchNorm layer
    +>>> module = torch.nn.Sequential(
    +>>>            torch.nn.Linear(20, 100),
    +>>>            torch.nn.BatchNorm1d(100)
    +>>>          ).cuda()
    +>>> # creating process group (optional)
    +>>> # process_ids is a list of int identifying rank ids.
    +>>> process_group = torch.distributed.new_group(process_ids)
+>>> sync_bn_module = torch.nn.SyncBatchNorm.convert_sync_batchnorm(module, process_group)
    +
    +
    +
    + +
    + +
    +
    +

    InstanceNorm1d

    +
    +
    +class torch.nn.InstanceNorm1d(num_features, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False)[source]
    +

    Applies Instance Normalization over a 3D input (a mini-batch of 1D +inputs with optional additional channel dimension) as described in the paper +Instance Normalization: The Missing Ingredient for Fast Stylization .

    +
    +\[y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta\]
    +

    The mean and standard-deviation are calculated per-dimension separately +for each object in a mini-batch. \(\gamma\) and \(\beta\) are learnable parameter vectors +of size C (where C is the input size) if affine is True.

    +

    By default, this layer uses instance statistics computed from input data in +both training and evaluation modes.

    +

    If track_running_stats is set to True, during training this +layer keeps running estimates of its computed mean and variance, which are +then used for normalization during evaluation. The running estimates are +kept with a default momentum of 0.1.

    +
    +

    Note

    +

This momentum argument is different from one used in optimizer classes and the conventional notion of momentum. Mathematically, the update rule for running statistics here is \(\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t\), where \(\hat{x}\) is the estimated statistic and \(x_t\) is the new observed value.

    +
    +
    +

    Note

    +

InstanceNorm1d and LayerNorm are very similar, but have some subtle differences. InstanceNorm1d is applied on each channel of channeled data like multidimensional time series, but LayerNorm is usually applied over a whole sample and often in NLP tasks. Additionally, LayerNorm applies an elementwise affine transform, while InstanceNorm1d usually does not apply an affine transform.

    +
    +
    +
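The following sketch illustrates the distinction. The two modules coincide numerically here only because the normalized dimensions happen to match; in typical use, LayerNorm would be given the shape of the whole sample rather than normalizing each channel's series separately:

>>> import torch
>>> from torch import nn
>>> x = torch.randn(4, 3, 50)      # (N, C, L) multidimensional time series
>>> inorm = nn.InstanceNorm1d(3)   # normalizes each (n, c) series over L
>>> lnorm = nn.LayerNorm(50)       # normalizes over the last dimension only
>>> torch.allclose(inorm(x), lnorm(x), atol=1e-5)
True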
    Parameters
    +
      +
    • num_features\(C\) from an expected input of size +\((N, C, L)\) or \(L\) from input of size \((N, L)\)

    • +
    • eps – a value added to the denominator for numerical stability. Default: 1e-5

    • +
    • momentum – the value used for the running_mean and running_var computation. Default: 0.1

    • +
    • affine – a boolean value that when set to True, this module has +learnable affine parameters, initialized the same way as done for batch normalization. +Default: False.

    • +
    • track_running_stats – a boolean value that when set to True, this +module tracks the running mean and variance, and when set to False, +this module does not track such statistics and always uses batch +statistics in both training and eval modes. Default: False

    • +
    +
    +
    +
    +
    Shape:
      +
    • Input: \((N, C, L)\)

    • +
    • Output: \((N, C, L)\) (same shape as input)

    • +
    +
    +
    +

    Examples:

    +
    >>> # Without Learnable Parameters
    +>>> m = nn.InstanceNorm1d(100)
    +>>> # With Learnable Parameters
    +>>> m = nn.InstanceNorm1d(100, affine=True)
    +>>> input = torch.randn(20, 100, 40)
    +>>> output = m(input)
    +
    +
    +
    + +
    +
    +

    InstanceNorm2d

    +
    +
    +class torch.nn.InstanceNorm2d(num_features, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False)[source]
    +

    Applies Instance Normalization over a 4D input (a mini-batch of 2D inputs +with additional channel dimension) as described in the paper +Instance Normalization: The Missing Ingredient for Fast Stylization .

    +
    +\[y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta\]
    +

    The mean and standard-deviation are calculated per-dimension separately +for each object in a mini-batch. \(\gamma\) and \(\beta\) are learnable parameter vectors +of size C (where C is the input size) if affine is True.

    +

    By default, this layer uses instance statistics computed from input data in +both training and evaluation modes.

    +

    If track_running_stats is set to True, during training this +layer keeps running estimates of its computed mean and variance, which are +then used for normalization during evaluation. The running estimates are +kept with a default momentum of 0.1.

    +
    +

    Note

    +

This momentum argument is different from one used in optimizer classes and the conventional notion of momentum. Mathematically, the update rule for running statistics here is \(\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t\), where \(\hat{x}\) is the estimated statistic and \(x_t\) is the new observed value.

    +
    +
    +

    Note

    +

InstanceNorm2d and LayerNorm are very similar, but have some subtle differences. InstanceNorm2d is applied on each channel of channeled data like RGB images, but LayerNorm is usually applied over a whole sample and often in NLP tasks. Additionally, LayerNorm applies an elementwise affine transform, while InstanceNorm2d usually does not apply an affine transform.

    +
    +
    +
    Parameters
    +
      +
    • num_features\(C\) from an expected input of size +\((N, C, H, W)\)

    • +
    • eps – a value added to the denominator for numerical stability. Default: 1e-5

    • +
    • momentum – the value used for the running_mean and running_var computation. Default: 0.1

    • +
    • affine – a boolean value that when set to True, this module has +learnable affine parameters, initialized the same way as done for batch normalization. +Default: False.

    • +
    • track_running_stats – a boolean value that when set to True, this +module tracks the running mean and variance, and when set to False, +this module does not track such statistics and always uses batch +statistics in both training and eval modes. Default: False

    • +
    +
    +
    +
    +
    Shape:
      +
    • Input: \((N, C, H, W)\)

    • +
    • Output: \((N, C, H, W)\) (same shape as input)

    • +
    +
    +
    +

    Examples:

    +
    >>> # Without Learnable Parameters
    +>>> m = nn.InstanceNorm2d(100)
    +>>> # With Learnable Parameters
    +>>> m = nn.InstanceNorm2d(100, affine=True)
    +>>> input = torch.randn(20, 100, 35, 45)
    +>>> output = m(input)
    +
    +
    +
    + +
    +
    +

    InstanceNorm3d

    +
    +
    +class torch.nn.InstanceNorm3d(num_features, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False)[source]
    +

    Applies Instance Normalization over a 5D input (a mini-batch of 3D inputs +with additional channel dimension) as described in the paper +Instance Normalization: The Missing Ingredient for Fast Stylization .

    +
    +\[y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta\]
    +

    The mean and standard-deviation are calculated per-dimension separately +for each object in a mini-batch. \(\gamma\) and \(\beta\) are learnable parameter vectors +of size C (where C is the input size) if affine is True.

    +

    By default, this layer uses instance statistics computed from input data in +both training and evaluation modes.

    +

    If track_running_stats is set to True, during training this +layer keeps running estimates of its computed mean and variance, which are +then used for normalization during evaluation. The running estimates are +kept with a default momentum of 0.1.

    +
    +

    Note

    +

This momentum argument is different from one used in optimizer classes and the conventional notion of momentum. Mathematically, the update rule for running statistics here is \(\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t\), where \(\hat{x}\) is the estimated statistic and \(x_t\) is the new observed value.

    +
    +
    +

    Note

    +

InstanceNorm3d and LayerNorm are very similar, but have some subtle differences. InstanceNorm3d is applied on each channel of channeled data like 3D models with RGB color, but LayerNorm is usually applied over a whole sample and often in NLP tasks. Additionally, LayerNorm applies an elementwise affine transform, while InstanceNorm3d usually does not apply an affine transform.

    +
    +
    +
    Parameters
    +
      +
    • num_features\(C\) from an expected input of size +\((N, C, D, H, W)\)

    • +
    • eps – a value added to the denominator for numerical stability. Default: 1e-5

    • +
    • momentum – the value used for the running_mean and running_var computation. Default: 0.1

    • +
    • affine – a boolean value that when set to True, this module has +learnable affine parameters, initialized the same way as done for batch normalization. +Default: False.

    • +
    • track_running_stats – a boolean value that when set to True, this +module tracks the running mean and variance, and when set to False, +this module does not track such statistics and always uses batch +statistics in both training and eval modes. Default: False

    • +
    +
    +
    +
    +
    Shape:
      +
    • Input: \((N, C, D, H, W)\)

    • +
    • Output: \((N, C, D, H, W)\) (same shape as input)

    • +
    +
    +
    +

    Examples:

    +
    >>> # Without Learnable Parameters
    +>>> m = nn.InstanceNorm3d(100)
    +>>> # With Learnable Parameters
    +>>> m = nn.InstanceNorm3d(100, affine=True)
    +>>> input = torch.randn(20, 100, 35, 45, 10)
    +>>> output = m(input)
    +
    +
    +
    + +
    +
    +

    LayerNorm

    +
    +
    +class torch.nn.LayerNorm(normalized_shape, eps=1e-05, elementwise_affine=True)[source]
    +

    Applies Layer Normalization over a mini-batch of inputs as described in +the paper Layer Normalization .

    +
    +\[y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta + +\]
    +

The mean and standard-deviation are calculated separately over the last certain number of dimensions, which have to be of the shape specified by normalized_shape. \(\gamma\) and \(\beta\) are learnable affine transform parameters of normalized_shape if elementwise_affine is True.

    +
    +

    Note

    +

Unlike Batch Normalization and Instance Normalization, which apply a scalar scale and bias for each entire channel/plane with the affine option, Layer Normalization applies a per-element scale and bias with elementwise_affine.

    +
    +

    This layer uses statistics computed from input data in both training and +evaluation modes.

    +
    +
    Parameters
    +
      +
    • normalized_shape (int or list or torch.Size) –

      input shape from an expected input +of size

      +
      +\[[* \times \text{normalized\_shape}[0] \times \text{normalized\_shape}[1] + \times \ldots \times \text{normalized\_shape}[-1]] + +\]
      +

      If a single integer is used, it is treated as a singleton list, and this module will +normalize over the last dimension which is expected to be of that specific size.

      +

    • +
    • eps – a value added to the denominator for numerical stability. Default: 1e-5

    • +
    • elementwise_affine – a boolean value that when set to True, this module +has learnable per-element affine parameters initialized to ones (for weights) +and zeros (for biases). Default: True.

    • +
    +
    +
    +
    +
    Shape:
      +
    • Input: \((N, *)\)

    • +
    • Output: \((N, *)\) (same shape as input)

    • +
    +
    +
    +

    Examples:

    +
    >>> input = torch.randn(20, 5, 10, 10)
    +>>> # With Learnable Parameters
    +>>> m = nn.LayerNorm(input.size()[1:])
    +>>> # Without Learnable Parameters
    +>>> m = nn.LayerNorm(input.size()[1:], elementwise_affine=False)
    +>>> # Normalize over last two dimensions
    +>>> m = nn.LayerNorm([10, 10])
    +>>> # Normalize over last dimension of size 10
    +>>> m = nn.LayerNorm(10)
    +>>> # Activating the module
    +>>> output = m(input)
    +
    +
    +
    + +
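A minimal check of what the module computes: after normalization, each sample has (approximately) zero mean and unit variance over the normalized dimensions, up to the eps term in the denominator:

>>> import torch
>>> from torch import nn
>>> x = torch.randn(20, 5, 10, 10)
>>> y = nn.LayerNorm(x.size()[1:])(x)
>>> bool(y.mean(dim=(1, 2, 3)).abs().max() < 1e-5)
True
>>> bool((y.var(dim=(1, 2, 3), unbiased=False) - 1).abs().max() < 1e-3)
True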
    +
    +

    LocalResponseNorm

    +
    +
    +class torch.nn.LocalResponseNorm(size, alpha=0.0001, beta=0.75, k=1.0)[source]
    +

    Applies local response normalization over an input signal composed +of several input planes, where channels occupy the second dimension. +Applies normalization across channels.

    +
    +\[b_{c} = a_{c}\left(k + \frac{\alpha}{n} +\sum_{c'=\max(0, c-n/2)}^{\min(N-1,c+n/2)}a_{c'}^2\right)^{-\beta} + +\]
    +
    +
    Parameters
    +
      +
    • size – amount of neighbouring channels used for normalization

    • +
    • alpha – multiplicative factor. Default: 0.0001

    • +
    • beta – exponent. Default: 0.75

    • +
    • k – additive factor. Default: 1

    • +
    +
    +
    +
    +
    Shape:
      +
    • Input: \((N, C, *)\)

    • +
    • Output: \((N, C, *)\) (same shape as input)

    • +
    +
    +
    +

    Examples:

    +
    >>> lrn = nn.LocalResponseNorm(2)
    +>>> signal_2d = torch.randn(32, 5, 24, 24)
    +>>> signal_4d = torch.randn(16, 5, 7, 7, 7, 7)
    +>>> output_2d = lrn(signal_2d)
    +>>> output_4d = lrn(signal_4d)
    +
    +
    +
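The formula above can be reproduced directly. This sketch (for an odd window size, so the channel window is centred) compares the module against an explicit loop over channels:

>>> import torch
>>> from torch import nn
>>> size, alpha, beta, k = 3, 1e-4, 0.75, 1.0
>>> x = torch.randn(2, 5, 4, 4)
>>> out = torch.empty_like(x)
>>> for c in range(x.size(1)):  # sum squares over the clamped channel window
        lo, hi = max(0, c - size // 2), min(x.size(1) - 1, c + size // 2)
        denom = (k + (alpha / size) * (x[:, lo:hi + 1] ** 2).sum(dim=1)) ** beta
        out[:, c] = x[:, c] / denom
>>> torch.allclose(nn.LocalResponseNorm(size, alpha, beta, k)(x), out, atol=1e-6)
True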
    + +
    +
    +
    +

    Recurrent layers

    +
    +

    RNN

    +
    +
    +class torch.nn.RNN(*args, **kwargs)[source]
    +

Applies a multi-layer Elman RNN with \(\tanh\) or \(\text{ReLU}\) non-linearity to an input sequence.

    +

    For each element in the input sequence, each layer computes the following +function:

    +
    +\[h_t = \text{tanh}(W_{ih} x_t + b_{ih} + W_{hh} h_{(t-1)} + b_{hh}) + +\]
    +

where \(h_t\) is the hidden state at time t, \(x_t\) is the input at time t, and \(h_{(t-1)}\) is the hidden state of the layer at time t-1 or the initial hidden state at time 0. If nonlinearity is 'relu', then ReLU is used instead of tanh.

    +
    +
    Parameters
    +
      +
    • input_size – The number of expected features in the input x

    • +
    • hidden_size – The number of features in the hidden state h

    • +
    • num_layers – Number of recurrent layers. E.g., setting num_layers=2 +would mean stacking two RNNs together to form a stacked RNN, +with the second RNN taking in outputs of the first RNN and +computing the final results. Default: 1

    • +
    • nonlinearity – The non-linearity to use. Can be either 'tanh' or 'relu'. Default: 'tanh'

    • +
    • bias – If False, then the layer does not use bias weights b_ih and b_hh. +Default: True

    • +
    • batch_first – If True, then the input and output tensors are provided +as (batch, seq, feature). Default: False

    • +
    • dropout – If non-zero, introduces a Dropout layer on the outputs of each +RNN layer except the last layer, with dropout probability equal to +dropout. Default: 0

    • +
    • bidirectional – If True, becomes a bidirectional RNN. Default: False

    • +
    +
    +
    +
    +
    Inputs: input, h_0
      +
    • input of shape (seq_len, batch, input_size): tensor containing the features +of the input sequence. The input can also be a packed variable length +sequence. See torch.nn.utils.rnn.pack_padded_sequence() +or torch.nn.utils.rnn.pack_sequence() +for details.

    • +
    • h_0 of shape (num_layers * num_directions, batch, hidden_size): tensor +containing the initial hidden state for each element in the batch. +Defaults to zero if not provided. If the RNN is bidirectional, +num_directions should be 2, else it should be 1.

    • +
    +
    +
    Outputs: output, h_n
      +
    • output of shape (seq_len, batch, num_directions * hidden_size): tensor +containing the output features (h_t) from the last layer of the RNN, +for each t. If a torch.nn.utils.rnn.PackedSequence has +been given as the input, the output will also be a packed sequence.

      +

      For the unpacked case, the directions can be separated +using output.view(seq_len, batch, num_directions, hidden_size), +with forward and backward being direction 0 and 1 respectively. +Similarly, the directions can be separated in the packed case.

      +
    • +
    • h_n of shape (num_layers * num_directions, batch, hidden_size): tensor +containing the hidden state for t = seq_len.

      +

      Like output, the layers can be separated using +h_n.view(num_layers, num_directions, batch, hidden_size).

      +
    • +
    +
    +
    Shape:
      +
    • Input1: \((L, N, H_{in})\) tensor containing input features where +\(H_{in}=\text{input\_size}\) and L represents a sequence length.

    • +
• Input2: \((S, N, H_{out})\) tensor containing the initial hidden state for each element in the batch, where \(S=\text{num\_layers} * \text{num\_directions}\) and \(H_{out}=\text{hidden\_size}\). Defaults to zero if not provided. If the RNN is bidirectional, num_directions should be 2, else it should be 1.

    • +
    • Output1: \((L, N, H_{all})\) where \(H_{all}=\text{num\_directions} * \text{hidden\_size}\)

    • +
    • Output2: \((S, N, H_{out})\) tensor containing the next hidden state +for each element in the batch

    • +
    +
    +
    +
    +
    Variables
    +
      +
    • ~RNN.weight_ih_l[k] – the learnable input-hidden weights of the k-th layer, +of shape (hidden_size, input_size) for k = 0. Otherwise, the shape is +(hidden_size, num_directions * hidden_size)

    • +
    • ~RNN.weight_hh_l[k] – the learnable hidden-hidden weights of the k-th layer, +of shape (hidden_size, hidden_size)

    • +
    • ~RNN.bias_ih_l[k] – the learnable input-hidden bias of the k-th layer, +of shape (hidden_size)

    • +
    • ~RNN.bias_hh_l[k] – the learnable hidden-hidden bias of the k-th layer, +of shape (hidden_size)

    • +
    +
    +
    +
    +

    Note

    +

    All the weights and biases are initialized from \(\mathcal{U}(-\sqrt{k}, \sqrt{k})\) +where \(k = \frac{1}{\text{hidden\_size}}\)

    +
    +
    +

    Note

    +

If the following conditions are satisfied: 1) cudnn is enabled, 2) the input data is on the GPU, 3) the input data has dtype torch.float16, 4) a V100 GPU is used, and 5) the input data is not in PackedSequence format, then a persistent algorithm can be selected to improve performance.

    +
    +

    Examples:

    +
    >>> rnn = nn.RNN(10, 20, 2)
    +>>> input = torch.randn(5, 3, 10)
    +>>> h0 = torch.randn(2, 3, 20)
    +>>> output, hn = rnn(input, h0)
    +
    +
    +
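The recurrence above can be verified directly against the module's parameters. A minimal sketch for a single layer and a single time step:

>>> import torch
>>> from torch import nn
>>> rnn = nn.RNN(10, 20, 1)
>>> x = torch.randn(1, 3, 10)
>>> h0 = torch.zeros(1, 3, 20)
>>> output, hn = rnn(x, h0)
>>> manual = torch.tanh(x[0] @ rnn.weight_ih_l0.t() + rnn.bias_ih_l0
                        + h0[0] @ rnn.weight_hh_l0.t() + rnn.bias_hh_l0)
>>> torch.allclose(output[0], manual, atol=1e-5)
True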
    + +
    +
    +

    LSTM

    +
    +
    +class torch.nn.LSTM(*args, **kwargs)[source]
    +

    Applies a multi-layer long short-term memory (LSTM) RNN to an input +sequence.

    +

    For each element in the input sequence, each layer computes the following +function:

    +
    +\[\begin{array}{ll} \\ + i_t = \sigma(W_{ii} x_t + b_{ii} + W_{hi} h_{(t-1)} + b_{hi}) \\ + f_t = \sigma(W_{if} x_t + b_{if} + W_{hf} h_{(t-1)} + b_{hf}) \\ + g_t = \tanh(W_{ig} x_t + b_{ig} + W_{hg} h_{(t-1)} + b_{hg}) \\ + o_t = \sigma(W_{io} x_t + b_{io} + W_{ho} h_{(t-1)} + b_{ho}) \\ + c_t = f_t * c_{(t-1)} + i_t * g_t \\ + h_t = o_t * \tanh(c_t) \\ +\end{array} + +\]
    +

    where \(h_t\) is the hidden state at time t, \(c_t\) is the cell +state at time t, \(x_t\) is the input at time t, \(h_{(t-1)}\) +is the hidden state of the layer at time t-1 or the initial hidden +state at time 0, and \(i_t\), \(f_t\), \(g_t\), +\(o_t\) are the input, forget, cell, and output gates, respectively. +\(\sigma\) is the sigmoid function, and \(*\) is the Hadamard product.

    +

    In a multilayer LSTM, the input \(x^{(l)}_t\) of the \(l\) -th layer +(\(l >= 2\)) is the hidden state \(h^{(l-1)}_t\) of the previous layer multiplied by +dropout \(\delta^{(l-1)}_t\) where each \(\delta^{(l-1)}_t\) is a Bernoulli random +variable which is \(0\) with probability dropout.

    +
    +
    Parameters
    +
      +
    • input_size – The number of expected features in the input x

    • +
    • hidden_size – The number of features in the hidden state h

    • +
    • num_layers – Number of recurrent layers. E.g., setting num_layers=2 +would mean stacking two LSTMs together to form a stacked LSTM, +with the second LSTM taking in outputs of the first LSTM and +computing the final results. Default: 1

    • +
    • bias – If False, then the layer does not use bias weights b_ih and b_hh. +Default: True

    • +
    • batch_first – If True, then the input and output tensors are provided +as (batch, seq, feature). Default: False

    • +
    • dropout – If non-zero, introduces a Dropout layer on the outputs of each +LSTM layer except the last layer, with dropout probability equal to +dropout. Default: 0

    • +
    • bidirectional – If True, becomes a bidirectional LSTM. Default: False

    • +
    +
    +
    +
    +
    Inputs: input, (h_0, c_0)
      +
    • input of shape (seq_len, batch, input_size): tensor containing the features +of the input sequence. +The input can also be a packed variable length sequence. +See torch.nn.utils.rnn.pack_padded_sequence() or +torch.nn.utils.rnn.pack_sequence() for details.

    • +
    • h_0 of shape (num_layers * num_directions, batch, hidden_size): tensor +containing the initial hidden state for each element in the batch. +If the LSTM is bidirectional, num_directions should be 2, else it should be 1.

    • +
    • c_0 of shape (num_layers * num_directions, batch, hidden_size): tensor +containing the initial cell state for each element in the batch.

      +

      If (h_0, c_0) is not provided, both h_0 and c_0 default to zero.

      +
    • +
    +
    +
    Outputs: output, (h_n, c_n)
      +
    • output of shape (seq_len, batch, num_directions * hidden_size): tensor +containing the output features (h_t) from the last layer of the LSTM, +for each t. If a torch.nn.utils.rnn.PackedSequence has been +given as the input, the output will also be a packed sequence.

      +

      For the unpacked case, the directions can be separated +using output.view(seq_len, batch, num_directions, hidden_size), +with forward and backward being direction 0 and 1 respectively. +Similarly, the directions can be separated in the packed case.

      +
    • +
    • h_n of shape (num_layers * num_directions, batch, hidden_size): tensor +containing the hidden state for t = seq_len.

      +

      Like output, the layers can be separated using +h_n.view(num_layers, num_directions, batch, hidden_size) and similarly for c_n.

      +
    • +
    • c_n of shape (num_layers * num_directions, batch, hidden_size): tensor +containing the cell state for t = seq_len.

    • +
    +
    +
    +
    +
    Variables
    +
      +
    • ~LSTM.weight_ih_l[k] – the learnable input-hidden weights of the \(\text{k}^{th}\) layer +(W_ii|W_if|W_ig|W_io), of shape (4*hidden_size, input_size) for k = 0. +Otherwise, the shape is (4*hidden_size, num_directions * hidden_size)

    • +
    • ~LSTM.weight_hh_l[k] – the learnable hidden-hidden weights of the \(\text{k}^{th}\) layer +(W_hi|W_hf|W_hg|W_ho), of shape (4*hidden_size, hidden_size)

    • +
    • ~LSTM.bias_ih_l[k] – the learnable input-hidden bias of the \(\text{k}^{th}\) layer +(b_ii|b_if|b_ig|b_io), of shape (4*hidden_size)

    • +
    • ~LSTM.bias_hh_l[k] – the learnable hidden-hidden bias of the \(\text{k}^{th}\) layer +(b_hi|b_hf|b_hg|b_ho), of shape (4*hidden_size)

    • +
    +
    +
    +
    +

    Note

    +

    All the weights and biases are initialized from \(\mathcal{U}(-\sqrt{k}, \sqrt{k})\) +where \(k = \frac{1}{\text{hidden\_size}}\)

    +
    +
    +

    Note

    +

If the following conditions are satisfied: 1) cudnn is enabled, 2) the input data is on the GPU, 3) the input data has dtype torch.float16, 4) a V100 GPU is used, and 5) the input data is not in PackedSequence format, then a persistent algorithm can be selected to improve performance.

    +
    +

    Examples:

    +
    >>> rnn = nn.LSTM(10, 20, 2)
    +>>> input = torch.randn(5, 3, 10)
    +>>> h0 = torch.randn(2, 3, 20)
    +>>> c0 = torch.randn(2, 3, 20)
    +>>> output, (hn, cn) = rnn(input, (h0, c0))
    +
    +
    +
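As described above, the directions of a bidirectional run can be separated with a view. A sketch checking that the last forward-direction output of the last layer matches h_n:

>>> import torch
>>> from torch import nn
>>> rnn = nn.LSTM(10, 20, num_layers=2, bidirectional=True)
>>> output, (hn, cn) = rnn(torch.randn(5, 3, 10))
>>> directions = output.view(5, 3, 2, 20)  # (seq_len, batch, num_directions, hidden_size)
>>> layers = hn.view(2, 2, 3, 20)          # (num_layers, num_directions, batch, hidden_size)
>>> torch.allclose(directions[-1, :, 0], layers[-1, 0])
True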
    + +
    +
    +

    GRU

    +
    +
    +class torch.nn.GRU(*args, **kwargs)[source]
    +

    Applies a multi-layer gated recurrent unit (GRU) RNN to an input sequence.

    +

    For each element in the input sequence, each layer computes the following +function:

    +
    +\[\begin{array}{ll} + r_t = \sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\ + z_t = \sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\ + n_t = \tanh(W_{in} x_t + b_{in} + r_t * (W_{hn} h_{(t-1)}+ b_{hn})) \\ + h_t = (1 - z_t) * n_t + z_t * h_{(t-1)} +\end{array} + +\]
    +

    where \(h_t\) is the hidden state at time t, \(x_t\) is the input +at time t, \(h_{(t-1)}\) is the hidden state of the layer +at time t-1 or the initial hidden state at time 0, and \(r_t\), +\(z_t\), \(n_t\) are the reset, update, and new gates, respectively. +\(\sigma\) is the sigmoid function, and \(*\) is the Hadamard product.

    +

    In a multilayer GRU, the input \(x^{(l)}_t\) of the \(l\) -th layer +(\(l >= 2\)) is the hidden state \(h^{(l-1)}_t\) of the previous layer multiplied by +dropout \(\delta^{(l-1)}_t\) where each \(\delta^{(l-1)}_t\) is a Bernoulli random +variable which is \(0\) with probability dropout.

    +
    +
    Parameters
    +
      +
    • input_size – The number of expected features in the input x

    • +
    • hidden_size – The number of features in the hidden state h

    • +
    • num_layers – Number of recurrent layers. E.g., setting num_layers=2 +would mean stacking two GRUs together to form a stacked GRU, +with the second GRU taking in outputs of the first GRU and +computing the final results. Default: 1

    • +
    • bias – If False, then the layer does not use bias weights b_ih and b_hh. +Default: True

    • +
    • batch_first – If True, then the input and output tensors are provided +as (batch, seq, feature). Default: False

    • +
    • dropout – If non-zero, introduces a Dropout layer on the outputs of each +GRU layer except the last layer, with dropout probability equal to +dropout. Default: 0

    • +
    • bidirectional – If True, becomes a bidirectional GRU. Default: False

    • +
    +
    +
    +
    +
    Inputs: input, h_0
      +
    • input of shape (seq_len, batch, input_size): tensor containing the features +of the input sequence. The input can also be a packed variable length +sequence. See torch.nn.utils.rnn.pack_padded_sequence() +for details.

    • +
    • h_0 of shape (num_layers * num_directions, batch, hidden_size): tensor +containing the initial hidden state for each element in the batch. +Defaults to zero if not provided. If the RNN is bidirectional, +num_directions should be 2, else it should be 1.

    • +
    +
    +
    Outputs: output, h_n
      +
    • output of shape (seq_len, batch, num_directions * hidden_size): tensor +containing the output features h_t from the last layer of the GRU, +for each t. If a torch.nn.utils.rnn.PackedSequence has been +given as the input, the output will also be a packed sequence. +For the unpacked case, the directions can be separated +using output.view(seq_len, batch, num_directions, hidden_size), +with forward and backward being direction 0 and 1 respectively.

      +

      Similarly, the directions can be separated in the packed case.

      +
    • +
    • h_n of shape (num_layers * num_directions, batch, hidden_size): tensor +containing the hidden state for t = seq_len

      +

      Like output, the layers can be separated using +h_n.view(num_layers, num_directions, batch, hidden_size).

      +
    • +
    +
    +
    Shape:
      +
    • Input1: \((L, N, H_{in})\) tensor containing input features where +\(H_{in}=\text{input\_size}\) and L represents a sequence length.

    • +
• Input2: \((S, N, H_{out})\) tensor containing the initial hidden state for each element in the batch, where \(S=\text{num\_layers} * \text{num\_directions}\) and \(H_{out}=\text{hidden\_size}\). Defaults to zero if not provided. If the RNN is bidirectional, num_directions should be 2, else it should be 1.

    • +
    • Output1: \((L, N, H_{all})\) where \(H_{all}=\text{num\_directions} * \text{hidden\_size}\)

    • +
    • Output2: \((S, N, H_{out})\) tensor containing the next hidden state +for each element in the batch

    • +
    +
    +
    +
    +
    Variables
    +
      +
    • ~GRU.weight_ih_l[k] – the learnable input-hidden weights of the \(\text{k}^{th}\) layer +(W_ir|W_iz|W_in), of shape (3*hidden_size, input_size) for k = 0. +Otherwise, the shape is (3*hidden_size, num_directions * hidden_size)

    • +
    • ~GRU.weight_hh_l[k] – the learnable hidden-hidden weights of the \(\text{k}^{th}\) layer +(W_hr|W_hz|W_hn), of shape (3*hidden_size, hidden_size)

    • +
    • ~GRU.bias_ih_l[k] – the learnable input-hidden bias of the \(\text{k}^{th}\) layer +(b_ir|b_iz|b_in), of shape (3*hidden_size)

    • +
    • ~GRU.bias_hh_l[k] – the learnable hidden-hidden bias of the \(\text{k}^{th}\) layer +(b_hr|b_hz|b_hn), of shape (3*hidden_size)

    • +
    +
    +
    +
    +

    Note

    +

    All the weights and biases are initialized from \(\mathcal{U}(-\sqrt{k}, \sqrt{k})\) +where \(k = \frac{1}{\text{hidden\_size}}\)

    +
    +
    +

    Note

    +

If the following conditions are satisfied: 1) cudnn is enabled, 2) the input data is on the GPU, 3) the input data has dtype torch.float16, 4) a V100 GPU is used, and 5) the input data is not in PackedSequence format, then a persistent algorithm can be selected to improve performance.

    +
    +

    Examples:

    +
    >>> rnn = nn.GRU(10, 20, 2)
    +>>> input = torch.randn(5, 3, 10)
    +>>> h0 = torch.randn(2, 3, 20)
    +>>> output, hn = rnn(input, h0)
    +
    +
    +
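Since GRU and GRUCell implement the same gate equations, a single-layer GRU unrolled one step at a time with a GRUCell (after copying the weights) produces identical outputs. A minimal sketch:

>>> import torch
>>> from torch import nn
>>> gru, cell = nn.GRU(10, 20), nn.GRUCell(10, 20)
>>> _ = cell.weight_ih.data.copy_(gru.weight_ih_l0)
>>> _ = cell.weight_hh.data.copy_(gru.weight_hh_l0)
>>> _ = cell.bias_ih.data.copy_(gru.bias_ih_l0)
>>> _ = cell.bias_hh.data.copy_(gru.bias_hh_l0)
>>> x = torch.randn(5, 3, 10)
>>> h = torch.zeros(3, 20)
>>> steps = []
>>> for t in range(5):  # unroll the sequence one step at a time
        h = cell(x[t], h)
        steps.append(h)
>>> output, hn = gru(x)
>>> torch.allclose(output, torch.stack(steps), atol=1e-5)
True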
    + +
    +
    +

    RNNCell

    +
    +
    +class torch.nn.RNNCell(input_size, hidden_size, bias=True, nonlinearity='tanh')[source]
    +

    An Elman RNN cell with tanh or ReLU non-linearity.

    +
    +\[h' = \tanh(W_{ih} x + b_{ih} + W_{hh} h + b_{hh})\]
    +

    If nonlinearity is ‘relu’, then ReLU is used in place of tanh.

    +
    +
    Parameters
    +
      +
    • input_size – The number of expected features in the input x

    • +
    • hidden_size – The number of features in the hidden state h

    • +
    • bias – If False, then the layer does not use bias weights b_ih and b_hh. +Default: True

    • +
    • nonlinearity – The non-linearity to use. Can be either 'tanh' or 'relu'. Default: 'tanh'

    • +
    +
    +
    +
    +
    Inputs: input, hidden
      +
    • input of shape (batch, input_size): tensor containing input features

    • +
    • hidden of shape (batch, hidden_size): tensor containing the initial hidden +state for each element in the batch. +Defaults to zero if not provided.

    • +
    +
    +
    Outputs: h’
      +
    • h’ of shape (batch, hidden_size): tensor containing the next hidden state +for each element in the batch

    • +
    +
    +
    Shape:
      +
    • Input1: \((N, H_{in})\) tensor containing input features where +\(H_{in}\) = input_size

    • +
    • Input2: \((N, H_{out})\) tensor containing the initial hidden +state for each element in the batch where \(H_{out}\) = hidden_size +Defaults to zero if not provided.

    • +
    • Output: \((N, H_{out})\) tensor containing the next hidden state +for each element in the batch

    • +
    +
    +
    +
    +
    Variables
    +
      +
    • ~RNNCell.weight_ih – the learnable input-hidden weights, of shape +(hidden_size, input_size)

    • +
    • ~RNNCell.weight_hh – the learnable hidden-hidden weights, of shape +(hidden_size, hidden_size)

    • +
    • ~RNNCell.bias_ih – the learnable input-hidden bias, of shape (hidden_size)

    • +
    • ~RNNCell.bias_hh – the learnable hidden-hidden bias, of shape (hidden_size)

    • +
    +
    +
    +
    +

    Note

    +

    All the weights and biases are initialized from \(\mathcal{U}(-\sqrt{k}, \sqrt{k})\) +where \(k = \frac{1}{\text{hidden\_size}}\)

    +
    +

    Examples:

    +
    >>> rnn = nn.RNNCell(10, 20)
    +>>> input = torch.randn(6, 3, 10)
    +>>> hx = torch.randn(3, 20)
    +>>> output = []
    +>>> for i in range(6):
    +        hx = rnn(input[i], hx)
    +        output.append(hx)
    +
    +
    +
    + +
    +
    +

    LSTMCell

    +
    +
    +class torch.nn.LSTMCell(input_size, hidden_size, bias=True)[source]
    +

    A long short-term memory (LSTM) cell.

    +
    +\[\begin{array}{ll} +i = \sigma(W_{ii} x + b_{ii} + W_{hi} h + b_{hi}) \\ +f = \sigma(W_{if} x + b_{if} + W_{hf} h + b_{hf}) \\ +g = \tanh(W_{ig} x + b_{ig} + W_{hg} h + b_{hg}) \\ +o = \sigma(W_{io} x + b_{io} + W_{ho} h + b_{ho}) \\ +c' = f * c + i * g \\ +h' = o * \tanh(c') \\ +\end{array}\]
    +

    where \(\sigma\) is the sigmoid function, and \(*\) is the Hadamard product.

    +
    +
    Parameters
    +
      +
    • input_size – The number of expected features in the input x

    • +
    • hidden_size – The number of features in the hidden state h

    • +
    • bias – If False, then the layer does not use bias weights b_ih and +b_hh. Default: True

    • +
    +
    +
    +
    +
    Inputs: input, (h_0, c_0)
      +
    • input of shape (batch, input_size): tensor containing input features

    • +
    • h_0 of shape (batch, hidden_size): tensor containing the initial hidden +state for each element in the batch.

    • +
    • c_0 of shape (batch, hidden_size): tensor containing the initial cell state +for each element in the batch.

      +

      If (h_0, c_0) is not provided, both h_0 and c_0 default to zero.

      +
    • +
    +
    +
    Outputs: (h_1, c_1)
      +
    • h_1 of shape (batch, hidden_size): tensor containing the next hidden state +for each element in the batch

    • +
    • c_1 of shape (batch, hidden_size): tensor containing the next cell state +for each element in the batch

    • +
    +
    +
    +
    +
    Variables
    +
      +
    • ~LSTMCell.weight_ih – the learnable input-hidden weights, of shape +(4*hidden_size, input_size)

    • +
    • ~LSTMCell.weight_hh – the learnable hidden-hidden weights, of shape +(4*hidden_size, hidden_size)

    • +
    • ~LSTMCell.bias_ih – the learnable input-hidden bias, of shape (4*hidden_size)

    • +
    • ~LSTMCell.bias_hh – the learnable hidden-hidden bias, of shape (4*hidden_size)

    • +
    +
    +
    +
    +

    Note

    +

    All the weights and biases are initialized from \(\mathcal{U}(-\sqrt{k}, \sqrt{k})\) +where \(k = \frac{1}{\text{hidden\_size}}\)

    +
    +

    Examples:

    +
    >>> rnn = nn.LSTMCell(10, 20)
    +>>> input = torch.randn(6, 3, 10)
    +>>> hx = torch.randn(3, 20)
    +>>> cx = torch.randn(3, 20)
    +>>> output = []
    +>>> for i in range(6):
    +        hx, cx = rnn(input[i], (hx, cx))
    +        output.append(hx)
    +
    +
    +
    + +
    +
    +

    GRUCell

    +
    +
    +class torch.nn.GRUCell(input_size, hidden_size, bias=True)[source]
    +

    A gated recurrent unit (GRU) cell

    +
    +\[\begin{array}{ll} +r = \sigma(W_{ir} x + b_{ir} + W_{hr} h + b_{hr}) \\ +z = \sigma(W_{iz} x + b_{iz} + W_{hz} h + b_{hz}) \\ +n = \tanh(W_{in} x + b_{in} + r * (W_{hn} h + b_{hn})) \\ +h' = (1 - z) * n + z * h +\end{array}\]
    +

    where \(\sigma\) is the sigmoid function, and \(*\) is the Hadamard product.

    +
    +
    Parameters
    +
      +
    • input_size – The number of expected features in the input x

    • +
    • hidden_size – The number of features in the hidden state h

    • +
    • bias – If False, then the layer does not use bias weights b_ih and +b_hh. Default: True

    • +
    +
    +
    +
    +
    Inputs: input, hidden
      +
    • input of shape (batch, input_size): tensor containing input features

    • +
    • hidden of shape (batch, hidden_size): tensor containing the initial hidden +state for each element in the batch. +Defaults to zero if not provided.

    • +
    +
    +
    Outputs: h’
      +
    • h’ of shape (batch, hidden_size): tensor containing the next hidden state +for each element in the batch

    • +
    +
    +
    Shape:
      +
    • Input1: \((N, H_{in})\) tensor containing input features where +\(H_{in}\) = input_size

    • +
    • Input2: \((N, H_{out})\) tensor containing the initial hidden +state for each element in the batch where \(H_{out}\) = hidden_size +Defaults to zero if not provided.

    • +
    • Output: \((N, H_{out})\) tensor containing the next hidden state +for each element in the batch

    • +
    +
    +
    +
    +
    Variables
    +
      +
    • ~GRUCell.weight_ih – the learnable input-hidden weights, of shape +(3*hidden_size, input_size)

    • +
    • ~GRUCell.weight_hh – the learnable hidden-hidden weights, of shape +(3*hidden_size, hidden_size)

    • +
    • ~GRUCell.bias_ih – the learnable input-hidden bias, of shape (3*hidden_size)

    • +
    • ~GRUCell.bias_hh – the learnable hidden-hidden bias, of shape (3*hidden_size)

    • +
    +
    +
    +
    +

    Note

    +

    All the weights and biases are initialized from \(\mathcal{U}(-\sqrt{k}, \sqrt{k})\) +where \(k = \frac{1}{\text{hidden\_size}}\)

    +
    +

    Examples:

    +
    >>> rnn = nn.GRUCell(10, 20)
    +>>> input = torch.randn(6, 3, 10)
    +>>> hx = torch.randn(3, 20)
    +>>> output = []
    +>>> for i in range(6):
    +        hx = rnn(input[i], hx)
    +        output.append(hx)
    +
    +
    +
    + +
    +
    +
    +

    Transformer layers

    +
    +

    Transformer

    +
    +
    +class torch.nn.Transformer(d_model=512, nhead=8, num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, custom_encoder=None, custom_decoder=None)[source]
    +

A transformer model. Users can modify the attributes as needed. The architecture is based on the paper “Attention Is All You Need”. Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Processing Systems, pages 6000-6010.

    +
    +
    Parameters
    +
      +
    • d_model – the number of expected features in the encoder/decoder inputs (default=512).

    • +
    • nhead – the number of heads in the multiheadattention models (default=8).

    • +
    • num_encoder_layers – the number of sub-encoder-layers in the encoder (default=6).

    • +
    • num_decoder_layers – the number of sub-decoder-layers in the decoder (default=6).

    • +
    • dim_feedforward – the dimension of the feedforward network model (default=2048).

    • +
    • dropout – the dropout value (default=0.1).

    • +
    • custom_encoder – custom encoder (default=None).

    • +
    • custom_decoder – custom decoder (default=None).

    • +
    +
    +
    +
    +
Examples:
>>> transformer_model = nn.Transformer()
+>>> transformer_model = nn.Transformer(nhead=16, num_encoder_layers=12)
    +
    +
    +
    +
    +
    +
    +forward(src, tgt, src_mask=None, tgt_mask=None, memory_mask=None, src_key_padding_mask=None, tgt_key_padding_mask=None, memory_key_padding_mask=None)[source]
    +

    Take in and process masked source/target sequences.

    +
    +
    Parameters
    +
      +
    • src – the sequence to the encoder (required).

    • +
    • tgt – the sequence to the decoder (required).

    • +
    • src_mask – the additive mask for the src sequence (optional).

    • +
    • tgt_mask – the additive mask for the tgt sequence (optional).

    • +
    • memory_mask – the additive mask for the encoder output (optional).

    • +
    • src_key_padding_mask – the ByteTensor mask for src keys per batch (optional).

    • +
    • tgt_key_padding_mask – the ByteTensor mask for tgt keys per batch (optional).

    • +
    • memory_key_padding_mask – the ByteTensor mask for memory keys per batch (optional).

    • +
    +
    +
    +
    +
    Shape:
      +
    • src: \((S, N, E)\).

    • +
    • tgt: \((T, N, E)\).

    • +
    • src_mask: \((S, S)\).

    • +
    • tgt_mask: \((T, T)\).

    • +
    • memory_mask: \((T, S)\).

    • +
    • src_key_padding_mask: \((N, S)\).

    • +
    • tgt_key_padding_mask: \((N, T)\).

    • +
    • memory_key_padding_mask: \((N, S)\).

    • +
    +

Note: [src/tgt/memory]_mask should be filled with float(‘-inf’) for the masked positions and float(0.0) otherwise. These masks ensure that predictions for position i depend only on the unmasked positions j, and are applied identically for each sequence in a batch. [src/tgt/memory]_key_padding_mask should be a ByteTensor where True values are positions that should be masked with float(‘-inf’) and False values will be left unchanged. This mask ensures that no information will be taken from position i if it is masked, and has a separate mask for each sequence in a batch.

    +
      +
    • output: \((T, N, E)\).

    • +
    +

Note: Due to the multi-head attention architecture in the transformer model, the output sequence length of a transformer is the same as the input sequence (i.e. target) length of the decoder.

    +

where S is the source sequence length, T is the target sequence length, N is the batch size, and E is the feature number.

    +
    +
    +

    Examples

    +
    >>> output = transformer_model(src, tgt, src_mask=src_mask, tgt_mask=tgt_mask)
    +
    +
    +
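For a fully self-contained run (a sketch with illustrative sizes), the shapes above can be exercised as follows:

>>> import torch
>>> from torch import nn
>>> model = nn.Transformer(d_model=32, nhead=4, num_encoder_layers=2, num_decoder_layers=2)
>>> src = torch.randn(10, 2, 32)   # (S, N, E)
>>> tgt = torch.randn(7, 2, 32)    # (T, N, E)
>>> tgt_mask = model.generate_square_subsequent_mask(7)
>>> model(src, tgt, tgt_mask=tgt_mask).shape
torch.Size([7, 2, 32])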
    + +
    +
    +generate_square_subsequent_mask(sz)[source]
    +

    Generate a square mask for the sequence. The masked positions are filled with float(‘-inf’). +Unmasked positions are filled with float(0.0).

    +
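For example (a sketch; the model sizes are illustrative), the generated mask is zero on and below the diagonal and float(‘-inf’) above it:

>>> import torch
>>> from torch import nn
>>> nn.Transformer(d_model=8, nhead=2).generate_square_subsequent_mask(3)
tensor([[0., -inf, -inf],
        [0., 0., -inf],
        [0., 0., 0.]])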
    + +
    + +
    +
    +

    TransformerEncoder

    +
    +
    +class torch.nn.TransformerEncoder(encoder_layer, num_layers, norm=None)[source]
    +

    TransformerEncoder is a stack of N encoder layers

    +
    +
    Parameters
    +
      +
    • encoder_layer – an instance of the TransformerEncoderLayer() class (required).

    • +
    • num_layers – the number of sub-encoder-layers in the encoder (required).

    • +
    • norm – the layer normalization component (optional).

    • +
    +
    +
    +
    +
Examples:
    >>> encoder_layer = nn.TransformerEncoderLayer(d_model, nhead)
    +>>> transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers)
    +
    +
    +
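Concretely, with illustrative sizes (a sketch; d_model, nhead and num_layers above are placeholders):

>>> import torch
>>> from torch import nn
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=32, nhead=4)
>>> transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=3)
>>> src = torch.randn(10, 2, 32)   # (S, N, E)
>>> transformer_encoder(src).shape
torch.Size([10, 2, 32])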
    +
    +
    +
    +forward(src, mask=None, src_key_padding_mask=None)[source]
    +

Pass the input through the encoder layers in turn.

    +
    +
    Parameters
    +
      +
• src – the sequence to the encoder (required).

    • +
    • mask – the mask for the src sequence (optional).

    • +
    • src_key_padding_mask – the mask for the src keys per batch (optional).

    • +
    +
    +
    +
    +
    Shape:

    see the docs in Transformer class.

    +
    +
    +
    + +
    + +
    +
    +

    TransformerDecoder

    +
    +
    +class torch.nn.TransformerDecoder(decoder_layer, num_layers, norm=None)[source]
    +

    TransformerDecoder is a stack of N decoder layers

    +
    +
    Parameters
    +
      +
    • decoder_layer – an instance of the TransformerDecoderLayer() class (required).

    • +
    • num_layers – the number of sub-decoder-layers in the decoder (required).

    • +
    • norm – the layer normalization component (optional).

    • +
    +
    +
    +
    +
Examples:
    >>> decoder_layer = nn.TransformerDecoderLayer(d_model, nhead)
    +>>> transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers)
    +
    +
    +
    +
    +
    +
    +forward(tgt, memory, tgt_mask=None, memory_mask=None, tgt_key_padding_mask=None, memory_key_padding_mask=None)[source]
    +

    Pass the inputs (and mask) through the decoder layer in turn.

    +
    +
    Parameters
    +
      +
    • tgt – the sequence to the decoder (required).

    • +
• memory – the sequence from the last layer of the encoder (required).

    • +
    • tgt_mask – the mask for the tgt sequence (optional).

    • +
    • memory_mask – the mask for the memory sequence (optional).

    • +
    • tgt_key_padding_mask – the mask for the tgt keys per batch (optional).

    • +
    • memory_key_padding_mask – the mask for the memory keys per batch (optional).

    • +
    +
    +
    +
    +
    Shape:

    see the docs in Transformer class.

    +
    +
    +
    + +
    + +
    +
    +

    TransformerEncoderLayer

    +
    +
    +class torch.nn.TransformerEncoderLayer(d_model, nhead, dim_feedforward=2048, dropout=0.1)[source]
    +

TransformerEncoderLayer is made up of self-attn and feedforward network. This standard encoder layer is based on the paper “Attention Is All You Need”. Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Processing Systems, pages 6000-6010. Users may modify this layer or implement it in a different way for their application.

    +
    +
    Parameters
    +
      +
    • d_model – the number of expected features in the input (required).

    • +
    • nhead – the number of heads in the multiheadattention models (required).

    • +
    • dim_feedforward – the dimension of the feedforward network model (default=2048).

    • +
    • dropout – the dropout value (default=0.1).

    • +
    +
    +
    +
    +
Examples:
    >>> encoder_layer = nn.TransformerEncoderLayer(d_model, nhead)
    +
    +
    +
    +
    +
    +
    +forward(src, src_mask=None, src_key_padding_mask=None)[source]
    +

Pass the input through the encoder layer.

    +
    +
    Parameters
    +
      +
• src – the sequence to the encoder layer (required).

    • +
    • src_mask – the mask for the src sequence (optional).

    • +
    • src_key_padding_mask – the mask for the src keys per batch (optional).

    • +
    +
    +
    +
    +
    Shape:

    see the docs in Transformer class.

    +
    +
    +
    + +
    + +
    +
    +

    TransformerDecoderLayer

    +
    +
    +class torch.nn.TransformerDecoderLayer(d_model, nhead, dim_feedforward=2048, dropout=0.1)[source]
    +

TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network. This standard decoder layer is based on the paper “Attention Is All You Need”. Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Processing Systems, pages 6000-6010. Users may modify this layer or implement it in a different way for their application.

    +
    +
    Parameters
    +
      +
    • d_model – the number of expected features in the input (required).

    • +
    • nhead – the number of heads in the multiheadattention models (required).

    • +
    • dim_feedforward – the dimension of the feedforward network model (default=2048).

    • +
    • dropout – the dropout value (default=0.1).

    • +
    +
    +
    +
    +
Examples:
    >>> decoder_layer = nn.TransformerDecoderLayer(d_model, nhead)
    +
    +
    +
    +
    +
    +
    +forward(tgt, memory, tgt_mask=None, memory_mask=None, tgt_key_padding_mask=None, memory_key_padding_mask=None)[source]
    +

    Pass the inputs (and mask) through the decoder layer.

    +
    +
    Parameters
    +
      +
    • tgt – the sequence to the decoder layer (required).

    • +
• memory – the sequence from the last layer of the encoder (required).

    • +
    • tgt_mask – the mask for the tgt sequence (optional).

    • +
    • memory_mask – the mask for the memory sequence (optional).

    • +
    • tgt_key_padding_mask – the mask for the tgt keys per batch (optional).

    • +
    • memory_key_padding_mask – the mask for the memory keys per batch (optional).

    • +
    +
    +
    +
    +
    Shape:

    see the docs in Transformer class.

    +
    +
    +
    + +
    + +
    +
    +
    +

    Linear layers


    Identity

    class torch.nn.Identity(*args, **kwargs)[source]

    A placeholder identity operator that is argument-insensitive.

    Parameters

    • args – any argument (unused)

    • kwargs – any keyword argument (unused)

    Examples:

    >>> m = nn.Identity(54, unused_argument1=0.1, unused_argument2=False)
    >>> input = torch.randn(128, 20)
    >>> output = m(input)
    >>> print(output.size())
    torch.Size([128, 20])

    Linear

    class torch.nn.Linear(in_features, out_features, bias=True)[source]

    Applies a linear transformation to the incoming data: \(y = xA^T + b\)

    Parameters

    • in_features – size of each input sample

    • out_features – size of each output sample

    • bias – If set to False, the layer will not learn an additive bias. Default: True

    Shape:

    • Input: \((N, *, H_{in})\) where \(*\) means any number of additional dimensions and \(H_{in} = \text{in\_features}\)

    • Output: \((N, *, H_{out})\) where all but the last dimension are the same shape as the input and \(H_{out} = \text{out\_features}\).

    Variables

    • ~Linear.weight – the learnable weights of the module of shape \((\text{out\_features}, \text{in\_features})\). The values are initialized from \(\mathcal{U}(-\sqrt{k}, \sqrt{k})\), where \(k = \frac{1}{\text{in\_features}}\)

    • ~Linear.bias – the learnable bias of the module of shape \((\text{out\_features})\). If bias is True, the values are initialized from \(\mathcal{U}(-\sqrt{k}, \sqrt{k})\) where \(k = \frac{1}{\text{in\_features}}\)

    Examples:

    >>> m = nn.Linear(20, 30)
    >>> input = torch.randn(128, 20)
    >>> output = m(input)
    >>> print(output.size())
    torch.Size([128, 30])

    Bilinear

    class torch.nn.Bilinear(in1_features, in2_features, out_features, bias=True)[source]

    Applies a bilinear transformation to the incoming data: \(y = x_1 A x_2 + b\)

    Parameters

    • in1_features – size of each first input sample

    • in2_features – size of each second input sample

    • out_features – size of each output sample

    • bias – If set to False, the layer will not learn an additive bias. Default: True

    Shape:

    • Input1: \((N, *, H_{in1})\) where \(H_{in1}=\text{in1\_features}\) and \(*\) means any number of additional dimensions. All but the last dimension of the inputs should be the same.

    • Input2: \((N, *, H_{in2})\) where \(H_{in2}=\text{in2\_features}\).

    • Output: \((N, *, H_{out})\) where \(H_{out}=\text{out\_features}\) and all but the last dimension are the same shape as the input.

    Variables

    • ~Bilinear.weight – the learnable weights of the module of shape \((\text{out\_features}, \text{in1\_features}, \text{in2\_features})\). The values are initialized from \(\mathcal{U}(-\sqrt{k}, \sqrt{k})\), where \(k = \frac{1}{\text{in1\_features}}\)

    • ~Bilinear.bias – the learnable bias of the module of shape \((\text{out\_features})\). If bias is True, the values are initialized from \(\mathcal{U}(-\sqrt{k}, \sqrt{k})\), where \(k = \frac{1}{\text{in1\_features}}\)

    Examples:

    >>> m = nn.Bilinear(20, 30, 40)
    >>> input1 = torch.randn(128, 20)
    >>> input2 = torch.randn(128, 30)
    >>> output = m(input1, input2)
    >>> print(output.size())
    torch.Size([128, 40])

    Dropout layers


    Dropout

    class torch.nn.Dropout(p=0.5, inplace=False)[source]

    During training, randomly zeroes some of the elements of the input tensor with probability p using samples from a Bernoulli distribution. Each channel will be zeroed out independently on every forward call.

    This has proven to be an effective technique for regularization and preventing the co-adaptation of neurons as described in the paper Improving neural networks by preventing co-adaptation of feature detectors.

    Furthermore, the outputs are scaled by a factor of \(\frac{1}{1-p}\) during training. This means that during evaluation the module simply computes an identity function.

    Parameters

    • p – probability of an element to be zeroed. Default: 0.5

    • inplace – If set to True, will do this operation in-place. Default: False

    Shape:

    • Input: \((*)\). Input can be of any shape

    • Output: \((*)\). Output is of the same shape as input

    Examples:

    >>> m = nn.Dropout(p=0.2)
    >>> input = torch.randn(20, 16)
    >>> output = m(input)
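    Since the module computes an identity function in evaluation mode (see above), a minimal sketch of the train/eval difference (doctest output assumed):

    >>> m = m.eval()
    >>> torch.equal(m(input), input)
    True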

    Dropout2d

    class torch.nn.Dropout2d(p=0.5, inplace=False)[source]

    Randomly zero out entire channels (a channel is a 2D feature map, e.g., the \(j\)-th channel of the \(i\)-th sample in the batched input is a 2D tensor \(\text{input}[i, j]\)). Each channel will be zeroed out independently on every forward call with probability p using samples from a Bernoulli distribution.

    Usually the input comes from nn.Conv2d modules.

    As described in the paper Efficient Object Localization Using Convolutional Networks, if adjacent pixels within feature maps are strongly correlated (as is normally the case in early convolution layers) then i.i.d. dropout will not regularize the activations and will otherwise just result in an effective learning rate decrease.

    In this case, nn.Dropout2d() will help promote independence between feature maps and should be used instead.

    Parameters

    • p (float, optional) – probability of an element to be zeroed.

    • inplace (bool, optional) – If set to True, will do this operation in-place

    Shape:

    • Input: \((N, C, H, W)\)

    • Output: \((N, C, H, W)\) (same shape as input)

    Examples:

    >>> m = nn.Dropout2d(p=0.2)
    >>> input = torch.randn(20, 16, 32, 32)
    >>> output = m(input)

    Dropout3d

    class torch.nn.Dropout3d(p=0.5, inplace=False)[source]

    Randomly zero out entire channels (a channel is a 3D feature map, e.g., the \(j\)-th channel of the \(i\)-th sample in the batched input is a 3D tensor \(\text{input}[i, j]\)). Each channel will be zeroed out independently on every forward call with probability p using samples from a Bernoulli distribution.

    Usually the input comes from nn.Conv3d modules.

    As described in the paper Efficient Object Localization Using Convolutional Networks, if adjacent pixels within feature maps are strongly correlated (as is normally the case in early convolution layers) then i.i.d. dropout will not regularize the activations and will otherwise just result in an effective learning rate decrease.

    In this case, nn.Dropout3d() will help promote independence between feature maps and should be used instead.

    Parameters

    • p (float, optional) – probability of an element to be zeroed.

    • inplace (bool, optional) – If set to True, will do this operation in-place

    Shape:

    • Input: \((N, C, D, H, W)\)

    • Output: \((N, C, D, H, W)\) (same shape as input)

    Examples:

    >>> m = nn.Dropout3d(p=0.2)
    >>> input = torch.randn(20, 16, 4, 32, 32)
    >>> output = m(input)

    AlphaDropout

    class torch.nn.AlphaDropout(p=0.5, inplace=False)[source]

    Applies Alpha Dropout over the input.

    Alpha Dropout is a type of Dropout that maintains the self-normalizing property. For an input with zero mean and unit standard deviation, the output of Alpha Dropout maintains the original mean and standard deviation of the input. Alpha Dropout goes hand-in-hand with the SELU activation function, which ensures that the outputs have zero mean and unit standard deviation.

    During training, it randomly masks some of the elements of the input tensor with probability p using samples from a Bernoulli distribution. The elements to be masked are randomized on every forward call, and scaled and shifted to maintain zero mean and unit standard deviation.

    During evaluation the module simply computes an identity function.

    More details can be found in the paper Self-Normalizing Neural Networks.

    Parameters

    • p (float) – probability of an element to be dropped. Default: 0.5

    • inplace (bool, optional) – If set to True, will do this operation in-place

    Shape:

    • Input: \((*)\). Input can be of any shape

    • Output: \((*)\). Output is of the same shape as input

    Examples:

    >>> m = nn.AlphaDropout(p=0.2)
    >>> input = torch.randn(20, 16)
    >>> output = m(input)

    Sparse layers


    Embedding

    class torch.nn.Embedding(num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False, _weight=None)[source]

    A simple lookup table that stores embeddings of a fixed dictionary and size.

    This module is often used to store word embeddings and retrieve them using indices. The input to the module is a list of indices, and the output is the corresponding word embeddings.

    Parameters

    • num_embeddings (int) – size of the dictionary of embeddings

    • embedding_dim (int) – the size of each embedding vector

    • padding_idx (int, optional) – If given, pads the output with the embedding vector at padding_idx (initialized to zeros) whenever it encounters the index.

    • max_norm (float, optional) – If given, each embedding vector with norm larger than max_norm is renormalized to have norm max_norm.

    • norm_type (float, optional) – The p of the p-norm to compute for the max_norm option. Default 2.

    • scale_grad_by_freq (boolean, optional) – If given, this will scale gradients by the inverse of frequency of the words in the mini-batch. Default False.

    • sparse (bool, optional) – If True, gradient w.r.t. weight matrix will be a sparse tensor. See Notes for more details regarding sparse gradients.

    Variables

    ~Embedding.weight (Tensor) – the learnable weights of the module of shape (num_embeddings, embedding_dim) initialized from \(\mathcal{N}(0, 1)\)

    Shape:

    • Input: \((*)\), LongTensor of arbitrary shape containing the indices to extract

    • Output: \((*, H)\), where * is the input shape and \(H=\text{embedding\_dim}\)

    Note

    Keep in mind that only a limited number of optimizers support sparse gradients: currently it’s optim.SGD (CUDA and CPU), optim.SparseAdam (CUDA and CPU) and optim.Adagrad (CPU).

    Note

    With padding_idx set, the embedding vector at padding_idx is initialized to all zeros. However, note that this vector can be modified afterwards, e.g., using a customized initialization method, and thus changing the vector used to pad the output. The gradient for this vector from Embedding is always zero.

    Examples:

    >>> # an Embedding module containing 10 tensors of size 3
    >>> embedding = nn.Embedding(10, 3)
    >>> # a batch of 2 samples of 4 indices each
    >>> input = torch.LongTensor([[1,2,4,5],[4,3,2,9]])
    >>> embedding(input)
    tensor([[[-0.0251, -1.6902,  0.7172],
             [-0.6431,  0.0748,  0.6969],
             [ 1.4970,  1.3448, -0.9685],
             [-0.3677, -2.7265, -0.1685]],

            [[ 1.4970,  1.3448, -0.9685],
             [ 0.4362, -0.4004,  0.9400],
             [-0.6431,  0.0748,  0.6969],
             [ 0.9124, -2.3616,  1.1151]]])

    >>> # example with padding_idx
    >>> embedding = nn.Embedding(10, 3, padding_idx=0)
    >>> input = torch.LongTensor([[0,2,0,5]])
    >>> embedding(input)
    tensor([[[ 0.0000,  0.0000,  0.0000],
             [ 0.1535, -2.0309,  0.9315],
             [ 0.0000,  0.0000,  0.0000],
             [-0.1655,  0.9897,  0.0635]]])
    classmethod from_pretrained(embeddings, freeze=True, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False)[source]

    Creates an Embedding instance from a given 2-dimensional FloatTensor.

    Parameters

    • embeddings (Tensor) – FloatTensor containing weights for the Embedding. The first dimension is passed to Embedding as num_embeddings, the second as embedding_dim.

    • freeze (boolean, optional) – If True, the tensor does not get updated in the learning process. Equivalent to embedding.weight.requires_grad = False. Default: True

    • padding_idx (int, optional) – See module initialization documentation.

    • max_norm (float, optional) – See module initialization documentation.

    • norm_type (float, optional) – See module initialization documentation. Default 2.

    • scale_grad_by_freq (boolean, optional) – See module initialization documentation. Default False.

    • sparse (bool, optional) – See module initialization documentation.

    Examples:

    >>> # FloatTensor containing pretrained weights
    >>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]])
    >>> embedding = nn.Embedding.from_pretrained(weight)
    >>> # Get embeddings for index 1
    >>> input = torch.LongTensor([1])
    >>> embedding(input)
    tensor([[ 4.0000,  5.1000,  6.3000]])

    EmbeddingBag

    class torch.nn.EmbeddingBag(num_embeddings, embedding_dim, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, mode='mean', sparse=False, _weight=None)[source]

    Computes sums or means of ‘bags’ of embeddings, without instantiating the intermediate embeddings.

    For bags of constant length and no per_sample_weights, this class

    • with mode="sum" is equivalent to Embedding followed by torch.sum(dim=0),

    • with mode="mean" is equivalent to Embedding followed by torch.mean(dim=0),

    • with mode="max" is equivalent to Embedding followed by torch.max(dim=0).

    However, EmbeddingBag is much more time and memory efficient than using a chain of these operations.

    EmbeddingBag also supports per-sample weights as an argument to the forward pass. This scales the output of the Embedding before performing a weighted reduction as specified by mode. If per_sample_weights is passed, the only supported mode is "sum", which computes a weighted sum according to per_sample_weights.

    Parameters

    • num_embeddings (int) – size of the dictionary of embeddings

    • embedding_dim (int) – the size of each embedding vector

    • max_norm (float, optional) – If given, each embedding vector with norm larger than max_norm is renormalized to have norm max_norm.

    • norm_type (float, optional) – The p of the p-norm to compute for the max_norm option. Default 2.

    • scale_grad_by_freq (boolean, optional) – if given, this will scale gradients by the inverse of frequency of the words in the mini-batch. Default False. Note: this option is not supported when mode="max".

    • mode (string, optional) – "sum", "mean" or "max". Specifies the way to reduce the bag. "sum" computes the weighted sum, taking per_sample_weights into consideration. "mean" computes the average of the values in the bag, "max" computes the max value over each bag. Default: "mean"

    • sparse (bool, optional) – if True, gradient w.r.t. weight matrix will be a sparse tensor. See Notes for more details regarding sparse gradients. Note: this option is not supported when mode="max".

    Variables

    ~EmbeddingBag.weight (Tensor) – the learnable weights of the module of shape (num_embeddings, embedding_dim) initialized from \(\mathcal{N}(0, 1)\).
    Inputs: input (LongTensor), offsets (LongTensor, optional), and per_sample_weights (Tensor, optional)

    • If input is 2D of shape (B, N), it will be treated as B bags (sequences) each of fixed length N, and this will return B values aggregated in a way depending on the mode. offsets is ignored and required to be None in this case.

    • If input is 1D of shape (N), it will be treated as a concatenation of multiple bags (sequences). offsets is required to be a 1D tensor containing the starting index positions of each bag in input. Therefore, for offsets of shape (B), input will be viewed as having B bags. Empty bags (i.e., having 0-length) will have returned vectors filled by zeros.

    per_sample_weights (Tensor, optional): a tensor of float / double weights, or None to indicate all weights should be taken to be 1. If specified, per_sample_weights must have exactly the same shape as input and is treated as having the same offsets, if those are not None. Only supported for mode='sum'.

    Output shape: (B, embedding_dim)


    Examples:

    >>> # an EmbeddingBag module containing 10 tensors of size 3
    >>> embedding_sum = nn.EmbeddingBag(10, 3, mode='sum')
    >>> # a batch of 2 samples of 4 indices each
    >>> input = torch.LongTensor([1,2,4,5,4,3,2,9])
    >>> offsets = torch.LongTensor([0,4])
    >>> embedding_sum(input, offsets)
    tensor([[-0.8861, -5.4350, -0.0523],
            [ 1.1306, -2.5798, -1.0044]])
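    Since per_sample_weights is only supported for mode='sum', a minimal sketch of a weighted reduction (the weight values are arbitrary, for illustration only):

    >>> per_sample_weights = torch.rand(8)
    >>> embedding_sum(input, offsets, per_sample_weights).size()
    torch.Size([2, 3])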
    classmethod from_pretrained(embeddings, freeze=True, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, mode='mean', sparse=False)[source]

    Creates an EmbeddingBag instance from a given 2-dimensional FloatTensor.

    Parameters

    • embeddings (Tensor) – FloatTensor containing weights for the EmbeddingBag. The first dimension is passed to EmbeddingBag as num_embeddings, the second as embedding_dim.

    • freeze (boolean, optional) – If True, the tensor does not get updated in the learning process. Equivalent to embeddingbag.weight.requires_grad = False. Default: True

    • max_norm (float, optional) – See module initialization documentation. Default: None

    • norm_type (float, optional) – See module initialization documentation. Default 2.

    • scale_grad_by_freq (boolean, optional) – See module initialization documentation. Default False.

    • mode (string, optional) – See module initialization documentation. Default: "mean"

    • sparse (bool, optional) – See module initialization documentation. Default: False.

    Examples:

    >>> # FloatTensor containing pretrained weights
    >>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]])
    >>> embeddingbag = nn.EmbeddingBag.from_pretrained(weight)
    >>> # Get the mean of the embeddings for the bag of indices 1 and 0
    >>> input = torch.LongTensor([[1, 0]])
    >>> embeddingbag(input)
    tensor([[ 2.5000,  3.7000,  4.6500]])

    Distance functions


    CosineSimilarity

    class torch.nn.CosineSimilarity(dim=1, eps=1e-08)[source]

    Returns cosine similarity between \(x_1\) and \(x_2\), computed along dim.

    \[\text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2 \cdot \Vert x_2 \Vert _2, \epsilon)}.\]

    Parameters

    • dim (int, optional) – Dimension where cosine similarity is computed. Default: 1

    • eps (float, optional) – Small value to avoid division by zero. Default: 1e-8

    Shape:

    • Input1: \((\ast_1, D, \ast_2)\) where D is at position dim

    • Input2: \((\ast_1, D, \ast_2)\), same shape as Input1

    • Output: \((\ast_1, \ast_2)\)

    Examples:
    >>> input1 = torch.randn(100, 128)
    >>> input2 = torch.randn(100, 128)
    >>> cos = nn.CosineSimilarity(dim=1, eps=1e-6)
    >>> output = cos(input1, input2)

    PairwiseDistance

    class torch.nn.PairwiseDistance(p=2.0, eps=1e-06, keepdim=False)[source]

    Computes the batchwise pairwise distance between vectors \(v_1\), \(v_2\) using the p-norm:

    \[\Vert x \Vert _p = \left( \sum_{i=1}^n \vert x_i \vert ^ p \right) ^ {1/p}.\]

    Parameters

    • p (real) – the norm degree. Default: 2

    • eps (float, optional) – Small value to avoid division by zero. Default: 1e-6

    • keepdim (bool, optional) – Determines whether or not to keep the vector dimension. Default: False

    Shape:

    • Input1: \((N, D)\) where D = vector dimension

    • Input2: \((N, D)\), same shape as Input1

    • Output: \((N)\). If keepdim is True, then \((N, 1)\).

    Examples:
    >>> pdist = nn.PairwiseDistance(p=2)
    >>> input1 = torch.randn(100, 128)
    >>> input2 = torch.randn(100, 128)
    >>> output = pdist(input1, input2)

    Loss functions


    L1Loss

    class torch.nn.L1Loss(size_average=None, reduce=None, reduction='mean')[source]

    Creates a criterion that measures the mean absolute error (MAE) between each element in the input \(x\) and target \(y\).

    The unreduced (i.e. with reduction set to 'none') loss can be described as:

    \[\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad l_n = \left| x_n - y_n \right|,\]

    where \(N\) is the batch size. If reduction is not 'none' (default 'mean'), then:

    \[\ell(x, y) = \begin{cases} \operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\ \operatorname{sum}(L), & \text{if reduction} = \text{'sum'.} \end{cases}\]

    \(x\) and \(y\) are tensors of arbitrary shapes with a total of \(n\) elements each.

    The sum operation still operates over all the elements, and divides by \(n\).

    The division by \(n\) can be avoided if one sets reduction = 'sum'.

    Parameters

    • size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True

    • reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True

    • reduction (string, optional) – Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'

    Shape:

    • Input: \((N, *)\) where \(*\) means any number of additional dimensions

    • Target: \((N, *)\), same shape as the input

    • Output: scalar. If reduction is 'none', then \((N, *)\), same shape as the input

    Examples:

    >>> loss = nn.L1Loss()
    >>> input = torch.randn(3, 5, requires_grad=True)
    >>> target = torch.randn(3, 5)
    >>> output = loss(input, target)
    >>> output.backward()

    MSELoss

    class torch.nn.MSELoss(size_average=None, reduce=None, reduction='mean')[source]

    Creates a criterion that measures the mean squared error (squared L2 norm) between each element in the input \(x\) and target \(y\).

    The unreduced (i.e. with reduction set to 'none') loss can be described as:

    \[\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad l_n = \left( x_n - y_n \right)^2,\]

    where \(N\) is the batch size. If reduction is not 'none' (default 'mean'), then:

    \[\ell(x, y) = \begin{cases} \operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\ \operatorname{sum}(L), & \text{if reduction} = \text{'sum'.} \end{cases}\]

    \(x\) and \(y\) are tensors of arbitrary shapes with a total of \(n\) elements each.

    The sum operation still operates over all the elements, and divides by \(n\).

    The division by \(n\) can be avoided if one sets reduction = 'sum'.

    Parameters

    • size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True

    • reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True

    • reduction (string, optional) – Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'

    Shape:

    • Input: \((N, *)\) where \(*\) means any number of additional dimensions

    • Target: \((N, *)\), same shape as the input

    Examples:

    >>> loss = nn.MSELoss()
    >>> input = torch.randn(3, 5, requires_grad=True)
    >>> target = torch.randn(3, 5)
    >>> output = loss(input, target)
    >>> output.backward()

    CrossEntropyLoss

    class torch.nn.CrossEntropyLoss(weight=None, size_average=None, ignore_index=-100, reduce=None, reduction='mean')[source]

    This criterion combines nn.LogSoftmax() and nn.NLLLoss() in one single class.

    It is useful when training a classification problem with C classes. If provided, the optional argument weight should be a 1D Tensor assigning weight to each of the classes. This is particularly useful when you have an unbalanced training set.

    The input is expected to contain raw, unnormalized scores for each class.

    input has to be a Tensor of size either \((minibatch, C)\) or \((minibatch, C, d_1, d_2, ..., d_K)\) with \(K \geq 1\) for the K-dimensional case (described later).

    This criterion expects a class index in the range \([0, C-1]\) as the target for each value of a 1D tensor of size minibatch; if ignore_index is specified, this criterion also accepts this class index (this index may not necessarily be in the class range).

    The loss can be described as:

    \[\text{loss}(x, class) = -\log\left(\frac{\exp(x[class])}{\sum_j \exp(x[j])}\right) = -x[class] + \log\left(\sum_j \exp(x[j])\right)\]

    or in the case of the weight argument being specified:

    \[\text{loss}(x, class) = weight[class] \left(-x[class] + \log\left(\sum_j \exp(x[j])\right)\right)\]

    The losses are averaged across observations for each minibatch.

    Can also be used for higher dimension inputs, such as 2D images, by providing an input of size \((minibatch, C, d_1, d_2, ..., d_K)\) with \(K \geq 1\), where \(K\) is the number of dimensions, and a target of appropriate shape (see below).

    Parameters

    • weight (Tensor, optional) – a manual rescaling weight given to each class. If given, has to be a Tensor of size C

    • size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True

    • ignore_index (int, optional) – Specifies a target value that is ignored and does not contribute to the input gradient. When size_average is True, the loss is averaged over non-ignored targets.

    • reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True

    • reduction (string, optional) – Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'

    Shape:

    • Input: \((N, C)\) where C = number of classes, or \((N, C, d_1, d_2, ..., d_K)\) with \(K \geq 1\) in the case of K-dimensional loss.

    • Target: \((N)\) where each value is \(0 \leq \text{targets}[i] \leq C-1\), or \((N, d_1, d_2, ..., d_K)\) with \(K \geq 1\) in the case of K-dimensional loss.

    • Output: scalar. If reduction is 'none', then the same size as the target: \((N)\), or \((N, d_1, d_2, ..., d_K)\) with \(K \geq 1\) in the case of K-dimensional loss.

    Examples:

    >>> loss = nn.CrossEntropyLoss()
    >>> input = torch.randn(3, 5, requires_grad=True)
    >>> target = torch.empty(3, dtype=torch.long).random_(5)
    >>> output = loss(input, target)
    >>> output.backward()
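    For the K-dimensional case described above, a minimal sketch (shapes are illustrative):

    >>> # input of size (N, C, d_1, d_2) and target of size (N, d_1, d_2)
    >>> loss = nn.CrossEntropyLoss()
    >>> input = torch.randn(3, 5, 4, 4, requires_grad=True)
    >>> target = torch.empty(3, 4, 4, dtype=torch.long).random_(5)
    >>> output = loss(input, target)
    >>> output.backward()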

    CTCLoss

    class torch.nn.CTCLoss(blank=0, reduction='mean', zero_infinity=False)[source]

    The Connectionist Temporal Classification loss.

    Calculates loss between a continuous (unsegmented) time series and a target sequence. CTCLoss sums over the probability of possible alignments of input to target, producing a loss value which is differentiable with respect to each input node. The alignment of input to target is assumed to be “many-to-one”, which limits the length of the target sequence such that it must be \(\leq\) the input length.

    Parameters

    • blank (int, optional) – blank label. Default \(0\).

    • reduction (string, optional) – Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the output losses will be divided by the target lengths and then the mean over the batch is taken. Default: 'mean'

    • zero_infinity (bool, optional) – Whether to zero infinite losses and the associated gradients. Default: False. Infinite losses mainly occur when the inputs are too short to be aligned to the targets.

    Shape:

    • Log_probs: Tensor of size \((T, N, C)\), where \(T = \text{input length}\), \(N = \text{batch size}\), and \(C = \text{number of classes (including blank)}\). The logarithmized probabilities of the outputs (e.g. obtained with torch.nn.functional.log_softmax()).

    • Targets: Tensor of size \((N, S)\) or \((\operatorname{sum}(\text{target\_lengths}))\), where \(N = \text{batch size}\) and \(S = \text{max target length, if shape is } (N, S)\). It represents the target sequences. Each element in the target sequence is a class index, and the target index cannot be blank (default=0). In the \((N, S)\) form, targets are padded to the length of the longest sequence, and stacked. In the \((\operatorname{sum}(\text{target\_lengths}))\) form, the targets are assumed to be un-padded and concatenated within 1 dimension.

    • Input_lengths: Tuple or tensor of size \((N)\), where \(N = \text{batch size}\). It represents the lengths of the inputs (must each be \(\leq T\)). The lengths are specified for each sequence to achieve masking under the assumption that sequences are padded to equal lengths.

    • Target_lengths: Tuple or tensor of size \((N)\), where \(N = \text{batch size}\). It represents the lengths of the targets. Lengths are specified for each sequence to achieve masking under the assumption that sequences are padded to equal lengths. If target shape is \((N,S)\), target_lengths are effectively the stop index \(s_n\) for each target sequence, such that target_n = targets[n,0:s_n] for each target in a batch. Lengths must each be \(\leq S\). If the targets are given as a 1d tensor that is the concatenation of individual targets, the target_lengths must add up to the total length of the tensor.

    • Output: scalar. If reduction is 'none', then \((N)\), where \(N = \text{batch size}\).

    Example:

    >>> T = 50      # Input sequence length
    >>> C = 20      # Number of classes (including blank)
    >>> N = 16      # Batch size
    >>> S = 30      # Target sequence length of longest target in batch
    >>> S_min = 10  # Minimum target length, for demonstration purposes
    >>>
    >>> # Initialize random batch of input vectors, for *size = (T,N,C)
    >>> input = torch.randn(T, N, C).log_softmax(2).detach().requires_grad_()
    >>>
    >>> # Initialize random batch of targets (0 = blank, 1:C = classes)
    >>> target = torch.randint(low=1, high=C, size=(N, S), dtype=torch.long)
    >>>
    >>> input_lengths = torch.full(size=(N,), fill_value=T, dtype=torch.long)
    >>> target_lengths = torch.randint(low=S_min, high=S, size=(N,), dtype=torch.long)
    >>> ctc_loss = nn.CTCLoss()
    >>> loss = ctc_loss(input, target, input_lengths, target_lengths)
    >>> loss.backward()
    Reference:

    A. Graves et al.: Connectionist Temporal Classification: Labelling Unsegmented Sequence Data with Recurrent Neural Networks: https://www.cs.toronto.edu/~graves/icml_2006.pdf

    Note

    In order to use CuDNN, the following must be satisfied: targets must be in concatenated format, all input_lengths must be T, \(blank=0\), target_lengths \(\leq 256\), and the integer arguments must be of dtype torch.int32.

    The regular implementation uses the (more common in PyTorch) torch.long dtype.

    Note

    In some circumstances when using the CUDA backend with CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is undesirable, you can try to make the operation deterministic (potentially at a performance cost) by setting torch.backends.cudnn.deterministic = True. Please see the notes on Reproducibility for background.

    NLLLoss

    class torch.nn.NLLLoss(weight=None, size_average=None, ignore_index=-100, reduce=None, reduction='mean')[source]

    The negative log likelihood loss. It is useful to train a classification problem with C classes.

    If provided, the optional argument weight should be a 1D Tensor assigning weight to each of the classes. This is particularly useful when you have an unbalanced training set.

    The input given through a forward call is expected to contain log-probabilities of each class. input has to be a Tensor of size either \((minibatch, C)\) or \((minibatch, C, d_1, d_2, ..., d_K)\) with \(K \geq 1\) for the K-dimensional case (described later).

    Obtaining log-probabilities in a neural network is easily achieved by adding a LogSoftmax layer in the last layer of your network. You may use CrossEntropyLoss instead, if you prefer not to add an extra layer.

    The target that this loss expects should be a class index in the range \([0, C-1]\) where C = number of classes; if ignore_index is specified, this loss also accepts this class index (this index may not necessarily be in the class range).

    The unreduced (i.e. with reduction set to 'none') loss can be described as:

    \[\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad l_n = - w_{y_n} x_{n,y_n}, \quad w_{c} = \text{weight}[c] \cdot \mathbb{1}\{c \not= \text{ignore\_index}\},\]

    where \(N\) is the batch size. If reduction is not 'none' (default 'mean'), then

    \[\ell(x, y) = \begin{cases} \sum_{n=1}^N \frac{1}{\sum_{n=1}^N w_{y_n}} l_n, & \text{if reduction} = \text{'mean';}\\ \sum_{n=1}^N l_n, & \text{if reduction} = \text{'sum'.} \end{cases}\]

    Can also be used for higher dimension inputs, such as 2D images, by providing an input of size \((minibatch, C, d_1, d_2, ..., d_K)\) with \(K \geq 1\), where \(K\) is the number of dimensions, and a target of appropriate shape (see below). In the case of images, it computes NLL loss per-pixel.

    Parameters

    • weight (Tensor, optional) – a manual rescaling weight given to each class. If given, it has to be a Tensor of size C. Otherwise, it is treated as if having all ones.

    • size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True

    • ignore_index (int, optional) – Specifies a target value that is ignored and does not contribute to the input gradient. When size_average is True, the loss is averaged over non-ignored targets.

    • reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True

    • reduction (string, optional) – Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'

    Shape:

    • Input: \((N, C)\) where C = number of classes, or \((N, C, d_1, d_2, ..., d_K)\) with \(K \geq 1\) in the case of K-dimensional loss.

    • Target: \((N)\) where each value is \(0 \leq \text{targets}[i] \leq C-1\), or \((N, d_1, d_2, ..., d_K)\) with \(K \geq 1\) in the case of K-dimensional loss.

    • Output: scalar. If reduction is 'none', then the same size as the target: \((N)\), or \((N, d_1, d_2, ..., d_K)\) with \(K \geq 1\) in the case of K-dimensional loss.

    Examples:

    >>> m = nn.LogSoftmax(dim=1)
    >>> loss = nn.NLLLoss()
    >>> # input is of size N x C = 3 x 5
    >>> input = torch.randn(3, 5, requires_grad=True)
    >>> # each element in target has to have 0 <= value < C
    >>> target = torch.tensor([1, 0, 4])
    >>> output = loss(m(input), target)
    >>> output.backward()
    >>>
    >>>
    >>> # 2D loss example (used, for example, with image inputs)
    >>> N, C = 5, 4
    >>> loss = nn.NLLLoss()
    >>> # input is of size N x C x height x width
    >>> data = torch.randn(N, 16, 10, 10)
    >>> conv = nn.Conv2d(16, C, (3, 3))
    >>> m = nn.LogSoftmax(dim=1)
    >>> # each element in target has to have 0 <= value < C
    >>> target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
    >>> output = loss(m(conv(data)), target)
    >>> output.backward()

    PoissonNLLLoss

    class torch.nn.PoissonNLLLoss(log_input=True, full=False, size_average=None, eps=1e-08, reduce=None, reduction='mean')[source]

    Negative log likelihood loss with Poisson distribution of target.

    The loss can be described as:

    \[\text{target} \sim \mathrm{Poisson}(\text{input}), \quad \text{loss}(\text{input}, \text{target}) = \text{input} - \text{target} * \log(\text{input}) + \log(\text{target!})\]

    The last term can be omitted or approximated with the Stirling formula. The approximation is used for target values greater than 1. For targets less than or equal to 1, zeros are added to the loss.

    Parameters

    • log_input (bool, optional) – if True the loss is computed as \(\exp(\text{input}) - \text{target}*\text{input}\), if False the loss is \(\text{input} - \text{target}*\log(\text{input}+\text{eps})\).

    • full (bool, optional) – whether to compute the full loss, i.e. to add the Stirling approximation term \(\text{target}*\log(\text{target}) - \text{target} + 0.5 * \log(2\pi\text{target})\).

    • size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True

    • eps (float, optional) – Small value to avoid evaluation of \(\log(0)\) when log_input = False. Default: 1e-8

    • reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True

    • reduction (string, optional) – Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'

    Examples:

    >>> loss = nn.PoissonNLLLoss()
    >>> log_input = torch.randn(5, 2, requires_grad=True)
    >>> target = torch.randn(5, 2)
    >>> output = loss(log_input, target)
    >>> output.backward()
    Shape:
    • Input: \((N, *)\) where \(*\) means any number of additional dimensions

    • Target: \((N, *)\), same shape as the input

    • Output: scalar by default. If reduction is 'none', then \((N, *)\), the same shape as the input

    KLDivLoss

    class torch.nn.KLDivLoss(size_average=None, reduce=None, reduction='mean')[source]

    The Kullback-Leibler divergence loss.

    KL divergence is a useful distance measure for continuous distributions and is often useful when performing direct regression over the space of (discretely sampled) continuous output distributions.

    As with NLLLoss, the input given is expected to contain log-probabilities and is not restricted to a 2D Tensor. The targets are given as probabilities (i.e. without taking the logarithm).

    This criterion expects a target Tensor of the same size as the input Tensor.

    The unreduced (i.e. with reduction set to 'none') loss can be described as:

    \[l(x,y) = L = \{ l_1,\dots,l_N \}, \quad l_n = y_n \cdot \left( \log y_n - x_n \right)\]

    where the index \(N\) spans all dimensions of input and \(L\) has the same shape as input. If reduction is not 'none' (default 'mean'), then:

    \[\ell(x, y) = \begin{cases} \operatorname{mean}(L), & \text{if reduction} = \text{'mean';} \\ \operatorname{sum}(L), & \text{if reduction} = \text{'sum'.} \end{cases}\]

    In default reduction mode 'mean', the losses are averaged for each minibatch over observations as well as over dimensions. 'batchmean' mode gives the correct KL divergence where losses are averaged over the batch dimension only. 'mean' mode’s behavior will be changed to the same as 'batchmean' in the next major release.

    Parameters

    • size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True

    • reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True

    • reduction (string, optional) – Specifies the reduction to apply to the output: 'none' | 'batchmean' | 'sum' | 'mean'. 'none': no reduction will be applied. 'batchmean': the sum of the output will be divided by batchsize. 'sum': the output will be summed. 'mean': the output will be divided by the number of elements in the output. Default: 'mean'

    Note

    size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction.

    Note

    reduction = 'mean' doesn’t return the true KL divergence value; please use reduction = 'batchmean', which aligns with the KL math definition. In the next major release, 'mean' will be changed to be the same as 'batchmean'.

    Shape:

    • Input: \((N, *)\) where \(*\) means any number of additional dimensions

    • Target: \((N, *)\), same shape as the input

    • Output: scalar by default. If reduction is 'none', then \((N, *)\), the same shape as the input
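    Examples (a minimal sketch, assuming the input holds log-probabilities and the target holds probabilities, as described above):

    >>> import torch.nn.functional as F
    >>> loss = nn.KLDivLoss(reduction='batchmean')
    >>> input = F.log_softmax(torch.randn(3, 5, requires_grad=True), dim=1)
    >>> target = F.softmax(torch.randn(3, 5), dim=1)
    >>> output = loss(input, target)
    >>> output.backward()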

    BCELoss

    class torch.nn.BCELoss(weight=None, size_average=None, reduce=None, reduction='mean')[source]

    Creates a criterion that measures the Binary Cross Entropy between the target and the output.

    The unreduced (i.e. with reduction set to 'none') loss can be described as:

    \[\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad l_n = - w_n \left[ y_n \cdot \log x_n + (1 - y_n) \cdot \log (1 - x_n) \right],\]

    where \(N\) is the batch size. If reduction is not 'none' (default 'mean'), then

    \[\ell(x, y) = \begin{cases} \operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\ \operatorname{sum}(L), & \text{if reduction} = \text{'sum'.} \end{cases}\]

    This is used for measuring the error of a reconstruction in, for example, an auto-encoder. Note that the targets \(y\) should be numbers between 0 and 1.

    Parameters

    • weight (Tensor, optional) – a manual rescaling weight given to the loss of each batch element. If given, has to be a Tensor of size nbatch.

    • size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True

    • reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True

    • reduction (string, optional) – Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'

    Shape:

    • Input: \((N, *)\) where \(*\) means any number of additional dimensions

    • Target: \((N, *)\), same shape as the input

    • Output: scalar. If reduction is 'none', then \((N, *)\), same shape as input.

    Examples:

    >>> m = nn.Sigmoid()
    >>> loss = nn.BCELoss()
    >>> input = torch.randn(3, requires_grad=True)
    >>> target = torch.empty(3).random_(2)
    >>> output = loss(m(input), target)
    >>> output.backward()

    BCEWithLogitsLoss

    class torch.nn.BCEWithLogitsLoss(weight=None, size_average=None, reduce=None, reduction='mean', pos_weight=None)[source]

    This loss combines a Sigmoid layer and the BCELoss in one single class. This version is more numerically stable than using a plain Sigmoid followed by a BCELoss as, by combining the operations into one layer, we take advantage of the log-sum-exp trick for numerical stability.

    The unreduced (i.e. with reduction set to 'none') loss can be described as:

    \[\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad l_n = - w_n \left[ y_n \cdot \log \sigma(x_n) + (1 - y_n) \cdot \log (1 - \sigma(x_n)) \right],\]

    where \(N\) is the batch size. If reduction is not 'none' (default 'mean'), then

    \[\ell(x, y) = \begin{cases} \operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\ \operatorname{sum}(L), & \text{if reduction} = \text{'sum'.} \end{cases}\]

    This is used for measuring the error of a reconstruction in, for example, an auto-encoder. Note that the targets t[i] should be numbers between 0 and 1.

    It’s possible to trade off recall and precision by adding weights to positive examples. In the case of multi-label classification the loss can be described as:

    \[\ell_c(x, y) = L_c = \{l_{1,c},\dots,l_{N,c}\}^\top, \quad l_{n,c} = - w_{n,c} \left[ p_c y_{n,c} \cdot \log \sigma(x_{n,c}) + (1 - y_{n,c}) \cdot \log (1 - \sigma(x_{n,c})) \right],\]

    where \(c\) is the class number (\(c > 1\) for multi-label binary classification, \(c = 1\) for single-label binary classification), \(n\) is the number of the sample in the batch and \(p_c\) is the weight of the positive answer for the class \(c\).

    \(p_c > 1\) increases the recall, \(p_c < 1\) increases the precision.

    For example, if a dataset contains 100 positive and 300 negative examples of a single class, then pos_weight for the class should be equal to \(\frac{300}{100}=3\). The loss would act as if the dataset contains \(3\times 100=300\) positive examples.

    Examples:

    >>> target = torch.ones([10, 64], dtype=torch.float32)  # 64 classes, batch size = 10
    >>> output = torch.full([10, 64], 0.999)  # A prediction (logit)
    >>> pos_weight = torch.ones([64])  # All weights are equal to 1
    >>> criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)
    >>> criterion(output, target)  # -log(sigmoid(0.999))
    tensor(0.3135)
    Parameters

    • weight (Tensor, optional) – a manual rescaling weight given to the loss of each batch element. If given, has to be a Tensor of size nbatch.

    • size_average (bool, optional) – Deprecated (see reduction). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False. Default: True

    • reduce (bool, optional) – Deprecated (see reduction). By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch element instead and ignores size_average. Default: True

    • reduction (string, optional) – Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: 'mean'

    • pos_weight (Tensor, optional) – a weight of positive examples. Must be a vector with length equal to the number of classes.

    Shape:

    • Input: \((N, *)\) where \(*\) means any number of additional dimensions

    • Target: \((N, *)\), same shape as the input

    • Output: scalar. If reduction is 'none', then \((N, *)\), same shape as input.

    Examples:

    >>> loss = nn.BCEWithLogitsLoss()
    >>> input = torch.randn(3, requires_grad=True)
    >>> target = torch.empty(3).random_(2)
    >>> output = loss(input, target)
    >>> output.backward()

    MarginRankingLoss

    +
    +
    +class torch.nn.MarginRankingLoss(margin=0.0, size_average=None, reduce=None, reduction='mean')[source]
    +

    Creates a criterion that measures the loss given +inputs \(x1\), \(x2\), two 1D mini-batch Tensors, +and a label 1D mini-batch tensor \(y\) (containing 1 or -1).

    +

    If \(y = 1\) then it assumed the first input should be ranked higher +(have a larger value) than the second input, and vice-versa for \(y = -1\).

    +

    The loss function for each sample in the mini-batch is:

    +
    +\[\text{loss}(x, y) = \max(0, -y * (x1 - x2) + \text{margin}) + +\]
    +
    +
    Parameters
    +
      +
    • margin (float, optional) – Has a default value of \(0\).

    • +
    • size_average (bool, optional) – Deprecated (see reduction). By default, +the losses are averaged over each loss element in the batch. Note that for +some losses, there are multiple elements per sample. If the field size_average +is set to False, the losses are instead summed for each minibatch. Ignored +when reduce is False. Default: True

    • +
    • reduce (bool, optional) – Deprecated (see reduction). By default, the +losses are averaged or summed over observations for each minibatch depending +on size_average. When reduce is False, returns a loss per +batch element instead and ignores size_average. Default: True

    • +
    • reduction (string, optional) – Specifies the reduction to apply to the output: +'none' | 'mean' | 'sum'. 'none': no reduction will be applied, +'mean': the sum of the output will be divided by the number of +elements in the output, 'sum': the output will be summed. Note: size_average +and reduce are in the process of being deprecated, and in the meantime, +specifying either of those two args will override reduction. Default: 'mean'

    • +
    +
    +
    +
    +
    Shape:
      +
    • Input: \((N, D)\) where N is the batch size and D is the size of a sample.

    • +
    • Target: \((N)\)

    • +
    • Output: scalar. If reduction is 'none', then \((N)\).

    • +
    +
    +
    +
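A minimal usage sketch (illustrative, not from the original entry), ranking pairs of scores with labels drawn from {1, -1}:

>>> loss = nn.MarginRankingLoss()
>>> input1 = torch.randn(3, requires_grad=True)
>>> input2 = torch.randn(3, requires_grad=True)
>>> target = torch.randn(3).sign()  # random labels in {1, -1}
>>> output = loss(input1, input2, target)
>>> output.backward()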
    + +
    +
    +

    HingeEmbeddingLoss

    +
    +
    +class torch.nn.HingeEmbeddingLoss(margin=1.0, size_average=None, reduce=None, reduction='mean')[source]
    +

    Measures the loss given an input tensor \(x\) and a labels tensor \(y\) +(containing 1 or -1). +This is usually used for measuring whether two inputs are similar or +dissimilar, e.g. using the L1 pairwise distance as \(x\), and is typically +used for learning nonlinear embeddings or semi-supervised learning.

    +

The loss function for the \(n\)-th sample in the mini-batch is

    +
+\[l_n = \begin{cases} x_n, & \text{if}\; y_n = 1,\\ \max \{0, \Delta - x_n\}, & \text{if}\; y_n = -1, \end{cases}\]
    +

and the total loss function is

    +
+\[\ell(x, y) = \begin{cases} \operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\ \operatorname{sum}(L), & \text{if reduction} = \text{'sum'.} \end{cases}\]
    +

    where \(L = \{l_1,\dots,l_N\}^\top\).

    +
    +
    Parameters
    +
      +
    • margin (float, optional) – Has a default value of 1.

    • +
    • size_average (bool, optional) – Deprecated (see reduction). By default, +the losses are averaged over each loss element in the batch. Note that for +some losses, there are multiple elements per sample. If the field size_average +is set to False, the losses are instead summed for each minibatch. Ignored +when reduce is False. Default: True

    • +
    • reduce (bool, optional) – Deprecated (see reduction). By default, the +losses are averaged or summed over observations for each minibatch depending +on size_average. When reduce is False, returns a loss per +batch element instead and ignores size_average. Default: True

    • +
    • reduction (string, optional) – Specifies the reduction to apply to the output: +'none' | 'mean' | 'sum'. 'none': no reduction will be applied, +'mean': the sum of the output will be divided by the number of +elements in the output, 'sum': the output will be summed. Note: size_average +and reduce are in the process of being deprecated, and in the meantime, +specifying either of those two args will override reduction. Default: 'mean'

    • +
    +
    +
    +
    +
    Shape:
      +
    • Input: \((*)\) where \(*\) means, any number of dimensions. The sum operation +operates over all the elements.

    • +
    • Target: \((*)\), same shape as the input

    • +
    • Output: scalar. If reduction is 'none', then same shape as the input

    • +
    +
    +
    +
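A minimal usage sketch (illustrative, not from the original entry), using hand-picked labels in {1, -1}:

>>> loss = nn.HingeEmbeddingLoss()
>>> input = torch.randn(4, requires_grad=True)  # e.g. pairwise distances
>>> target = torch.tensor([1., -1., 1., -1.])
>>> output = loss(input, target)
>>> output.backward()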
    + +
    +
    +

    MultiLabelMarginLoss

    +
    +
    +class torch.nn.MultiLabelMarginLoss(size_average=None, reduce=None, reduction='mean')[source]
    +

Creates a criterion that optimizes a multi-class, multi-label hinge loss (margin-based loss) between input \(x\) (a 2D mini-batch Tensor) and output \(y\) (which is a 2D Tensor of target class indices). For each sample in the mini-batch:

    +
+\[\text{loss}(x, y) = \sum_{ij}\frac{\max(0, 1 - (x[y[j]] - x[i]))}{\text{x.size}(0)}\]
    +

    where \(i \in \left\{0, \; \cdots , \; \text{x.size}(0) - 1\right\}\), \(j \in \left\{0, \; \cdots , \; \text{y.size}(0) - 1\right\}\), \(0 \leq y[j] \leq \text{x.size}(0)-1\), and \(i \neq y[j]\) for all \(i\) and \(j\).

    +

    \(y\) and \(x\) must have the same size.

    +

    The criterion only considers a contiguous block of non-negative targets that +starts at the front.

    +

    This allows for different samples to have variable amounts of target classes.

    +
    +
    Parameters
    +
      +
    • size_average (bool, optional) – Deprecated (see reduction). By default, +the losses are averaged over each loss element in the batch. Note that for +some losses, there are multiple elements per sample. If the field size_average +is set to False, the losses are instead summed for each minibatch. Ignored +when reduce is False. Default: True

    • +
    • reduce (bool, optional) – Deprecated (see reduction). By default, the +losses are averaged or summed over observations for each minibatch depending +on size_average. When reduce is False, returns a loss per +batch element instead and ignores size_average. Default: True

    • +
    • reduction (string, optional) – Specifies the reduction to apply to the output: +'none' | 'mean' | 'sum'. 'none': no reduction will be applied, +'mean': the sum of the output will be divided by the number of +elements in the output, 'sum': the output will be summed. Note: size_average +and reduce are in the process of being deprecated, and in the meantime, +specifying either of those two args will override reduction. Default: 'mean'

    • +
    +
    +
    +
    +
    Shape:
      +
    • Input: \((C)\) or \((N, C)\) where N is the batch size and C +is the number of classes.

    • +
    • Target: \((C)\) or \((N, C)\), label targets padded by -1 ensuring same shape as the input.

    • +
    • Output: scalar. If reduction is 'none', then \((N)\).

    • +
    +
    +
    +

    Examples:

    +
    >>> loss = nn.MultiLabelMarginLoss()
    +>>> x = torch.FloatTensor([[0.1, 0.2, 0.4, 0.8]])
    +>>> # for target y, only consider labels 3 and 0, not after label -1
    +>>> y = torch.LongTensor([[3, 0, -1, 1]])
    +>>> loss(x, y)
    +>>> # 0.25 * ((1-(0.1-0.2)) + (1-(0.1-0.4)) + (1-(0.8-0.2)) + (1-(0.8-0.4)))
    +tensor(0.8500)
    +
    +
    +
    + +
    +
    +

    SmoothL1Loss

    +
    +
    +class torch.nn.SmoothL1Loss(size_average=None, reduce=None, reduction='mean')[source]
    +

    Creates a criterion that uses a squared term if the absolute +element-wise error falls below 1 and an L1 term otherwise. +It is less sensitive to outliers than the MSELoss and in some cases +prevents exploding gradients (e.g. see Fast R-CNN paper by Ross Girshick). +Also known as the Huber loss:

    +
+\[\text{loss}(x, y) = \frac{1}{n} \sum_{i} z_{i}\]
    +

    where \(z_{i}\) is given by:

    +
+\[z_{i} = \begin{cases} 0.5 (x_i - y_i)^2, & \text{if } |x_i - y_i| < 1 \\ |x_i - y_i| - 0.5, & \text{otherwise} \end{cases}\]
    +

\(x\) and \(y\) can have arbitrary shapes with a total of \(n\) elements each; the sum operation still operates over all the elements, and divides by \(n\).

    +

The division by \(n\) can be avoided if one sets reduction = 'sum'.

    +
    +
    Parameters
    +
      +
    • size_average (bool, optional) – Deprecated (see reduction). By default, +the losses are averaged over each loss element in the batch. Note that for +some losses, there are multiple elements per sample. If the field size_average +is set to False, the losses are instead summed for each minibatch. Ignored +when reduce is False. Default: True

    • +
    • reduce (bool, optional) – Deprecated (see reduction). By default, the +losses are averaged or summed over observations for each minibatch depending +on size_average. When reduce is False, returns a loss per +batch element instead and ignores size_average. Default: True

    • +
    • reduction (string, optional) – Specifies the reduction to apply to the output: +'none' | 'mean' | 'sum'. 'none': no reduction will be applied, +'mean': the sum of the output will be divided by the number of +elements in the output, 'sum': the output will be summed. Note: size_average +and reduce are in the process of being deprecated, and in the meantime, +specifying either of those two args will override reduction. Default: 'mean'

    • +
    +
    +
    +
    +
    Shape:
      +
    • Input: \((N, *)\) where \(*\) means, any number of additional +dimensions

    • +
    • Target: \((N, *)\), same shape as the input

    • +
    • Output: scalar. If reduction is 'none', then +\((N, *)\), same shape as the input

    • +
    +
    +
    +
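A minimal usage sketch (illustrative, not from the original entry):

>>> loss = nn.SmoothL1Loss()
>>> input = torch.randn(3, 5, requires_grad=True)
>>> target = torch.randn(3, 5)
>>> output = loss(input, target)
>>> output.backward()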
    + +
    +
    +

    SoftMarginLoss

    +
    +
    +class torch.nn.SoftMarginLoss(size_average=None, reduce=None, reduction='mean')[source]
    +

    Creates a criterion that optimizes a two-class classification +logistic loss between input tensor \(x\) and target tensor \(y\) +(containing 1 or -1).

    +
+\[\text{loss}(x, y) = \sum_i \frac{\log(1 + \exp(-y[i]*x[i]))}{\text{x.nelement}()}\]
    +
    +
    Parameters
    +
      +
    • size_average (bool, optional) – Deprecated (see reduction). By default, +the losses are averaged over each loss element in the batch. Note that for +some losses, there are multiple elements per sample. If the field size_average +is set to False, the losses are instead summed for each minibatch. Ignored +when reduce is False. Default: True

    • +
    • reduce (bool, optional) – Deprecated (see reduction). By default, the +losses are averaged or summed over observations for each minibatch depending +on size_average. When reduce is False, returns a loss per +batch element instead and ignores size_average. Default: True

    • +
    • reduction (string, optional) – Specifies the reduction to apply to the output: +'none' | 'mean' | 'sum'. 'none': no reduction will be applied, +'mean': the sum of the output will be divided by the number of +elements in the output, 'sum': the output will be summed. Note: size_average +and reduce are in the process of being deprecated, and in the meantime, +specifying either of those two args will override reduction. Default: 'mean'

    • +
    +
    +
    +
    +
    Shape:
      +
    • Input: \((*)\) where \(*\) means, any number of additional +dimensions

    • +
    • Target: \((*)\), same shape as the input

    • +
    • Output: scalar. If reduction is 'none', then same shape as the input

    • +
    +
    +
    +
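A minimal usage sketch (illustrative, not from the original entry):

>>> loss = nn.SoftMarginLoss()
>>> input = torch.randn(3, requires_grad=True)
>>> target = torch.tensor([1., 1., -1.])  # labels in {1, -1}
>>> output = loss(input, target)
>>> output.backward()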
    + +
    +
    +

    MultiLabelSoftMarginLoss

    +
    +
    +class torch.nn.MultiLabelSoftMarginLoss(weight=None, size_average=None, reduce=None, reduction='mean')[source]
    +

    Creates a criterion that optimizes a multi-label one-versus-all +loss based on max-entropy, between input \(x\) and target \(y\) of size +\((N, C)\). +For each sample in the minibatch:

    +
+\[loss(x, y) = - \frac{1}{C} * \sum_i y[i] * \log((1 + \exp(-x[i]))^{-1}) + (1-y[i]) * \log\left(\frac{\exp(-x[i])}{1 + \exp(-x[i])}\right)\]
    +

    where \(i \in \left\{0, \; \cdots , \; \text{x.nElement}() - 1\right\}\), +\(y[i] \in \left\{0, \; 1\right\}\).

    +
    +
    Parameters
    +
      +
    • weight (Tensor, optional) – a manual rescaling weight given to each +class. If given, it has to be a Tensor of size C. Otherwise, it is +treated as if having all ones.

    • +
    • size_average (bool, optional) – Deprecated (see reduction). By default, +the losses are averaged over each loss element in the batch. Note that for +some losses, there are multiple elements per sample. If the field size_average +is set to False, the losses are instead summed for each minibatch. Ignored +when reduce is False. Default: True

    • +
    • reduce (bool, optional) – Deprecated (see reduction). By default, the +losses are averaged or summed over observations for each minibatch depending +on size_average. When reduce is False, returns a loss per +batch element instead and ignores size_average. Default: True

    • +
    • reduction (string, optional) – Specifies the reduction to apply to the output: +'none' | 'mean' | 'sum'. 'none': no reduction will be applied, +'mean': the sum of the output will be divided by the number of +elements in the output, 'sum': the output will be summed. Note: size_average +and reduce are in the process of being deprecated, and in the meantime, +specifying either of those two args will override reduction. Default: 'mean'

    • +
    +
    +
    +
    +
    Shape:
      +
    • Input: \((N, C)\) where N is the batch size and C is the number of classes.

    • +
    • Target: \((N, C)\), binary label targets (each entry 0 or 1), same shape as the input.

    • +
    • Output: scalar. If reduction is 'none', then \((N)\).

    • +
    +
    +
    +
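A minimal usage sketch (illustrative, not from the original entry), with N=2 samples over C=4 classes and multi-hot targets:

>>> loss = nn.MultiLabelSoftMarginLoss()
>>> input = torch.randn(2, 4, requires_grad=True)
>>> target = torch.tensor([[1., 0., 1., 0.],
...                        [0., 1., 0., 1.]])
>>> output = loss(input, target)
>>> output.backward()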
    + +
    +
    +

    CosineEmbeddingLoss

    +
    +
    +class torch.nn.CosineEmbeddingLoss(margin=0.0, size_average=None, reduce=None, reduction='mean')[source]
    +

    Creates a criterion that measures the loss given input tensors +\(x_1\), \(x_2\) and a Tensor label \(y\) with values 1 or -1. +This is used for measuring whether two inputs are similar or dissimilar, +using the cosine distance, and is typically used for learning nonlinear +embeddings or semi-supervised learning.

    +

    The loss function for each sample is:

    +
+\[\text{loss}(x, y) = \begin{cases} 1 - \cos(x_1, x_2), & \text{if } y = 1 \\ \max(0, \cos(x_1, x_2) - \text{margin}), & \text{if } y = -1 \end{cases}\]
    +
    +
    Parameters
    +
      +
    • margin (float, optional) – Should be a number from \(-1\) to \(1\), +\(0\) to \(0.5\) is suggested. If margin is missing, the +default value is \(0\).

    • +
    • size_average (bool, optional) – Deprecated (see reduction). By default, +the losses are averaged over each loss element in the batch. Note that for +some losses, there are multiple elements per sample. If the field size_average +is set to False, the losses are instead summed for each minibatch. Ignored +when reduce is False. Default: True

    • +
    • reduce (bool, optional) – Deprecated (see reduction). By default, the +losses are averaged or summed over observations for each minibatch depending +on size_average. When reduce is False, returns a loss per +batch element instead and ignores size_average. Default: True

    • +
    • reduction (string, optional) – Specifies the reduction to apply to the output: +'none' | 'mean' | 'sum'. 'none': no reduction will be applied, +'mean': the sum of the output will be divided by the number of +elements in the output, 'sum': the output will be summed. Note: size_average +and reduce are in the process of being deprecated, and in the meantime, +specifying either of those two args will override reduction. Default: 'mean'

    • +
    +
    +
    +
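A minimal usage sketch (illustrative, not from the original entry), comparing pairs of 5-dimensional embeddings:

>>> loss = nn.CosineEmbeddingLoss()
>>> input1 = torch.randn(3, 5, requires_grad=True)
>>> input2 = torch.randn(3, 5, requires_grad=True)
>>> target = torch.tensor([1., -1., 1.])  # 1: similar pair, -1: dissimilar pair
>>> output = loss(input1, input2, target)
>>> output.backward()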
    + +
    +
    +

    MultiMarginLoss

    +
    +
    +class torch.nn.MultiMarginLoss(p=1, margin=1.0, weight=None, size_average=None, reduce=None, reduction='mean')[source]
    +

    Creates a criterion that optimizes a multi-class classification hinge +loss (margin-based loss) between input \(x\) (a 2D mini-batch Tensor) and +output \(y\) (which is a 1D tensor of target class indices, +\(0 \leq y \leq \text{x.size}(1)-1\)):

    +

    For each mini-batch sample, the loss in terms of the 1D input \(x\) and scalar +output \(y\) is:

    +
+\[\text{loss}(x, y) = \frac{\sum_i \max(0, \text{margin} - x[y] + x[i])^p}{\text{x.size}(0)}\]
    +

    where \(i \in \left\{0, \; \cdots , \; \text{x.size}(0) - 1\right\}\) and \(i \neq y\).

    +

    Optionally, you can give non-equal weighting on the classes by passing +a 1D weight tensor into the constructor.

    +

    The loss function then becomes:

    +
+\[\text{loss}(x, y) = \frac{\sum_i \max(0, w[y] * (\text{margin} - x[y] + x[i]))^p}{\text{x.size}(0)}\]
    +
    +
    Parameters
    +
      +
    • p (int, optional) – Has a default value of \(1\). \(1\) and \(2\) +are the only supported values.

    • +
    • margin (float, optional) – Has a default value of \(1\).

    • +
    • weight (Tensor, optional) – a manual rescaling weight given to each +class. If given, it has to be a Tensor of size C. Otherwise, it is +treated as if having all ones.

    • +
    • size_average (bool, optional) – Deprecated (see reduction). By default, +the losses are averaged over each loss element in the batch. Note that for +some losses, there are multiple elements per sample. If the field size_average +is set to False, the losses are instead summed for each minibatch. Ignored +when reduce is False. Default: True

    • +
    • reduce (bool, optional) – Deprecated (see reduction). By default, the +losses are averaged or summed over observations for each minibatch depending +on size_average. When reduce is False, returns a loss per +batch element instead and ignores size_average. Default: True

    • +
    • reduction (string, optional) – Specifies the reduction to apply to the output: +'none' | 'mean' | 'sum'. 'none': no reduction will be applied, +'mean': the sum of the output will be divided by the number of +elements in the output, 'sum': the output will be summed. Note: size_average +and reduce are in the process of being deprecated, and in the meantime, +specifying either of those two args will override reduction. Default: 'mean'

    • +
    +
    +
    +
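A minimal usage sketch (illustrative, not from the original entry), with N=3 samples over C=5 classes:

>>> loss = nn.MultiMarginLoss()
>>> input = torch.randn(3, 5, requires_grad=True)
>>> target = torch.tensor([1, 0, 4])  # class indices in [0, C-1]
>>> output = loss(input, target)
>>> output.backward()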
    + +
    +
    +

    TripletMarginLoss

    +
    +
    +class torch.nn.TripletMarginLoss(margin=1.0, p=2.0, eps=1e-06, swap=False, size_average=None, reduce=None, reduction='mean')[source]
    +

Creates a criterion that measures the triplet loss given input tensors \(x1\), \(x2\), \(x3\) and a margin with a value greater than \(0\). This is used for measuring a relative similarity between samples. A triplet is composed of a, p and n (i.e., anchor, positive example and negative example respectively). The shapes of all input tensors should be \((N, D)\).

    +

    The distance swap is described in detail in the paper Learning shallow +convolutional feature descriptors with triplet losses by +V. Balntas, E. Riba et al.

    +

    The loss function for each sample in the mini-batch is:

    +
+\[L(a, p, n) = \max \{d(a_i, p_i) - d(a_i, n_i) + {\rm margin}, 0\}\]
    +

    where

    +
+\[d(x_i, y_i) = \left\lVert {\bf x}_i - {\bf y}_i \right\rVert_p\]
    +
    +
    Parameters
    +
      +
    • margin (float, optional) – Default: \(1\).

    • +
    • p (int, optional) – The norm degree for pairwise distance. Default: \(2\).

    • +
    • swap (bool, optional) – The distance swap is described in detail in the paper +Learning shallow convolutional feature descriptors with triplet losses by +V. Balntas, E. Riba et al. Default: False.

    • +
    • size_average (bool, optional) – Deprecated (see reduction). By default, +the losses are averaged over each loss element in the batch. Note that for +some losses, there are multiple elements per sample. If the field size_average +is set to False, the losses are instead summed for each minibatch. Ignored +when reduce is False. Default: True

    • +
    • reduce (bool, optional) – Deprecated (see reduction). By default, the +losses are averaged or summed over observations for each minibatch depending +on size_average. When reduce is False, returns a loss per +batch element instead and ignores size_average. Default: True

    • +
    • reduction (string, optional) – Specifies the reduction to apply to the output: +'none' | 'mean' | 'sum'. 'none': no reduction will be applied, +'mean': the sum of the output will be divided by the number of +elements in the output, 'sum': the output will be summed. Note: size_average +and reduce are in the process of being deprecated, and in the meantime, +specifying either of those two args will override reduction. Default: 'mean'

    • +
    +
    +
    +
    +
    Shape:
      +
    • Input: \((N, D)\) where \(D\) is the vector dimension.

    • +
    • Output: scalar. If reduction is 'none', then \((N)\).

    • +
    +
    +
    +
    >>> triplet_loss = nn.TripletMarginLoss(margin=1.0, p=2)
    +>>> anchor = torch.randn(100, 128, requires_grad=True)
    +>>> positive = torch.randn(100, 128, requires_grad=True)
    +>>> negative = torch.randn(100, 128, requires_grad=True)
    +>>> output = triplet_loss(anchor, positive, negative)
    +>>> output.backward()
    +
    +
    +
    + +
    +
    +
    +

    Vision layers

    +
    +

    PixelShuffle

    +
    +
    +class torch.nn.PixelShuffle(upscale_factor)[source]
    +

    Rearranges elements in a tensor of shape \((*, C \times r^2, H, W)\) +to a tensor of shape \((*, C, H \times r, W \times r)\).

    +

    This is useful for implementing efficient sub-pixel convolution +with a stride of \(1/r\).

    +

See the paper: Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network by Shi et al. (2016) for more details.

    +
    +
    Parameters
    +

    upscale_factor (int) – factor to increase spatial resolution by

    +
    +
    +
    +
    Shape:
      +
    • Input: \((N, L, H_{in}, W_{in})\) where \(L=C \times \text{upscale\_factor}^2\)

    • +
    • Output: \((N, C, H_{out}, W_{out})\) where +\(H_{out} = H_{in} \times \text{upscale\_factor}\) +and \(W_{out} = W_{in} \times \text{upscale\_factor}\)

    • +
    +
    +
    +

    Examples:

    +
    >>> pixel_shuffle = nn.PixelShuffle(3)
    +>>> input = torch.randn(1, 9, 4, 4)
    +>>> output = pixel_shuffle(input)
    +>>> print(output.size())
    +torch.Size([1, 1, 12, 12])
    +
    +
    +
    + +
    +
    +

    Upsample

    +
    +
    +class torch.nn.Upsample(size=None, scale_factor=None, mode='nearest', align_corners=None)[source]
    +

Upsamples given multi-channel 1D (temporal), 2D (spatial) or 3D (volumetric) data.

    +

    The input data is assumed to be of the form +minibatch x channels x [optional depth] x [optional height] x width. +Hence, for spatial inputs, we expect a 4D Tensor and for volumetric inputs, we expect a 5D Tensor.

    +

    The algorithms available for upsampling are nearest neighbor and linear, +bilinear, bicubic and trilinear for 3D, 4D and 5D input Tensor, +respectively.

    +

    One can either give a scale_factor or the target output size to +calculate the output size. (You cannot give both, as it is ambiguous)

    +
    +
    Parameters
    +
      +
    • size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int], optional) – output spatial sizes

    • +
    • scale_factor (float or Tuple[float] or Tuple[float, float] or Tuple[float, float, float], optional) – multiplier for spatial size. Has to match input size if it is a tuple.

    • +
    • mode (str, optional) – the upsampling algorithm: one of 'nearest', +'linear', 'bilinear', 'bicubic' and 'trilinear'. +Default: 'nearest'

    • +
    • align_corners (bool, optional) – if True, the corner pixels of the input +and output tensors are aligned, and thus preserving the values at +those pixels. This only has effect when mode is +'linear', 'bilinear', or 'trilinear'. Default: False

    • +
    +
    +
    +
    +
    Shape:
      +
    • Input: \((N, C, W_{in})\), \((N, C, H_{in}, W_{in})\) or \((N, C, D_{in}, H_{in}, W_{in})\)

    • +
    • Output: \((N, C, W_{out})\), \((N, C, H_{out}, W_{out})\) +or \((N, C, D_{out}, H_{out}, W_{out})\), where

    • +
    +
    +
    +
+\[D_{out} = \left\lfloor D_{in} \times \text{scale\_factor} \right\rfloor\]
+
+\[H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor\]
+
+\[W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor\]
    +
    +

    Warning

    +

    With align_corners = True, the linearly interpolating modes +(linear, bilinear, bicubic, and trilinear) don’t proportionally +align the output and input pixels, and thus the output values can depend +on the input size. This was the default behavior for these modes up to +version 0.3.1. Since then, the default behavior is +align_corners = False. See below for concrete examples on how this +affects the outputs.

    +
    +
    +

    Note

    +

    If you want downsampling/general resizing, you should use interpolate().

    +
    +

    Examples:

    +
    >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
    +>>> input
    +tensor([[[[ 1.,  2.],
    +          [ 3.,  4.]]]])
    +
    +>>> m = nn.Upsample(scale_factor=2, mode='nearest')
    +>>> m(input)
    +tensor([[[[ 1.,  1.,  2.,  2.],
    +          [ 1.,  1.,  2.,  2.],
    +          [ 3.,  3.,  4.,  4.],
    +          [ 3.,  3.,  4.,  4.]]]])
    +
    +>>> m = nn.Upsample(scale_factor=2, mode='bilinear')  # align_corners=False
    +>>> m(input)
    +tensor([[[[ 1.0000,  1.2500,  1.7500,  2.0000],
    +          [ 1.5000,  1.7500,  2.2500,  2.5000],
    +          [ 2.5000,  2.7500,  3.2500,  3.5000],
    +          [ 3.0000,  3.2500,  3.7500,  4.0000]]]])
    +
    +>>> m = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
    +>>> m(input)
    +tensor([[[[ 1.0000,  1.3333,  1.6667,  2.0000],
    +          [ 1.6667,  2.0000,  2.3333,  2.6667],
    +          [ 2.3333,  2.6667,  3.0000,  3.3333],
    +          [ 3.0000,  3.3333,  3.6667,  4.0000]]]])
    +
    +>>> # Try scaling the same data in a larger tensor
    +>>>
    +>>> input_3x3 = torch.zeros(3, 3).view(1, 1, 3, 3)
    +>>> input_3x3[:, :, :2, :2].copy_(input)
    +tensor([[[[ 1.,  2.],
    +          [ 3.,  4.]]]])
    +>>> input_3x3
    +tensor([[[[ 1.,  2.,  0.],
    +          [ 3.,  4.,  0.],
    +          [ 0.,  0.,  0.]]]])
    +
    +>>> m = nn.Upsample(scale_factor=2, mode='bilinear')  # align_corners=False
    +>>> # Notice that values in top left corner are the same with the small input (except at boundary)
    +>>> m(input_3x3)
    +tensor([[[[ 1.0000,  1.2500,  1.7500,  1.5000,  0.5000,  0.0000],
    +          [ 1.5000,  1.7500,  2.2500,  1.8750,  0.6250,  0.0000],
    +          [ 2.5000,  2.7500,  3.2500,  2.6250,  0.8750,  0.0000],
    +          [ 2.2500,  2.4375,  2.8125,  2.2500,  0.7500,  0.0000],
    +          [ 0.7500,  0.8125,  0.9375,  0.7500,  0.2500,  0.0000],
    +          [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000]]]])
    +
    +>>> m = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
    +>>> # Notice that values in top left corner are now changed
    +>>> m(input_3x3)
    +tensor([[[[ 1.0000,  1.4000,  1.8000,  1.6000,  0.8000,  0.0000],
    +          [ 1.8000,  2.2000,  2.6000,  2.2400,  1.1200,  0.0000],
    +          [ 2.6000,  3.0000,  3.4000,  2.8800,  1.4400,  0.0000],
    +          [ 2.4000,  2.7200,  3.0400,  2.5600,  1.2800,  0.0000],
    +          [ 1.2000,  1.3600,  1.5200,  1.2800,  0.6400,  0.0000],
    +          [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000]]]])
    +
    +
    +
    + +
    +
    +

    UpsamplingNearest2d

    +
    +
    +class torch.nn.UpsamplingNearest2d(size=None, scale_factor=None)[source]
    +

    Applies a 2D nearest neighbor upsampling to an input signal composed of several input +channels.

    +

To specify the scale, it takes either the size or the scale_factor as its constructor argument.

    +

    When size is given, it is the output size of the image (h, w).

    +
    +
    Parameters
    +
      +
    • size (int or Tuple[int, int], optional) – output spatial sizes

    • +
    • scale_factor (float or Tuple[float, float], optional) – multiplier for +spatial size.

    • +
    +
    +
    +
    +

    Warning

    +

    This class is deprecated in favor of interpolate().

    +
    +
    +
    Shape:
      +
    • Input: \((N, C, H_{in}, W_{in})\)

    • +
    • Output: \((N, C, H_{out}, W_{out})\) where

    • +
    +
    +
    +
+\[H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor\]
+
+\[W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor\]
    +

    Examples:

    +
    >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
    +>>> input
    +tensor([[[[ 1.,  2.],
    +          [ 3.,  4.]]]])
    +
    +>>> m = nn.UpsamplingNearest2d(scale_factor=2)
    +>>> m(input)
    +tensor([[[[ 1.,  1.,  2.,  2.],
    +          [ 1.,  1.,  2.,  2.],
    +          [ 3.,  3.,  4.,  4.],
    +          [ 3.,  3.,  4.,  4.]]]])
    +
    +
    +
    + +
    +
    +

    UpsamplingBilinear2d

    +
    +
    +class torch.nn.UpsamplingBilinear2d(size=None, scale_factor=None)[source]
    +

    Applies a 2D bilinear upsampling to an input signal composed of several input +channels.

    +

To specify the scale, it takes either the size or the scale_factor as its constructor argument.

    +

    When size is given, it is the output size of the image (h, w).

    +
    +
    Parameters
    +
      +
    • size (int or Tuple[int, int], optional) – output spatial sizes

    • +
    • scale_factor (float or Tuple[float, float], optional) – multiplier for +spatial size.

    • +
    +
    +
    +
    +

    Warning

    +

    This class is deprecated in favor of interpolate(). It is +equivalent to nn.functional.interpolate(..., mode='bilinear', align_corners=True).

    +
    +
    +
    Shape:
      +
    • Input: \((N, C, H_{in}, W_{in})\)

    • +
    • Output: \((N, C, H_{out}, W_{out})\) where

    • +
    +
    +
    +
+\[H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor\]
+
+\[W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor\]
    +

    Examples:

    +
    >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
    +>>> input
    +tensor([[[[ 1.,  2.],
    +          [ 3.,  4.]]]])
    +
    +>>> m = nn.UpsamplingBilinear2d(scale_factor=2)
    +>>> m(input)
    +tensor([[[[ 1.0000,  1.3333,  1.6667,  2.0000],
    +          [ 1.6667,  2.0000,  2.3333,  2.6667],
    +          [ 2.3333,  2.6667,  3.0000,  3.3333],
    +          [ 3.0000,  3.3333,  3.6667,  4.0000]]]])
    +
    +
    +
    + +
    +
    +
    +

    DataParallel layers (multi-GPU, distributed)

    +
    +

    DataParallel

    +
    +
    +class torch.nn.DataParallel(module, device_ids=None, output_device=None, dim=0)[source]
    +

    Implements data parallelism at the module level.

    +

    This container parallelizes the application of the given module by +splitting the input across the specified devices by chunking in the batch +dimension (other objects will be copied once per device). In the forward +pass, the module is replicated on each device, and each replica handles a +portion of the input. During the backwards pass, gradients from each replica +are summed into the original module.

    +

    The batch size should be larger than the number of GPUs used.

    +

    See also: Use nn.DataParallel instead of multiprocessing

    +

    Arbitrary positional and keyword inputs are allowed to be passed into +DataParallel but some types are specially handled. tensors will be +scattered on dim specified (default 0). tuple, list and dict types will +be shallow copied. The other types will be shared among different threads +and can be corrupted if written to in the model’s forward pass.

    +

    The parallelized module must have its parameters and buffers on +device_ids[0] before running this DataParallel +module.

    +
    +

    Warning

    +

    In each forward, module is replicated on each device, so any +updates to the running module in forward will be lost. For example, +if module has a counter attribute that is incremented in each +forward, it will always stay at the initial value because the update +is done on the replicas which are destroyed after forward. However, +DataParallel guarantees that the replica on +device[0] will have its parameters and buffers sharing storage with +the base parallelized module. So in-place updates to the +parameters or buffers on device[0] will be recorded. E.g., +BatchNorm2d and spectral_norm() +rely on this behavior to update the buffers.

    +
    +
    +

    Warning

    +

    Forward and backward hooks defined on module and its submodules +will be invoked len(device_ids) times, each with inputs located on +a particular device. Particularly, the hooks are only guaranteed to be +executed in correct order with respect to operations on corresponding +devices. For example, it is not guaranteed that hooks set via +register_forward_pre_hook() be executed before +all len(device_ids) forward() calls, but +that each such hook be executed before the corresponding +forward() call of that device.

    +
    +
    +

    Warning

    +

When module returns a scalar (i.e., 0-dimensional tensor) in forward(), this wrapper will return a vector of length equal to the number of devices used in data parallelism, containing the result from each device.

    +
    +
    +

    Note

    +

    There is a subtlety in using the +pack sequence -> recurrent network -> unpack sequence pattern in a +Module wrapped in DataParallel. +See My recurrent network doesn’t work with data parallelism section in FAQ for +details.

    +
    +
    +
    Parameters
    +
      +
    • module (Module) – module to be parallelized

    • +
    • device_ids (list of python:int or torch.device) – CUDA devices (default: all devices)

    • +
    • output_device (int or torch.device) – device location of output (default: device_ids[0])

    • +
    +
    +
    Variables
    +

    ~DataParallel.module (Module) – the module to be parallelized

    +
    +
    +

    Example:

    +
    >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
    +>>> output = net(input_var)  # input_var can be on any device, including CPU
    +
    +
    +
    + +
    +
    +

    DistributedDataParallel

    +
    +
    +class torch.nn.parallel.DistributedDataParallel(module, device_ids=None, output_device=None, dim=0, broadcast_buffers=True, process_group=None, bucket_cap_mb=25, find_unused_parameters=False, check_reduction=False)[source]
    +

    Implements distributed data parallelism that is based on +torch.distributed package at the module level.

    +

    This container parallelizes the application of the given module by +splitting the input across the specified devices by chunking in the batch +dimension. The module is replicated on each machine and each device, and +each such replica handles a portion of the input. During the backwards +pass, gradients from each node are averaged.

    +

    The batch size should be larger than the number of GPUs used locally.

    +

    See also: Basics and Use nn.DataParallel instead of multiprocessing. +The same constraints on input as in torch.nn.DataParallel apply.

    +

Creation of this class requires that torch.distributed already be initialized, by calling torch.distributed.init_process_group().

    +

    DistributedDataParallel can be used in the following two ways:

    +
      +
    1. Single-Process Multi-GPU

    2. +
    +

    In this case, a single process will be +spawned on each host/node and each process will operate on all the GPUs +of the node where it’s running. To use DistributedDataParallel in +this way, you can simply construct the model as the following:

    +
    >>> torch.distributed.init_process_group(backend="nccl")
    +>>> model = DistributedDataParallel(model) # device_ids will include all GPU devices by default
    +
    +
    +
      +
    2. Multi-Process Single-GPU

    2. +
    +

    This is the highly recommended way to use DistributedDataParallel, with +multiple processes, each of which operates on a single GPU. This is +currently the fastest approach to do data parallel training using PyTorch +and applies to both single-node(multi-GPU) and multi-node data +parallel training. It is proven to be significantly faster than +torch.nn.DataParallel for single-node multi-GPU data +parallel training.

    +

    Here is how to use it: on each host with N GPUs, you should spawn up N +processes, while ensuring that each process individually works on a single GPU +from 0 to N-1. Therefore, it is your job to ensure that your training script +operates on a single given GPU by calling:

    +
    >>> torch.cuda.set_device(i)
    +
    +
    +

where i is from 0 to N-1. In each process, you should refer to the following to construct this module:

    +
    >>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...')
    +>>> model = DistributedDataParallel(model, device_ids=[i], output_device=i)
    +
    +
    +

    In order to spawn up multiple processes per node, you can use either +torch.distributed.launch or torch.multiprocessing.spawn

    +
    +

    Note

    +

nccl backend is currently the fastest and most highly recommended backend to be used with Multi-Process Single-GPU distributed training, and this applies to both single-node and multi-node distributed training.

    +
    +
    +

    Note

    +

This module also supports mixed-precision distributed training. This means that your model can have different types of parameters, such as a mix of fp16 and fp32; gradient reduction on these mixed types of parameters will just work fine. Also note that nccl backend is currently the fastest and highly recommended backend for fp16/fp32 mixed-precision training.

    +
    +
    +

    Note

    +

    If you use torch.save on one process to checkpoint the module, +and torch.load on some other processes to recover it, make sure that +map_location is configured properly for every process. Without +map_location, torch.load would recover the module to devices +where the module was saved from.

    +
    +
    +

    Warning

    +

    This module works only with the gloo and nccl backends.

    +
    +
    +

    Warning

    +

The constructor, the forward method, and differentiation of the output (or a function of the output of this module) are distributed synchronization points. Take that into account in case different processes might be executing different code.

    +
    +
    +

    Warning

    +

    This module assumes all parameters are registered in the model by the +time it is created. No parameters should be added nor removed later. +Same applies to buffers.

    +
    +
    +

    Warning

    +

This module assumes that all parameters are registered in the model of each distributed process in the same order. The module itself will conduct gradient all-reduction following the reverse order of the registered parameters of the model. In other words, it is users’ responsibility to ensure that each distributed process has the exact same model and thus the exact same parameter registration order.

    +
    +
    +

    Warning

    +

    This module assumes all buffers and gradients are dense.

    +
    +
    +

    Warning

    +

    This module doesn’t work with torch.autograd.grad() (i.e. it will +only work if gradients are to be accumulated in .grad attributes of +parameters).

    +
    +
    +

    Warning

    +

    If you plan on using this module with a nccl backend or a gloo +backend (that uses Infiniband), together with a DataLoader that uses +multiple workers, please change the multiprocessing start method to +forkserver (Python 3 only) or spawn. Unfortunately +Gloo (that uses Infiniband) and NCCL2 are not fork safe, and you will +likely experience deadlocks if you don’t change this setting.

    +
    +
    +

    Warning

    +

    Forward and backward hooks defined on module and its submodules +won’t be invoked anymore, unless the hooks are initialized in the +forward() method.

    +
    +
    +

    Warning

    +

    You should never try to change your model’s parameters after wrapping +up your model with DistributedDataParallel. In other words, when +wrapping up your model with DistributedDataParallel, the constructor of +DistributedDataParallel will register the additional gradient +reduction functions on all the parameters of the model itself at the +time of construction. If you change the model’s parameters after +the DistributedDataParallel construction, this is not supported and +unexpected behaviors can happen, since some parameters’ gradient +reduction functions might not get called.

    +
    +
    +

    Note

    +

    Parameters are never broadcast between processes. The module performs +an all-reduce step on gradients and assumes that they will be modified +by the optimizer in all processes in the same way. Buffers +(e.g. BatchNorm stats) are broadcast from the module in process of rank +0, to all other replicas in the system in every iteration.

    +
    +
    +
    Parameters
    +
      +
    • module (Module) – module to be parallelized

    • +
    • device_ids (list of python:int or torch.device) – CUDA devices. This should only be provided when the input module resides on a single CUDA device. For single-device modules, the i-th module replica is placed on device_ids[i]. For multi-device modules and CPU modules, device_ids must be None or an empty list, and input data for the forward pass must be placed on the correct device. (default: all devices for single-device modules)

    • +
    • output_device (int or torch.device) – device location of output for +single-device CUDA modules. For multi-device modules and +CPU modules, it must be None, and the module itself +dictates the output location. (default: device_ids[0] for +single-device modules)

    • +
    • broadcast_buffers (bool) – flag that enables syncing (broadcasting) buffers of +the module at beginning of the forward function. +(default: True)

    • +
    • process_group – the process group to be used for distributed data +all-reduction. If None, the default process group, which +is created by `torch.distributed.init_process_group`, +will be used. (default: None)

    • +
    • bucket_cap_mb – DistributedDataParallel will bucket parameters into +multiple buckets so that gradient reduction of each +bucket can potentially overlap with backward computation. +bucket_cap_mb controls the bucket size in MegaBytes (MB) +(default: 25)

    • +
    • find_unused_parameters (bool) – Traverse the autograd graph of all tensors +contained in the return value of the wrapped +module’s forward function. +Parameters that don’t receive gradients as +part of this graph are preemptively marked +as being ready to be reduced. Note that all +forward outputs that are derived from +module parameters must participate in +calculating loss and later the gradient +computation. If they don’t, this wrapper will +hang waiting for autograd to produce gradients +for those parameters. Any outputs derived from +module parameters that are otherwise unused can +be detached from the autograd graph using +torch.Tensor.detach. (default: False)

    • +
    • check_reduction – when setting to True, it enables DistributedDataParallel +to automatically check if the previous iteration’s +backward reductions were successfully issued at the +beginning of every iteration’s forward function. +You normally don’t need this option enabled unless you +are observing weird behaviors such as different ranks +are getting different gradients, which should not +happen if DistributedDataParallel is correctly used. +(default: False)

    • +
    +
    +
    Variables
    +

    ~DistributedDataParallel.module (Module) – the module to be parallelized

    +
    +
    +

    Example:

    +
    >>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...')
+>>> net = torch.nn.parallel.DistributedDataParallel(model)  # uses the default process group
    +
    +
    +
    +
    +no_sync()[source]
    +

    A context manager to disable gradient synchronizations across DDP +processes. Within this context, gradients will be accumulated on module +variables, which will later be synchronized in the first +forward-backward pass exiting the context.

    +

    Example:

    +
>>> ddp = torch.nn.parallel.DistributedDataParallel(model)
    +>>> with ddp.no_sync():
    +...   for input in inputs:
    +...     ddp(input).backward()  # no synchronization, accumulate grads
+>>> ddp(another_input).backward()  # synchronize grads
    +
    +
    +
    + +
    + +
    +
    +
    +

    Utilities

    +
    +

    clip_grad_norm_

    +
    +
    +torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=2)[source]
    +

    Clips gradient norm of an iterable of parameters.

    +

    The norm is computed over all gradients together, as if they were +concatenated into a single vector. Gradients are modified in-place.

    +
    +
    Parameters
    +
      +
    • parameters (Iterable[Tensor] or Tensor) – an iterable of Tensors or a +single Tensor that will have gradients normalized

    • +
    • max_norm (float or int) – max norm of the gradients

    • +
    • norm_type (float or int) – type of the used p-norm. Can be 'inf' for +infinity norm.

    • +
    +
    +
    Returns
    +

    Total norm of the parameters (viewed as a single vector).

    +
    +
    +
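A minimal usage sketch (illustrative, not from the original entry; the toy Linear model is hypothetical):

>>> net = nn.Linear(10, 2)
>>> net(torch.randn(4, 10)).sum().backward()
>>> total_norm = torch.nn.utils.clip_grad_norm_(net.parameters(), max_norm=1.0)
>>> # gradients are rescaled in-place so their joint norm is at most 1.0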
    + +
    +
    +

    clip_grad_value_

    +
    +
    +torch.nn.utils.clip_grad_value_(parameters, clip_value)[source]
    +

    Clips gradient of an iterable of parameters at specified value.

    +

    Gradients are modified in-place.

    +
    +
    Parameters
    +
      +
    • parameters (Iterable[Tensor] or Tensor) – an iterable of Tensors or a single Tensor that will have gradients clipped

    • +
    • clip_value (float or int) – maximum allowed value of the gradients. +The gradients are clipped in the range +\(\left[\text{-clip\_value}, \text{clip\_value}\right]\)

    • +
    +
    +
    +
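A minimal usage sketch (illustrative, not from the original entry; the toy Linear model is hypothetical):

>>> net = nn.Linear(10, 2)
>>> net(torch.randn(4, 10)).sum().backward()
>>> torch.nn.utils.clip_grad_value_(net.parameters(), clip_value=0.5)
>>> # every gradient entry now lies in [-0.5, 0.5]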
    + +
    +
    +

    parameters_to_vector

    +
    +
    +torch.nn.utils.parameters_to_vector(parameters)[source]
    +

    Convert parameters to one vector

    +
    +
    Parameters
    +

    parameters (Iterable[Tensor]) – an iterator of Tensors that are the +parameters of a model.

    +
    +
    Returns
    +

    The parameters represented by a single vector

    +
    +
    +
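A minimal usage sketch (illustrative, not from the original entry): a Linear(3, 2) has 6 weights plus 2 biases, so the flattened vector has 8 elements:

>>> from torch.nn.utils import parameters_to_vector
>>> model = nn.Linear(3, 2)
>>> vec = parameters_to_vector(model.parameters())
>>> vec.shape
torch.Size([8])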
    + +
    +
    +

    vector_to_parameters

    +
    +
    +torch.nn.utils.vector_to_parameters(vec, parameters)[source]
    +

    Convert one vector to the parameters

    +
    +
    Parameters
    +
      +
    • vec (Tensor) – a single vector representing the parameters of a model.

    • +
    • parameters (Iterable[Tensor]) – an iterator of Tensors that are the +parameters of a model.

    • +
    +
    +
    +
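A minimal round-trip sketch with parameters_to_vector (illustrative, not from the original entry):

>>> from torch.nn.utils import parameters_to_vector, vector_to_parameters
>>> model = nn.Linear(3, 2)
>>> vec = parameters_to_vector(model.parameters())
>>> vector_to_parameters(torch.zeros_like(vec), model.parameters())
>>> # model.weight and model.bias are now filled with zeros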
    + +
    +
    +

    weight_norm

    +
    +
    +torch.nn.utils.weight_norm(module, name='weight', dim=0)[source]
    +

    Applies weight normalization to a parameter in the given module.

    +
+\[\mathbf{w} = g \dfrac{\mathbf{v}}{\|\mathbf{v}\|}\]
    +

    Weight normalization is a reparameterization that decouples the magnitude +of a weight tensor from its direction. This replaces the parameter specified +by name (e.g. 'weight') with two parameters: one specifying the magnitude +(e.g. 'weight_g') and one specifying the direction (e.g. 'weight_v'). +Weight normalization is implemented via a hook that recomputes the weight +tensor from the magnitude and direction before every forward() +call.

    +

    By default, with dim=0, the norm is computed independently per output +channel/plane. To compute a norm over the entire weight tensor, use +dim=None.

    +

    See https://arxiv.org/abs/1602.07868

    +
    +
    Parameters
    +
      +
    • module (Module) – containing module

    • +
    • name (str, optional) – name of weight parameter

    • +
    • dim (int, optional) – dimension over which to compute the norm

    • +
    +
    +
    Returns
    +

    The original module with the weight norm hook

    +
    +
    +

    Example:

    +
    >>> m = weight_norm(nn.Linear(20, 40), name='weight')
    +>>> m
    +Linear(in_features=20, out_features=40, bias=True)
    +>>> m.weight_g.size()
    +torch.Size([40, 1])
    +>>> m.weight_v.size()
    +torch.Size([40, 20])
    +
    +
    +
    + +
    +
    +

    remove_weight_norm

    +
    +
    +torch.nn.utils.remove_weight_norm(module, name='weight')[source]
    +

    Removes the weight normalization reparameterization from a module.

    +
    +
    Parameters
    +
      +
    • module (Module) – containing module

    • +
    • name (str, optional) – name of weight parameter

    • +
    +
    +
    +

    Example

    +
    >>> m = weight_norm(nn.Linear(20, 40))
    +>>> remove_weight_norm(m)
    +
    +
    +
    + +
    +
    +

    spectral_norm

    +
    +
    +torch.nn.utils.spectral_norm(module, name='weight', n_power_iterations=1, eps=1e-12, dim=None)[source]
    +

    Applies spectral normalization to a parameter in the given module.

    +
+\[\mathbf{W}_{SN} = \dfrac{\mathbf{W}}{\sigma(\mathbf{W})}, \quad \sigma(\mathbf{W}) = \max_{\mathbf{h}: \mathbf{h} \ne 0} \dfrac{\|\mathbf{W} \mathbf{h}\|_2}{\|\mathbf{h}\|_2}\]
    +

Spectral normalization stabilizes the training of discriminators (critics) in Generative Adversarial Networks (GANs) by rescaling the weight tensor with the spectral norm \(\sigma\) of the weight matrix, calculated using the power iteration method. If the dimension of the weight tensor is greater than 2, it is reshaped to 2D in the power iteration method to get the spectral norm. This is implemented via a hook that calculates the spectral norm and rescales the weight before every forward() call.

    +

    See Spectral Normalization for Generative Adversarial Networks .

    +
    +
    Parameters
    +
      +
    • module (nn.Module) – containing module

    • +
    • name (str, optional) – name of weight parameter

    • +
    • n_power_iterations (int, optional) – number of power iterations to +calculate spectral norm

    • +
    • eps (float, optional) – epsilon for numerical stability in +calculating norms

    • +
    • dim (int, optional) – dimension corresponding to number of outputs, +the default is 0, except for modules that are instances of +ConvTranspose{1,2,3}d, when it is 1

    • +
    +
    +
    Returns
    +

    The original module with the spectral norm hook

    +
    +
    +

    Example:

    +
    >>> m = spectral_norm(nn.Linear(20, 40))
    +>>> m
    +Linear(in_features=20, out_features=40, bias=True)
    +>>> m.weight_u.size()
    +torch.Size([40])
    +
    +
    +
    + +
    +
    +

    remove_spectral_norm

    +
    +
    +torch.nn.utils.remove_spectral_norm(module, name='weight')[source]
    +

    Removes the spectral normalization reparameterization from a module.

    +
    +
    Parameters
    +
      +
    • module (Module) – containing module

    • +
    • name (str, optional) – name of weight parameter

    • +
    +
    +
    +

    Example

    +
    >>> m = spectral_norm(nn.Linear(40, 10))
    +>>> remove_spectral_norm(m)
    +
    +
    +
    + +
    +
    +

    PackedSequence

    +
    +
    +torch.nn.utils.rnn.PackedSequence(data, batch_sizes=None, sorted_indices=None, unsorted_indices=None)[source]
    +

    Holds the data and list of batch_sizes of a packed sequence.

    +

    All RNN modules accept packed sequences as inputs.

    +
    +

    Note

    +

    Instances of this class should never be created manually. They are meant +to be instantiated by functions like pack_padded_sequence().

    +

Batch sizes represent the number of elements at each sequence step in the batch, not the varying sequence lengths passed to pack_padded_sequence(). For instance, given data abc and x the PackedSequence would contain data axbc with batch_sizes=[2,1,1].

    +
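The abc / x illustration above, in code (a sketch built with pack_sequence(), which is described later on this page):

>>> from torch.nn.utils.rnn import pack_sequence
>>> packed = pack_sequence([torch.tensor([1., 2., 3.]), torch.tensor([4.])])
>>> packed.data
tensor([1., 4., 2., 3.])
>>> packed.batch_sizes
tensor([2, 1, 1])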
    +
    +
    Variables
    +
      +
    • ~PackedSequence.data (Tensor) – Tensor containing packed sequence

    • +
    • ~PackedSequence.batch_sizes (Tensor) – Tensor of integers holding +information about the batch size at each sequence step

    • +
    • ~PackedSequence.sorted_indices (Tensor, optional) – Tensor of integers holding how this +PackedSequence is constructed from sequences.

    • +
    • ~PackedSequence.unsorted_indices (Tensor, optional) – Tensor of integers holding how to recover the original sequences with the correct order.

    • +
    +
    +
    +
    +

    Note

    +

    data can be on arbitrary device and of arbitrary dtype. +sorted_indices and unsorted_indices must be torch.int64 +tensors on the same device as data.

    +

    However, batch_sizes should always be a CPU torch.int64 tensor.

    +

This invariant is maintained throughout the PackedSequence class, and by all functions that construct a PackedSequence in PyTorch (i.e., they only pass in tensors conforming to this constraint).

    +
    +
    + +
    +
    +

    pack_padded_sequence

    +
    +
    +torch.nn.utils.rnn.pack_padded_sequence(input, lengths, batch_first=False, enforce_sorted=True)[source]
    +

    Packs a Tensor containing padded sequences of variable length.

    +

    input can be of size T x B x * where T is the length of the +longest sequence (equal to lengths[0]), B is the batch size, and +* is any number of dimensions (including 0). If batch_first is +True, B x T x * input is expected.

    +

    For unsorted sequences, use enforce_sorted = False. If enforce_sorted is +True, the sequences should be sorted by length in a decreasing order, i.e. +input[:,0] should be the longest sequence, and input[:,B-1] the shortest +one. enforce_sorted = True is only necessary for ONNX export.

    +
    +

    Note

    +

    This function accepts any input that has at least two dimensions. You +can apply it to pack the labels, and use the output of the RNN with +them to compute the loss directly. A Tensor can be retrieved from +a PackedSequence object by accessing its .data attribute.

    +
    +
    +
    Parameters
    +
      +
    • input (Tensor) – padded batch of variable length sequences.

    • +
    • lengths (Tensor) – list of sequence lengths of each batch element.

    • +
    • batch_first (bool, optional) – if True, the input is expected in B x T x * +format.

    • +
    • enforce_sorted (bool, optional) – if True, the input is expected to +contain sequences sorted by length in a decreasing order. If +False, this condition is not checked. Default: True.

    • +
    +
    +
    Returns
    +

    a PackedSequence object

    +
    +
    +
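A minimal usage sketch (illustrative, not from the original entry), packing a zero-padded B x T batch with lengths sorted in decreasing order:

>>> from torch.nn.utils.rnn import pack_padded_sequence
>>> seq = torch.tensor([[1., 2., 3.], [4., 5., 0.]])  # B x T, zero-padded
>>> packed = pack_padded_sequence(seq, torch.tensor([3, 2]), batch_first=True)
>>> packed.batch_sizes
tensor([2, 2, 1])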
    + +
    +
    +

    pad_packed_sequence

    +
    +
    +torch.nn.utils.rnn.pad_packed_sequence(sequence, batch_first=False, padding_value=0.0, total_length=None)[source]
    +

    Pads a packed batch of variable length sequences.

    +

    It is an inverse operation to pack_padded_sequence().

    +

    The returned Tensor’s data will be of size T x B x *, where T is the length +of the longest sequence and B is the batch size. If batch_first is True, +the data will be transposed into B x T x * format.

    +

    Batch elements will be ordered decreasingly by their length.

    +
    +

    Note

    +

    total_length is useful to implement the +pack sequence -> recurrent network -> unpack sequence pattern in a +Module wrapped in DataParallel. +See this FAQ section for +details.

    +
    +
    +
    Parameters
    +
      +
    • sequence (PackedSequence) – batch to pad

    • +
    • batch_first (bool, optional) – if True, the output will be in B x T x * +format.

    • +
    • padding_value (float, optional) – values for padded elements.

    • +
    • total_length (int, optional) – if not None, the output will be padded to +have length total_length. This method will throw ValueError +if total_length is less than the max sequence length in +sequence.

    • +
    +
    +
    Returns
    +

    Tuple of Tensor containing the padded sequence, and a Tensor +containing the list of lengths of each sequence in the batch.

    +
    +
    +
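A minimal round-trip sketch with pack_padded_sequence() (illustrative, not from the original entry):

>>> from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
>>> seq = torch.tensor([[1., 2., 3.], [4., 5., 0.]])
>>> packed = pack_padded_sequence(seq, torch.tensor([3, 2]), batch_first=True)
>>> padded, lengths = pad_packed_sequence(packed, batch_first=True)
>>> lengths
tensor([3, 2])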
    + +
    +
    +

    pad_sequence

    +
    +
    +torch.nn.utils.rnn.pad_sequence(sequences, batch_first=False, padding_value=0)[source]
    +

    Pad a list of variable length Tensors with padding_value

    +

pad_sequence stacks a list of Tensors along a new dimension, and pads them to equal length. For example, if the input is a list of sequences of size L x *, the output is of size T x B x * if batch_first is False, and B x T x * otherwise.

    +

    B is batch size. It is equal to the number of elements in sequences. +T is length of the longest sequence. +L is length of the sequence. +* is any number of trailing dimensions, including none.

    +

    Example

>>> from torch.nn.utils.rnn import pad_sequence
>>> a = torch.ones(25, 300)
>>> b = torch.ones(22, 300)
>>> c = torch.ones(15, 300)
>>> pad_sequence([a, b, c]).size()
torch.Size([25, 3, 300])

    Note

This function returns a Tensor of size T x B x * or B x T x * where T is the length of the longest sequence. This function assumes trailing dimensions and type of all the Tensors in sequences are the same.

Parameters

• sequences (list[Tensor]) – list of variable length sequences.

• batch_first (bool, optional) – output will be in B x T x * if True, or in T x B x * otherwise.

• padding_value (float, optional) – value for padded elements. Default: 0.

Returns

Tensor of size T x B x * if batch_first is False. Tensor of size B x T x * otherwise


    pack_sequence

torch.nn.utils.rnn.pack_sequence(sequences, enforce_sorted=True)[source]

Packs a list of variable length Tensors

sequences should be a list of Tensors of size L x *, where L is the length of a sequence and * is any number of trailing dimensions, including zero.

For unsorted sequences, use enforce_sorted = False. If enforce_sorted is True, the sequences should be sorted in the order of decreasing length. enforce_sorted = True is only necessary for ONNX export.

    Example

>>> from torch.nn.utils.rnn import pack_sequence
>>> a = torch.tensor([1,2,3])
>>> b = torch.tensor([4,5])
>>> c = torch.tensor([6])
>>> pack_sequence([a, b, c])
PackedSequence(data=tensor([ 1,  4,  6,  2,  5,  3]), batch_sizes=tensor([ 3,  2,  1]))
    Parameters
• sequences (list[Tensor]) – A list of sequences of decreasing length.

• enforce_sorted (bool, optional) – if True, checks that the input contains sequences sorted by length in a decreasing order. If False, this condition is not checked. Default: True.

Returns

a PackedSequence object


    Flatten


\ No newline at end of file
diff --git a/docs/stable/nn.init.html b/docs/stable/nn.init.html
new file mode 100644
index 000000000000..dcf3559db2d6
--- /dev/null
+++ b/docs/stable/nn.init.html

torch.nn.init — PyTorch master documentation

    torch.nn.init

torch.nn.init.calculate_gain(nonlinearity, param=None)[source]

Return the recommended gain value for the given nonlinearity function. The values are as follows:

nonlinearity        gain
Linear / Identity   \(1\)
Conv{1,2,3}D        \(1\)
Sigmoid             \(1\)
Tanh                \(\frac{5}{3}\)
ReLU                \(\sqrt{2}\)
Leaky ReLU          \(\sqrt{\frac{2}{1 + \text{negative\_slope}^2}}\)
    Parameters
• nonlinearity – the non-linear function (nn.functional name)

• param – optional parameter for the non-linear function

    Examples

>>> gain = nn.init.calculate_gain('leaky_relu', 0.2)  # leaky_relu with negative_slope=0.2
torch.nn.init.uniform_(tensor, a=0.0, b=1.0)[source]

Fills the input Tensor with values drawn from the uniform distribution \(\mathcal{U}(a, b)\).

Parameters

• tensor – an n-dimensional torch.Tensor

• a – the lower bound of the uniform distribution

• b – the upper bound of the uniform distribution

Examples

>>> w = torch.empty(3, 5)
>>> nn.init.uniform_(w)

torch.nn.init.normal_(tensor, mean=0.0, std=1.0)[source]

Fills the input Tensor with values drawn from the normal distribution \(\mathcal{N}(\text{mean}, \text{std}^2)\).

Parameters

• tensor – an n-dimensional torch.Tensor

• mean – the mean of the normal distribution

• std – the standard deviation of the normal distribution

Examples

>>> w = torch.empty(3, 5)
>>> nn.init.normal_(w)

torch.nn.init.constant_(tensor, val)[source]

Fills the input Tensor with the value \(\text{val}\).

Parameters

• tensor – an n-dimensional torch.Tensor

• val – the value to fill the tensor with

Examples

>>> w = torch.empty(3, 5)
>>> nn.init.constant_(w, 0.3)
torch.nn.init.ones_(tensor)[source]

Fills the input Tensor with the scalar value 1.

Parameters

tensor – an n-dimensional torch.Tensor

Examples

>>> w = torch.empty(3, 5)
>>> nn.init.ones_(w)

torch.nn.init.zeros_(tensor)[source]

Fills the input Tensor with the scalar value 0.

Parameters

tensor – an n-dimensional torch.Tensor

Examples

>>> w = torch.empty(3, 5)
>>> nn.init.zeros_(w)

torch.nn.init.eye_(tensor)[source]

Fills the 2-dimensional input Tensor with the identity matrix. Preserves the identity of the inputs in Linear layers, where as many inputs are preserved as possible.

Parameters

tensor – a 2-dimensional torch.Tensor

Examples

>>> w = torch.empty(3, 5)
>>> nn.init.eye_(w)

torch.nn.init.dirac_(tensor)[source]

Fills the {3, 4, 5}-dimensional input Tensor with the Dirac delta function. Preserves the identity of the inputs in Convolutional layers, where as many input channels are preserved as possible.

Parameters

tensor – a {3, 4, 5}-dimensional torch.Tensor

Examples

>>> w = torch.empty(3, 16, 5, 5)
>>> nn.init.dirac_(w)
torch.nn.init.xavier_uniform_(tensor, gain=1.0)[source]

Fills the input Tensor with values according to the method described in Understanding the difficulty of training deep feedforward neural networks - Glorot, X. & Bengio, Y. (2010), using a uniform distribution. The resulting tensor will have values sampled from \(\mathcal{U}(-a, a)\) where

\[a = \text{gain} \times \sqrt{\frac{6}{\text{fan\_in} + \text{fan\_out}}}\]

Also known as Glorot initialization.

Parameters

• tensor – an n-dimensional torch.Tensor

• gain – an optional scaling factor

Examples

>>> w = torch.empty(3, 5)
>>> nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain('relu'))

torch.nn.init.xavier_normal_(tensor, gain=1.0)[source]

Fills the input Tensor with values according to the method described in Understanding the difficulty of training deep feedforward neural networks - Glorot, X. & Bengio, Y. (2010), using a normal distribution. The resulting tensor will have values sampled from \(\mathcal{N}(0, \text{std}^2)\) where

\[\text{std} = \text{gain} \times \sqrt{\frac{2}{\text{fan\_in} + \text{fan\_out}}}\]

Also known as Glorot initialization.

Parameters

• tensor – an n-dimensional torch.Tensor

• gain – an optional scaling factor

Examples

>>> w = torch.empty(3, 5)
>>> nn.init.xavier_normal_(w)
torch.nn.init.kaiming_uniform_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu')[source]

Fills the input Tensor with values according to the method described in Delving deep into rectifiers: Surpassing human-level performance on ImageNet classification - He, K. et al. (2015), using a uniform distribution. The resulting tensor will have values sampled from \(\mathcal{U}(-\text{bound}, \text{bound})\) where

\[\text{bound} = \sqrt{\frac{6}{(1 + a^2) \times \text{fan\_in}}}\]

Also known as He initialization.

Parameters

• tensor – an n-dimensional torch.Tensor

• a – the negative slope of the rectifier used after this layer (0 for ReLU by default)

• mode – either 'fan_in' (default) or 'fan_out'. Choosing 'fan_in' preserves the magnitude of the variance of the weights in the forward pass. Choosing 'fan_out' preserves the magnitudes in the backwards pass.

• nonlinearity – the non-linear function (nn.functional name), recommended to use only with 'relu' or 'leaky_relu' (default).

Examples

>>> w = torch.empty(3, 5)
>>> nn.init.kaiming_uniform_(w, mode='fan_in', nonlinearity='relu')

torch.nn.init.kaiming_normal_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu')[source]

Fills the input Tensor with values according to the method described in Delving deep into rectifiers: Surpassing human-level performance on ImageNet classification - He, K. et al. (2015), using a normal distribution. The resulting tensor will have values sampled from \(\mathcal{N}(0, \text{std}^2)\) where

\[\text{std} = \sqrt{\frac{2}{(1 + a^2) \times \text{fan\_in}}}\]

Also known as He initialization.

Parameters

• tensor – an n-dimensional torch.Tensor

• a – the negative slope of the rectifier used after this layer (0 for ReLU by default)

• mode – either 'fan_in' (default) or 'fan_out'. Choosing 'fan_in' preserves the magnitude of the variance of the weights in the forward pass. Choosing 'fan_out' preserves the magnitudes in the backwards pass.

• nonlinearity – the non-linear function (nn.functional name), recommended to use only with 'relu' or 'leaky_relu' (default).

Examples

>>> w = torch.empty(3, 5)
>>> nn.init.kaiming_normal_(w, mode='fan_out', nonlinearity='relu')
torch.nn.init.orthogonal_(tensor, gain=1)[source]

Fills the input Tensor with a (semi) orthogonal matrix, as described in Exact solutions to the nonlinear dynamics of learning in deep linear neural networks - Saxe, A. et al. (2013). The input tensor must have at least 2 dimensions, and for tensors with more than 2 dimensions the trailing dimensions are flattened.

Parameters

• tensor – an n-dimensional torch.Tensor, where \(n \geq 2\)

• gain – optional scaling factor

Examples

>>> w = torch.empty(3, 5)
>>> nn.init.orthogonal_(w)

torch.nn.init.sparse_(tensor, sparsity, std=0.01)[source]

Fills the 2D input Tensor as a sparse matrix, where the non-zero elements will be drawn from the normal distribution \(\mathcal{N}(0, 0.01)\), as described in Deep learning via Hessian-free optimization - Martens, J. (2010).

Parameters

• tensor – an n-dimensional torch.Tensor

• sparsity – The fraction of elements in each column to be set to zero

• std – the standard deviation of the normal distribution used to generate the non-zero values

Examples

>>> w = torch.empty(3, 5)
>>> nn.init.sparse_(w, sparsity=0.1)
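These initializers mutate tensors in place and are typically applied right after module construction. A common pattern (a sketch, not part of this reference page) walks a module tree with Module.apply:

import torch
import torch.nn as nn

def init_weights(m):
    # initialize every Linear submodule in place
    if isinstance(m, nn.Linear):
        nn.init.kaiming_uniform_(m.weight, nonlinearity='relu')
        nn.init.zeros_(m.bias)

net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
net.apply(init_weights)  # applies init_weights recursively to all submodules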
\ No newline at end of file
diff --git a/docs/stable/notes/autograd.html b/docs/stable/notes/autograd.html
new file mode 100644
index 000000000000..9b6d2fcc6f47
--- /dev/null
+++ b/docs/stable/notes/autograd.html

Autograd mechanics — PyTorch master documentation

    Autograd mechanics

This note will present an overview of how autograd works and records the operations. It’s not strictly necessary to understand all this, but we recommend getting familiar with it, as it will help you write more efficient, cleaner programs, and can aid you in debugging.

Excluding subgraphs from backward

Every Tensor has a flag: requires_grad that allows for fine grained exclusion of subgraphs from gradient computation and can increase efficiency.

requires_grad

If there’s a single input to an operation that requires gradient, its output will also require gradient. Conversely, only if all inputs don’t require gradient, the output also won’t require it. Backward computation is never performed in subgraphs where no Tensor requires gradients.

>>> x = torch.randn(5, 5)  # requires_grad=False by default
>>> y = torch.randn(5, 5)  # requires_grad=False by default
>>> z = torch.randn((5, 5), requires_grad=True)
>>> a = x + y
>>> a.requires_grad
False
>>> b = a + z
>>> b.requires_grad
True

This is especially useful when you want to freeze part of your model, or you know in advance that you’re not going to use gradients w.r.t. some parameters. For example, if you want to finetune a pretrained CNN, it’s enough to switch the requires_grad flags in the frozen base, and no intermediate buffers will be saved, until the computation gets to the last layer, where the affine transform will use weights that require gradient, and the output of the network will also require them.

model = torchvision.models.resnet18(pretrained=True)
for param in model.parameters():
    param.requires_grad = False
# Replace the last fully-connected layer
# Parameters of newly constructed modules have requires_grad=True by default
model.fc = nn.Linear(512, 100)

# Optimize only the classifier
optimizer = optim.SGD(model.fc.parameters(), lr=1e-2, momentum=0.9)

    How autograd encodes the history

Autograd is a reverse automatic differentiation system. Conceptually, autograd records a graph of all of the operations that created the data as you execute operations, giving you a directed acyclic graph whose leaves are the input tensors and roots are the output tensors. By tracing this graph from roots to leaves, you can automatically compute the gradients using the chain rule.

Internally, autograd represents this graph as a graph of Function objects (really expressions), which can be apply() ed to compute the result of evaluating the graph. When computing the forwards pass, autograd simultaneously performs the requested computations and builds up a graph representing the function that computes the gradient (the .grad_fn attribute of each torch.Tensor is an entry point into this graph). When the forwards pass is completed, we evaluate this graph in the backwards pass to compute the gradients.
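For illustration, a small sketch of poking at this graph through .grad_fn (not from the original note; the exact repr strings may differ between versions):

>>> x = torch.ones(2, 2, requires_grad=True)
>>> y = (x * 3).sum()
>>> y.grad_fn                 # root of the backward graph
<SumBackward0 object at ...>
>>> y.grad_fn.next_functions  # edge to the multiplication's backward node
((<MulBackward0 object at ...>, 0),)
>>> y.backward()
>>> x.grad                    # chain rule applied from root to leaf
tensor([[3., 3.],
        [3., 3.]])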

An important thing to note is that the graph is recreated from scratch at every iteration, and this is exactly what allows for using arbitrary Python control flow statements that can change the overall shape and size of the graph at every iteration. You don’t have to encode all possible paths before you launch the training - what you run is what you differentiate.

    In-place operations with autograd

Supporting in-place operations in autograd is a hard matter, and we discourage their use in most cases. Autograd’s aggressive buffer freeing and reuse makes it very efficient and there are very few occasions when in-place operations actually lower memory usage by any significant amount. Unless you’re operating under heavy memory pressure, you might never need to use them.

There are two main reasons that limit the applicability of in-place operations:

1. In-place operations can potentially overwrite values required to compute gradients.

2. Every in-place operation actually requires the implementation to rewrite the computational graph. Out-of-place versions simply allocate new objects and keep references to the old graph, while in-place operations require changing the creator of all inputs to the Function representing this operation. This can be tricky, especially if there are many Tensors that reference the same storage (e.g. created by indexing or transposing), and in-place functions will actually raise an error if the storage of modified inputs is referenced by any other Tensor.

    In-place correctness checks

Every tensor keeps a version counter, that is incremented every time it is marked dirty in any operation. When a Function saves any tensors for backward, a version counter of their containing Tensor is saved as well. Once you access self.saved_tensors it is checked, and if it is greater than the saved value an error is raised. This ensures that if you’re using in-place functions and not seeing any errors, you can be sure that the computed gradients are correct.
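A minimal sketch of this check tripping (not from the original note; the exact error text varies across versions):

>>> x = torch.ones(3, requires_grad=True)
>>> y = x.exp()      # exp saves its output for use in backward
>>> y.add_(1)        # in-place op bumps y's version counter
>>> y.sum().backward()
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation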

\ No newline at end of file
diff --git a/docs/stable/notes/broadcasting.html b/docs/stable/notes/broadcasting.html
new file mode 100644
index 000000000000..f596571bb896
--- /dev/null
+++ b/docs/stable/notes/broadcasting.html

Broadcasting semantics — PyTorch master documentation

    Broadcasting semantics

Many PyTorch operations support NumPy Broadcasting Semantics.

In short, if a PyTorch operation supports broadcast, then its Tensor arguments can be automatically expanded to be of equal sizes (without making copies of the data).


    General semantics


    Two tensors are “broadcastable” if the following rules hold:

• Each tensor has at least one dimension.

• When iterating over the dimension sizes, starting at the trailing dimension, the dimension sizes must either be equal, one of them is 1, or one of them does not exist.

    For Example:

>>> x=torch.empty(5,7,3)
>>> y=torch.empty(5,7,3)
# same shapes are always broadcastable (i.e. the above rules always hold)

>>> x=torch.empty((0,))
>>> y=torch.empty(2,2)
# x and y are not broadcastable, because x does not have at least 1 dimension

# can line up trailing dimensions
>>> x=torch.empty(5,3,4,1)
>>> y=torch.empty(  3,1,1)
# x and y are broadcastable.
# 1st trailing dimension: both have size 1
# 2nd trailing dimension: y has size 1
# 3rd trailing dimension: x size == y size
# 4th trailing dimension: y dimension doesn't exist

# but:
>>> x=torch.empty(5,2,4,1)
>>> y=torch.empty(  3,1,1)
# x and y are not broadcastable, because in the 3rd trailing dimension 2 != 3

If two tensors x, y are “broadcastable”, the resulting tensor size is calculated as follows:

• If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the tensor with fewer dimensions to make them equal length.

• Then, for each dimension size, the resulting dimension size is the max of the sizes of x and y along that dimension.

    For Example:

# can line up trailing dimensions to make reading easier
>>> x=torch.empty(5,1,4,1)
>>> y=torch.empty(  3,1,1)
>>> (x+y).size()
torch.Size([5, 3, 4, 1])

# but not necessary:
>>> x=torch.empty(1)
>>> y=torch.empty(3,1,7)
>>> (x+y).size()
torch.Size([3, 1, 7])

>>> x=torch.empty(5,2,4,1)
>>> y=torch.empty(3,1,1)
>>> (x+y).size()
RuntimeError: The size of tensor a (2) must match the size of tensor b (3) at non-singleton dimension 1

    In-place semantics

One complication is that in-place operations do not allow the in-place tensor to change shape as a result of the broadcast.

For Example:

>>> x=torch.empty(5,3,4,1)
>>> y=torch.empty(3,1,1)
>>> (x.add_(y)).size()
torch.Size([5, 3, 4, 1])

# but:
>>> x=torch.empty(1,3,1)
>>> y=torch.empty(3,1,7)
>>> (x.add_(y)).size()
RuntimeError: The expanded size of the tensor (1) must match the existing size (7) at non-singleton dimension 2.

    Backwards compatibility

Prior versions of PyTorch allowed certain pointwise functions to execute on tensors with different shapes, as long as the number of elements in each tensor was equal. The pointwise operation would then be carried out by viewing each tensor as 1-dimensional. PyTorch now supports broadcasting, and the “1-dimensional” pointwise behavior is considered deprecated and will generate a Python warning in cases where tensors are not broadcastable but have the same number of elements.

Note that the introduction of broadcasting can cause backwards incompatible changes in the case where two tensors do not have the same shape, but are broadcastable and have the same number of elements. For Example:

>>> torch.add(torch.ones(4,1), torch.randn(4))

would previously produce a Tensor with size: torch.Size([4,1]), but now produces a Tensor with size: torch.Size([4,4]). In order to help identify cases in your code where backwards incompatibilities introduced by broadcasting may exist, you may set torch.utils.backcompat.broadcast_warning.enabled to True, which will generate a Python warning in such cases.

For Example:

>>> torch.utils.backcompat.broadcast_warning.enabled=True
>>> torch.add(torch.ones(4,1), torch.ones(4))
__main__:1: UserWarning: self and other do not have the same shape, but are broadcastable, and have the same number of elements.
Changing behavior in a backwards incompatible manner to broadcasting rather than viewing as 1-dimensional.
\ No newline at end of file
diff --git a/docs/stable/notes/cpu_threading_torchscript_inference.html b/docs/stable/notes/cpu_threading_torchscript_inference.html
new file mode 100644
index 000000000000..7a7e7f7d0d2b
--- /dev/null
+++ b/docs/stable/notes/cpu_threading_torchscript_inference.html

CPU threading and TorchScript inference — PyTorch master documentation

    CPU threading and TorchScript inference

PyTorch allows using multiple CPU threads during TorchScript model inference. The following figure shows different levels of parallelism one would find in a typical application:

[Figure: cpu_threading_torchscript_inference.svg — levels of parallelism in a TorchScript inference application]

One or more inference threads execute a model’s forward pass on the given inputs. Each inference thread invokes a JIT interpreter that executes the ops of a model inline, one by one. A model can utilize a fork TorchScript primitive to launch an asynchronous task. Forking several operations at once results in a task that is executed in parallel. The fork operator returns a future object which can be used to synchronize on later, for example:

import torch

# The weights are passed in explicitly here so the snippet is self-contained
# (the original referenced self.w_y / self.w_z of an enclosing module).
w_y = torch.randn(100, 100)
w_z = torch.randn(100, 100)

@torch.jit.script
def compute_z(x, w_z):
    return torch.mm(x, w_z)

@torch.jit.script
def forward(x, w_y, w_z):
    # launch compute_z asynchronously:
    fut = torch.jit._fork(compute_z, x, w_z)
    # execute the next operation in parallel to compute_z:
    y = torch.mm(x, w_y)
    # wait for the result of compute_z:
    z = torch.jit._wait(fut)
    return y + z

PyTorch uses a single thread pool for the inter-op parallelism; this thread pool is shared by all inference tasks that are forked within the application process.

In addition to the inter-op parallelism, PyTorch can also utilize multiple threads within the ops (intra-op parallelism). This can be useful in many cases, including element-wise ops on large tensors, convolutions, GEMMs, embedding lookups and others.


    Build options

PyTorch uses an internal ATen library to implement ops. In addition to that, PyTorch can also be built with support of external libraries, such as MKL and MKL-DNN, to speed up computations on CPU.

ATen, MKL and MKL-DNN support intra-op parallelism and depend on the following parallelization libraries to implement it:

• OpenMP - a standard (and a library, usually shipped with a compiler), widely used in external libraries;

• TBB - a newer parallelization library optimized for task-based parallelism and concurrent environments.

OpenMP historically has been used by a large number of libraries. It is known for a relative ease of use and support for loop-based parallelism and other primitives. At the same time OpenMP is not known for a good interoperability with other threading libraries used by the application. In particular, OpenMP does not guarantee that a single per-process intra-op thread pool is going to be used in the application. On the contrary, two different inter-op threads will likely use different OpenMP thread pools for intra-op work. This might result in a large number of threads used by the application.

TBB is used to a lesser extent in external libraries, but, at the same time, is optimized for concurrent environments. PyTorch’s TBB backend guarantees that there’s a separate, single, per-process intra-op thread pool used by all of the ops running in the application.

Depending on the use case, one might find one or the other parallelization library a better choice in their application.

PyTorch allows selecting the parallelization backend used by ATen and other libraries at build time with the following build options:

Library   Build Option       Values               Notes
ATen      ATEN_THREADING     OMP (default), TBB
MKL       MKL_THREADING      (same)               To enable MKL use BLAS=MKL
MKL-DNN   MKLDNN_THREADING   (same)               To enable MKL-DNN use USE_MKLDNN=1

    It is strongly recommended not to mix OpenMP and TBB within one build.

Any of the TBB values above require the USE_TBB=1 build setting (default: OFF). A separate setting USE_OPENMP=1 (default: ON) is required for OpenMP parallelism.

    Runtime API


    The following API is used to control thread settings:

Inter-op parallelism:
    at::set_num_interop_threads, at::get_num_interop_threads (C++);
    set_num_interop_threads, get_num_interop_threads (Python, torch module).
    Notes: set* functions can only be called once, during startup, before any
    actual operators run; default number of threads: number of CPU cores.

Intra-op parallelism:
    at::set_num_threads, at::get_num_threads (C++);
    set_num_threads, get_num_threads (Python, torch module);
    environment variables OMP_NUM_THREADS and MKL_NUM_THREADS.

For the intra-op parallelism settings, at::set_num_threads and torch.set_num_threads always take precedence over environment variables, and the MKL_NUM_THREADS variable takes precedence over OMP_NUM_THREADS.

Note

The parallel_info utility prints information about thread settings and can be used for debugging. Similar output can also be obtained in Python with a torch.__config__.parallel_info() call.
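A small sketch of the Python side of these settings (thread counts chosen arbitrarily; the interop setter must run before any parallel work starts):

import torch

torch.set_num_interop_threads(2)  # inter-op pool; call once, at startup
torch.set_num_threads(4)          # intra-op pool

print(torch.get_num_interop_threads())   # 2
print(torch.get_num_threads())           # 4
print(torch.__config__.parallel_info())  # dump of the active thread settings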

\ No newline at end of file
diff --git a/docs/stable/notes/cuda.html b/docs/stable/notes/cuda.html
new file mode 100644
index 000000000000..ef5c2775ab17
--- /dev/null
+++ b/docs/stable/notes/cuda.html

CUDA semantics — PyTorch master documentation

    CUDA semantics

torch.cuda is used to set up and run CUDA operations. It keeps track of the currently selected GPU, and all CUDA tensors you allocate will by default be created on that device. The selected device can be changed with a torch.cuda.device context manager.

However, once a tensor is allocated, you can do operations on it irrespective of the selected device, and the results will always be placed on the same device as the tensor.

Cross-GPU operations are not allowed by default, with the exception of copy_() and other methods with copy-like functionality such as to() and cuda(). Unless you enable peer-to-peer memory access, any attempts to launch ops on tensors spread across different devices will raise an error.

Below you can find a small example showcasing this:

cuda = torch.device('cuda')     # Default CUDA device
cuda0 = torch.device('cuda:0')
cuda2 = torch.device('cuda:2')  # GPU 2 (these are 0-indexed)

x = torch.tensor([1., 2.], device=cuda0)
# x.device is device(type='cuda', index=0)
y = torch.tensor([1., 2.]).cuda()
# y.device is device(type='cuda', index=0)

with torch.cuda.device(1):
    # allocates a tensor on GPU 1
    a = torch.tensor([1., 2.], device=cuda)

    # transfers a tensor from CPU to GPU 1
    b = torch.tensor([1., 2.]).cuda()
    # a.device and b.device are device(type='cuda', index=1)

    # You can also use ``Tensor.to`` to transfer a tensor:
    b2 = torch.tensor([1., 2.]).to(device=cuda)
    # b.device and b2.device are device(type='cuda', index=1)

    c = a + b
    # c.device is device(type='cuda', index=1)

    z = x + y
    # z.device is device(type='cuda', index=0)

    # even within a context, you can specify the device
    # (or give a GPU index to the .cuda call)
    d = torch.randn(2, device=cuda2)
    e = torch.randn(2).to(cuda2)
    f = torch.randn(2).cuda(cuda2)
    # d.device, e.device, and f.device are all device(type='cuda', index=2)

    Asynchronous execution

By default, GPU operations are asynchronous. When you call a function that uses the GPU, the operations are enqueued to the particular device, but not necessarily executed until later. This allows us to execute more computations in parallel, including operations on CPU or other GPUs.

In general, the effect of asynchronous computation is invisible to the caller, because (1) each device executes operations in the order they are queued, and (2) PyTorch automatically performs necessary synchronization when copying data between CPU and GPU or between two GPUs. Hence, computation will proceed as if every operation was executed synchronously.

You can force synchronous computation by setting environment variable CUDA_LAUNCH_BLOCKING=1. This can be handy when an error occurs on the GPU. (With asynchronous execution, such an error isn’t reported until after the operation is actually executed, so the stack trace does not show where it was requested.)

As an exception, several functions such as to() and copy_() admit an explicit non_blocking argument, which lets the caller bypass synchronization when it is unnecessary. Another exception is CUDA streams, explained below.


    CUDA streams

A CUDA stream is a linear sequence of execution that belongs to a specific device. You normally do not need to create one explicitly: by default, each device uses its own “default” stream.

Operations inside each stream are serialized in the order they are created, but operations from different streams can execute concurrently in any relative order, unless explicit synchronization functions (such as synchronize() or wait_stream()) are used. For example, the following code is incorrect:

cuda = torch.device('cuda')
s = torch.cuda.Stream()  # Create a new stream.
A = torch.empty((100, 100), device=cuda).normal_(0.0, 1.0)
with torch.cuda.stream(s):
    # sum() may start execution before normal_() finishes!
    B = torch.sum(A)

When the “current stream” is the default stream, PyTorch automatically performs necessary synchronization when data is moved around, as explained above. However, when using non-default streams, it is the user’s responsibility to ensure proper synchronization.
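For illustration, one way to repair the snippet above is to make the side stream wait for the default stream before launching work, and vice versa before consuming the result (a sketch using the wait_stream() primitive mentioned above):

cuda = torch.device('cuda')
s = torch.cuda.Stream()
A = torch.empty((100, 100), device=cuda).normal_(0.0, 1.0)
s.wait_stream(torch.cuda.current_stream())  # s waits until normal_() is done
with torch.cuda.stream(s):
    B = torch.sum(A)
torch.cuda.current_stream().wait_stream(s)  # default stream waits for sum()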


    Memory management

PyTorch uses a caching memory allocator to speed up memory allocations. This allows fast memory deallocation without device synchronizations. However, the unused memory managed by the allocator will still show as if used in nvidia-smi. You can use memory_allocated() and max_memory_allocated() to monitor memory occupied by tensors, and use memory_cached() and max_memory_cached() to monitor memory managed by the caching allocator. Calling empty_cache() releases all unused cached memory from PyTorch so that it can be used by other GPU applications. However, GPU memory occupied by tensors will not be freed, so this cannot increase the amount of GPU memory available for PyTorch.


    cuFFT plan cache

For each CUDA device, an LRU cache of cuFFT plans is used to speed up repeatedly running FFT methods (e.g., torch.fft()) on CUDA tensors of same geometry with same configuration. Because some cuFFT plans may allocate GPU memory, these caches have a maximum capacity.

You may control and query the properties of the cache of the current device with the following APIs:

• torch.backends.cuda.cufft_plan_cache.max_size gives the capacity of the cache (default is 4096 on CUDA 10 and newer, and 1023 on older CUDA versions). Setting this value directly modifies the capacity.

• torch.backends.cuda.cufft_plan_cache.size gives the number of plans currently residing in the cache.

• torch.backends.cuda.cufft_plan_cache.clear() clears the cache.

To control and query plan caches of a non-default device, you can index the torch.backends.cuda.cufft_plan_cache object with either a torch.device object or a device index, and access one of the above attributes. E.g., to set the capacity of the cache for device 1, one can write torch.backends.cuda.cufft_plan_cache[1].max_size = 10.


    Best practices


    Device-agnostic code

Due to the structure of PyTorch, you may need to explicitly write device-agnostic (CPU or GPU) code; an example may be creating a new tensor as the initial hidden state of a recurrent neural network.

The first step is to determine whether the GPU should be used or not. A common pattern is to use Python’s argparse module to read in user arguments, and have a flag that can be used to disable CUDA, in combination with is_available(). In the following, args.device results in a torch.device object that can be used to move tensors to CPU or CUDA.

import argparse
import torch

parser = argparse.ArgumentParser(description='PyTorch Example')
parser.add_argument('--disable-cuda', action='store_true',
                    help='Disable CUDA')
args = parser.parse_args()
args.device = None
if not args.disable_cuda and torch.cuda.is_available():
    args.device = torch.device('cuda')
else:
    args.device = torch.device('cpu')

Now that we have args.device, we can use it to create a Tensor on the desired device.

x = torch.empty((8, 42), device=args.device)
net = Network().to(device=args.device)

This can be used in a number of cases to produce device-agnostic code. Below is an example when using a dataloader:

cuda0 = torch.device('cuda:0')  # CUDA GPU 0
for i, x in enumerate(train_loader):
    x = x.to(cuda0)

When working with multiple GPUs on a system, you can use the CUDA_VISIBLE_DEVICES environment flag to manage which GPUs are available to PyTorch. As mentioned above, to manually control which GPU a tensor is created on, the best practice is to use a torch.cuda.device context manager.

print("Outside device is 0")  # On device 0 (default in most scenarios)
with torch.cuda.device(1):
    print("Inside device is 1")  # On device 1
print("Outside device is still 0")  # On device 0

If you have a tensor and would like to create a new tensor of the same type on the same device, then you can use a torch.Tensor.new_* method (see torch.Tensor). Whilst the previously mentioned torch.* factory functions (Creation Ops) depend on the current GPU context and the attributes arguments you pass in, torch.Tensor.new_* methods preserve the device and other attributes of the tensor.

This is the recommended practice when creating modules in which new tensors need to be created internally during the forward pass.

cuda = torch.device('cuda')
x_cpu = torch.empty(2)
x_gpu = torch.empty(2, device=cuda)
x_cpu_long = torch.empty(2, dtype=torch.int64)

y_cpu = x_cpu.new_full([3, 2], fill_value=0.3)
print(y_cpu)

    tensor([[ 0.3000,  0.3000],
            [ 0.3000,  0.3000],
            [ 0.3000,  0.3000]])

y_gpu = x_gpu.new_full([3, 2], fill_value=-5)
print(y_gpu)

    tensor([[-5.0000, -5.0000],
            [-5.0000, -5.0000],
            [-5.0000, -5.0000]], device='cuda:0')

y_cpu_long = x_cpu_long.new_tensor([[1, 2, 3]])
print(y_cpu_long)

    tensor([[ 1,  2,  3]])

If you want to create a tensor of the same type and size of another tensor, and fill it with either ones or zeros, ones_like() or zeros_like() are provided as convenient helper functions (which also preserve torch.device and torch.dtype of a Tensor).

x_cpu = torch.empty(2, 3)
x_gpu = torch.empty(2, 3, device=cuda)

y_cpu = torch.ones_like(x_cpu)   # stays on the CPU
y_gpu = torch.zeros_like(x_gpu)  # stays on the GPU

    Use pinned memory buffers

Host to GPU copies are much faster when they originate from pinned (page-locked) memory. CPU tensors and storages expose a pin_memory() method that returns a copy of the object, with data put in a pinned region.

Also, once you pin a tensor or storage, you can use asynchronous GPU copies. Just pass an additional non_blocking=True argument to a to() or a cuda() call. This can be used to overlap data transfers with computation.

You can make the DataLoader return batches placed in pinned memory by passing pin_memory=True to its constructor.
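A minimal sketch combining both points (dataset is assumed to exist, and each batch is assumed to be a single tensor):

loader = torch.utils.data.DataLoader(dataset, batch_size=32, pin_memory=True)
cuda0 = torch.device('cuda:0')
for batch in loader:
    # asynchronous copy from pinned host memory to the GPU
    batch = batch.to(cuda0, non_blocking=True)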


    Use nn.DataParallel instead of multiprocessing

Most use cases involving batched inputs and multiple GPUs should default to using DataParallel to utilize more than one GPU. Even with the GIL, a single Python process can saturate multiple GPUs.

As of version 0.1.9, large numbers of GPUs (8+) might not be fully utilized. However, this is a known issue that is under active development. As always, test your use case.

There are significant caveats to using CUDA models with multiprocessing; unless care is taken to meet the data handling requirements exactly, it is likely that your program will have incorrect or undefined behavior.

\ No newline at end of file
diff --git a/docs/stable/notes/extending.html b/docs/stable/notes/extending.html
new file mode 100644
index 000000000000..65787784f8f3
--- /dev/null
+++ b/docs/stable/notes/extending.html

Extending PyTorch — PyTorch master documentation

    Extending PyTorch

In this note we’ll cover ways of extending torch.nn, torch.autograd, and writing custom C extensions utilizing our C libraries.


    Extending torch.autograd

Adding operations to autograd requires implementing a new Function subclass for each operation. Recall that Function s are what autograd uses to compute the results and gradients, and encode the operation history. Every new function requires you to implement 2 methods:

• forward() - the code that performs the operation. It can take as many arguments as you want, with some of them being optional, if you specify the default values. All kinds of Python objects are accepted here. Tensor arguments that track history (i.e., with requires_grad=True) will be converted to ones that don’t track history before the call, and their use will be registered in the graph. Note that this logic won’t traverse lists/dicts/any other data structures and will only consider Tensor s that are direct arguments to the call. You can return either a single Tensor output, or a tuple of Tensor s if there are multiple outputs. Also, please refer to the docs of Function to find descriptions of useful methods that can be called only from forward().

• backward() - gradient formula. It will be given as many Tensor arguments as there were outputs, with each of them representing gradient w.r.t. that output. It should return as many Tensor s as there were inputs, with each of them containing the gradient w.r.t. its corresponding input. If your inputs didn’t require gradient (needs_input_grad is a tuple of booleans indicating whether each input needs gradient computation), or were non-Tensor objects, you can return None. Also, if you have optional arguments to forward() you can return more gradients than there were inputs, as long as they’re all None.

Below you can find code for a Linear function from torch.nn, with additional comments:

# Inherit from Function
class LinearFunction(Function):

    # Note that both forward and backward are @staticmethods
    @staticmethod
    # bias is an optional argument
    def forward(ctx, input, weight, bias=None):
        ctx.save_for_backward(input, weight, bias)
        output = input.mm(weight.t())
        if bias is not None:
            output += bias.unsqueeze(0).expand_as(output)
        return output

    # This function has only a single output, so it gets only one gradient
    @staticmethod
    def backward(ctx, grad_output):
        # This is a pattern that is very convenient - at the top of backward
        # unpack saved_tensors and initialize all gradients w.r.t. inputs to
        # None. Thanks to the fact that additional trailing Nones are
        # ignored, the return statement is simple even when the function has
        # optional inputs.
        input, weight, bias = ctx.saved_tensors
        grad_input = grad_weight = grad_bias = None

        # These needs_input_grad checks are optional and there only to
        # improve efficiency. If you want to make your code simpler, you can
        # skip them. Returning gradients for inputs that don't require it is
        # not an error.
        if ctx.needs_input_grad[0]:
            grad_input = grad_output.mm(weight)
        if ctx.needs_input_grad[1]:
            grad_weight = grad_output.t().mm(input)
        if bias is not None and ctx.needs_input_grad[2]:
            grad_bias = grad_output.sum(0).squeeze(0)

        return grad_input, grad_weight, grad_bias

Now, to make it easier to use these custom ops, we recommend aliasing their apply method:

linear = LinearFunction.apply

Here, we give an additional example of a function that is parametrized by non-Tensor arguments:

class MulConstant(Function):
    @staticmethod
    def forward(ctx, tensor, constant):
        # ctx is a context object that can be used to stash information
        # for backward computation
        ctx.constant = constant
        return tensor * constant

    @staticmethod
    def backward(ctx, grad_output):
        # We return as many input gradients as there were arguments.
        # Gradients of non-Tensor arguments to forward must be None.
        return grad_output * ctx.constant, None

    Note

Inputs to backward, i.e., grad_output, can also be Tensors that track history. So if backward is implemented with differentiable operations (e.g., invocation of another custom function), higher order derivatives will work.

You probably want to check if the backward method you implemented actually computes the derivatives of your function. It is possible by comparing with numerical approximations using small finite differences:

from torch.autograd import gradcheck

# gradcheck takes a tuple of tensors as input, checks if the gradients
# evaluated with these tensors are close enough to numerical
# approximations, and returns True if they all verify this condition.
input = (torch.randn(20,20,dtype=torch.double,requires_grad=True), torch.randn(30,20,dtype=torch.double,requires_grad=True))
test = gradcheck(linear, input, eps=1e-6, atol=1e-4)
print(test)

    See Numerical gradient checking for more details on finite-difference gradient comparisons.


    Extending torch.nn

nn exports two kinds of interfaces - modules and their functional versions. You can extend it in both ways, but we recommend using modules for all kinds of layers that hold any parameters or buffers, and recommend using a functional form for parameter-less operations like activation functions, pooling, etc.

Adding a functional version of an operation is already fully covered in the section above.


    Adding a Module

Since nn heavily utilizes autograd, adding a new Module requires implementing a Function that performs the operation and can compute the gradient. From now on let’s assume that we want to implement a Linear module and we have the function implemented as in the listing above. There’s very little code required to add this. Now, there are two functions that need to be implemented:

• __init__ (optional) - takes in arguments such as kernel sizes, numbers of features, etc. and initializes parameters and buffers.

• forward() - instantiates a Function and uses it to perform the operation. It’s very similar to a functional wrapper shown above.

    This is how a Linear module can be implemented:

class Linear(nn.Module):
    def __init__(self, input_features, output_features, bias=True):
        super(Linear, self).__init__()
        self.input_features = input_features
        self.output_features = output_features

        # nn.Parameter is a special kind of Tensor, that will get
        # automatically registered as Module's parameter once it's assigned
        # as an attribute. Parameters and buffers need to be registered, or
        # they won't appear in .parameters() (doesn't apply to buffers), and
        # won't be converted when e.g. .cuda() is called. You can use
        # .register_buffer() to register buffers.
        # nn.Parameters require gradients by default.
        self.weight = nn.Parameter(torch.Tensor(output_features, input_features))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(output_features))
        else:
            # You should always register all possible parameters, but the
            # optional ones can be None if you want.
            self.register_parameter('bias', None)

        # Not a very smart way to initialize weights
        self.weight.data.uniform_(-0.1, 0.1)
        if self.bias is not None:
            self.bias.data.uniform_(-0.1, 0.1)

    def forward(self, input):
        # See the autograd section for explanation of what happens here.
        return LinearFunction.apply(input, self.weight, self.bias)

    def extra_repr(self):
        # (Optional) Set the extra information about this module. You can
        # test it by printing an object of this class.
        return 'in_features={}, out_features={}, bias={}'.format(
            self.input_features, self.output_features, self.bias is not None
        )
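As a quick sanity check of the module above (a sketch; shapes chosen arbitrarily):

layer = Linear(5, 3)
out = layer(torch.randn(2, 5))  # goes through LinearFunction.apply
print(out.shape)                # torch.Size([2, 3])
print(layer)                    # Linear(in_features=5, out_features=3, bias=True)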

    Writing custom C++ extensions

See this PyTorch tutorial for a detailed explanation and examples.

Documentation is available at torch.utils.cpp_extension.


    Writing custom C extensions

Example available at this GitHub repository.

\ No newline at end of file
diff --git a/docs/stable/notes/faq.html b/docs/stable/notes/faq.html
new file mode 100644
index 000000000000..c1e29eaefe6b
--- /dev/null
+++ b/docs/stable/notes/faq.html

Frequently Asked Questions — PyTorch master documentation

    Frequently Asked Questions


    My model reports “cuda runtime error(2): out of memory”

    +

    As the error message suggests, you have run out of memory on your +GPU. Since we often deal with large amounts of data in PyTorch, +small mistakes can rapidly cause your program to use up all of your +GPU; fortunately, the fixes in these cases are often simple. +Here are a few common things to check:

    +

Don’t accumulate history across your training loop. By default, computations involving variables that require gradients will keep history. This means that you should avoid using such variables in computations which will live beyond your training loops, e.g., when tracking statistics. Instead, you should detach the variable or access its underlying data.

    +

Sometimes, it can be non-obvious when differentiable variables can occur. Consider the following training loop (abridged from source):

    +
    total_loss = 0
    +for i in range(10000):
    +    optimizer.zero_grad()
    +    output = model(input)
    +    loss = criterion(output)
    +    loss.backward()
    +    optimizer.step()
    +    total_loss += loss
    +
    +
    +

Here, total_loss is accumulating history across your training loop, since loss is a differentiable variable with autograd history. You can fix this by writing total_loss += float(loss) instead.

    +

Other instances of this problem: 1.

    +

Don’t hold onto tensors and variables you don’t need. If you assign a Tensor or Variable to a local, Python will not deallocate until the local goes out of scope. You can free this reference by using del x. Similarly, if you assign a Tensor or Variable to a member variable of an object, it will not deallocate until the object goes out of scope. You will get the best memory usage if you don’t hold onto temporaries you don’t need.

    +

The scopes of locals can be larger than you expect. For example:

    +
    for i in range(5):
    +    intermediate = f(input[i])
    +    result += g(intermediate)
    +output = h(result)
    +return output
    +
    +
    +

Here, intermediate remains live even while h is executing, because its scope extends past the end of the loop. To free it earlier, you should del intermediate when you are done with it.
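For concreteness, a minimal sketch of that fix, assuming f, g, h and input are defined as in the fragment above:

result = 0
for i in range(5):
    intermediate = f(input[i])
    result += g(intermediate)
    del intermediate  # drop the reference before leaving the loop
output = h(result)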

    +

Don’t run RNNs on sequences that are too large. The amount of memory required to backpropagate through an RNN scales linearly with the length of the RNN input; thus, you will run out of memory if you try to feed an RNN a sequence that is too long.

    +

The technical term for this phenomenon is backpropagation through time, and there are plenty of references for how to implement truncated BPTT, including in the word language model example; truncation is handled by the repackage function as described in this forum post.

    +

Don’t use linear layers that are too large. A linear layer nn.Linear(m, n) uses \(O(nm)\) memory: that is to say, the memory requirements of the weights scale quadratically with the number of features. It is very easy to blow through your memory this way (and remember that you will need at least twice the size of the weights, since you also need to store the gradients).
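To put numbers on that: nn.Linear(10000, 10000) holds \(10^4 \times 10^4 = 10^8\) float32 weights, which is about 400 MB, and with the gradients stored alongside you should budget at least 800 MB for this single layer.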

    +
    +
    +

    My GPU memory isn’t freed properly

    +

PyTorch uses a caching memory allocator to speed up memory allocations. As a result, the values shown in nvidia-smi usually don’t reflect the true memory usage. See Memory management for more details about GPU memory management.

    +

If your GPU memory isn’t freed even after Python quits, it is very likely that some Python subprocesses are still alive. You may find them via ps -elf | grep python and manually kill them with kill -9 [pid].

    +
    +
    +

    My data loader workers return identical random numbers

    +

You are likely using other libraries to generate random numbers in the dataset. For example, NumPy’s RNG is duplicated when worker subprocesses are started via fork. See torch.utils.data.DataLoader’s documentation for how to properly set up random seeds in workers with its worker_init_fn option.
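A minimal sketch of that pattern (the dataset variable here is a stand-in for your own Dataset):

import numpy as np
import torch
from torch.utils.data import DataLoader

def worker_init_fn(worker_id):
    # torch.initial_seed() already differs per worker; fold it into NumPy's RNG.
    np.random.seed(torch.initial_seed() % 2 ** 32)

loader = DataLoader(dataset, num_workers=4, worker_init_fn=worker_init_fn)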

    +
    +
    +

    My recurrent network doesn’t work with data parallelism

    +

There is a subtlety in using the pack sequence -> recurrent network -> unpack sequence pattern in a Module with DataParallel or data_parallel(). The input to forward() on each device will only be part of the entire input. Because the unpack operation torch.nn.utils.rnn.pad_packed_sequence() by default only pads up to the longest input it sees, i.e., the longest on that particular device, size mismatches will happen when results are gathered together. Therefore, you can instead take advantage of the total_length argument of pad_packed_sequence() to make sure that the forward() calls return sequences of the same length. For example, you can write:

    +
    from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
    +
    +class MyModule(nn.Module):
    +    # ... __init__, other methods, etc.
    +
    +    # padded_input is of shape [B x T x *] (batch_first mode) and contains
    +    # the sequences sorted by lengths
    +    #   B is the batch size
    +    #   T is max sequence length
    +    def forward(self, padded_input, input_lengths):
    +        total_length = padded_input.size(1)  # get the max sequence length
    +        packed_input = pack_padded_sequence(padded_input, input_lengths,
    +                                            batch_first=True)
    +        packed_output, _ = self.my_lstm(packed_input)
    +        output, _ = pad_packed_sequence(packed_output, batch_first=True,
    +                                        total_length=total_length)
    +        return output
    +
    +
    +m = MyModule().cuda()
    +dp_m = nn.DataParallel(m)
    +
    +
    +

Additionally, extra care needs to be taken when the batch dimension is dim 1 (i.e., batch_first=False) with data parallelism. In this case, the first argument of pack_padded_sequence, padded_input, will be of shape [T x B x *] and should be scattered along dim 1, but the second argument input_lengths will be of shape [B] and should be scattered along dim 0. Extra code to manipulate the tensor shapes will be needed.

    +
    +
diff --git a/docs/stable/notes/large_scale_deployments.html b/docs/stable/notes/large_scale_deployments.html
new file mode 100644
index 000000000000..ccd407fc95be
--- /dev/null
+++ b/docs/stable/notes/large_scale_deployments.html

    Features for large-scale deployments


This note talks about several extension points and tricks that might be useful when running PyTorch within a larger system or operating multiple systems using PyTorch in a larger organization.

    +

It doesn’t cover deploying models to production; for that, check torch.jit or one of the corresponding tutorials.

    +

The note assumes that you either build PyTorch from source in your organization or have the ability to statically link additional code to be loaded when PyTorch is used. Therefore, many of the hooks are exposed as C++ APIs that can be triggered once in a centralized place, e.g. in static initialization code.

    +
    +

    Fleet-wide operator profiling

    +

PyTorch comes with torch.autograd.profiler, capable of measuring the time taken by individual operators on demand. One can use the same mechanism to do “always ON” measurements for any process running PyTorch. It might be useful for gathering information about PyTorch workloads running in a given process or across the entire set of machines.

    +

New callbacks for any operator invocation can be added with torch::autograd::profiler::pushCallback. Hooks will be called with a torch::autograd::profiler::RecordFunction struct that describes the invocation context (e.g. name). If enabled, RecordFunction::inputs() contains the arguments of the function represented as the torch::IValue variant type. Note that input logging is relatively expensive and thus has to be enabled explicitly.

    +

Invoking callbacks adds some overhead, so it’s usually useful to just randomly sample operator invocations. This can be enabled on a per-callback basis with a global sampling rate specified by torch::autograd::profiler::setSamplingProbability.

    +

Note that pushCallback and setSamplingProbability are not thread-safe and can be called only when no PyTorch operator is running. Usually, it’s a good idea to call them once during initialization.

    +

    Here’s an example:

    +
    // Called somewhere in the program beginning
    +void init() {
    +    // Sample one in a hundred operator runs randomly
+    torch::autograd::profiler::setSamplingProbability(0.01);
+    torch::autograd::profiler::pushCallback(
    +        &onFunctionEnter,
    +        &onFunctionExit,
    +        /* needs_inputs */ true,
    +        /* sampled */ true
    +    );
    +}
    +
    +void onFunctionEnter(const RecordFunction& fn) {
    +    std::cerr << "Before function " << fn.name()
    +              << " with " << fn.inputs().size() << " inputs" << std::endl;
    +}
    +
    +void onFunctionExit(const RecordFunction& fn) {
    +    std::cerr << "After function " << fn.name();
    +}
    +
    +
    +
    +
    +

    API usage logging

    +

When running in a broader ecosystem, for example in a managed job scheduler, it’s often useful to track which binaries invoke particular PyTorch APIs. There exists simple instrumentation injected at several important API points that triggers a given callback. Because PyTorch is usually invoked in one-off Python scripts, the callback fires only once per process for each of the APIs.

    +

c10::SetAPIUsageLogger can be used to register an API usage instrumentation handler. The passed argument is an “api key” identifying the used point, for example python.import for PyTorch extension import, or torch.script.compile if TorchScript compilation was triggered.

    +
    SetAPIUsageLogger([](const std::string& event_name) {
    +    std::cerr << "API was used: " << event_name << std::endl;
    +});
    +
    +
    +

Note for developers: new API trigger points can be added in code with C10_LOG_API_USAGE_ONCE("my_api") in C++ or torch._C._log_api_usage_once("my.api") in Python.
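For instance, a small sketch of triggering the Python-side instrumentation named above (the key string is made up for illustration):

import torch

# Fires the registered usage logger once per process for this key.
torch._C._log_api_usage_once("my_org.my_tool.run")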

    +
    +
    +

    Attaching metadata to saved TorchScript models

    +

TorchScript modules can be saved as an archive file that bundles serialized parameters and module code as TorchScript (see torch.jit.save()). It’s often convenient to bundle additional information together with the model, for example, a description of the model producer or auxiliary artifacts.

    +

This can be achieved by passing the _extra_files argument to torch.jit.save() and torch::jit::load to store and retrieve arbitrary binary blobs during the saving process. Since TorchScript files are regular ZIP archives, extra information gets stored as regular files inside the archive’s extra/ directory.
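A hedged Python-side sketch of the round trip (the file name and contents are illustrative; some releases expect a torch._C.ExtraFilesMap in place of a plain dict on load):

import torch

class M(torch.nn.Module):
    def forward(self, x):
        return x + 1

m = torch.jit.script(M())
# Store an extra metadata file inside the archive's extra/ directory.
torch.jit.save(m, 'model.pt', _extra_files={'producer_info.json': '{"user": "example"}'})

# On load, pre-populate the keys you want back; the values are filled in.
files = {'producer_info.json': ''}
loaded = torch.jit.load('model.pt', _extra_files=files)
print(files['producer_info.json'])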

    +

There’s also a global hook allowing you to attach extra files to any TorchScript archive produced in the current process. It might be useful to tag models with producer metadata, akin to the JPEG metadata produced by digital cameras. Example usage might look like:

    +
    SetExportModuleExtraFilesHook([](const script::Module&) {
    +    script::ExtraFilesMap files;
    +    files["producer_info.json"] = "{\"user\": \"" + getenv("USER") + "\"}";
    +    return files;
    +});
    +
    +
    +
    +
    +

    Build environment considerations

    +

TorchScript’s compilation needs access to the original Python files, since it uses Python’s inspect.getsource call. In certain production environments this might require explicitly deploying .py files along with the precompiled .pyc ones.

    +
    +
    +

    Common extension points

    +

PyTorch APIs are generally loosely coupled, and it’s easy to replace a component with a specialized version. Common extension points include:

diff --git a/docs/stable/notes/multiprocessing.html b/docs/stable/notes/multiprocessing.html
new file mode 100644
index 000000000000..e63a3155074b
--- /dev/null
+++ b/docs/stable/notes/multiprocessing.html

    Multiprocessing best practices

    +

torch.multiprocessing is a drop-in replacement for Python’s multiprocessing module. It supports the exact same operations, but extends it so that all tensors sent through a multiprocessing.Queue have their data moved into shared memory, and only a handle is sent to the other process.

    +
    +

    Note

    +

When a Tensor is sent to another process, the Tensor data is shared. If torch.Tensor.grad is not None, it is also shared. After a Tensor without a torch.Tensor.grad field is sent to the other process, it creates a standard process-specific .grad Tensor that is not automatically shared across all processes, unlike how the Tensor’s data has been shared.

    +
    +

This makes it possible to implement various training methods, like Hogwild, A3C, or any others that require asynchronous operation.

    +
    +

    CUDA in multiprocessing

    +

The CUDA runtime does not support the fork start method. However, multiprocessing in Python 2 can only create subprocesses using fork. So Python 3 and either the spawn or forkserver start method are required to use CUDA in subprocesses.

    +
    +

    Note

    +

The start method can be set via either creating a context with multiprocessing.get_context(...) or directly using multiprocessing.set_start_method(...).
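For example, a minimal sketch of selecting the spawn start method before any workers are created:

import torch.multiprocessing as mp

if __name__ == '__main__':
    mp.set_start_method('spawn')  # must run once, before creating any processes
    # ... create mp.Process / mp.Queue objects here ...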

    +
    +

Unlike CPU tensors, the sending process is required to keep the original tensor as long as the receiving process retains a copy of the tensor. This is implemented under the hood but requires users to follow best practices for the program to run correctly. For example, the sending process must stay alive as long as the consumer process has references to the tensor, and the refcounting cannot save you if the consumer process exits abnormally via a fatal signal. See this section.

    +

    See also: Use nn.DataParallel instead of multiprocessing

    +
    +
    +

    Best practices and tips

    +
    +

    Avoiding and fighting deadlocks

    +

There are a lot of things that can go wrong when a new process is spawned, with the most common cause of deadlocks being background threads. If there’s any thread that holds a lock or imports a module, and fork is called, it’s very likely that the subprocess will be in a corrupted state and will deadlock or fail in a different way. Note that even if you don’t do this, Python’s built-in libraries do; no need to look further than multiprocessing. multiprocessing.Queue is actually a very complex class that spawns multiple threads used to serialize, send and receive objects, and they can cause the aforementioned problems too. If you find yourself in such a situation, try using multiprocessing.queues.SimpleQueue, which doesn’t use any additional threads.

    +

We’re trying our best to make it easy for you and ensure these deadlocks don’t happen, but some things are out of our control. If you have any issues you can’t cope with for a while, try reaching out on the forums, and we’ll see if it’s an issue we can fix.

    +
    +
    +

    Reuse buffers passed through a Queue

    +

Remember that each time you put a Tensor into a multiprocessing.Queue, it has to be moved into shared memory. If it’s already shared, it is a no-op; otherwise it will incur an additional memory copy that can slow down the whole process. Even if you have a pool of processes sending data to a single one, make it send the buffers back; this is nearly free and will let you avoid a copy when sending the next batch.

    +
    +
    +

    Asynchronous multiprocess training (e.g. Hogwild)

    +

Using torch.multiprocessing, it is possible to train a model asynchronously, with parameters either shared all the time or periodically synchronized. In the first case, we recommend sending over the whole model object, while in the latter, we advise sending only the state_dict().

    +

We recommend using multiprocessing.Queue for passing all kinds of PyTorch objects between processes. It is possible to e.g. inherit tensors and storages already in shared memory when using the fork start method; however, it is very bug-prone and should be used with care, and only by advanced users. Queues, even though they’re sometimes a less elegant solution, will work properly in all cases.

    +
    +

    Warning

    +

You should be careful about having global statements that are not guarded with an if __name__ == '__main__'. If a start method other than fork is used, they will be executed in all subprocesses.

    +
    +
    +

    Hogwild

    +

A concrete Hogwild implementation can be found in the examples repository, but to showcase the overall structure of the code, there’s also a minimal example below:

    +
    import torch.multiprocessing as mp
    +from model import MyModel
    +
    +def train(model):
    +    # Construct data_loader, optimizer, etc.
    +    for data, labels in data_loader:
    +        optimizer.zero_grad()
    +        loss_fn(model(data), labels).backward()
    +        optimizer.step()  # This will update the shared parameters
    +
    +if __name__ == '__main__':
    +    num_processes = 4
    +    model = MyModel()
    +    # NOTE: this is required for the ``fork`` method to work
    +    model.share_memory()
    +    processes = []
    +    for rank in range(num_processes):
    +        p = mp.Process(target=train, args=(model,))
    +        p.start()
    +        processes.append(p)
    +    for p in processes:
    +        p.join()
    +
    +
    +
    +
    +
    +
diff --git a/docs/stable/notes/randomness.html b/docs/stable/notes/randomness.html
new file mode 100644
index 000000000000..7966cd48ccec
--- /dev/null
+++ b/docs/stable/notes/randomness.html
    +

    Reproducibility

    +

Completely reproducible results are not guaranteed across PyTorch releases, individual commits or different platforms. Furthermore, results need not be reproducible between CPU and GPU executions, even when using identical seeds.

    +

However, in order to make computations deterministic on your specific problem on one specific platform and PyTorch release, there are a couple of steps to take.

    +

There are two pseudorandom number generators involved in PyTorch, which you will need to seed manually to make runs reproducible. Furthermore, you should ensure that all other libraries your code relies on and which use random numbers also use a fixed seed.
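Collecting the per-library steps from the sections below into one place, a one-stop sketch:

import random
import numpy as np
import torch

random.seed(0)        # Python's built-in RNG
np.random.seed(0)     # NumPy's RNG
torch.manual_seed(0)  # PyTorch RNG for all devices (CPU and CUDA)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False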

    +
    +

    PyTorch

    +

You can use torch.manual_seed() to seed the RNG for all devices (both CPU and CUDA):

    +
    import torch
    +torch.manual_seed(0)
    +
    +
    +

There are some PyTorch functions that use CUDA functions that can be a source of non-determinism. One class of such CUDA functions are atomic operations, in particular atomicAdd, where the order of parallel additions to the same value is undetermined and, for floating-point variables, a source of variance in the result. PyTorch functions that use atomicAdd in the forward include torch.Tensor.index_add_(), torch.Tensor.scatter_add_(), and torch.bincount().

    +

A number of operations have backwards passes that use atomicAdd, in particular torch.nn.functional.embedding_bag(), torch.nn.functional.ctc_loss(), and many forms of pooling, padding, and sampling. There is currently no simple way of avoiding non-determinism in these functions.

    +
    +
    +

    CuDNN

    +

When running on the CuDNN backend, two further options must be set:

    +
    torch.backends.cudnn.deterministic = True
    +torch.backends.cudnn.benchmark = False
    +
    +
    +
    +

    Warning

    +

Deterministic mode can have a performance impact, depending on your model: the processing speed (i.e. processed batch items per second) can be lower than when the model is run non-deterministically.

    +
    +
    +
    +

    Numpy

    +

If you or any of the libraries you are using rely on NumPy, you should seed the NumPy RNG as well. This can be done with:

    +
    import numpy as np
    +np.random.seed(0)
    +
    +
    +
    +
diff --git a/docs/stable/notes/serialization.html b/docs/stable/notes/serialization.html
new file mode 100644
index 000000000000..d6a6d5956e4f
--- /dev/null
+++ b/docs/stable/notes/serialization.html

    Serialization semantics

    +
    +

    Best practices
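The recommended approach is to save and restore only the model parameters (the state_dict) rather than the whole module. A sketch, with TheModelClass, args, kwargs and PATH as placeholders:

import torch

# Save only the parameters (recommended).
torch.save(the_model.state_dict(), PATH)

# Later: reconstruct the model and load the parameters.
the_model = TheModelClass(*args, **kwargs)
the_model.load_state_dict(torch.load(PATH))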

diff --git a/docs/stable/notes/windows.html b/docs/stable/notes/windows.html
new file mode 100644
index 000000000000..cae346d5e8d7
--- /dev/null
+++ b/docs/stable/notes/windows.html

    Windows FAQ

    +
    +

    Building from source

    +
    +

    Include optional components

    +

There are two supported components for Windows PyTorch: MKL and MAGMA. Here are the steps to build with them.

    +
    REM Make sure you have 7z and curl installed.
    +
    +REM Download MKL files
    +curl https://s3.amazonaws.com/ossci-windows/mkl_2018.2.185.7z -k -O
    +7z x -aoa mkl_2018.2.185.7z -omkl
    +
    +REM Download MAGMA files
    +REM cuda100/cuda101 is also available for `CUDA_PREFIX`. There are also 2.4.0 binaries for cuda80/cuda92.
    +REM The configuration could be `debug` or `release` for 2.5.0. Only `release` is available for 2.4.0.
    +set CUDA_PREFIX=cuda90
    +set CONFIG=release
    +curl -k https://s3.amazonaws.com/ossci-windows/magma_2.5.0_%CUDA_PREFIX%_%CONFIG%.7z -o magma.7z
    +7z x -aoa magma.7z -omagma
    +
    +REM Setting essential environment variables
    +set "CMAKE_INCLUDE_PATH=%cd%\\mkl\\include"
    +set "LIB=%cd%\\mkl\\lib;%LIB%"
    +set "MAGMA_HOME=%cd%\\magma"
    +
    +
    +
    +
    +

    Speeding CUDA build for Windows

    +

Visual Studio doesn’t currently support parallel custom tasks. As an alternative, we can use Ninja to parallelize CUDA build tasks. It can be enabled by typing only a few lines of code.

    +
    REM Let's install ninja first.
    +pip install ninja
    +
    +REM Set it as the cmake generator
    +set CMAKE_GENERATOR=Ninja
    +
    +
    +
    +
    +

    One key install script

    +

You can take a look at this set of scripts; it will lead the way for you.

    +
    +
    +
    +

    Extension

    +
    +

    CFFI Extension

    +

Support for the CFFI Extension is very experimental. There are generally two steps to enable it under Windows.

    +

First, specify additional libraries in the Extension object to make it build on Windows.

    +
    ffi = create_extension(
    +    '_ext.my_lib',
    +    headers=headers,
    +    sources=sources,
    +    define_macros=defines,
    +    relative_to=__file__,
    +    with_cuda=with_cuda,
    +    extra_compile_args=["-std=c99"],
+    libraries=['ATen', '_C']  # Append CUDA libraries when necessary, like cudart
    +)
    +
    +
    +

Second, here is a workaround for the “unresolved external symbol state caused by extern THCState *state;” error.

    +

    Change the source code from C to C++. An example is listed below.

    +
    #include <THC/THC.h>
    +#include <ATen/ATen.h>
    +
    +THCState *state = at::globalContext().thc_state;
    +
    +extern "C" int my_lib_add_forward_cuda(THCudaTensor *input1, THCudaTensor *input2,
    +                                        THCudaTensor *output)
    +{
+    if (!THCudaTensor_isSameSizeAs(state, input1, input2))
+        return 0;
    +    THCudaTensor_resizeAs(state, output, input1);
    +    THCudaTensor_cadd(state, output, input1, 1.0, input2);
    +    return 1;
    +}
    +
    +extern "C" int my_lib_add_backward_cuda(THCudaTensor *grad_output, THCudaTensor *grad_input)
    +{
    +    THCudaTensor_resizeAs(state, grad_input, grad_output);
    +    THCudaTensor_fill(state, grad_input, 1);
    +    return 1;
    +}
    +
    +
    +
    +
    +

    Cpp Extension

    +

This type of extension has better support compared with the previous one. However, it still needs some manual configuration. First, you should open the x86_x64 Cross Tools Command Prompt for VS 2017. Then you can start the compiling process.

    +
    +
    +
    +

    Installation

    +
    +

    Package not found in win-32 channel.

    +
    Solving environment: failed
    +
    +PackagesNotFoundError: The following packages are not available from current channels:
    +
    +- pytorch
    +
    +Current channels:
    +- https://conda.anaconda.org/pytorch/win-32
    +- https://conda.anaconda.org/pytorch/noarch
    +- https://repo.continuum.io/pkgs/main/win-32
    +- https://repo.continuum.io/pkgs/main/noarch
    +- https://repo.continuum.io/pkgs/free/win-32
    +- https://repo.continuum.io/pkgs/free/noarch
    +- https://repo.continuum.io/pkgs/r/win-32
    +- https://repo.continuum.io/pkgs/r/noarch
    +- https://repo.continuum.io/pkgs/pro/win-32
    +- https://repo.continuum.io/pkgs/pro/noarch
    +- https://repo.continuum.io/pkgs/msys2/win-32
    +- https://repo.continuum.io/pkgs/msys2/noarch
    +
    +
    +

PyTorch doesn’t work on 32-bit systems. Please use 64-bit versions of Windows and Python.

    +
    +
    +

    Why are there no Python 2 packages for Windows?

    +

Because it’s not stable enough. There are some issues that need to be solved before we officially release it. You can build it yourself.

    +
    +
    +

    Import error

    +
    from torch._C import *
    +
    +ImportError: DLL load failed: The specified module could not be found.
    +
    +
    +

The problem is caused by missing essential files. Actually, we include almost all the essential files that PyTorch needs for the conda package except the VC2017 redistributable and some MKL libraries. You can resolve this by typing the following command.

    +
    conda install -c peterjc123 vc vs2017_runtime
    +conda install mkl_fft intel_openmp numpy mkl
    +
    +
    +

As for the wheels package, since we didn’t pack some libraries and the VS2017 redistributable files in, please make sure you install them manually. The VS 2017 redistributable installer can be downloaded. You should also pay attention to your installation of NumPy: make sure it uses MKL instead of OpenBLAS. You may type in the following command.

    +
    pip install numpy mkl intel-openmp mkl_fft
    +
    +
    +

Another possible cause may be that you are using the GPU version without an NVIDIA graphics card. Please replace your GPU package with the CPU one.

    +
    from torch._C import *
    +
    +ImportError: DLL load failed: The operating system cannot run %1.
    +
    +
    +

This is actually an upstream issue of Anaconda. When you initialize your environment with the conda-forge channel, this issue will emerge. You may fix the intel-openmp libraries through this command.

    +
    conda install -c defaults intel-openmp -f
    +
    +
    +
    +
    +
    +

    Usage (multiprocessing)

    +
    +

    Multiprocessing error without if-clause protection

    +
    RuntimeError:
    +       An attempt has been made to start a new process before the
    +       current process has finished its bootstrapping phase.
    +
    +   This probably means that you are not using fork to start your
    +   child processes and you have forgotten to use the proper idiom
    +   in the main module:
    +
    +       if __name__ == '__main__':
    +           freeze_support()
    +           ...
    +
    +   The "freeze_support()" line can be omitted if the program
    +   is not going to be frozen to produce an executable.
    +
    +
    +

The implementation of multiprocessing is different on Windows, which uses spawn instead of fork. So we have to wrap the code with an if-clause to protect it from executing multiple times. Refactor your code into the following structure.

    +
    import torch
    +
+def main():
+    for i, data in enumerate(dataloader):
+        pass  # do something here
    +
    +if __name__ == '__main__':
    +    main()
    +
    +
    +
    +
    +

    Multiprocessing error “Broken pipe”

    +
    ForkingPickler(file, protocol).dump(obj)
    +
    +BrokenPipeError: [Errno 32] Broken pipe
    +
    +
    +

This issue happens when the child process ends before the parent process finishes sending data. There may be something wrong with your code. You can debug it by reducing the num_workers of DataLoader to zero and seeing if the issue persists.
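A minimal sketch of that debugging step (dataset stands in for your own Dataset):

from torch.utils.data import DataLoader

# Load data in the main process only; no worker subprocesses involved.
loader = DataLoader(dataset, batch_size=32, num_workers=0)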

    +
    +
    +

    Multiprocessing error “driver shut down”

    +
    Couldn’t open shared file mapping: <torch_14808_1591070686>, error code: <1455> at torch\lib\TH\THAllocator.c:154
    +
    +[windows] driver shut down
    +
    +
    +

Please update your graphics driver. If the problem persists, your graphics card may be too old or the computation may be too heavy for your card. Please update the TDR settings according to this post.

    +
    +
    +

    CUDA IPC operations

    +
    THCudaCheck FAIL file=torch\csrc\generic\StorageSharing.cpp line=252 error=63 : OS call failed or operation not supported on this OS
    +
    +
    +

They are not supported on Windows. Something like doing multiprocessing on CUDA tensors cannot succeed; there are two alternatives for this.

    +

1. Don’t use multiprocessing. Set the num_workers of DataLoader to zero.

    +

2. Share CPU tensors instead. Make sure your custom Dataset returns CPU tensors.

    +
    +
    +
diff --git a/docs/stable/objects.inv b/docs/stable/objects.inv
new file mode 100644
index 0000000000000000000000000000000000000000..2b1eff0f3d26d62cc9ebe3e40d9b3f03cbf61b87
GIT binary patch (literal 11478; binary content omitted)

diff --git a/docs/stable/onnx.html b/docs/stable/onnx.html
new file mode 100644
--- /dev/null
+++ b/docs/stable/onnx.html

    torch.onnx

    +

    Example: End-to-end AlexNet from PyTorch to ONNX

    +

Here is a simple script which exports a pretrained AlexNet as defined in torchvision into ONNX. It runs a single round of inference and then saves the resulting traced model to alexnet.onnx:

    +
    import torch
    +import torchvision
    +
    +dummy_input = torch.randn(10, 3, 224, 224, device='cuda')
    +model = torchvision.models.alexnet(pretrained=True).cuda()
    +
    +# Providing input and output names sets the display names for values
    +# within the model's graph. Setting these does not change the semantics
    +# of the graph; it is only for readability.
    +#
    +# The inputs to the network consist of the flat list of inputs (i.e.
    +# the values you would pass to the forward() method) followed by the
    +# flat list of parameters. You can partially specify names, i.e. provide
    +# a list here shorter than the number of inputs to the model, and we will
    +# only set that subset of names, starting from the beginning.
    +input_names = [ "actual_input_1" ] + [ "learned_%d" % i for i in range(16) ]
    +output_names = [ "output1" ]
    +
    +torch.onnx.export(model, dummy_input, "alexnet.onnx", verbose=True, input_names=input_names, output_names=output_names)
    +
    +
    +

The resulting alexnet.onnx is a binary protobuf file which contains both the network structure and the parameters of the model you exported (in this case, AlexNet). The keyword argument verbose=True causes the exporter to print out a human-readable representation of the network:

    +
    # These are the inputs and parameters to the network, which have taken on
    +# the names we specified earlier.
    +graph(%actual_input_1 : Float(10, 3, 224, 224)
    +      %learned_0 : Float(64, 3, 11, 11)
    +      %learned_1 : Float(64)
    +      %learned_2 : Float(192, 64, 5, 5)
    +      %learned_3 : Float(192)
    +      # ---- omitted for brevity ----
    +      %learned_14 : Float(1000, 4096)
    +      %learned_15 : Float(1000)) {
    +  # Every statement consists of some output tensors (and their types),
    +  # the operator to be run (with its attributes, e.g., kernels, strides,
    +  # etc.), its input tensors (%actual_input_1, %learned_0, %learned_1)
    +  %17 : Float(10, 64, 55, 55) = onnx::Conv[dilations=[1, 1], group=1, kernel_shape=[11, 11], pads=[2, 2, 2, 2], strides=[4, 4]](%actual_input_1, %learned_0, %learned_1), scope: AlexNet/Sequential[features]/Conv2d[0]
    +  %18 : Float(10, 64, 55, 55) = onnx::Relu(%17), scope: AlexNet/Sequential[features]/ReLU[1]
    +  %19 : Float(10, 64, 27, 27) = onnx::MaxPool[kernel_shape=[3, 3], pads=[0, 0, 0, 0], strides=[2, 2]](%18), scope: AlexNet/Sequential[features]/MaxPool2d[2]
    +  # ---- omitted for brevity ----
    +  %29 : Float(10, 256, 6, 6) = onnx::MaxPool[kernel_shape=[3, 3], pads=[0, 0, 0, 0], strides=[2, 2]](%28), scope: AlexNet/Sequential[features]/MaxPool2d[12]
    +  # Dynamic means that the shape is not known. This may be because of a
    +  # limitation of our implementation (which we would like to fix in a
    +  # future release) or shapes which are truly dynamic.
    +  %30 : Dynamic = onnx::Shape(%29), scope: AlexNet
    +  %31 : Dynamic = onnx::Slice[axes=[0], ends=[1], starts=[0]](%30), scope: AlexNet
    +  %32 : Long() = onnx::Squeeze[axes=[0]](%31), scope: AlexNet
    +  %33 : Long() = onnx::Constant[value={9216}](), scope: AlexNet
    +  # ---- omitted for brevity ----
    +  %output1 : Float(10, 1000) = onnx::Gemm[alpha=1, beta=1, broadcast=1, transB=1](%45, %learned_14, %learned_15), scope: AlexNet/Sequential[classifier]/Linear[6]
    +  return (%output1);
    +}
    +
    +
    +

You can also verify the protobuf using the onnx library. You can install onnx with conda:

    +
    conda install -c conda-forge onnx
    +
    +
    +

    Then, you can run:

    +
    import onnx
    +
    +# Load the ONNX model
    +model = onnx.load("alexnet.onnx")
    +
    +# Check that the IR is well formed
    +onnx.checker.check_model(model)
    +
    +# Print a human readable representation of the graph
    +onnx.helper.printable_graph(model.graph)
    +
    +
    +

To run the exported script with Caffe2, you will need to install Caffe2. If you don’t have it already, please follow the install instructions.

    +

    Once these are installed, you can use the backend for Caffe2:

    +
    # ...continuing from above
    +import caffe2.python.onnx.backend as backend
    +import numpy as np
    +
    +rep = backend.prepare(model, device="CUDA:0") # or "CPU"
    +# For the Caffe2 backend:
    +#     rep.predict_net is the Caffe2 protobuf for the network
    +#     rep.workspace is the Caffe2 workspace for the network
    +#       (see the class caffe2.python.onnx.backend.Workspace)
    +outputs = rep.run(np.random.randn(10, 3, 224, 224).astype(np.float32))
    +# To run networks with more than one input, pass a tuple
    +# rather than a single numpy ndarray.
    +print(outputs[0])
    +
    +
    +

You can also run the exported model with ONNX Runtime; you will need to install ONNX Runtime first: please follow these instructions.

    +

    Once these are installed, you can use the backend for ONNXRuntime:

    +
    # ...continuing from above
    +import onnxruntime as ort
    +
    +ort_session = ort.InferenceSession('alexnet.onnx')
    +
    +outputs = ort_session.run(None, {'actual_input_1': np.random.randn(10, 3, 224, 224).astype(np.float32)})
    +
    +print(outputs[0])
    +
    +
    +

Here is another tutorial on exporting the SuperResolution model to ONNX.

    +

    In the future, there will be backends for other frameworks as well.

    +
    +
    +

    Tracing vs Scripting

    +

The ONNX exporter can be either a trace-based or a script-based exporter.

    +
      +
• trace-based means that it operates by executing your model once and exporting the operators which were actually run during this run. This means that if your model is dynamic, e.g., changes behavior depending on input data, the export won’t be accurate. Similarly, a trace is likely to be valid only for a specific input size (which is one reason why we require explicit inputs on tracing). We recommend examining the model trace and making sure the traced operators look reasonable. If your model contains control flow like for loops and if conditions, the trace-based exporter will unroll them, exporting a static graph that is exactly the same as this run. If you want to export your model with dynamic control flow, you will need to use the script-based exporter.

• script-based means that the model you are trying to export is a ScriptModule. ScriptModule is the core data structure in TorchScript, and TorchScript is a subset of the Python language that creates serializable and optimizable models from PyTorch code.

We allow mixing tracing and scripting. You can compose tracing and scripting to suit the particular requirements of a part of a model. Check out this example:

    +
    import torch
    +
    +# Trace-based only
    +
    +class LoopModel(torch.nn.Module):
    +    def forward(self, x, y):
    +        for i in range(y):
    +            x = x + i
    +        return x
    +
    +model = LoopModel()
    +dummy_input = torch.ones(2, 3, dtype=torch.long)
    +loop_count = torch.tensor(5, dtype=torch.long)
    +
    +torch.onnx.export(model, (dummy_input, loop_count), 'loop.onnx', verbose=True)
    +
    +
    +

With the trace-based exporter, we get the resulting ONNX graph, which unrolls the for loop:

    +
    graph(%0 : Long(2, 3),
    +      %1 : Long()):
    +  %2 : Tensor = onnx::Constant[value={1}]()
    +  %3 : Tensor = onnx::Add(%0, %2)
    +  %4 : Tensor = onnx::Constant[value={2}]()
    +  %5 : Tensor = onnx::Add(%3, %4)
    +  %6 : Tensor = onnx::Constant[value={3}]()
    +  %7 : Tensor = onnx::Add(%5, %6)
    +  %8 : Tensor = onnx::Constant[value={4}]()
    +  %9 : Tensor = onnx::Add(%7, %8)
    +  return (%9)
    +
    +
    +

To utilize the script-based exporter for capturing the dynamic loop, we can write the loop in script and call it from the regular nn.Module:

    +
    # Mixing tracing and scripting
    +
    +@torch.jit.script
    +def loop(x, y):
    +    for i in range(int(y)):
    +        x = x + i
    +    return x
    +
    +class LoopModel2(torch.nn.Module):
    +    def forward(self, x, y):
    +        return loop(x, y)
    +
    +model = LoopModel2()
    +dummy_input = torch.ones(2, 3, dtype=torch.long)
    +loop_count = torch.tensor(5, dtype=torch.long)
    +torch.onnx.export(model, (dummy_input, loop_count), 'loop.onnx', verbose=True,
    +                  input_names=['input_data', 'loop_range'])
    +
    +
    +

    Now the exported ONNX graph becomes:

    +
    graph(%input_data : Long(2, 3),
    +      %loop_range : Long()):
    +  %2 : Long() = onnx::Constant[value={1}](), scope: LoopModel2/loop
    +  %3 : Tensor = onnx::Cast[to=9](%2)
    +  %4 : Long(2, 3) = onnx::Loop(%loop_range, %3, %input_data), scope: LoopModel2/loop # custom_loop.py:240:5
    +    block0(%i.1 : Long(), %cond : bool, %x.6 : Long(2, 3)):
    +      %8 : Long(2, 3) = onnx::Add(%x.6, %i.1), scope: LoopModel2/loop # custom_loop.py:241:13
    +      %9 : Tensor = onnx::Cast[to=9](%2)
    +      -> (%9, %8)
    +  return (%4)
    +
    +
    +

The dynamic control flow is captured correctly. We can verify this in backends with a different loop range:

    +
    import caffe2.python.onnx.backend as backend
    +import numpy as np
    +import onnx
    +model = onnx.load('loop.onnx')
    +
    +rep = backend.prepare(model)
    +outputs = rep.run((dummy_input.numpy(), np.array(9).astype(np.int64)))
    +print(outputs[0])
    +#[[37 37 37]
    +# [37 37 37]]
    +
    +
    +import onnxruntime as ort
    +ort_sess = ort.InferenceSession('loop.onnx')
    +outputs = ort_sess.run(None, {'input_data': dummy_input.numpy(),
    +                              'loop_range': np.array(9).astype(np.int64)})
    +print(outputs)
    +#[array([[37, 37, 37],
    +#       [37, 37, 37]], dtype=int64)]
    +
    +
    +
    +
    +

    Limitations

    +
      +
• Tensor in-place indexed assignment like data[index] = new_data is currently not supported in exporting. One way to resolve this kind of issue is to use the scatter operator, explicitly updating the original tensor:

      +
      data = torch.zeros(3, 4)
      +index = torch.tensor(1)
      +new_data = torch.arange(4).to(torch.float32)
      +
      +# Assigning to left hand side indexing is not supported in exporting.
      +# class InPlaceIndexedAssignment(torch.nn.Module):
      +# def forward(self, data, index, new_data):
      +#     data[index] = new_data
      +#     return data
      +
      +class InPlaceIndexedAssignmentONNX(torch.nn.Module):
      +    def forward(self, data, index, new_data):
      +        new_data = new_data.unsqueeze(0)
      +        index = index.expand(1, new_data.size(1))
      +        data.scatter_(0, index, new_data)
      +        return data
      +
      +out = InPlaceIndexedAssignmentONNX()(data, index, new_data)
      +
      +torch.onnx.export(InPlaceIndexedAssignmentONNX(), (data, index, new_data), 'inplace_assign.onnx')
      +
      +# caffe2
      +import caffe2.python.onnx.backend as backend
      +import onnx
      +
      +onnx_model = onnx.load('inplace_assign.onnx')
      +rep = backend.prepare(onnx_model)
      +out_caffe2 = rep.run((torch.zeros(3, 4).numpy(), index.numpy(), new_data.numpy()))
      +
      +assert torch.all(torch.eq(out, torch.tensor(out_caffe2)))
      +
      +# onnxruntime
      +import onnxruntime
      +sess = onnxruntime.InferenceSession('inplace_assign.onnx')
      +out_ort = sess.run(None, {
      +    sess.get_inputs()[0].name: torch.zeros(3, 4).numpy(),
      +    sess.get_inputs()[1].name: index.numpy(),
      +    sess.get_inputs()[2].name: new_data.numpy(),
      +})
      +
      +assert torch.all(torch.eq(out, torch.tensor(out_ort)))
      +
      +
      +
• There is no concept of a tensor list in ONNX. Without this concept, it is very hard to export operators that consume or produce a tensor list, especially when the length of the tensor list is not known at export time.

import torch

x = torch.tensor([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])

# This is not exportable
class Model(torch.nn.Module):
    def forward(self, x):
        return x.unbind(0)

# This is exportable.
# Note that in this example we know the split operator will always produce
# exactly three outputs, so we can export to ONNX without using a tensor list.
class AnotherModel(torch.nn.Module):
    def forward(self, x):
        return [torch.squeeze(out, 0) for out in torch.split(x, [1, 1, 1], dim=0)]
• PyTorch and ONNX backends (Caffe2, ONNXRuntime, etc.) often have implementations of operators with some numeric differences. Depending on model structure, these differences may be negligible, but they can also cause major divergences in behavior (especially on untrained models). We allow Caffe2 to call directly into Torch implementations of operators, to help you smooth over these differences when precision is important, and to also document these differences (see the sketch below).

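For instance, a hedged sketch of leaning on ATen implementations at export time; it assumes model and dummy_input are defined as in the earlier examples, and uses the ONNX_ATEN_FALLBACK mode described under torch.onnx.export below:

import torch

# Ops without a registered symbolic are emitted as ATen ops in the graph;
# Caffe2 can then execute them through its ATen integration.
torch.onnx.export(model, dummy_input, 'model_aten.onnx',
                  operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)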

    Supported operators

    +

    The following operators are supported:

    +
      +
• BatchNorm
• ConstantPadNd
• Conv
• Dropout
• Embedding (no optional arguments supported)
• FeatureDropout (training mode not supported)
• Index
• MaxPool1d
• MaxPool2d
• MaxPool3d
• RNN
• abs
• acos
• adaptive_avg_pool1d
• adaptive_avg_pool2d
• adaptive_avg_pool3d
• adaptive_max_pool1d
• adaptive_max_pool2d
• adaptive_max_pool3d
• add (nonzero alpha not supported)
• addmm
• and
• arange
• argmax
• argmin
• asin
• atan
• avg_pool1d
• avg_pool2d
• avg_pool3d
• cat
• ceil
• clamp
• clamp_max
• clamp_min
• concat
• cos
• dim_arange
• div
• dropout
• elu
• eq
• erf
• exp
• expand
• expand_as
• flatten
• floor
• full
• full_like
• gather
• ge
• glu
• gt
• hardtanh
• index_copy
• index_fill
• index_select
• instance_norm
• isnan
• layer_norm
• le
• leaky_relu
• log
• log2
• log_sigmoid
• log_softmax
• logsumexp
• lt
• masked_fill
• max
• mean
• min
• mm
• mul
• narrow
• ne
• neg
• nonzero
• norm
• ones
• ones_like
• or
• permute
• pixel_shuffle
• pow
• prelu (single weight shared among input channels not supported)
• prod
• rand
• randn
• randn_like
• reciprocal
• reflection_pad
• relu
• repeat
• replication_pad
• reshape
• reshape_as
• rrelu
• rsub
• scatter
• scatter_add
• select
• selu
• sigmoid
• sign
• sin
• size
• slice
• softmax (only dim=-1 supported)
• softplus
• split
• sqrt
• squeeze
• stack
• sub (nonzero alpha not supported)
• sum
• t
• tan
• tanh
• threshold (non-zero threshold/non-zero value not supported)
• to
• topk
• transpose
• type_as
• unfold (experimental support with ATen-Caffe2 integration)
• unsqueeze
• upsample_nearest1d
• upsample_nearest2d
• upsample_nearest3d
• view
• where
• zeros
• zeros_like

    The operator set above is sufficient to export the following models:

    +
      +
• AlexNet
• DCGAN
• DenseNet
• Inception (warning: this model is highly sensitive to changes in operator implementation)
• ResNet
• SuperResolution
• VGG
• word_language_model
    +
    +

    Adding support for operators

    +

Adding export support for operators is advanced usage. To achieve this, developers need to touch the source code of PyTorch. Please follow the instructions for installing PyTorch from source. If the wanted operator is standardized in ONNX, it should be easy to add support for exporting such an operator (adding a symbolic function for the operator). To confirm whether the operator is standardized or not, please check the ONNX operator list.

    +
    +

    ATen operators

    +

If the operator is an ATen operator, which means you can find the declaration of the function in torch/csrc/autograd/generated/VariableType.h (available in generated code in the PyTorch install dir), you should add the symbolic function in torch/onnx/symbolic_opset<version>.py and follow the instructions listed below:

    +
      +
• Define the symbolic function in torch/onnx/symbolic_opset<version>.py, for example torch/onnx/symbolic_opset9.py. Make sure the function has the same name as the ATen operator/function defined in VariableType.h.

• The first parameter is always the exported ONNX graph. Parameter names must EXACTLY match the names in VariableType.h, because dispatch is done with keyword arguments.

• Parameter ordering does NOT necessarily match what is in VariableType.h: tensors (inputs) are always first, then non-tensor arguments.

• In the symbolic function, if the operator is already standardized in ONNX, we only need to create a node to represent the ONNX operator in the graph.

• If the input argument is a tensor, but ONNX asks for a scalar, we have to explicitly do the conversion. The helper function _scalar can convert a scalar tensor into a Python scalar, and _if_scalar_type_as can turn a Python scalar into a PyTorch tensor.
    +
    +

    Non-ATen operators

    +

If the operator is a non-ATen operator, the symbolic function has to be added in the corresponding PyTorch Function class. Please read the following instructions (a sketch follows the list):

    +
      +
• Create a symbolic function named symbolic in the corresponding Function class.

• The first parameter is always the exported ONNX graph.

• Parameter names except the first must EXACTLY match the names in forward.

• The output tuple size must match the outputs of forward.

• In the symbolic function, if the operator is already standardized in ONNX, we just need to create a node to represent the ONNX operator in the graph.
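As a minimal sketch of these rules (the Relu mapping here is illustrative, not taken from the original docs), a Function whose operator is standardized in ONNX might look like:

import torch
from torch.autograd import Function

class MyRelu(Function):
    @staticmethod
    def forward(ctx, input):
        # Plain eager implementation of the op.
        return input.clamp(min=0)

    @staticmethod
    def symbolic(g, input):
        # First parameter is the ONNX graph; the remaining names match forward.
        return g.op("Relu", input)

# usage: y = MyRelu.apply(x)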

Symbolic functions should be implemented in Python. All of these functions interact with Python methods which are implemented via C++-Python bindings, but intuitively the interface they provide looks like this:

    +
def operator/symbolic(g, *inputs):
  """
  Modifies Graph (e.g., using "op"), adding the ONNX operations representing
  this PyTorch function, and returning a Value or tuple of Values specifying the
  ONNX outputs whose values correspond to the original PyTorch return values
  of the autograd Function (or None if an output is not supported by ONNX).

  Arguments:
    g (Graph): graph to write the ONNX representation into
    inputs (Value...): list of values representing the variables which contain
        the inputs for this function
  """

class Value(object):
  """Represents an intermediate tensor value computed in ONNX."""
  def type(self):
    """Returns the Type of the value."""

class Type(object):
  def sizes(self):
    """Returns a tuple of ints representing the shape of a tensor this describes."""

class Graph(object):
  def op(self, opname, *inputs, **attrs):
    """
    Create an ONNX operator 'opname', taking 'inputs' as inputs and
    attributes 'attrs', and add it as a node to the current graph,
    returning the value representing the single output of this
    operator (see the `outputs` keyword argument for multi-return
    nodes).

    The set of operators and the inputs/attributes they take
    is documented at https://github.com/onnx/onnx/blob/master/docs/Operators.md

    Arguments:
        opname (string): The ONNX operator name, e.g., `Abs` or `Add`.
        inputs (Value...): The inputs to the operator; usually provided
            as arguments to the `symbolic` definition.
        attrs: The attributes of the ONNX operator, with keys named
            according to the following convention: `alpha_f` indicates
            the `alpha` attribute with type `f`.  The valid type specifiers are
            `f` (float), `i` (int), `s` (string) or `t` (Tensor).  An attribute
            specified with type float accepts either a single float, or a
            list of floats (e.g., you would say `dims_i` for a `dims` attribute
            that takes a list of integers).
        outputs (int, optional):  The number of outputs this operator returns;
            by default an operator is assumed to return a single output.
            If `outputs` is greater than one, this function returns a tuple
            of output `Value`s, representing each output of the ONNX operator
            in positional order.
    """

    The ONNX graph C++ definition is in torch/csrc/jit/ir.h.

    +

Here is an example of handling a missing symbolic function for the elu operator. We try to export the model and see the error message below:

    +
UserWarning: ONNX export failed on elu because torch.onnx.symbolic_opset9.elu does not exist
RuntimeError: ONNX export failed: Couldn't export operator elu
    +
    +
    +

The export fails because PyTorch does not support exporting the elu operator. We find virtual Tensor elu(const Tensor & input, Scalar alpha, bool inplace) const override; in VariableType.h. This means elu is an ATen operator. We check the ONNX operator list, and confirm that Elu is standardized in ONNX. We add the following lines to symbolic_opset9.py:

    +
def elu(g, input, alpha, inplace=False):
    return g.op("Elu", input, alpha_f=_scalar(alpha))
    +
    +
    +

Now PyTorch is able to export the elu operator.

    +

There are more examples in symbolic_opset9.py and symbolic_opset10.py.

    +

The interface for specifying operator definitions is experimental; adventurous users should note that the APIs will probably change in a future release.

    +
    +
    +

    Custom operators

    +

Following the tutorial Extending TorchScript with Custom C++ Operators, you can create and register your own custom op implementation in PyTorch. Here is how to export such a model to ONNX:

    +
# Create custom symbolic function
from torch.onnx.symbolic_helper import parse_args

@parse_args('v', 'v', 'f', 'i')
def symbolic_foo_forward(g, input1, input2, attr1, attr2):
    return g.op("Foo", input1, input2, attr1_f=attr1, attr2_i=attr2)

# Register custom symbolic function
from torch.onnx import register_custom_op_symbolic
register_custom_op_symbolic('custom_ops::foo_forward', symbolic_foo_forward, 9)

class FooModel(torch.nn.Module):
    def __init__(self, attr1, attr2):
        super(FooModel, self).__init__()
        self.attr1 = attr1
        self.attr2 = attr2

    def forward(self, input1, input2):
        # Calling custom op
        return torch.ops.custom_ops.foo_forward(input1, input2, self.attr1, self.attr2)

model = FooModel(attr1, attr2)
torch.onnx.export(model, (dummy_input1, dummy_input2), 'model.onnx')
    +
    +
    +

Depending on the custom operator, you can export it as one or a combination of existing ONNX ops. You can also export it as a custom op in ONNX. In that case, you will need to extend the backend of your choice with a matching custom op implementation, e.g., Caffe2 custom ops, ONNXRuntime custom ops.

    +
    +
    +
    +

    Frequently Asked Questions

    +

Q: I have exported my LSTM model, but its input size seems to be fixed?

    +
    +

The tracer records the shapes of the example inputs in the graph. If the model should accept inputs of dynamic shapes, you can use the parameter dynamic_axes in the export API.

    +
import torch
import torch.nn as nn
import onnx

layer_count = 4

model = nn.LSTM(10, 20, num_layers=layer_count, bidirectional=True)
model.eval()

with torch.no_grad():
    input = torch.randn(5, 3, 10)
    h0 = torch.randn(layer_count * 2, 3, 20)
    c0 = torch.randn(layer_count * 2, 3, 20)
    output, (hn, cn) = model(input, (h0, c0))

    # default export
    torch.onnx.export(model, (input, (h0, c0)), 'lstm.onnx')
    onnx_model = onnx.load('lstm.onnx')
    # input shape [5, 3, 10]
    print(onnx_model.graph.input[0])

    # export with `dynamic_axes`
    torch.onnx.export(model, (input, (h0, c0)), 'lstm.onnx',
                      input_names=['input', 'h0', 'c0'],
                      output_names=['output', 'hn', 'cn'],
                      dynamic_axes={'input': {0: 'sequence'}, 'output': {0: 'sequence'}})
    onnx_model = onnx.load('lstm.onnx')
    # input shape ['sequence', 3, 10]
    print(onnx_model.graph.input[0])
    +
    +
    +
    +

Q: How do I export models with loops in them?

    +
    +

Please check out Tracing vs Scripting.

    +
    +

    Q: Does ONNX support implicit scalar datatype casting?

    +
    +

No, but the exporter will try to handle that part. Scalars are converted to constant tensors in ONNX. The exporter will try to figure out the right datatype for scalars. However, in cases where it fails to do so, you will need to provide the datatype information manually. We are trying to improve datatype propagation in the exporter such that manual changes are not required in the future.

    +
import torch

class ImplicitCastType(torch.jit.ScriptModule):
    @torch.jit.script_method
    def forward(self, x):
        # Exporter knows x is float32, and will export '2' as float32 as well.
        y = x + 2
        # Without type propagation, the exporter doesn't know the datatype of y.
        # Thus '3' is exported as int64 by default.
        return y + 3
        # The following would export correctly.
        # return y + torch.tensor([3], dtype=torch.float32)

x = torch.tensor([1.0], dtype=torch.float32)
torch.onnx.export(ImplicitCastType(), x, 'models/implicit_cast.onnx',
                  example_outputs=ImplicitCastType()(x))
    +
    +
    +
    +
    +
    +

    Functions

    +
    +
torch.onnx.export(model, args, f, export_params=True, verbose=False, training=False, input_names=None, output_names=None, aten=False, export_raw_ir=False, operator_export_type=None, opset_version=None, _retain_param_name=True, do_constant_folding=False, example_outputs=None, strip_doc_string=True, dynamic_axes=None)[source]
    +

Export a model into ONNX format. This exporter runs your model once in order to get a trace of its execution to be exported; at the moment, it supports a limited set of dynamic models (e.g., RNNs). See also: onnx-export.

Parameters

• model (torch.nn.Module) – the model to be exported.

• args – the inputs to the model, e.g., such that model(*args) is a valid invocation of the model. Any non-Tensor arguments will be hard-coded into the exported model; any Tensor arguments will become inputs of the exported model, in the order they occur in args. If args is a Tensor, this is equivalent to having called it with a 1-ary tuple of that Tensor. (Note: passing keyword arguments to the model is not currently supported. Give us a shout if you need it.)

• f – a file-like object (has to implement fileno that returns a file descriptor) or a string containing a file name. A binary Protobuf will be written to this file.

    • +
• export_params (bool, default True) – if specified, all parameters will be exported. Set this to False if you want to export an untrained model. In this case, the exported model will first take all of its parameters as arguments, with the ordering as specified by model.state_dict().values().

    • +
    • verbose (bool, default False) – if specified, we will print out a debug +description of the trace being exported.

    • +
    • training (bool, default False) – export the model in training mode. At +the moment, ONNX is oriented towards exporting models for inference +only, so you will generally not need to set this to True.

    • +
    • input_names (list of strings, default empty list) – names to assign to the +input nodes of the graph, in order

    • +
    • output_names (list of strings, default empty list) – names to assign to the +output nodes of the graph, in order

    • +
• aten (bool, default False) – [DEPRECATED. use operator_export_type] export the model in aten mode. If using aten mode, all the ops originally exported by the functions in symbolic_opset<version>.py are exported as ATen ops.

    • +
    • export_raw_ir (bool, default False) – [DEPRECATED. use operator_export_type] +export the internal IR directly instead of converting it to ONNX ops.

    • +
    • operator_export_type (enum, default OperatorExportTypes.ONNX) –

  OperatorExportTypes.ONNX: all ops are exported as regular ONNX ops.
  OperatorExportTypes.ONNX_ATEN: all ops are exported as ATen ops.
  OperatorExportTypes.ONNX_ATEN_FALLBACK: if a symbolic is missing, fall back on the ATen op.
  OperatorExportTypes.RAW: export the raw IR.

      +

    • +
• opset_version (int, default is 9) – by default we export the model to the opset version of the onnx submodule. Since ONNX’s latest opset may evolve before the next stable release, by default we export to one stable opset version. Right now, the supported stable opset version is 9. The opset_version must be _onnx_master_opset or in _onnx_stable_opsets, which are defined in torch/onnx/symbolic_helper.py.

    • +
    • do_constant_folding (bool, default False) – If True, the constant-folding +optimization is applied to the model during export. Constant-folding +optimization will replace some of the ops that have all constant +inputs, with pre-computed constant nodes.

    • +
    • example_outputs (tuple of Tensors, default None) – example_outputs must be provided +when exporting a ScriptModule or TorchScript Function.

    • +
• strip_doc_string (bool, default True) – if True, strips the field “doc_string” from the exported model, which contains information about the stack trace.


    • +
• dynamic_axes (dict<string, dict<int, string>> or dict<string, list(int)>, default empty dict) – a dictionary to specify dynamic axes of input/output, such that:

  - KEY: input and/or output names
  - VALUE: indices of dynamic axes for the given key, and potentially the names to be used for the exported dynamic axes.

  In general the value is defined according to one of the following ways, or a combination of both:
  (1) a list of integers specifying the dynamic axes of the provided input; in this scenario automated names will be generated and applied to the dynamic axes of the provided input/output during export; or
  (2) an inner dictionary that specifies a mapping FROM the index of a dynamic axis in the corresponding input/output TO the name that is desired to be applied on such axis of such input/output during export.

  For example, if we have the following shapes for inputs and outputs:

    shape(input_1) = ('b', 3, 'w', 'h')
    shape(input_2) = ('b', 4)
    shape(output)  = ('b', 'd', 5)

  Then dynamic axes can be defined either as:

  (a) ONLY INDICES:
      dynamic_axes = {'input_1': [0, 2, 3], 'input_2': [0], 'output': [0, 1]}
      where automatic names will be generated for the exported dynamic axes

  (b) INDICES WITH CORRESPONDING NAMES:
      dynamic_axes = {'input_1': {0: 'batch', 2: 'width', 3: 'height'},
                      'input_2': {0: 'batch'},
                      'output': {0: 'batch', 1: 'detections'}}
      where the provided names will be applied to the exported dynamic axes

  (c) MIXED MODE OF (a) AND (b), as in the combined sketch below:
      dynamic_axes = {'input_1': [0, 2, 3], 'input_2': {0: 'batch'}, 'output': [0, 1]}
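A minimal combined sketch of case (c) (the model and input names here are assumed):

torch.onnx.export(model, (input_1, input_2), 'model.onnx',
                  input_names=['input_1', 'input_2'],
                  output_names=['output'],
                  dynamic_axes={'input_1': [0, 2, 3],
                                'input_2': {0: 'batch'},
                                'output': [0, 1]})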

    • +
    +
    +
    +
    + +
    +
    +torch.onnx.register_custom_op_symbolic(symbolic_name, symbolic_fn, opset_version)[source]
    +
    + +
    +
    +torch.onnx.operators.shape_as_tensor(x)[source]
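A minimal usage sketch (the printed value follows from the input’s shape):

import torch
from torch.onnx.operators import shape_as_tensor

x = torch.randn(2, 3)
print(shape_as_tensor(x))  # tensor([2, 3])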
    +
    + +
    +
    +torch.onnx.set_training(model, mode)[source]
    +

    A context manager to temporarily set the training mode of ‘model’ +to ‘mode’, resetting it when we exit the with-block. A no-op if +mode is None.
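A minimal usage sketch (model and dummy_input are assumed to be defined; the tracing call is only illustrative):

import torch

with torch.onnx.set_training(model, False):
    # model runs in eval mode here; its previous mode is restored on exit.
    traced = torch.jit.trace(model, dummy_input)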

    +
    + +
    +
    +torch.onnx.is_in_onnx_export()[source]
    +

Checks whether we are in the middle of an ONNX export. This function returns True in the middle of torch.onnx.export(). torch.onnx.export should be executed with a single thread.
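A minimal sketch of branching on export (a hypothetical module; the clamp rewrite is only illustrative):

import torch

class ExportAwareRelu(torch.nn.Module):
    def forward(self, x):
        if torch.onnx.is_in_onnx_export():
            # Taken only while torch.onnx.export() is tracing this module.
            return x.clamp(min=0)
        return torch.nn.functional.relu(x)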

diff --git a/docs/stable/optim.html b/docs/stable/optim.html
new file mode 100644
index 000000000000..b4550dc2585c
--- /dev/null
+++ b/docs/stable/optim.html

torch.optim — PyTorch master documentation

    torch.optim

    +

torch.optim is a package implementing various optimization algorithms. Most commonly used methods are already supported, and the interface is general enough that more sophisticated ones can also be easily integrated in the future.

    +
    +

    How to use an optimizer

    +

To use torch.optim you have to construct an optimizer object that will hold the current state and update the parameters based on the computed gradients.

    +
    +

    Constructing it

    +

To construct an Optimizer you have to give it an iterable containing the parameters (all should be Variable s) to optimize. Then, you can specify optimizer-specific options such as the learning rate, weight decay, etc.

    +
    +

    Note

    +

If you need to move a model to GPU via .cuda(), please do so before constructing optimizers for it. Parameters of a model after .cuda() will be different objects from those before the call.

    +

    In general, you should make sure that optimized parameters live in +consistent locations when optimizers are constructed and used.

    +
    +

    Example:

    +
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
optimizer = optim.Adam([var1, var2], lr=0.0001)
    +
    +
    +
    +
    +

    Per-parameter options

    +

Optimizer s also support specifying per-parameter options. To do this, instead of passing an iterable of Variable s, pass in an iterable of dict s. Each of them will define a separate parameter group, and should contain a params key, containing a list of parameters belonging to it. Other keys should match the keyword arguments accepted by the optimizers, and will be used as optimization options for this group.

    +
    +

    Note

    +

    You can still pass options as keyword arguments. They will be used as +defaults, in the groups that didn’t override them. This is useful when you +only want to vary a single option, while keeping all others consistent +between parameter groups.

    +
    +

    For example, this is very useful when one wants to specify per-layer learning rates:

    +
optim.SGD([
    {'params': model.base.parameters()},
    {'params': model.classifier.parameters(), 'lr': 1e-3}
], lr=1e-2, momentum=0.9)
    +
    +
    +

    This means that model.base’s parameters will use the default learning rate of 1e-2, +model.classifier’s parameters will use a learning rate of 1e-3, and a momentum of +0.9 will be used for all parameters.

    +
    +
    +

    Taking an optimization step

    +

All optimizers implement a step() method that updates the parameters. It can be used in two ways:

    +
    +

    optimizer.step()

    +

    This is a simplified version supported by most optimizers. The function can be +called once the gradients are computed using e.g. +backward().

    +

    Example:

    +
for input, target in dataset:
    optimizer.zero_grad()
    output = model(input)
    loss = loss_fn(output, target)
    loss.backward()
    optimizer.step()
    +
    +
    +
    +
    +

    optimizer.step(closure)

    +

    Some optimization algorithms such as Conjugate Gradient and LBFGS need to +reevaluate the function multiple times, so you have to pass in a closure that +allows them to recompute your model. The closure should clear the gradients, +compute the loss, and return it.

    +

    Example:

    +
for input, target in dataset:
    def closure():
        optimizer.zero_grad()
        output = model(input)
        loss = loss_fn(output, target)
        loss.backward()
        return loss
    optimizer.step(closure)
    +
    +
    +
    +
    +
    +
    +

    Algorithms

    +
    +
    +class torch.optim.Optimizer(params, defaults)[source]
    +

    Base class for all optimizers.

    +
    +

    Warning

    +

    Parameters need to be specified as collections that have a deterministic +ordering that is consistent between runs. Examples of objects that don’t +satisfy those properties are sets and iterators over values of dictionaries.

    +
    +
    +
    Parameters
    +
      +
    • params (iterable) – an iterable of torch.Tensor s or +dict s. Specifies what Tensors should be optimized.

    • +
• defaults (dict) – a dict containing default values of optimization options (used when a parameter group doesn’t specify them).

    • +
    +
    +
    +
    +
    +add_param_group(param_group)[source]
    +

    Add a param group to the Optimizer s param_groups.

    +

    This can be useful when fine tuning a pre-trained network as frozen layers can be made +trainable and added to the Optimizer as training progresses.

    +
    +
    Parameters
    +
      +
• param_group (dict) – Specifies what Tensors should be optimized, along with group-specific optimization options.
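A minimal sketch of the fine-tuning pattern described above (the module names are assumed): optimize only the classifier first, then unfreeze the base later via add_param_group.

optimizer = torch.optim.SGD(model.classifier.parameters(), lr=1e-2, momentum=0.9)

# ... later, once the frozen base layers should start training:
optimizer.add_param_group({'params': model.base.parameters(), 'lr': 1e-3})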

    • +
    +
    +
    +
    + +
    +
    +load_state_dict(state_dict)[source]
    +

    Loads the optimizer state.

    +
    +
    Parameters
    +

    state_dict (dict) – optimizer state. Should be an object returned +from a call to state_dict().

    +
    +
    +
    + +
    +
    +state_dict()[source]
    +

    Returns the state of the optimizer as a dict.

    +

    It contains two entries:

    +
      +
• state – a dict holding current optimization state. Its content differs between optimizer classes.

• param_groups – a dict containing all parameter groups.
    +
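A minimal checkpointing sketch built on these two methods (the file name is assumed):

torch.save(optimizer.state_dict(), 'optimizer.pt')

# ... later, e.g. when resuming training with a freshly constructed optimizer:
optimizer.load_state_dict(torch.load('optimizer.pt'))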
    + +
    +
    +step(closure)[source]
    +

    Performs a single optimization step (parameter update).

    +
    +
    Parameters
    +

    closure (callable) – A closure that reevaluates the model and +returns the loss. Optional for most optimizers.

    +
    +
    +
    + +
    +
    +zero_grad()[source]
    +

    Clears the gradients of all optimized torch.Tensor s.

    +
    + +
    + +
    +
    +class torch.optim.Adadelta(params, lr=1.0, rho=0.9, eps=1e-06, weight_decay=0)[source]
    +

    Implements Adadelta algorithm.

    +

    It has been proposed in ADADELTA: An Adaptive Learning Rate Method.

    +
    +
    Parameters
    +
      +
    • params (iterable) – iterable of parameters to optimize or dicts defining +parameter groups

    • +
    • rho (float, optional) – coefficient used for computing a running average +of squared gradients (default: 0.9)

    • +
    • eps (float, optional) – term added to the denominator to improve +numerical stability (default: 1e-6)

    • +
• lr (float, optional) – coefficient that scales delta before it is applied to the parameters (default: 1.0)

    • +
    • weight_decay (float, optional) – weight decay (L2 penalty) (default: 0)

    • +
    +
    +
    +
    +
    +step(closure=None)[source]
    +

    Performs a single optimization step.

    +
    +
    Parameters
    +

    closure (callable, optional) – A closure that reevaluates the model +and returns the loss.

    +
    +
    +
    + +
    + +
    +
    +class torch.optim.Adagrad(params, lr=0.01, lr_decay=0, weight_decay=0, initial_accumulator_value=0)[source]
    +

    Implements Adagrad algorithm.

    +

    It has been proposed in Adaptive Subgradient Methods for Online Learning +and Stochastic Optimization.

    +
    +
    Parameters
    +
      +
    • params (iterable) – iterable of parameters to optimize or dicts defining +parameter groups

    • +
    • lr (float, optional) – learning rate (default: 1e-2)

    • +
    • lr_decay (float, optional) – learning rate decay (default: 0)

    • +
    • weight_decay (float, optional) – weight decay (L2 penalty) (default: 0)

    • +
    +
    +
    +
    +
    +step(closure=None)[source]
    +

    Performs a single optimization step.

    +
    +
    Parameters
    +

    closure (callable, optional) – A closure that reevaluates the model +and returns the loss.

    +
    +
    +
    + +
    + +
    +
    +class torch.optim.Adam(params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)[source]
    +

    Implements Adam algorithm.

    +

    It has been proposed in Adam: A Method for Stochastic Optimization.

    +
    +
    Parameters
    +
      +
    • params (iterable) – iterable of parameters to optimize or dicts defining +parameter groups

    • +
    • lr (float, optional) – learning rate (default: 1e-3)

    • +
    • betas (Tuple[float, float], optional) – coefficients used for computing +running averages of gradient and its square (default: (0.9, 0.999))

    • +
    • eps (float, optional) – term added to the denominator to improve +numerical stability (default: 1e-8)

    • +
    • weight_decay (float, optional) – weight decay (L2 penalty) (default: 0)

    • +
    • amsgrad (boolean, optional) – whether to use the AMSGrad variant of this +algorithm from the paper On the Convergence of Adam and Beyond +(default: False)

    • +
    +
    +
    +
    +
    +step(closure=None)[source]
    +

    Performs a single optimization step.

    +
    +
    Parameters
    +

    closure (callable, optional) – A closure that reevaluates the model +and returns the loss.

    +
    +
    +
    + +
    + +
    +
    +class torch.optim.AdamW(params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.01, amsgrad=False)[source]
    +

    Implements AdamW algorithm.

    +

    The original Adam algorithm was proposed in Adam: A Method for Stochastic Optimization. +The AdamW variant was proposed in Decoupled Weight Decay Regularization.

    +
    +
    Parameters
    +
      +
    • params (iterable) – iterable of parameters to optimize or dicts defining +parameter groups

    • +
    • lr (float, optional) – learning rate (default: 1e-3)

    • +
    • betas (Tuple[float, float], optional) – coefficients used for computing +running averages of gradient and its square (default: (0.9, 0.999))

    • +
    • eps (float, optional) – term added to the denominator to improve +numerical stability (default: 1e-8)

    • +
    • weight_decay (float, optional) – weight decay coefficient (default: 1e-2)

    • +
    • amsgrad (boolean, optional) – whether to use the AMSGrad variant of this +algorithm from the paper On the Convergence of Adam and Beyond +(default: False)

    • +
    +
    +
    +
    +
    +step(closure=None)[source]
    +

    Performs a single optimization step.

    +
    +
    Parameters
    +

    closure (callable, optional) – A closure that reevaluates the model +and returns the loss.

    +
    +
    +
    + +
    + +
    +
    +class torch.optim.SparseAdam(params, lr=0.001, betas=(0.9, 0.999), eps=1e-08)[source]
    +

Implements a lazy version of the Adam algorithm suitable for sparse tensors.

    +

    In this variant, only moments that show up in the gradient get updated, and +only those portions of the gradient get applied to the parameters.
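A minimal sketch (the layer sizes are assumed): SparseAdam pairs naturally with modules that emit sparse gradients, such as an Embedding constructed with sparse=True.

import torch

# Embedding with sparse=True produces sparse gradients for its weight.
emb = torch.nn.Embedding(10000, 128, sparse=True)
optimizer = torch.optim.SparseAdam(emb.parameters(), lr=1e-3)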

    +
    +
    Parameters
    +
      +
    • params (iterable) – iterable of parameters to optimize or dicts defining +parameter groups

    • +
    • lr (float, optional) – learning rate (default: 1e-3)

    • +
    • betas (Tuple[float, float], optional) – coefficients used for computing +running averages of gradient and its square (default: (0.9, 0.999))

    • +
    • eps (float, optional) – term added to the denominator to improve +numerical stability (default: 1e-8)

    • +
    +
    +
    +
    +
    +step(closure=None)[source]
    +

    Performs a single optimization step.

    +
    +
    Parameters
    +

    closure (callable, optional) – A closure that reevaluates the model +and returns the loss.

    +
    +
    +
    + +
    + +
    +
    +class torch.optim.Adamax(params, lr=0.002, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)[source]
    +

    Implements Adamax algorithm (a variant of Adam based on infinity norm).

    +

    It has been proposed in Adam: A Method for Stochastic Optimization.

    +
    +
    Parameters
    +
      +
    • params (iterable) – iterable of parameters to optimize or dicts defining +parameter groups

    • +
    • lr (float, optional) – learning rate (default: 2e-3)

    • +
    • betas (Tuple[float, float], optional) – coefficients used for computing +running averages of gradient and its square

    • +
    • eps (float, optional) – term added to the denominator to improve +numerical stability (default: 1e-8)

    • +
    • weight_decay (float, optional) – weight decay (L2 penalty) (default: 0)

    • +
    +
    +
    +
    +
    +step(closure=None)[source]
    +

    Performs a single optimization step.

    +
    +
    Parameters
    +

    closure (callable, optional) – A closure that reevaluates the model +and returns the loss.

    +
    +
    +
    + +
    + +
    +
    +class torch.optim.ASGD(params, lr=0.01, lambd=0.0001, alpha=0.75, t0=1000000.0, weight_decay=0)[source]
    +

    Implements Averaged Stochastic Gradient Descent.

    +

    It has been proposed in Acceleration of stochastic approximation by +averaging.

    +
    +
    Parameters
    +
      +
    • params (iterable) – iterable of parameters to optimize or dicts defining +parameter groups

    • +
    • lr (float, optional) – learning rate (default: 1e-2)

    • +
    • lambd (float, optional) – decay term (default: 1e-4)

    • +
    • alpha (float, optional) – power for eta update (default: 0.75)

    • +
    • t0 (float, optional) – point at which to start averaging (default: 1e6)

    • +
    • weight_decay (float, optional) – weight decay (L2 penalty) (default: 0)

    • +
    +
    +
    +
    +
    +step(closure=None)[source]
    +

    Performs a single optimization step.

    +
    +
    Parameters
    +

    closure (callable, optional) – A closure that reevaluates the model +and returns the loss.

    +
    +
    +
    + +
    + +
    +
    +class torch.optim.LBFGS(params, lr=1, max_iter=20, max_eval=None, tolerance_grad=1e-05, tolerance_change=1e-09, history_size=100, line_search_fn=None)[source]
    +

Implements the L-BFGS algorithm, heavily inspired by minFunc (https://www.cs.ubc.ca/~schmidtm/Software/minFunc.html).

    +
    +

    Warning

    +

    This optimizer doesn’t support per-parameter options and parameter +groups (there can be only one).

    +
    +
    +

    Warning

    +

    Right now all parameters have to be on a single device. This will be +improved in the future.

    +
    +
    +

    Note

    +

    This is a very memory intensive optimizer (it requires additional +param_bytes * (history_size + 1) bytes). If it doesn’t fit in memory +try reducing the history size, or use a different algorithm.

    +
    +
    +
    Parameters
    +
      +
    • lr (float) – learning rate (default: 1)

    • +
    • max_iter (int) – maximal number of iterations per optimization step +(default: 20)

    • +
    • max_eval (int) – maximal number of function evaluations per optimization +step (default: max_iter * 1.25).

    • +
    • tolerance_grad (float) – termination tolerance on first order optimality +(default: 1e-5).

    • +
    • tolerance_change (float) – termination tolerance on function +value/parameter changes (default: 1e-9).

    • +
    • history_size (int) – update history size (default: 100).

    • +
    • line_search_fn (str) – either ‘strong_wolfe’ or None (default: None).

    • +
    +
    +
    +
    +
    +step(closure)[source]
    +

    Performs a single optimization step.

    +
    +
    Parameters
    +

    closure (callable) – A closure that reevaluates the model +and returns the loss.

    +
    +
    +
    + +
    + +
    +
    +class torch.optim.RMSprop(params, lr=0.01, alpha=0.99, eps=1e-08, weight_decay=0, momentum=0, centered=False)[source]
    +

    Implements RMSprop algorithm.

    +

    Proposed by G. Hinton in his +course.

    +

    The centered version first appears in Generating Sequences +With Recurrent Neural Networks.

    +
    +
    Parameters
    +
      +
    • params (iterable) – iterable of parameters to optimize or dicts defining +parameter groups

    • +
    • lr (float, optional) – learning rate (default: 1e-2)

    • +
    • momentum (float, optional) – momentum factor (default: 0)

    • +
    • alpha (float, optional) – smoothing constant (default: 0.99)

    • +
    • eps (float, optional) – term added to the denominator to improve +numerical stability (default: 1e-8)

    • +
    • centered (bool, optional) – if True, compute the centered RMSProp, +the gradient is normalized by an estimation of its variance

    • +
    • weight_decay (float, optional) – weight decay (L2 penalty) (default: 0)

    • +
    +
    +
    +
    +
    +step(closure=None)[source]
    +

    Performs a single optimization step.

    +
    +
    Parameters
    +

    closure (callable, optional) – A closure that reevaluates the model +and returns the loss.

    +
    +
    +
    + +
    + +
    +
    +class torch.optim.Rprop(params, lr=0.01, etas=(0.5, 1.2), step_sizes=(1e-06, 50))[source]
    +

    Implements the resilient backpropagation algorithm.

    +
    +
    Parameters
    +
      +
    • params (iterable) – iterable of parameters to optimize or dicts defining +parameter groups

    • +
    • lr (float, optional) – learning rate (default: 1e-2)

    • +
• etas (Tuple[float, float], optional) – pair of (etaminus, etaplus), that are multiplicative decrease and increase factors (default: (0.5, 1.2))

    • +
    • step_sizes (Tuple[float, float], optional) – a pair of minimal and +maximal allowed step sizes (default: (1e-6, 50))

    • +
    +
    +
    +
    +
    +step(closure=None)[source]
    +

    Performs a single optimization step.

    +
    +
    Parameters
    +

    closure (callable, optional) – A closure that reevaluates the model +and returns the loss.

    +
    +
    +
    + +
    + +
    +
    +class torch.optim.SGD(params, lr=<required parameter>, momentum=0, dampening=0, weight_decay=0, nesterov=False)[source]
    +

    Implements stochastic gradient descent (optionally with momentum).

    +

    Nesterov momentum is based on the formula from +On the importance of initialization and momentum in deep learning.

    +
    +
    Parameters
    +
      +
    • params (iterable) – iterable of parameters to optimize or dicts defining +parameter groups

    • +
    • lr (float) – learning rate

    • +
    • momentum (float, optional) – momentum factor (default: 0)

    • +
    • weight_decay (float, optional) – weight decay (L2 penalty) (default: 0)

    • +
    • dampening (float, optional) – dampening for momentum (default: 0)

    • +
    • nesterov (bool, optional) – enables Nesterov momentum (default: False)

    • +
    +
    +
    +

    Example

    +
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
    +
    +
    +
    +

    Note

    +

    The implementation of SGD with Momentum/Nesterov subtly differs from +Sutskever et. al. and implementations in some other frameworks.

    +

    Considering the specific case of Momentum, the update can be written as

    +
\[v = \rho * v + g \\
p = p - lr * v\]
    +

    where p, g, v and \(\rho\) denote the parameters, gradient, +velocity, and momentum respectively.

    +

    This is in contrast to Sutskever et. al. and +other frameworks which employ an update of the form

    +
\[v = \rho * v + lr * g \\
p = p - v\]
    +

    The Nesterov version is analogously modified.

    +
    +
    +
    +step(closure=None)[source]
    +

    Performs a single optimization step.

    +
    +
    Parameters
    +

    closure (callable, optional) – A closure that reevaluates the model +and returns the loss.

    +
    +
    +
    + +
    + +
    +
    +

    How to adjust Learning Rate

    +

torch.optim.lr_scheduler provides several methods to adjust the learning rate based on the number of epochs. torch.optim.lr_scheduler.ReduceLROnPlateau allows dynamic learning rate reduction based on some validation measurements.

    +

    Learning rate scheduling should be applied after optimizer’s update; e.g., you +should write your code this way:

    +
>>> scheduler = ...
>>> for epoch in range(100):
>>>     train(...)
>>>     validate(...)
>>>     scheduler.step()
    +
    +
    +
    +

    Warning

    +

    Prior to PyTorch 1.1.0, the learning rate scheduler was expected to be called before +the optimizer’s update; 1.1.0 changed this behavior in a BC-breaking way. If you use +the learning rate scheduler (calling scheduler.step()) before the optimizer’s update +(calling optimizer.step()), this will skip the first value of the learning rate schedule. +If you are unable to reproduce results after upgrading to PyTorch 1.1.0, please check +if you are calling scheduler.step() at the wrong time.

    +
    +
    +
    +class torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch=-1)[source]
    +

    Sets the learning rate of each parameter group to the initial lr +times a given function. When last_epoch=-1, sets initial lr as lr.

    +
    +
    Parameters
    +
      +
    • optimizer (Optimizer) – Wrapped optimizer.

    • +
    • lr_lambda (function or list) – A function which computes a multiplicative +factor given an integer parameter epoch, or a list of such +functions, one for each group in optimizer.param_groups.

    • +
    • last_epoch (int) – The index of last epoch. Default: -1.

    • +
    +
    +
    +

    Example

    +
>>> # Assuming optimizer has two groups.
>>> lambda1 = lambda epoch: epoch // 30
>>> lambda2 = lambda epoch: 0.95 ** epoch
>>> scheduler = LambdaLR(optimizer, lr_lambda=[lambda1, lambda2])
>>> for epoch in range(100):
>>>     train(...)
>>>     validate(...)
>>>     scheduler.step()
    +
    +
    +
    +
    +load_state_dict(state_dict)[source]
    +

Loads the scheduler’s state.

    +
    +
    Parameters
    +

    state_dict (dict) – scheduler state. Should be an object returned +from a call to state_dict().

    +
    +
    +
    + +
    +
    +state_dict()[source]
    +

    Returns the state of the scheduler as a dict.

    +

    It contains an entry for every variable in self.__dict__ which +is not the optimizer. +The learning rate lambda functions will only be saved if they are callable objects +and not if they are functions or lambdas.

    +
    + +
    + +
    +
    +class torch.optim.lr_scheduler.StepLR(optimizer, step_size, gamma=0.1, last_epoch=-1)[source]
    +

    Sets the learning rate of each parameter group to the initial lr +decayed by gamma every step_size epochs. When last_epoch=-1, sets +initial lr as lr.

    +
    +
    Parameters
    +
      +
    • optimizer (Optimizer) – Wrapped optimizer.

    • +
    • step_size (int) – Period of learning rate decay.

    • +
    • gamma (float) – Multiplicative factor of learning rate decay. +Default: 0.1.

    • +
    • last_epoch (int) – The index of last epoch. Default: -1.

    • +
    +
    +
    +

    Example

    +
>>> # Assuming optimizer uses lr = 0.05 for all groups
>>> # lr = 0.05     if epoch < 30
>>> # lr = 0.005    if 30 <= epoch < 60
>>> # lr = 0.0005   if 60 <= epoch < 90
>>> # ...
>>> scheduler = StepLR(optimizer, step_size=30, gamma=0.1)
>>> for epoch in range(100):
>>>     train(...)
>>>     validate(...)
>>>     scheduler.step()
    +
    +
    +
    + +
    +
    +class torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones, gamma=0.1, last_epoch=-1)[source]
    +

Set the learning rate of each parameter group to the initial lr decayed by gamma once the number of epochs reaches one of the milestones. When last_epoch=-1, sets initial lr as lr.

    +
    +
    Parameters
    +
      +
    • optimizer (Optimizer) – Wrapped optimizer.

    • +
    • milestones (list) – List of epoch indices. Must be increasing.

    • +
    • gamma (float) – Multiplicative factor of learning rate decay. +Default: 0.1.

    • +
    • last_epoch (int) – The index of last epoch. Default: -1.

    • +
    +
    +
    +

    Example

    +
>>> # Assuming optimizer uses lr = 0.05 for all groups
>>> # lr = 0.05     if epoch < 30
>>> # lr = 0.005    if 30 <= epoch < 80
>>> # lr = 0.0005   if epoch >= 80
>>> scheduler = MultiStepLR(optimizer, milestones=[30,80], gamma=0.1)
>>> for epoch in range(100):
>>>     train(...)
>>>     validate(...)
>>>     scheduler.step()
    +
    +
    +
    + +
    +
    +class torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma, last_epoch=-1)[source]
    +

    Set the learning rate of each parameter group to the initial lr decayed +by gamma every epoch. When last_epoch=-1, sets initial lr as lr.

    +
    +
    Parameters
    +
      +
    • optimizer (Optimizer) – Wrapped optimizer.

    • +
    • gamma (float) – Multiplicative factor of learning rate decay.

    • +
    • last_epoch (int) – The index of last epoch. Default: -1.

    • +
    +
    +
    +
    + +
    +
    +class torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max, eta_min=0, last_epoch=-1)[source]
    +

    Set the learning rate of each parameter group using a cosine annealing +schedule, where \(\eta_{max}\) is set to the initial lr and +\(T_{cur}\) is the number of epochs since the last restart in SGDR:

    +
\[\eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 + \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right)\]
    +

    When last_epoch=-1, sets initial lr as lr.

    +

    It has been proposed in +SGDR: Stochastic Gradient Descent with Warm Restarts. Note that this only +implements the cosine annealing part of SGDR, and not the restarts.

    +
    +
    Parameters
    +
      +
    • optimizer (Optimizer) – Wrapped optimizer.

    • +
    • T_max (int) – Maximum number of iterations.

    • +
    • eta_min (float) – Minimum learning rate. Default: 0.

    • +
    • last_epoch (int) – The index of last epoch. Default: -1.

    • +
    +
    +
    +
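A minimal sketch in the style of the other scheduler examples (T_max is chosen here to match the epoch count; train/validate as above):

>>> scheduler = CosineAnnealingLR(optimizer, T_max=100, eta_min=1e-5)
>>> for epoch in range(100):
>>>     train(...)
>>>     validate(...)
>>>     scheduler.step()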
    + +
    +
    +class torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=10, verbose=False, threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-08)[source]
    +

Reduce learning rate when a metric has stopped improving. Models often benefit from reducing the learning rate by a factor of 2-10 once learning stagnates. This scheduler reads a metric quantity and, if no improvement is seen for a ‘patience’ number of epochs, the learning rate is reduced.

    +
    +
    Parameters
    +
      +
    • optimizer (Optimizer) – Wrapped optimizer.

    • +
    • mode (str) – One of min, max. In min mode, lr will +be reduced when the quantity monitored has stopped +decreasing; in max mode it will be reduced when the +quantity monitored has stopped increasing. Default: ‘min’.

    • +
    • factor (float) – Factor by which the learning rate will be +reduced. new_lr = lr * factor. Default: 0.1.

    • +
    • patience (int) – Number of epochs with no improvement after +which learning rate will be reduced. For example, if +patience = 2, then we will ignore the first 2 epochs +with no improvement, and will only decrease the LR after the +3rd epoch if the loss still hasn’t improved then. +Default: 10.

    • +
    • verbose (bool) – If True, prints a message to stdout for +each update. Default: False.

    • +
    • threshold (float) – Threshold for measuring the new optimum, +to only focus on significant changes. Default: 1e-4.

    • +
    • threshold_mode (str) – One of rel, abs. In rel mode, +dynamic_threshold = best * ( 1 + threshold ) in ‘max’ +mode or best * ( 1 - threshold ) in min mode. +In abs mode, dynamic_threshold = best + threshold in +max mode or best - threshold in min mode. Default: ‘rel’.

    • +
    • cooldown (int) – Number of epochs to wait before resuming +normal operation after lr has been reduced. Default: 0.

    • +
    • min_lr (float or list) – A scalar or a list of scalars. A +lower bound on the learning rate of all param groups +or each group respectively. Default: 0.

    • +
    • eps (float) – Minimal decay applied to lr. If the difference +between new and old lr is smaller than eps, the update is +ignored. Default: 1e-8.

    • +
    +
    +
    +

    Example

    +
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> scheduler = ReduceLROnPlateau(optimizer, 'min')
>>> for epoch in range(10):
>>>     train(...)
>>>     val_loss = validate(...)
>>>     # Note that step should be called after validate()
>>>     scheduler.step(val_loss)
    +
    +
    +
    + +
    +
    +class torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr, max_lr, step_size_up=2000, step_size_down=None, mode='triangular', gamma=1.0, scale_fn=None, scale_mode='cycle', cycle_momentum=True, base_momentum=0.8, max_momentum=0.9, last_epoch=-1)[source]
    +

    Sets the learning rate of each parameter group according to +cyclical learning rate policy (CLR). The policy cycles the learning +rate between two boundaries with a constant frequency, as detailed in +the paper Cyclical Learning Rates for Training Neural Networks. +The distance between the two boundaries can be scaled on a per-iteration +or per-cycle basis.

    +

    Cyclical learning rate policy changes the learning rate after every batch. +step should be called after a batch has been used for training.

    +

This class has three built-in policies, as put forth in the paper:

• “triangular”: A basic triangular cycle with no amplitude scaling.

• “triangular2”: A basic triangular cycle that scales initial amplitude by half each cycle.

• “exp_range”: A cycle that scales initial amplitude by gamma**(cycle iterations) at each cycle iteration.

    +
    +
    +

    This implementation was adapted from the github repo: bckenstler/CLR

    +
    +
    Parameters
    +
      +
    • optimizer (Optimizer) – Wrapped optimizer.

    • +
    • base_lr (float or list) – Initial learning rate which is the +lower boundary in the cycle for each parameter group.

    • +
    • max_lr (float or list) – Upper learning rate boundaries in the cycle +for each parameter group. Functionally, +it defines the cycle amplitude (max_lr - base_lr). +The lr at any cycle is the sum of base_lr +and some scaling of the amplitude; therefore +max_lr may not actually be reached depending on +scaling function.

    • +
    • step_size_up (int) – Number of training iterations in the +increasing half of a cycle. Default: 2000

    • +
    • step_size_down (int) – Number of training iterations in the +decreasing half of a cycle. If step_size_down is None, +it is set to step_size_up. Default: None

    • +
    • mode (str) – One of {triangular, triangular2, exp_range}. +Values correspond to policies detailed above. +If scale_fn is not None, this argument is ignored. +Default: ‘triangular’

    • +
    • gamma (float) – Constant in ‘exp_range’ scaling function: +gamma**(cycle iterations) +Default: 1.0

    • +
    • scale_fn (function) – Custom scaling policy defined by a single +argument lambda function, where +0 <= scale_fn(x) <= 1 for all x >= 0. +If specified, then ‘mode’ is ignored. +Default: None

    • +
    • scale_mode (str) – {‘cycle’, ‘iterations’}. +Defines whether scale_fn is evaluated on +cycle number or cycle iterations (training +iterations since start of cycle). +Default: ‘cycle’

    • +
    • cycle_momentum (bool) – If True, momentum is cycled inversely +to learning rate between ‘base_momentum’ and ‘max_momentum’. +Default: True

    • +
    • base_momentum (float or list) – Lower momentum boundaries in the cycle +for each parameter group. Note that momentum is cycled inversely +to learning rate; at the peak of a cycle, momentum is +‘base_momentum’ and learning rate is ‘max_lr’. +Default: 0.8

    • +
    • max_momentum (float or list) – Upper momentum boundaries in the cycle +for each parameter group. Functionally, +it defines the cycle amplitude (max_momentum - base_momentum). +The momentum at any cycle is the difference of max_momentum +and some scaling of the amplitude; therefore +base_momentum may not actually be reached depending on +scaling function. Note that momentum is cycled inversely +to learning rate; at the start of a cycle, momentum is ‘max_momentum’ +and learning rate is ‘base_lr’ +Default: 0.9

    • +
    • last_epoch (int) – The index of the last batch. This parameter is used when +resuming a training job. Since step() should be invoked after each +batch instead of after each epoch, this number represents the total +number of batches computed, not the total number of epochs computed. +When last_epoch=-1, the schedule is started from the beginning. +Default: -1

    • +
    +
    +
    +

    Example

    >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
    >>> scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.01, max_lr=0.1)
    >>> data_loader = torch.utils.data.DataLoader(...)
    >>> for epoch in range(10):
    >>>     for batch in data_loader:
    >>>         train_batch(...)
    >>>         scheduler.step()
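
    As noted for the scale_fn parameter, a custom policy can replace the built-in modes. A minimal sketch; clr_fn and the 0.85 decay constant are illustrative choices, not part of the API:

    >>> clr_fn = lambda x: 0.85 ** x  # stays within 0 <= scale_fn(x) <= 1 for all x >= 0
    >>> scheduler = torch.optim.lr_scheduler.CyclicLR(
    >>>     optimizer, base_lr=0.01, max_lr=0.1, scale_fn=clr_fn, scale_mode='cycle')
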
    get_lr()

    Calculates the learning rate at batch index. This function treats self.last_epoch as the last batch index.


    If self.cycle_momentum is True, this function has a side effect of updating the optimizer’s momentum.
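
    For intuition, a minimal sketch of the symmetric ‘triangular’ case, following the bckenstler/CLR formulation this class was adapted from (the variable names are illustrative; the actual implementation generalizes this with scale_fn and asymmetric step sizes):

    >>> import math
    >>> # learning rate at batch index `it`, with symmetric half-cycles of `step_size` batches
    >>> cycle = math.floor(1 + it / (2 * step_size))
    >>> x = abs(it / step_size - 2 * cycle + 1)
    >>> lr = base_lr + (max_lr - base_lr) * max(0.0, 1 - x)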


\ No newline at end of file
diff --git a/docs/stable/py-modindex.html b/docs/stable/py-modindex.html
new file mode 100644
index 000000000000..55a046675c06
--- /dev/null
+++ b/docs/stable/py-modindex.html
@@ -0,0 +1,631 @@

    Python Module Index

    t
    torch
        torch.__config__
        torch.autograd
        torch.cuda
        torch.distributed
        torch.distributed.launch
        torch.distributions
        torch.distributions.constraint_registry
        torch.distributions.constraints
        torch.distributions.kl
        torch.distributions.transforms
        torch.hub
        torch.jit
        torch.multiprocessing
        torch.nn
        torch.onnx
        torch.optim
        torch.utils.data
        torch.utils.model_zoo
    torchvision
        torchvision.transforms.functional

\ No newline at end of file
diff --git a/docs/stable/search.html b/docs/stable/search.html
new file mode 100644
index 000000000000..469850b702b2
--- /dev/null
+++ b/docs/stable/search.html
@@ -0,0 +1,527 @@

    Search — PyTorch master documentation


\ No newline at end of file
diff --git a/docs/stable/searchindex.js b/docs/stable/searchindex.js
new file mode 100644
index 000000000000..5ba2bc0613b6
--- /dev/null
+++ b/docs/stable/searchindex.js
@@ -0,0 +1 @@
+Search.setIndex({…})
kground:[9,10,11,22,23,32,42,43,44],backpack:46,backprop:43,backpropag:[1,15,30,37],backward:[1,3,5,10,15,22,23,24,29,30,32,33,37,38,42,43],bad:21,baddbmm:[42,43],baddbmm_:42,bag:[22,23],bai:6,balanc:[43,44],ball:46,balnta:22,banana:46,bar:[4,19,20,46],bare:7,barrier:14,bartlett:43,bartlett_window:43,base:[1,4,5,7,8,13,15,19,22,25,27,36,37,41,42,43,44,47],base_distribut:15,base_lr:37,base_momentum:37,base_se:13,basebal:46,basedistribut:15,basep:15,baseq:15,basi:[15,31,37],basic:[4,22,37,41],bat:46,batch1:[42,43],batch2:[42,43],batch:[15,22,23,28,30,32,33,36,37,41,43,44,46,47,48],batch_first:[22,30],batch_ndx:13,batch_sampl:13,batch_shap:15,batch_siz:[13,15,22,41,44],batchmean:[22,23],batchnorm1d:23,batchnorm2d:23,batchnorm3d:23,batchnorm:[22,36],batchnormnd:22,batchsampl:13,batchsiz:[22,23],batchwis:22,bbb:41,bceloss:23,bcewithlogitsloss:23,bckenstler:37,bddppq:6,beam:19,bear:46,becaus:[1,2,4,13,15,19,21,22,26,28,30,31,35,36,41,42,43],becom:[1,4,5,13,15,22,23,36,42,43],bed:46,bedroom_train:44,been:[1,8,14,15,21,22,27,32,35,37,41,43,46],befor:[1,4,8,13,14,15,19,21,22,23,25,27,28,29,31,35,36,37,38,41,42,43],beforehand:4,begin:[4,8,22,31,36,37,42,43],behav:[7,19,42],behavior:[4,7,14,17,19,22,23,26,28,36,37,42,43,46],behaviour:[1,9,10,23,42,43],behind:44,being:[1,5,13,15,19,22,23,29,32,36,42,43,47],belong:[3,8,14,15,28,37,47],below:[1,7,13,14,15,19,21,22,23,28,29,32,35,36,43,47],ben:22,bench:46,benchmark:33,benefit:[4,14,21,37],bengio:24,bernoulli:[22,23,42,43],bernoulli_:[42,43],besid:41,bessel:43,best:[1,4,13,14,18,19,21,30,37,43],beta:[22,23,36,37,38,42,43],better:[4,5,8,13,19,22,23,27,35,41,43],between:[1,4,8,14,15,19,21,22,23,28,32,33,37,39,41,42,43,44,46,47],bewar:4,beyond:[5,30,37,43],bfg:37,bfloat16:[39,42],bia:[5,22,23,29,41],bias:[22,43],bias_hh:22,bias_hh_l:22,bias_ih:22,bias_ih_l:22,bicub:[22,23,47],bicycl:46,bidirect:[22,36],big:[4,43],bij:43,biject:15,biject_to:15,bik:43,bilinear:[43,47],bin:[41,42,43],binari:[15,19,22,23,31,35,36,41,42,43,44,46],bincount:[33,42,43],bind:[7,8,36],bird:46,bit:[4,35,40,42,43,49],bitwis:[14,43],bitwise_not:[42,43],bitwise_not_:42,bjk:43,bl_flip:47,bla:27,black:47,blackman:43,blackman_window:43,blank:[22,23],blob:[31,36,41],blobnam:41,block0:[19,36],block1:19,block:[4,8,13,14,19,21,22,23,36,46],blog:4,blow:30,blue:44,bmm:[42,43],board:5,boat:46,bodi:19,boil:4,book:46,bool:[1,3,8,13,14,15,17,19,20,21,22,23,36,37,39,40,41,42,43,44,46,47,48],booltensor:[40,42],bootcamp:4,bootstrap:35,border:[23,47],both:[1,8,13,14,15,19,22,23,26,29,33,36,38,42,43,44,46,47],bottl:46,bottleneck:[18,46],bottom:[1,23,47],bound:[2,22,23,24,34,37,42,43],boundari:[22,23,37,44],bowl:46,box:46,bozkurt:6,bptt:30,br_flip:47,branch:[4,17,19,46],brand:14,bregman:15,breviti:[1,36],brief:21,bright:[44,47],brightness_factor:47,broadcast:[8,14,15,18,22,36,42,43],broadcast_buff:22,broadcast_coalesc:8,broadcast_multigpu:14,broadcast_tensor:43,broadcast_warn:26,broader:[5,31,43],broccoli:46,broken:4,brokenpipeerror:35,brown:44,bucket:22,bucket_cap_mb:22,buf:22,buffer:[1,2,8,13,19,22,25,29,43],buffer_s:8,bug:[5,32],bugfix:4,build:[7,14,15,19,23,25,41,47],build_directori:7,build_ext:7,buildextens:7,built:[4,14,19,27,32,37,43],builtin:43,bulk:13,bump:22,bundl:31,bus:46,byclass:44,bymerg:44,bypass:28,byte_arrai:43,bytecod:13,bytesio:[19,43],bytetensor:[8,22,40,42,43],bz2:44,c10:31,c10_log_api_usage_onc:31,c99:35,c_0:22,c_1:22,c_j:22,c_n:22,c_t:22,cach:[8,15,20,21,22,30,43,46],cache_s:15,caffe2:[36,41],cake:46,calcul:[1,3,13,22,23,26,35,37,43],calculate_gain:24,call:[1,7,8,13,14,15,17,21,22,23,2
7,28,29,30,31,32,35,36,37,40,41,42,43,46,48,49],callabl:[13,15,17,19,37,42,43,44],callback:31,caller:28,camera:[31,41],can:[1,2,3,4,5,7,8,11,12,13,14,15,16,17,19,21,22,23,25,26,27,28,29,30,31,32,33,34,35,36,37,38,40,41,42,43,44,46,47,49],candid:1,cannot:[1,13,15,17,19,22,23,35,38,39,42,43],cap:44,capabl:[8,14,31,43],capac:28,capacit:15,captur:[8,19,36],car:46,card:35,cardin:15,care:[4,7,15,21,22,28,30,32,38,43],carlo:15,carri:26,carrier:15,carrot:46,cartesian:[15,43],cartesian_prod:43,cast:[1,22,23,36,39,42,43],cat:[15,19,22,36,38,42,43,44,46],categor:[4,23],categori:[15,43,44,46],categorynam:41,cattransform:15,cauchi:[42,43],cauchy_:[42,43],caught:21,caus:[1,3,13,14,19,21,23,26,30,32,35,36,42,43],caveat:[21,28],ccc:41,cdf:15,cdot:[22,23,43],ceil:[13,22,23,36,42,43],ceil_:42,ceil_mod:[22,23],cell:[22,46],center:[23,37,41,42,43,47],center_flip:47,centercrop:47,central:[31,47],cerr:31,certain:[13,14,19,22,23,26,31,38,43],certainli:43,chain:[1,13,15,22,25,42,43,47],chain_matmul:43,chaindataset:13,chair:46,challeng:4,chanan:[5,6],chanc:[4,15],chang:[1,4,8,15,19,21,22,23,25,26,28,35,36,37,38,39,41,42,43,46,47],channel:[5,13,22,23,24,36,41,44,46,47],charact:[23,43],chart:41,chartensor:[40,42],chartnam:41,cheap:[15,22],cheaper:13,check:[2,7,8,13,14,15,17,22,29,30,31,36,37,41,42,43,44],check_compiler_abi_compat:7,check_input:19,check_model:36,check_reduct:22,check_sparse_nnz:1,check_toler:19,check_trac:19,checker:[19,36],checkout:36,checkpoint:[1,17,18,20,22,43],checkpoint_sequenti:3,child:[13,21,22,35],children:[21,22],chintala:[5,6],choic:[19,22,27,36],choleski:[15,42,43],cholesky_invers:[42,43],cholesky_solv:[42,43],choos:[1,24,41],chosen:[43,47],christian:6,chrome:1,chunk:[3,8,13,19,22,42,43],chunk_siz:8,church_train:44,chw:41,cifar100:44,cifar10:44,cifar:45,circleci:4,circular:23,circumst:[11,19,22,23],cityscap:45,claim:4,clamp:[23,36,42,43],clamp_:42,clamp_max:36,clamp_min:36,class_i:44,class_index:[13,44],class_x:44,classif:[22,23,24,44,45],classifi:[25,36,37,41],classmethod:[8,22],clean:[8,14,17,21],cleaner:25,clear:[17,22,28,37],click:43,clip:[22,44],clip_valu:22,clock:46,clockwis:47,clone:[1,13,21,23,38,39,42,43],cloned_coeffici:43,close:[8,29,41],closest:[23,43],cloud:41,clp:44,clr:[37,43],cluster:[22,41],clutter:41,cmake:35,cmake_gener:35,cmake_include_path:35,cmdclass:7,cmyk:47,cnn:[22,25],coalesc:[8,38,42],coars:44,coco:[45,46],coco_instance_category_nam:46,coco_person_keypoint_nam:46,coco_util:46,cococapt:44,cocodetect:44,code:[1,2,5,7,13,14,15,17,22,26,29,30,31,32,33,35,36,37,38,40,42,43],codebas:5,codec:43,codomain:15,coeffici:[37,43],cohes:5,col2im:22,col:[43,44],colesburi:[5,6],collat:13,collate_wrapp:13,collect:[1,4,13,37,41,43,44],color:[22,41,44,47],colorjitt:47,colors_tensor:41,column:[1,22,23,24,42,43,47],com:[4,5,20,35,36],combin:[13,19,22,23,28,36,43],combinations_with_replac:43,come:[4,13,22,31,44],comm:8,comma:[14,43],command:[1,2,35],comment:[4,19,29,41],commit:[4,5,17,33],committ:5,common:[13,22,28,30,32,43,44,45,47],commonli:[14,15,37,40],commun:[4,5,18],compani:5,compar:[1,3,13,19,22,29,35,41,43],comparison:29,compat:[7,13,15,19,21,39,42,43,44],compil:[7,19,27,31,35],compilationunit:19,compiled_with_cxx11_abi:43,complementari:[43,47],complet:[4,8,14,21,25,33,43,47],complex:[4,22,32,43,47],complic:[2,26],compon:[4,14,15,22,31,43],compos:[15,19,22,23,36,41,43,47],composetransform:15,composit:[15,19],compris:3,comput:[3,4,8,13,14,15,19,22,23,25,27,28,29,30,33,36,37,38,42,45,46,47,48],compute_uv:[42,43],compute_z:27,concat:[22,36],concatdataset:13,concaten:[7,8,13,22,23,
43],concentr:15,concentrarion:15,concentration0:15,concentration1:15,concept:[4,36,40],conceptu:[1,25],concern:[13,21],concret:[15,19,22,23,32],concurr:[27,28],cond:36,conda:[35,36,43],condit:[1,12,19,22,29,36,41,42,43],condition:1,conduct:[5,22],confer:5,confid:[4,41],config:35,config_dict:41,configur:[0,4,13,14,22,28,35,41,43,46],confirm:[4,19,36],conform:22,conjug:[37,43],conjunct:[13,23],connect:[14,21,22,25,46],connectionist:[22,23],conquer:43,consecut:[14,42,43],consensu:4,consid:[17,19,22,23,26,29,30,37,42,43,44],consider:[4,22],consist:[13,19,36,37,43,45],consol:41,constant:[13,22,23,29,36,37,43,47],constant_:24,constantpad2d:23,constantpadnd:36,constrain:[15,22],constraint:22,constraint_registri:15,constraintregistri:15,construct:[1,13,15,22,25,32,38,40,41,42,43,46],construct_transform:15,constructor:[7,13,22,28,38,42,46,49],consum:[13,16,21,32,36,41],consumpt:[1,41],contain:[1,3,7,8,13,14,15,19,23,25,29,30,31,36,37,39,40,41,42,43,44,46,47],content:[4,19,20,21,37,41,42,43],contenti:5,context:[1,8,21,22,28,29,31,32,36,43],contigu:[22,23,39,42,43],continu:[13,15,19,22,36,42,43],continuum:35,contract:43,contrail:44,contrain:43,contrari:[4,27],contrast:[15,37,46,47],contrast_factor:47,contribut:[1,5,18,22,23,43],contributor:[4,5],control:[13,19,22,25,27,28,32,36,43,47],conv1:[19,22,41],conv2:[19,22],conv2d:[19,36,41],conv4:22,conv5:22,conv:[19,22,24,36],conveni:[4,7,17,19,28,29,31],convent:[1,20,22,36,42,43],converg:37,convers:[4,25,36,42,45],convert:[1,13,19,22,29,36,41,42,43,47],convert_sync_batchnorm:22,convolut:[24,27],convolv:[22,23],convtranspos:22,convtranspose1d:23,convtranspose2d:23,convtranspose3d:23,coo:[38,40,42,43],cooldown:37,coordin:[4,15,38,41,42,43,47],cope:32,copi:[4,8,13,14,19,21,22,26,28,32,39,42,43],copy_:[1,19,22,28,39,42],core:[4,19,27,36],corner:[22,23,47],corpor:[4,5],correct:[2,4,14,15,19,22,39,42,43,47],correctli:[3,14,19,22,23,32,36],correl:[1,15,22],correspond:[1,4,8,13,15,17,19,22,23,29,31,36,37,39,41,42,43,47],corrupt:[22,32,44],cos:[22,36,37,41,42,43],cos_:42,cosh:[42,43],cosh_:42,cosin:[22,23,37,43],cosineannealinglr:37,cosineembeddingloss:23,cost:[1,2,11,22,23,43],couch:46,could:[2,4,8,13,15,21,35,43],couldn:[35,36],count:[1,8,15,43],count_include_pad:[22,23],counter:[1,8,21,22,25,47],counterpart:43,coupl:[31,33],cours:[2,17,37],courtesi:15,cov_diag:15,cov_factor:15,covari:[15,22,47],covariance_matrix:15,cover:[29,31,44],coverag:4,cow:46,cpp:[4,5,7,43],cpp_extens:[18,29],cpp_sourc:7,cppdoc:4,cppextens:7,cprofil:2,cpu:[1,2,8,14,18,19,21,22,23,28,32,33,35,36,39,40,42,43],cpu_model:19,cpu_tim:1,cpu_time_tot:1,cpuhrsch:6,crack:4,crash:[21,41],crcv:44,creat:[1,3,4,7,8,13,14,15,21,22,25,28,32,36,39,41,42,43,44,49],create_extens:35,create_graph:[1,42],creation:[1,8,13,19,21,22,28,42,44],creator:25,crelu:22,criterion:[22,23,30],critic:22,crop:[46,47],cross:[4,15,22,23,28,35,42,43],crossentropyloss:23,crossmaplrn2d:19,csrc:[35,36],ctc_loss:[22,33],ctcloss:23,ctx:[1,29],cube:[22,43],cubla:8,cublashandle_t:8,cuda0:[28,42],cuda100:35,cuda101:35,cuda1:40,cuda2:28,cuda80:35,cuda90:35,cuda92:35,cuda:[1,2,3,7,9,10,11,13,14,18,19,22,23,29,33,36,37,39,40,42,43,46],cuda_extens:7,cuda_hom:7,cuda_launch_block:28,cuda_prefix:35,cuda_runtim:7,cuda_sourc:7,cuda_tim:1,cuda_time_tot:1,cuda_visible_devic:[8,28],cudaev:1,cudaeventsynchron:8,cudaextens:7,cudart:[7,35],cudastreamsynchron:8,cudastreamwaitev:8,cudnn:[11,12,22,23,46],cufft:43,cufft_plan_cach:28,cuh:7,cultur:5,cumprod:[42,43],cumsum:[42,43],cumul:[15,22,23,43],cup:46,cur:37,curl:35,current:[1,3,5,7,8,13,14,19,21,22,23,
28,31,33,35,36,37,38,39,40,41,42,43,44],current_blas_handl:8,current_datetime_hostnam:41,current_devic:[8,40],current_stream:8,curv:41,custom:[7,13,14,21,22,31,35,37,42],custom_decod:22,custom_encod:22,custom_loop:36,custom_op:36,cut:4,cutoff:[22,43],cxx:7,cycl:37,cycle_momentum:37,cyclic:[37,47],cycliclr:37,d_1:[22,23],d_2:[22,23],d_k:[22,23],d_model:22,daemon:21,dag:1,dai:4,dampen:37,dark:47,darker:47,dart:44,data1:44,data2:44,data:[1,4,12,14,15,18,19,21,22,23,25,26,28,29,31,32,34,35,36,37,38,39,40,41,42,43,44,47],data_load:[32,37,44],data_parallel:30,data_ptr:[39,42],data_sourc:13,databas:[13,44],dataformat:41,dataload:[13,22,28,30,35,37,41,42,44],dataparallel:[14,30,32],dataset:[18,22,30,31,35,37,41,45,46,47],dataset_it:13,datasetfold:45,datatyp:[22,36,43],datetim:14,datset:44,david:[6,22],dcgan:36,ddp:22,ddp_sync_bn_network:22,deactiv:47,deadlock:[14,22],deal:[4,21,30,43,47],dealloc:[21,28,30],debug:[1,2,13,14,25,27,35,36],decai:[22,37],decemb:43,decent:13,decid:[2,4,44],decis:19,declar:[1,7,13,19,36],decod:[16,22,43],decoder_lay:22,decomposit:[15,43],deconvolut:[22,23],decor:[1,15,19],decoupl:[22,37],decreas:[15,22,37,43],decreasingli:22,deep:[4,5,18,22,24,37,46],deeper:46,deeplabv3_resnet101:46,deeplabv3_resnet50:46,def:[1,13,15,17,19,22,27,29,30,32,35,36,37,42,47],default_gener:43,default_load:44,default_stream:8,defin:[1,7,13,15,17,21,22,23,35,36,37,38,42,43,44,47],define_macro:35,definit:[4,13,15,17,19,22,23,36,41,43,46],degre:[15,22,43,47],del:[21,30],delet:[14,17,21],deliv:5,delta:[15,22,24,37],delv:24,demand:[8,31],demonstr:22,denomin:[22,37,43],denorm:43,denot:[1,15,19,22,37],dens:[22,38,40,42,43,46],dense_dim:[38,42,43],densenet121:46,densenet161:46,densenet169:46,densenet201:46,densenet:36,densiti:15,depend:[1,2,3,13,14,15,17,19,21,22,23,27,28,33,36,37,38,41,42,43,46],dependent_properti:15,deploi:[4,31],deploy:18,deprec:[14,22,23,26,36,39,42,43,47],depth:[8,22,23,46,47],depthwis:22,dequant:42,deriv:[1,5,19,22,29,42,43],derivedp:15,derivedq:15,descend:[22,42,43],descent:[15,37],describ:[3,4,8,13,19,22,23,24,30,31,36,42,43,46],descript:[0,4,7,19,28,29,31,36,49],descriptor:[13,22,36,44],deseri:[20,43],design:[1,4,5,13,15,17,20,46],desir:[8,13,14,15,22,23,28,36,38,39,42,43,47],desmaison:6,despit:19,destin:[8,14,22,39,42,43],destroi:22,destructor:21,det:[15,42,43],detach:[1,19,22,23,30,42,43],detach_:[1,42],detail:[0,1,4,8,13,15,19,22,23,29,30,31,37,38,41,42,43,46,47],detect:[3,7,14,21,36,45],detect_anomali:1,detector:22,determin:[1,5,7,8,13,15,22,23,28,33,41,43,47],determinist:[3,11,15,19,22,23,33,37,43],dev:5,dev_idx:14,develop:[28,31,36],deviat:[15,22,24,42,43,47],devic:[1,3,8,14,19,22,23,30,33,36,37,39,42,43],device_count:[8,14],device_id:[22,23,43],device_of:8,devito:6,df1:15,df2:15,dfrac:[22,23,42],diag:[15,42,43],diag_emb:[42,43],diagflat:[42,43],diagn:15,diagnost:19,diagon:[15,23,42,43],dict:[15,20,22,29,36,37,41,43,46],dictat:22,dictionari:[7,13,15,22,23,36,37,41,44,46],did:[1,4,19],didn:[25,29,35,37],dies:21,dieterich:6,diff:[4,19],differ:[1,3,7,8,13,14,15,17,19,21,22,23,26,27,28,29,32,33,35,36,37,38,40,41,42,43,44,46],differenti:[15,22,23,25,29,30,42],difficult:[1,4],difficulti:[4,24],digamma:[42,43],digamma_:42,digit:[20,31,43,44],dilat:[22,23,36],dim0:[42,43],dim1:[42,43],dim2:[42,43],dim:[8,15,19,22,23,30,36,38,42,43],dim_arang:36,dim_feedforward:22,dimems:42,dimens:[1,8,13,15,19,22,23,24,26,30,38,40,41,42,43],dimension:[1,15,22,23,24,26,39,40,42,43],dims_i:36,dine:46,diningt:46,dir:[17,36,44],dirac:24,dirac_:24,direct:[4,5,22,25,29,43,47],directli:[4,5,7,13,14,15,1
9,22,23,28,31,32,36,38,41,43],directori:[7,14,20,31,34,41,44,46],dirnam:17,dirti:25,disabl:[22,28],disable_cuda:28,disable_jit_exampl:19,disadvantag:19,discard:[17,19],discourag:[1,8,25],discov:14,discrep:43,discret:[15,22,23,42,43],discrimin:22,discuss:[5,15],disjoint:19,disk:[1,13,41,43],dispatch:[14,36],displai:[20,23,36,46,48],displaystyl:43,dissimilar:22,dist:[14,15,42,43],distanc:[37,43,44],distinct:43,distort:47,distortion_scal:47,distribut:[13,18,24,38,41,42,43],distributed_test:14,distributeddataparallel:[13,14],distributedsampl:13,div:[22,36,38,42,43],div_:[38,42],div_valu:22,diverg:[19,22,23,36],divid:[3,8,22,23,43],dividend:43,divis:[13,22,23,43],divisor:[22,23,42,43],divisor_overrid:[22,23],dlibenzi:6,dll:35,dlpack:18,dltensor:16,dmytro:[5,6],dnn:27,do_constant_fold:36,doc:[2,21,22,29,36,41],doc_str:36,docstr:[7,17],document:[8,13,14,17,19,21,22,29,30,36,43,48],doe:[1,2,3,4,5,8,14,15,17,19,21,22,23,26,27,28,32,36,38,42,43,45,47],doesn:[1,3,4,8,13,14,19,22,23,26,29,31,32,35,36,37,43],dog:[44,46],doing:[4,13,19,23,35],domain:[5,15],don:[1,2,4,14,17,21,22,23,25,29,30,32,35,36,37,42,43,47],done:[13,15,19,21,22,30,33,36,42,43,47],donut:46,dot:[22,42,43,47],doubl:[1,22,23,29,39,40,42,43],doubler:1,doubletensor:[40,42,43],dow:41,down:[1,4,13,15,23,32,41],download:[20,35,41,44,46],downsampl:22,doxygen:4,dp_m:30,dpotri:43,draw:[13,41,42,43],drawn:[13,24,42,43],drier:46,drive:[5,14],driven:5,drop:[13,22,32,44],drop_last:13,dropout:[3,36],dset:44,dst1:8,dst2:8,dst:14,dst_tensor:14,dst_type:22,dtype:[12,13,19,22,23,28,29,36,38,39,41,42,43,47,49],due:[2,3,4,15,19,22,28,33,43],dummi:13,dummy_input1:36,dummy_input2:36,dummy_input:36,dump:35,dump_patch:22,duplic:[13,22,30,38,42,43],dure:[1,3,7,14,19,22,23,27,28,31,36,38,42,43,46],dynam:[7,13,19,24,36,37,43],dynamic_ax:36,dynamic_threshold:37,dzhulgakov:[5,6],each:[1,3,7,8,13,14,15,16,17,19,22,23,24,25,26,27,28,29,30,31,32,36,37,38,40,41,42,43,44,46,47,48],eager:43,eagerli:8,earli:[19,22],earlier:[1,30,36],eas:[1,27],easi:[13,19,30,31,32,36],easier:[4,13,19,22,26,29,43],easili:[4,9,10,14,22,23,37,41,42,43],ecosystem:31,edg:[1,23,47],edgeitem:43,edouard:22,edu:[22,44],edward:[5,6],effect:[1,4,7,13,17,19,22,23,28,37,39,41,42],effici:[1,13,15,22,25,29,38,40,42,43,46],eig:[42,43],eigenvalu:43,eigenvector:[42,43],eight:20,einstein:43,einsum:43,either:[1,7,13,14,15,17,19,22,23,24,26,28,29,31,32,36,37,42,43,49],elaps:8,elapsed_tim:8,eleg:32,element:[1,8,13,14,15,22,23,24,26,27,38,39,40,41,42,43,44,47],element_s:[39,42],elementari:43,elementwis:[8,22,23,43],elementwise_affin:22,eleph:46,elf:30,elif:19,elimin:[14,42,43],ell:22,ell_c:22,ellips:43,ellipsi:43,elman:22,els:[4,7,13,15,19,21,22,23,28,29,39,42,43,44,47],elsewher:[17,43],elu:36,elu_:23,embed:[27,36,41],embed_dim:22,embedding_bag:33,embedding_dim:[22,23],embedding_matrix:23,embedding_sum:22,embeddingbag:23,emerg:35,emit:[1,7,19,44],emit_nvtx:[1,2],emnist:45,empir:22,emploi:37,employe:5,empti:[14,19,22,23,24,26,28,36,38,42,43],empty_cach:[8,28],empty_lik:43,emptydatastructur:19,enabl:[1,12,13,14,22,26,27,28,31,35,37,42,43],enable_grad:[1,43],enable_tim:8,encod:[14,19,22,29,43],encoder_lay:22,encount:[14,22,23,43],encourag:5,end:[4,5,8,13,14,19,21,22,30,35,42,43],end_dim:[42,43],end_ev:8,endl:31,endocd:22,endpoint:47,enforc:22,enforce_sort:22,engin:[1,42,43],enhanc:47,enough:[19,21,25,29,35,37,43,47],enqueu:[8,28],ensur:[1,2,4,5,13,14,19,20,21,22,25,28,32,33,42,43,46],enter:14,entir:[3,4,7,13,19,22,23,30,31,34,47],entiti:44,entranc:4,entri:[1,14,15,25,37,38,41,43,44],entropi:[15,22,23],entrypoint:
21,entrypoint_nam:17,enumer:[13,15,22,28,35,41],enumerate_support:15,env:[14,15],enviro:4,environ:[4,7,15,17,19,20,27,28,35,46],environment:8,epoch:[13,37,41],eps:[1,22,23,29,37,43,49],epsilon:[22,23,43],eq_:42,equal:[8,14,15,22,23,26,41,42,43],equal_nan:[42,43],equat:[43,47],equival:[3,13,15,19,22,23,36,40,42,43],eras:47,erf:[36,42,43],erf_:42,erfc:[42,43],erfc_:42,erfinv:[42,43],erfinv_:42,errno:35,error:[1,4,13,15,19,21,22,23,25,28,29,36,42,43,46],especi:[5,13,14,23,25,36,42,43],essenti:[13,35],estim:[15,22,37,43],eta:37,eta_:37,eta_min:37,eta_t:37,etaminu:37,etapli:37,etc:[13,14,15,19,22,29,30,32,36,37,41,43],eth0:14,eth1:14,eth2:14,eth3:14,ethernet:14,euclidean:23,eval:[19,22,36,46],evalu:[2,15,22,23,25,29,37,43,44,46],even:[1,13,14,19,23,28,29,30,32,33,40,42,43],event:[1,15,19,21,41],event_dim:15,event_file_writ:41,event_nam:31,event_shap:15,eventfilewrit:41,eventlist:1,eventu:[1,4],everi:[1,13,14,15,22,23,25,28,29,36,37,39,41,42,43,44,46],everyon:4,everyth:[4,13,15,19,21],everytim:14,everywher:[23,43],evid:4,evolv:36,exact:[1,22,24,32,34,38,43],exactli:[1,7,14,15,22,23,25,28,36,43,44],examin:36,exampl:[1,3,4,7,8,13,14,15,17,19,20,22,23,24,25,26,27,28,29,30,31,32,35,37,38,40,41,42,43,44,46,47,48],example_forward_input:19,example_input:19,example_output:36,example_weight:19,except:[1,3,4,14,15,19,21,22,23,28,35,36,38,42,43,46,47],exchang:[14,43],exclud:[19,22,23,43,44],exclus:[13,14,15,25,43],execut:[1,2,3,4,7,8,13,14,19,22,25,26,27,30,32,33,35,36,43],exist:[1,4,5,7,13,14,15,17,19,21,22,26,31,36,42,43,44],exit:[1,2,21,22,32,36],exp1:41,exp2:41,exp:[1,15,22,23,36,42,43],exp_:42,exp_famili:15,exp_rang:37,expand:[1,5,15,17,26,36,42,43,47],expand_a:[29,36,42,43],expans:47,expect:[1,4,13,14,19,22,23,30,37,41,43,44,46,47],expens:[2,13,15,31],experi:[22,41],experiment:[35,36,38,40],expert:5,explain:[17,28],explan:29,explicit:[28,36,43],explicitli:[8,14,19,23,28,31,36,38,43],explod:[22,41],explor:17,expm1:[42,43],expm1_:42,expon:[15,22,23,42,43],exponenti:[22,42,43],exponential_:[42,43],exponentiallr:37,export_chrome_trac:1,export_param:36,export_raw_ir:36,expos:[1,28,31],express:[25,42,43],exptransform:15,ext:[20,44],ext_modul:7,extend:[1,15,18,22,31,32,36],extens:[7,15,43,44],extension_kernel:7,extent:27,extern:[19,27,35],extra:[4,14,19,22,29,30,31,43,44],extra_cflag:7,extra_compile_arg:[7,35],extra_cuda_cflag:7,extra_fil:19,extra_include_path:7,extra_ldflag:7,extra_repr:[22,29],extract:[22,23],extrafilesmap:[19,31],extrem:[2,41],extrud:30,eye:[15,43],eye_:24,eyes:4,ezyang:[5,6],f_t:22,face:41,facebook:5,faces_tensor:41,facil:[21,43],facilit:[17,19,43],fact:[1,19,29,42,43],factor:[15,22,23,24,37,43,47],factori:[1,15,28],fail:[1,4,15,19,21,32,35,36,43],failur:[1,5,14,15,19,21,22],fake:44,fakedata:45,fall:[4,22,23,36,43],fallback:14,fals:[1,3,7,8,13,14,15,17,19,21,22,23,25,30,33,36,37,39,41,42,43,44,46,47,48],famili:15,familiar:[19,25],fan:24,fan_in:24,fan_out:24,fang:6,faq:[13,18,22],far:23,fashion:[4,14,19,42,45],fashionmnist:44,fast:[4,13,22,28,40,43],fast_forward:43,faster:[13,22,23,28,45],fasterrcnn_resnet50_fpn:46,fastest:[22,43],fatal:[21,32],favor:[8,22,23,43,47],favour:43,fcn:46,fcn_resnet101:46,fcn_resnet50:46,fcntl:14,featur:[5,18,19,22,23,29,30,36,41],featuredropout:36,fed:41,fedyunin:6,feed:[19,30],feedback:4,feedforward:[22,24],feel:4,feng:6,fetch:13,few:[1,4,17,25,30,35,42,43],fewer:[15,26,38,42,43,46],ffi:35,fft:[28,42,43],field:[4,14,22,23,32,36,46],figur:[4,27,36,41],file:[1,4,5,7,8,13,17,19,20,31,35,36,39,41,43,44,48],filenam:[7,19,20,39,41,48],filename_suffix:41,fileno:36,f
ilesytem:[17,20],fill:[14,22,23,24,28,42,43,47],fill_:[22,39,42,43],fill_diagonal_:42,fill_row_zero:19,fill_valu:[22,28,42,43],fillcolor:47,filter:[22,23,42,43,47],financi:5,find:[1,4,5,7,14,17,21,22,27,28,29,30,32,36,40,41,43],find_unused_paramet:22,fine:[7,14,17,19,21,22,25,37,44,47],finetun:[22,25],finish:[14,28,35],finit:[1,2,15,29,43],fire:[31,46],first:[1,2,3,4,5,7,8,13,14,15,19,20,21,22,23,28,30,32,34,35,36,37,38,41,42,43,44,47],fisher:15,fit:[1,37,42,43],five_crop:47,fivecrop:47,fix:[19,22,23,30,32,33,35,36,43,44],flag:[1,7,19,21,22,25,28,41,42,43,47],flat:[36,43],flatten:[24,36,42,43,47],flickr30k:44,flickr8k:44,flickr:45,flip:[42,43,47],float16:[12,22,40,42,43,49],float32:[13,22,23,36,40,42,43,49],float64:[22,40,42,43,49],floatstorag:39,floattensor:[1,14,22,38,40,42,43,46,47],floor:[22,23,36,42,43],floor_:42,flow:[19,23,25,36],flush:[1,19,41,43],flush_sec:41,fly:[13,44],fmod:[42,43],fmod_:42,focu:37,focus:19,fold:[19,36,44],folder:[4,7,13,17,41,44],folk:4,follow:[1,4,5,12,13,14,15,17,19,20,21,22,23,24,26,27,28,30,32,35,36,38,40,42,43,44,46,47,49],foo:[17,19,36],foo_forward:36,foomodel:36,foomodul:36,foral:42,forc:[1,7,8,17,19,28,41],force_reload:17,forev:21,forg:[35,36],forget:[19,22],forgotten:35,fork:[13,21,22,27,30,32,35,46],forkingpickl:35,forkserv:[21,22,32],form:[1,4,5,13,15,19,22,23,29,33,36,37,41,43],format:[1,12,17,19,22,29,36,38,40,41,42,43,44,46],former:22,formul:[22,23],formula:[1,15,22,23,29,37,43],forth:[17,37],fortun:30,forum:[4,5,30,32],forward:[1,3,7,19,21,22,23,24,25,27,28,29,30,33,36,38,43],found:[19,22,32,41,43,46],four:47,fourier:43,fp16:22,fp32:22,fpn:46,fps:41,frac:[15,22,23,24,37,42,43,47],frac_:42,fraction:[13,22,24,43,47],frame:[41,43,44],frames_per_clip:44,framework:[4,5,15,36,37],frank:15,free:[1,4,14,15,24,25,30,32,35],freed:[1,21,28,42],freedom:15,freez:[22,25],freeze_support:35,frequenc:[22,23,37,43],frequent:[4,18,22],fresh:17,frisbe:46,fritz:6,fritzo:6,fro:[42,43],frobeniu:43,from:[1,4,5,7,8,13,14,15,19,21,22,23,24,28,29,30,31,32,37,38,40,41,42,43,44,46,47,48],from_buff:39,from_dlpack:16,from_fil:39,from_ipc_handl:8,from_numpi:[42,43],from_pretrain:22,front:[22,42],frozen:[25,35,37],full:[13,14,15,17,19,22,23,36,43,44],full_lik:[36,43],fulli:[13,14,22,25,28,29],func:[1,19,42],functioneventavg:1,functor:15,fundament:4,further:[1,5,7,14,22,32,33,41,43],furthermor:[7,22,33],fuse:47,fusion:19,fut:27,futur:[1,8,19,23,27,36,37,38,42],g_cpu:43,g_cpu_oth:43,g_cuda:43,g_t:22,gain:[5,24,47],galleri:4,gamma:[22,37,43,47],gamma_:43,gan:22,gap:43,garbag:13,gate:[22,23],gather:[8,14,30,31,36,42,43],gather_list:14,gaussian:[15,23],gchanan:[5,6],ge_:42,gel:[42,43],gemm:[27,36],gen_non_contig_grad_output:1,gener:[1,4,7,13,14,15,19,22,23,24,28,30,31,33,35,36,37,38,40,41,42,44,45,46],generate_square_subsequent_mask:22,geometr:[23,42,43],geometri:[28,43],geometric_:[42,43],geq:[22,23,24,43],geqrf:[42,43],ger:[42,43],gesdd:43,gesvd:43,get:[1,7,8,13,14,19,21,22,25,29,30,31,36,37,41,42,43,44,45],get_all_sharing_strategi:21,get_backend:14,get_context:32,get_default_dtyp:[43,49],get_devic:[38,40,42],get_device_cap:8,get_device_nam:8,get_image_backend:45,get_info:[42,43],get_input:36,get_lr:37,get_num_interop_thread:[27,43],get_num_thread:[27,43],get_rank:14,get_rng_stat:[8,43],get_rng_state_al:8,get_sharing_strategi:21,get_stat:43,get_worker_info:13,get_world_s:14,getenv:31,getsourc:31,gil:[13,14,28],gimelshein:6,giraff:46,girshick:22,github:[4,5,17,29,36,37],give:[1,2,4,13,17,19,21,22,25,28,29,36,37,43,44,47],given:[1,4,5,7,8,13,14,15,19,20,21,22,23,24,27,29,31,36,37,38,4
1,42,43,44,47,48],glass:46,global:[3,13,14,15,19,31,32,41,43,46],global_step:41,globalcontext:35,gloo:[14,22],gloo_socket_ifnam:14,glorot:24,glove:46,glu:36,goe:[22,30],going:[4,14,21,25,27,31,35,41],goldsborough:6,gomez:22,good:[4,7,17,21,22,27,31,43],govern:[4,18],gpu1:22,gpu:[1,2,4,8,12,13,18,19,28,33,35,37,39,42,43,46],gpu_model:19,gpudirect:14,grad:[1,3,15,19,22,32,38,42],grad_bia:29,grad_fn:[1,25,38,42],grad_input:[22,29,35],grad_output:[1,22,29,35],grad_tensor:1,grad_vari:1,grad_weight:29,gradcheck:[1,29,43],gradgradcheck:1,gradient:[3,13,14,15,19,22,23,25,29,30,37,38,42],graham:22,grai:47,grain:[14,25,47],grangier:22,grant:5,graph:[1,3,15,22,25,29,36,41,42,43],graphic:35,graphroot:1,grave:22,grayscal:[41,47],great:4,greater:[2,22,23,25,36,43],greater_than:15,greater_than_eq:15,greaterthan:15,greaterthaneq:15,greg:[5,6],gregori:6,grep:30,grid:[23,41,43,48],grid_i:43,grid_x:43,gross:[5,6],ground:[4,41,46],group:[1,5,17,20,21,22,23,36,37,41,42,43],group_by_input_shap:1,group_nam:14,grow:[4,38],gru:19,gt_:42,gtcoars:44,gtfine:44,guarante:[3,13,14,15,22,27,33],guard:32,guid:[1,13,18],guidanc:4,guidelin:[5,46],gumbel:23,h_0:22,h_1:22,h_i:22,h_n:22,h_t:22,hack:4,had:[4,19],hadamard:22,hair:46,half:[15,22,23,37,39,40,42,43],half_cauchi:15,half_norm:15,half_open_interv:15,halfopeninterv:15,halftensor:[40,42],ham:[23,43],hamiltonian:15,hamming_window:43,hand:[1,2,19,22,36,43],handbag:46,handi:28,handl:[1,3,8,13,14,17,21,22,23,28,30,32,36,42,43,44],handler:31,hang:22,hann:43,hann_window:43,happen:[1,4,5,14,15,21,22,29,30,32,35,42],happi:4,hard:[1,4,19,22,23,25,36],harder:22,hardshrink:42,hardtanh:36,hardtanh_:23,has:[1,3,4,5,8,12,13,14,15,19,21,22,23,25,26,27,29,31,32,35,36,37,38,39,40,41,42,43,44,46,47],has_enumerate_support:15,has_rsampl:15,hash:20,hasn:37,hat:22,have:[1,3,4,5,8,13,14,15,17,19,21,22,23,24,25,26,28,29,30,31,32,33,35,36,37,38,40,41,42,43,44,46,47],head:22,head_1:22,head_bia:22,head_h:22,head_i:22,header:[1,7,35],health:5,healthi:[4,5],heart:13,heavi:[1,14,25,35],heavili:[2,29,37],hei:4,height:[22,23,36,47],held:8,hellemn:6,hello:19,help:[1,2,4,5,13,17,22,25,26,28,36,43],helper:[3,14,17,19,22,28,36],henc:[22,28,43,44],here:[1,4,5,13,14,15,17,19,22,23,29,30,31,35,36,42,43,44,46,48],hessian:24,heurist:[7,13],hflip:47,hidden:[3,22,28,41],hidden_s:22,hierarch:41,high:[2,4,15,21,22,41,42,43],higher:[1,4,8,14,22,29,42],highest:[22,43],highli:[5,17,22,36],hing:22,hingeembeddingloss:23,hinton:37,his:37,histc:[42,43],histogram:[41,43],histor:27,histori:[1,29,30,37],history_s:37,hit:3,hmc:15,hmdb51:45,hmdb:44,hold:[1,22,26,29,30,32,37,40,42,43,47],holist:4,hood:[1,21,32],hook:[1,22,31,42],hop:43,hop_length:[42,43],horizont:47,horizontal:47,hors:46,host:[13,14,22,28,39,42],hot:[15,23,46],houseroad:6,how:[3,4,5,13,14,19,20,21,22,23,29,30,32,36,41,43,46,47],howev:[2,3,4,5,8,13,14,15,19,22,28,32,33,34,35,36,38,42,43,44],hspmm:38,hsv:47,html:[2,4,37,41],http:[2,4,17,20,22,35,36,37,41,44,46,47],hub:[18,20],hub_dir:17,hubconf:17,huber:22,hue:47,hue_factor:47,human:[0,24,36,44],hundr:31,hwc:41,hybrid:38,hydrant:46,hyper:19,hyperbol:43,i_0:43,i_d:43,i_n:43,i_t:22,icdf:15,icml_2006:22,idea:[4,22,31],ident:[1,7,13,14,15,23,24,33,38,42,43],identifi:[4,14,21,22,26,31,41,43],identity_transform:15,idiom:35,ids:[22,23],idx:[13,22],iff:15,ifft:[42,43],ignor:[4,8,19,22,23,29,37,42,43],ignore_index:[22,23],ignored_cod:19,illia:22,im2col:22,imag:[13,22,23,41,44,45,46,48],image_s:44,image_set:44,imagefold:45,imagenet:[14,24,45,46],imagenet_data:44,imagenet_root:44,imaginari:43,imbalanc:22,img:[41,44,47],img_
batch:41,img_height:47,img_hwc:41,img_tensor:41,img_width:47,immedi:[4,5,22],impact:33,imper:14,implement:[1,3,8,13,14,15,19,21,22,23,25,27,29,30,31,32,35,36,37,38,42,43,44,46],impli:21,implic:19,implicit:[22,23,36],implicit_cast:36,implicitcasttyp:36,implicitli:[4,19,22,43],importerror:35,impos:21,improb:13,improv:[5,12,14,22,29,36,37,46],in1:22,in1_featur:22,in2:22,in2_featur:22,in_channel:22,in_featur:[22,29],inaccuraci:1,inc:19,incas:42,incept:[36,47],inception_v3:46,includ:[1,2,3,4,5,7,13,14,17,19,21,22,23,27,28,30,31,33,42,43,44,46],include_path:7,inclus:[15,42,43],incom:[21,22,23],incompat:[5,7,26],incomplet:13,inconsist:43,incorrect:[2,19,23,28,42,43],increas:[1,4,8,11,15,22,23,25,28,37,47],increment:[1,19,22,25],incur:[3,32,43],inde:19,independ:[5,8,13,14,19,22,23,42],index:[8,13,15,18,19,21,22,23,25,28,36,37,38,40,41,42,44],index_add:42,index_add_:[33,42],index_copi:[36,42],index_copy_:42,index_fil:[36,42],index_fill_:42,index_put:42,index_put_:42,index_select:[36,42,43],indic:[1,8,13,14,15,19,22,23,29,36,37,38,41,42,43],individu:[4,5,13,19,22,31,33,42,43],induc:[9,10,23,42,43],inf:[15,22,43],infer:[1,18,19,23,36,38,42,43,46],inferencesess:36,infin:[22,37],infiniband:[14,22],infinit:[13,22,23],influenc:5,info:[8,14,18,43],inform:[1,2,4,13,14,19,22,27,29,31,36,40,41,42,43,44,46,47],infrastructur:5,infti:[22,23],ingredi:22,inherit:[19,29,32],init:[8,14,18,22,31],init_method:[14,22],init_process_group:[14,22],init_weight:22,initi:[2,8,13,19,22,23,24,28,29,31,35,37,42,43,46],initial_accumulator_valu:37,initial_se:[8,13,43],inject:31,inlin:[7,19,27],inline_extens:7,inner:[36,43],innermost:15,inp:[1,13,22],inp_unf:22,inplac:[22,23,36,47],inplace_assign:36,inplaceindexedassign:36,inplaceindexedassignmentonnx:36,input1:[22,23,35,36],input2:[22,23,35,36,42,43],input3:[42,43],input:[1,3,5,8,12,13,14,15,19,22,23,24,25,27,28,29,30,31,36,37,38,42,43,44,46,47],input_1:36,input_2:36,input_3x3:22,input_data:36,input_featur:29,input_length:[22,23,30],input_nam:36,input_s:22,input_tensor_list:14,input_to_model:41,input_tupl:19,input_var:[3,22],insensit:22,insert:[15,19,22,43],insid:[1,4,19,28,31],insight:4,inspect:[1,31],inspir:37,inst:44,instal:[1,7,14,17,36,41,43,44],instanc:[13,15,19,21,22,23,30,43,44,45],instance_norm:36,instancenorm1d:23,instancenorm2d:23,instancenorm3d:23,instancenorm:22,instantan:8,instanti:[17,19,22,23,29],instead:[1,3,5,13,14,15,19,22,23,30,32,35,36,37,38,42,43,47],instruct:[2,19,36],instrument:31,insuffici:8,int16:[40,42,43,49],int32:[22,40,42,43,49],int64:[22,23,28,36,40,42,43,49],int64tensor:46,int8:[40,42,43,49],int_:43,int_a:43,int_b:43,int_repr:42,integ:[8,13,14,15,19,22,23,36,37,40,42,43,47,49],integer_interv:15,integergreaterthan:15,integr:[13,22,31,36,37,42,43],intel:[35,45],intel_openmp:35,intens:[37,47],intent:[4,19],inter:[27,43],interact:[1,5,8,36,41],interchang:[15,19],interconnect:14,interest:[4,5,18,44],interfac:[29,36,37,41],intermedi:[3,19,22,23,25,30,36],intermediari:15,intern:[5,13,15,22,25,27,28,36,38,43,44,46],internet:[4,44],interop:43,interoper:27,interpol:[22,43,47],interpret:[13,14,21,23,27,38,43],interprocess:8,interrupt:21,interv:[15,43,47],intra:27,introduc:[15,17,22,26,42,43],introduct:[26,41],inttensor:[40,42,43],intuit:36,inv:[15,43],invalid:43,invari:[15,22,38,43,47],invers:[15,22,23,37,42,43],inverse_indic:43,invert:[15,22,43,46],invest:5,investig:4,invis:28,invoc:[3,19,29,31,36],invok:[19,22,27,31,37],involv:[4,5,13,28,30,33],iotamudelta:6,iou:46,ipc:8,ipc_collect:8,ipc_handl:8,ipp:45,irecv:14,irfft:[42,43],irrelev:1,irrespect:[28,43],is_
avail:[8,28,43],is_coalesc:38,is_complet:14,is_contigu:42,is_cuda:[39,42],is_floating_point:[40,42,43],is_in_onnx_export:36,is_initi:14,is_leaf:[1,42],is_mpi_avail:14,is_nccl_avail:14,is_pin:[13,39,42],is_python_modul:7,is_set_to:42,is_shar:[39,42],is_sign:42,is_spars:[39,42],is_storag:43,is_tensor:43,is_train:[1,43],is_valid_fil:44,isend:14,isfinit:43,isinf:43,isinst:15,isn:[4,13,28],isnan:[36,43],isol:21,issu:[5,21,22,28,32,35,36],itch:4,item:[13,17,22,33,41,42,43,44],iter:[4,8,14,15,21,22,25,26,37,41],iter_end:13,iter_start:13,iterabledataset:[13,31],itertool:[15,43],its:[1,2,4,5,7,8,13,14,15,19,21,22,25,26,28,29,30,35,36,37,38,40,42,43,44,46,47],itself:[3,4,19,21,22,23],ivalu:31,jacobian:[1,15,43],jakob:22,jang:15,jess:6,jit:[7,18,27,31,36,43],jitter:47,job:[14,22,31,37,41],johann:6,johnson:6,join:[4,14,17,21,32],jointli:[15,22],jone:22,joulin:22,journal:43,jpeg:31,json:[19,31,44],juggl:3,jump:[40,42],junji:6,just:[1,4,7,15,17,19,21,22,28,31,36,42,43],k_0:43,kaiming_normal_:24,kaiming_uniform_:24,kaiser:22,kdim:22,keep:[1,4,13,17,19,21,22,23,25,28,30,32,37,41,43,47],keep_var:22,keepdim:[22,23,42,43],kei:[1,13,14,19,22,31,36,37,39,41,42,43],kept:[21,22],kernel:[2,7,8,22,23,29,36],kernel_s:[22,23,41],kernel_shap:36,key_averag:1,key_padding_mask:22,keyboard:46,keypoint:45,keypointrcnn_resnet50_fpn:46,keyword:[1,17,19,22,36,37,41,43],kill:[21,30],kind:[14,22,29,32,36],kinet:45,kinetics400:44,kite:46,kl_diverg:15,kl_normal_norm:15,kl_version1:15,kl_version2:15,kldivloss:23,kmnist:45,knife:46,know:[3,4,19,25,36],known:[4,14,19,21,22,24,27,28,36,43,47],knuth:4,kth:43,kthvalu:[42,43],kullback:[15,22,23],kuzushiji:44,kw_i:22,kwarg:[1,3,7,17,22,23,34,36,39,42,43,44,46,47,48],kwlist:41,l1loss:23,l_1:22,l_c:22,l_n:22,l_p:23,lab:44,label:[4,13,22,23,32,41,44,46],label_img:41,lambd:[22,23,37,42,47],lambda1:37,lambda2:37,lambda:[1,13,15,22,23,37,42,43,47],lambdalr:37,langl:15,languag:[7,22,23,30,36],laptop:46,larg:[4,13,18,21,22,23,27,28,30,38,42,43,44,46,47],larger:[1,5,22,23,30,31,41,42,43,46,47],largest:[19,23,42,43,49],last:[1,3,13,19,22,23,25,37,43,46,47],last_epoch:37,later:[1,4,19,22,27,28,34,43],latest:[4,14,15,17,36],latin1:43,latter:[22,32],launch:[2,13,22,25,27,28],law:[22,47],layer:[14,23,24,25,29,30,37,46],layer_count:36,layer_norm:36,layernorm:23,layout:[17,19,20,38,41,42,43],lazi:37,lazili:8,lbfg:37,lbrace:43,lceil:43,ldot:[15,22,43],le_:42,lead:[1,4,35,42,43],leadership:5,leaf:[1,42,43],leak:21,leaki:[22,23,24],leaky_relu:[24,36],leaky_relu_:23,leakyrelu:23,learn:[4,15,18,22,24,44,46],learnabl:[22,23],learned_0:36,learned_14:36,learned_15:36,learned_1:36,learned_2:36,learned_3:36,learned_:36,least:[15,17,22,24,26,30,39,42,43,46],leav:[1,19,25,42,43,44],left:[19,22,23,36,42,43,47],left_ankl:46,left_ear:46,left_elbow:46,left_ey:46,left_hip:46,left_kne:46,left_should:46,left_wrist:46,leftimg8bit:44,legaci:[23,40],legitim:22,leibler:[15,22,23],lemma:15,len:[13,14,19,22,23,38,41,43,44],length:[1,8,13,14,15,19,22,23,26,30,36,42,43,47],leq:[22,23,43],lerp:[42,43],lerp_:42,less:[1,4,8,13,15,17,22,29,32,43,46],less_than:15,lesser:27,let:[1,4,13,15,28,29,32,35,41,42],letter:[43,44],level:[1,4,13,19,21,22,24,27,41,42,43,46],lexicograph:43,lfloor:[22,23,43],lib64:7,lib:[35,43],libari:35,libenzi:6,librari:[2,5,7,13,18,27,29,30,31,32,33,35,36,43,45],lie:[22,23,41],lies:44,lifetim:4,light:[41,46],lighter:47,like:[1,2,3,4,5,7,8,13,14,15,17,19,21,22,23,27,28,29,30,31,32,35,36,42,43,47],likelihood:[15,22,23],limit:[13,21,22,25],line:[1,2,22,26,35,36,43],line_search_fn:37,linear:[8,19,24,25,28,29,30,36,4
2,43,46],linearfunct:29,linearli:[22,23,30],lineartransform:47,liner:22,linewidth:43,link:[7,15,22,23,31],linker:7,linspac:43,linux:[14,17,20],list:[1,3,4,5,7,13,14,15,17,22,23,29,35,36,37,38,39,40,41,42,43,44,46,47,48],listconstruct:19,listofproperti:41,literatur:22,littl:29,live:[19,22,30,37],llion:22,load:[1,7,19,20,22,31,34,35,36,37,43,44,45,46],load_inlin:7,load_nvprof:1,load_state_dict:[17,22,34,37,43],load_state_dict_from_url:[17,20],load_url:[20,46],loadabl:17,loadann:44,loaded_weight:42,loader:[13,44],loc:[15,43],local:[14,17,19,21,22,23,30,41,44],local_rank:22,locallr_0:41,localresponsenorm:23,locat:[1,7,8,15,17,19,20,22,23,37,38,41,42,43,44,46,47],lock:[4,13,14,15,28,32],log10:[42,43],log10_:42,log1p:[42,43],log1p_:42,log2:[36,42,43],log2_:42,log:[7,13,15,22,23,36,41,42,43],log_:[42,43],log_abs_det_jacobian:15,log_dir:41,log_input:[22,23],log_norm:15,log_normal_:[42,43],log_pob:22,log_prob:[15,22,23],log_sigmoid:36,log_softmax:[22,36],logabsdet:43,logarithm:[22,23,43],logdet:[42,43],logdir:41,logic:[3,13,29],logist:[15,22],logit:[15,22,23],logsoftmax:23,logspac:43,logsumexp:[36,42,43],longer:1,longest:[22,30],longtensor:[22,23,38,40,42,43],look:[2,4,5,15,19,22,23,31,32,35,36,43],lookup:[15,22,23,27],loop:[8,19,27,30,36,41,47],loop_count:36,loop_in_traced_fn:19,loop_rang:36,loopmodel2:36,loopmodel:36,loos:31,lorentz:15,loss:[15,30,37,41,44,46],loss_fn:[32,37],lost:[22,43],lot:[4,21,32,41],low:[4,15,21,22,42,43],lower:[1,8,14,15,19,22,23,24,25,33,37,43],lower_bound:15,lower_choleski:15,lower_triangular:15,lowercas:14,lowercholeski:15,lowercholeskytransform:15,lowest:43,lowrank_multivariate_norm:15,lppool1d:23,lppool2d:23,lr_0:41,lr_decai:37,lr_lambda:37,lr_schedul:37,lrelu:22,lrn:22,lru:[28,43],lstm:[3,36,41],lstsq:[42,43],lsun:45,lt_:42,lu_data:[42,43],lu_pivot:[42,43],lu_solv:[42,43],lu_unpack:43,lukasz:22,lvert:[22,23,43],macbook:41,machin:[14,22,31],maco:21,maddison:15,made:[1,5,19,22,35,37,41,47],mae:22,magma:[35,43],magma_2:35,magma_hom:35,magnitud:[22,24,43],mai:[1,2,4,7,8,9,10,11,13,14,15,19,22,23,26,28,30,35,36,37,38,39,42,43,47],main:[13,14,15,21,23,25,34,35,41,42,43],main_tag:41,mainli:[15,22,23],mainta:47,maintain:[4,14,15,22],major:[4,8,22,23,36,38],make:[1,2,3,7,8,11,13,14,15,17,19,21,22,23,25,26,28,29,30,32,33,35,36,37,40,41,42,43,44,47,48],make_grid:[41,48],manag:[1,4,22,30,31,36,43],mandatorili:13,mani:[1,4,5,13,14,19,22,24,25,26,27,29,31,33,40,41,42,43,45],manipul:30,manner:[3,26,42],mantissa:42,manual:[13,14,19,21,22,23,28,30,33,35,36,41],manual_se:[8,33,43],manual_seed_al:8,map:[7,15,19,22,23,35,36,39,43],map_:42,map_loc:[19,20,22,43],margin:[22,23,41],marginrankingloss:23,mark:[8,19,22,25,42],marker:8,market:[4,5],marten:24,mask:[22,42,43],masked_fil:[36,42],masked_fill_:42,masked_scatt:42,masked_scatter_:42,masked_select:[42,43],maskrcnn_resnet50_fpn:46,mass:15,master:[4,17,36],master_addr:14,master_port:14,mat1:[38,42,43],mat2:[38,42,43],mat:[38,41,42,43,44],match:[1,8,14,15,19,22,23,26,36,37,40,42,43,44,47],math:[13,19,22,23],mathbb:22,mathbf:[15,22],mathbin:43,mathcal:[22,24,43],mathemat:[22,23,43],mathrm:[15,22,43],matmul:[22,42,43],matplotlib:41,matric:[15,23,38,42,43],matrix:[15,22,23,24,38,41,42,43,47],matrix_pow:[42,43],matrix_rank:43,matter:[1,2,5,19,25,43],max:[13,14,19,22,23,26,30,36,37,42,43,47,48,49],max_:22,max_bin:41,max_ev:37,max_indic:43,max_it:37,max_lr:37,max_memory_alloc:[8,28],max_memory_cach:[8,28],max_momentum:37,max_norm:[22,23],max_queu:41,max_siz:28,max_val:[22,23],max_valu:22,maxim:[22,37,43],maximum:[8,15,22,23,28,37,43,47],maxnorm:
[42,43],maxpool1d:[23,36],maxpool2d:[23,36],maxpool3d:[23,36],maxpool:[22,36],maxunpool1d:23,maxunpool2d:23,maxunpool3d:23,may04_22:41,mayb:4,mean:[1,3,4,8,13,14,15,19,21,22,23,24,30,33,35,36,37,42,43,46,47],mean_vector:47,meant:[1,22],meantim:[22,23],measur:[8,15,22,23,31,37],mechan:[18,21,31],median:[15,42,43],medium:4,meet:28,megabyt:22,member:[4,5,13,14,19,22,30],membership:5,memo:22,memoiz:15,memori:[1,3,15,16,19,21,22,23,25,32,37,39,40,41,42,43,46],memory_alloc:[8,28],memory_cach:[8,28],memory_effici:46,memory_key_padding_mask:22,memory_mask:22,mendoza:6,mention:[17,19,28],mere:4,merg:[4,5,13,22],mesh:41,meshgrid:43,messag:[4,8,17,19,30,36,37],messmer:6,meta:41,metadata:[19,41,43],metadata_head:41,meter:46,meth:43,method:[1,4,7,8,13,14,15,17,21,22,23,24,28,29,30,32,36,37,38,40,41,42,43,44,46],metric:[8,37,41],michael:6,microwav:46,middl:36,might:[1,2,5,17,19,22,25,27,28,31,42,43,44],mileston:37,millisecond:8,min:[13,14,22,23,36,37,42,43,47,48,49],min_indic:43,min_lr:37,min_siz:46,min_val:[22,23],min_valu:22,min_x:43,mind:22,minfunc:37,mini:[13,22,23,46,48],minibatch:[13,22,23,43],minim:[1,4,17,32,37,43],minimum:[7,22,37,43,46],minkowski:23,minlength:[42,43],minor:[5,8],minu:43,minut:[4,14,41],mismatch:[19,30,43,47],miss:[22,35,36],missing_kei:22,mistak:30,mix:[7,15,22,27,36],mkl:[27,35,43],mkl_2018:35,mkl_fft:35,mkl_num_thread:27,mkl_thread:27,mkldnn:42,mkldnn_thread:27,mmap:21,mnasnet0_5:46,mnasnet0_75:46,mnasnet1_0:46,mnasnet1_3:46,mnist:[41,45],mnist_train:41,mnt:14,mobil:46,mobilenet_v2:46,mobilenetv2:46,mod:19,mode:[1,2,13,14,15,19,22,23,24,30,33,36,37,42,43,44,46,47],model:[1,2,3,8,14,18,19,20,22,23,25,27,28,32,33,36,37,41,43,45,47],model_dir:20,model_zoo:[18,46],moder:3,modif:[1,42,43],modifi:[1,13,19,22,23,25,28,36,37,42],modul:[1,3,7,17,18,21,23,25,27,28,30,31,32,35,36,41,42,43,46,47],module_kwarg:23,modulelist:19,modulu:43,momemtum:22,moment:[1,21,36,37],momentum:[22,23,25,37],monitor:[8,28,37,43],monoton:15,mont:15,moor:43,more:[1,2,5,7,8,13,14,15,19,20,21,22,23,24,25,27,28,29,30,31,36,37,38,40,41,42,43,44,46,47],moreov:[42,43],most:[1,2,4,8,13,14,15,17,19,21,22,23,25,28,32,37,38,40,42,43],mostli:[4,15],motion:44,motiv:4,motorbik:46,motorcycl:46,mountain:44,mous:46,moustapha:22,move:[3,19,20,21,22,23,28,32,37,39,41,42,43],moviepi:41,mpi:14,mrshenli:6,mseloss:23,msg:8,msys2:35,much:[1,2,4,5,13,22,28,42,47],mul:[1,19,36,38,42,43],mul_:[38,42],mulconst:29,mult:13,multi:[2,8,19,36,40,42,43],multicast:14,multidimension:22,multihead:22,multihead_attn:22,multilabelmarginloss:23,multilabelsoftmarginloss:23,multilay:22,multilin:41,multilinear:43,multimarginloss:23,multinomi:[42,43],multipl:[8,13,14,15,17,19,21,22,23,27,28,29,31,32,35,37,38,43,44,47],multipli:[22,23,38,43,46,47],multiplicand:43,multiprocess:[13,14,18,22,44],multiprocessing_context:13,multisteplr:37,multivari:[15,43],multivariate_norm:15,must:[1,7,8,13,14,15,19,21,22,23,24,26,29,32,33,36,37,39,42,43,47],mutabl:19,mutat:[19,42,47],mutual:[13,14],mvlgamma:[42,43],mvlgamma_:42,mvn:15,my_api:31,my_constraint:15,my_dict:19,my_experi:41,my_factori:15,my_imag:41,my_image_batch:41,my_image_hwc:41,my_lib:35,my_lib_add_backward_cuda:35,my_lib_add_forward_cuda:35,my_list:19,my_lstm:30,my_mesh:41,my_modul:19,my_module_inst:19,my_paramet:19,my_registri:15,my_script_modul:19,my_segmentation_transform:47,my_submodul:19,my_transform:15,my_variable_nam:19,myconstraint:15,myconstraintclass:15,myfunc:1,myiterabledataset:13,mymodel:32,mymodul:[19,22,30],mypi:19,myscriptmodul:19,mytransform:15,n_0:23,n_1:43,n_2:43,n_class:22,n_d:43,n_fft:
[42,43],n_i:[22,43],n_iter:41,n_k:[23,43],n_power_iter:22,n_t:22,naiv:13,name:[1,7,8,14,15,17,19,20,21,22,24,31,36,39,41,43,44,45,49],named_buff:22,named_children:22,named_modul:22,named_paramet:22,namedtupl:[13,22,43],namespac:19,nan:[1,43],narrow:[36,42,43],narrow_copi:[38,42],nasdaq:41,natalia:6,nativ:[19,21],natur:[1,2,4,15,22,33,43],nbatch:22,nccl2:22,nccl:22,nccl_debug:14,nccl_debug_subsi:14,nccl_socket_ifnam:14,nchannel:22,nchw:41,ncrop:47,ndarrai:[36,42,43,47],ndim:42,ndimens:42,ne_:42,nearest:[22,23,47],nearli:[1,32,42],necessari:[1,7,13,19,22,25,26,28,35,40,42,43],necessarili:[14,15,22,28,36,43],need:[1,4,5,8,13,14,15,19,21,22,23,25,28,29,30,31,32,33,35,36,37,38,39,42,43,44],need_weight:22,needs_input:31,needs_input_grad:[1,29],neeraj:6,neerajprad:6,neg:[8,13,15,19,22,23,24,36,42,43,47],neg_:42,negative_binomi:15,negative_slop:[22,23,24],neglig:[1,36],negoti:5,neighbor:[22,43],neighborhood:22,neighbour:[22,23],neither:[13,14],nelement:[22,42],neq:[22,43],nest:[1,8,19,22,42],nesterov:37,net:[19,22,28,41],netlifi:4,network:[4,15,19,22,23,24,25,28,36,37,47],neural:[4,19,22,24,28,37,46],neuron:22,never:[1,3,4,14,22,25,42],new_:[28,42],new_data:36,new_empti:42,new_ful:[28,42],new_group:[14,22],new_lr:37,new_on:42,new_stat:[8,43],new_strategi:21,new_tensor:[28,42],new_zero:42,newer:[27,28],newli:[4,25],next:[1,13,14,15,21,22,23,27,32,36,40,41,42,43,44],next_stat:15,nfs:14,ngimel:6,nhead:22,nhwc:41,nice:[1,22],niederreit:43,nielsen:15,nightli:41,niki:22,nine:[40,42],ninja:[7,35],nist:44,nll:22,nllloss:23,nlp:22,nnz:[1,38,42,43],no_grad:[1,3,36,43],no_sync:22,noam:22,noarch:35,nock:15,node:[14,22,36],non:[1,3,7,14,15,19,21,24,26,28,29,30,33,41,42,43,47],non_block:[22,28,39,42],noncontigu:1,nondet_tol:1,nondetermin:1,nondeterminist:[9,10,11,22,23,42,43],none:[1,7,8,13,14,15,19,20,21,22,23,24,28,29,32,36,37,38,39,41,42,43,44,46,47,48],nonexist:19,nonlinear:24,nonlinearli:4,nonneg:15,nonnegative_integ:15,nonzero:[1,36,42,43],noordhui:6,noplot:4,nor:[13,14,22],norm:[22,23,36,37,42,43],norm_typ:[22,23],normal:[1,17,19,24,28,37,41,42,43,46,47,48],normal_:[24,28,42,43],normalized_shap:[22,23],nose:46,notabl:47,notat:[42,43],note:[1,7,8,9,10,11,13,14,15,16,17,18,19,21,22,23,25,26,27,29,31,32,36,37,38,41,42,43,44,47],notebook:[4,48],noth:[4,7,8],notic:[19,22,43],notifi:5,notimplementederror:15,notion:[13,22],now:[1,3,22,26,28,29,36,37,42,43],nproc:21,nrow:48,nsdf3:44,nthread:44,nuanc:4,nuc:43,nuclear:43,num:[22,43],num_channel:22,num_class:[23,44,46],num_decoder_lay:22,num_direct:22,num_embed:[22,23],num_encoder_lay:22,num_featur:[22,23],num_group:22,num_head:22,num_keypoint:46,num_lay:[22,36],num_lin:44,num_output_channel:47,num_paramet:22,num_process:32,num_replica:13,num_sampl:[13,42,43],num_threshold:41,num_work:[13,35,44],number:[1,2,3,4,13,14,15,19,21,22,23,26,27,28,29,33,36,37,38,39,41,42,43,44,46,47,48,49],number_of_vertic:41,numel:[42,43],numer:[13,15,19,22,23,29,36,37,42,43,49],numpi:[13,26,30,35,36,41,42,43,44,47,49],nvcc:7,nvidia:[1,14,28,30,35,43],nvprof:[1,2],nvtx:[1,2],nvvp:1,o_t:22,obermey:6,obj:[8,19,35,43],object:[1,8,13,14,15,16,19,20,21,22,25,27,28,29,30,32,35,36,37,39,40,41,42,43,44,45,47,49],observ:[22,23],obtain:[1,13,14,15,21,22,23,27,42,43,46],obviou:[30,38],obvious:4,occas:[1,4,25],occasion:38,occupi:[8,22,23,28,49],occur:[8,13,19,22,23,28,30,36,42],occurr:43,odd:15,off:[1,4,8,9,10,22,23,27,31,42,43],offici:[5,14,22,35,46],offlin:[19,47],offset:[22,23,42,43,44,47],often:[1,2,4,7,13,14,15,19,22,23,30,31,36,37,41,42,43],old:[25,35,37,43],older:28,omagma:35,omega:43,omega_1:
43,omega_d:43,omega_i:43,omit:[3,7,14,22,35,36,43,47],omkl:35,omp:27,omp_num_thread:27,onc:[1,4,13,14,16,21,22,25,27,28,29,31,36,37,41,43],one:[1,2,3,4,7,8,13,14,15,19,21,22,23,26,27,28,29,31,32,33,35,36,37,39,40,41,42,43,44,45,46,47],one_hot_categor:15,ones:[1,14,15,19,22,23,26,28,29,36,37,42,43],ones_:24,ones_lik:[28,36,43],onesid:[42,43],onfunctionent:31,onfunctionexit:31,onli:[1,2,3,4,5,8,13,14,15,16,19,21,22,23,24,25,27,29,30,31,32,34,35,36,37,38,41,42,43,46,47],onlin:37,only_input:1,onnx:[18,22],onnx_aten:36,onnx_aten_fallback:36,onnx_model:36,onnxruntim:36,onto:[8,19,21,30,43],opaqu:14,open:[1,5,15,19,21,35,43],openbla:35,openmp:[27,35],oper:[2,3,4,5,8,9,10,11,13,15,22,23,26,27,28,29,30,32,33,37,38,40,42,45,47],operand:43,operator_export_typ:36,operatorexporttyp:36,operatornam:[22,43],opinion:4,opnam:36,oppos:47,ops:[1,14,19,27,28,29,36,42,43],opset:36,opset_vers:36,opt:43,optim:[1,4,7,14,15,18,19,22,24,25,27,30,32,36],optimiz:[19,36],optimum:37,option:[1,3,7,8,13,14,15,17,20,22,23,24,29,30,33,36,38,40,41,42,43,44,46,47,48],optional_unwrap:19,orang:46,ord:43,order:[1,3,4,14,15,17,21,22,26,28,29,33,36,37,42,43,46,47],ordereddict:22,ordin:[40,42],ordinari:8,org:[2,4,17,22,35,41,46,47],organ:[4,5,31],orgqr:[42,43],orient:36,origin:[1,13,19,21,22,28,31,32,36,37,39,42,43,47],orign:47,orion:6,orionr:6,ormqr:[42,43],ort:36,ort_sess:36,orthogon:[24,43],orthogonal_:24,orthonorm:43,ossci:35,other:[1,2,4,5,7,8,13,15,19,21,23,25,26,27,28,29,30,32,33,34,36,37,41,42,46,47,48],otherwis:[1,4,5,7,14,22,23,32,39,42,43,44,46],otim:[23,43],our:[4,19,29,32,36,38],out:[1,4,5,17,19,21,22,23,25,26,32,36,38,40,41,42,43,47],out_caffe2:36,out_channel:22,out_featur:[22,29],out_j:22,out_ort:36,out_padh:23,out_padt:23,out_padw:23,out_unf:22,outer:[43,46],outlier:22,output1:[22,36],output2:22,output:[1,2,3,4,8,13,14,15,19,22,23,25,27,29,30,35,36,37,38,41,42,43,44,46,47],output_2d:22,output_4d:22,output_devic:[22,23],output_featur:29,output_nam:36,output_pad:[22,23],output_ratio:22,output_s:[22,23],output_tensor_list:14,outsid:[5,13,19,23,28,47],oven:46,over:[1,5,13,14,15,19,21,22,23,26,27,32,36,37,38,42,43,44,47,48],overal:[5,14,25,32,47],overall_end:13,overall_start:13,overflow:[23,43],overhead:[1,2,14,31,42],overheard:44,overlap:[1,13,22,28],overparameter:15,overrid:[7,14,15,22,23,36,37,41,43],overridden:[1,7,22],overrit:13,overshoot:23,overview:[21,25],overwhelm:4,overwrit:[13,22,25],owen:43,own:[4,5,14,15,22,28,36,43],owner:17,ownership:[4,5],p1d:23,p2d:23,p3d:23,p_c:22,p_i:22,p_tensor:42,pace:4,pack:[22,30,35,43],pack_padded_sequ:30,packag:[4,8,15,17,18,22,37,41,43,45],packagesnotfounderror:35,packed_input:30,packed_output:30,packedsequ:12,pad:[13,30,33,36,41,43,47,48],pad_if_need:47,pad_mod:[42,43],pad_packed_sequ:30,pad_valu:48,padded_input:30,padding_idx:[22,23],padding_input:30,padding_mod:[22,23,47],padding_valu:22,padh:23,padt:23,padw:23,page:[4,13,22,28],pai:35,pair:[19,22,23,37,38,41,43],pairwis:[15,22],pairwisedist:23,paper:[4,22,37,46],parallel:[0,13,14,22,23,27,28,33,35,47],parallel_info:[0,27],parallelli:44,param1:15,param2:15,param:[1,15,22,24,25,36,37,46],param_byt:37,param_group:37,param_shap:15,paramet:[1,3,7,8,13,14,15,16,17,20,21,23,24,25,29,31,32,34,36,38,39,41,42,43,44,45,46,47,48],parameter:[15,42],parameteriz:15,parametr:[15,29],parent:[21,35,41],park:46,parmar:22,pars:[1,14],parse_arg:[28,36],parser:28,part:[2,3,4,5,7,14,15,19,20,22,25,30,36,37,38,43,44],parti:[5,17],partial:[15,22,23,36,43],particip:[13,14,22],particular:[4,13,19,22,27,28,30,31,33,36,42,43,44],particularli:[13,19,22],p
artit:22,partli:5,partner:4,pascal:[44,46],pass:[1,3,4,7,13,14,15,17,19,21,22,24,25,27,28,31,36,37,38,41,42,43,44,46,47],past:[14,30,46],paszk:[5,6],patch:[4,22],path:[1,2,7,14,17,19,25,34,44],path_importer_cach:17,path_to_hub_dir:17,pathwai:19,patienc:37,pattern:[14,19,22,28,29,30],pdb:19,pdf:[22,46,47],pdist:22,peak:[8,37],peer:[4,14,28],penalti:37,pend:41,penros:43,peopl:4,per:[7,8,13,14,22,23,27,31,33,41,43,46],per_index_weight:22,per_sample_weight:[22,23],per_work:13,perform:[1,3,11,12,13,14,15,19,21,22,23,24,25,28,29,33,37,38,39,40,41,42,43,47],period:[32,37,43],permit:38,permut:[13,36,42,43],perplex:15,persist:[3,4,12,22,35],person:[4,5,18,45],perspect:47,perturb:[1,43],peter:6,peterjc123:[6,35],phase:35,phenomenon:30,phi:23,phone:46,photo:44,phototour:45,php:44,phy:43,pic:47,pick:47,pickl:[13,21,22,43],pickle_load_arg:43,pickle_modul:43,pickle_protocol:43,pid:30,piec:4,pieter:6,pietern:6,pil:[44,45],pillow:[41,47],pin:[22,39,42,43],pin_memori:[13,28,39,42,43],pinvers:[42,43],pip:[35,41],pipelin:47,pivot:[42,43],pixel:[22,23,44,47,48],pixel_shuffl:[22,36],pixelshuffl:23,pixelwis:46,pizza:46,pjh5:6,pkg:35,place:[4,8,13,14,19,22,23,28,31,36,39,42,47],placehold:22,plai:14,plain:[7,22],plan:[4,14,22,43],plane:[22,23,43,44],plant:46,platform:[7,33,43,46],pleas:[1,2,4,5,9,10,11,14,15,19,22,23,29,35,36,37,41,42,43,44],plenti:30,plot:41,plu:47,plume:44,pmf:15,png:44,point:[1,4,5,8,13,19,22,23,25,33,37,40,41,42,43,44,49],pointer:8,pointwis:[15,26],poisson:[22,23],poissonnllloss:23,poli:44,polici:[15,37],policy_network:15,polosukhin:22,polygon:44,polymorph:19,pool:[27,29,32,33],pop:[8,22],popul:[1,15,42],popular:45,popularli:47,port:14,portion:[22,23,37,43],pos_weight:[22,23],posit:[1,13,15,17,22,23,36,42,43,47,49],positive_definit:15,positive_integ:15,positivedefinit:15,possess:5,possibl:[5,7,13,15,19,21,22,23,24,25,29,32,35,40,42,43,44],post:[4,30,35,46,47],postprocess:46,pot:46,potenti:[11,14,21,22,23,25,36],potential_energi:15,pottedpl:46,pow:[1,36,42,43],pow_:42,powbackward0:1,power:[22,23,37,43,47],powertransform:15,pr_curv:41,practic:[13,15,18,19,21,46],pradhan:6,pre:[1,17,22,36,37,42,44,46],preced:27,precis:[1,7,15,22,36,41,43,46],precision_matrix:15,precompil:31,predict:[22,41,46],predict_net:36,preemptiv:22,prefer:[5,13,22],preferr:43,prefix:[17,22,38],prelu:36,prepar:36,prepend:[7,13,22,26,43],preprocess:[42,46],presenc:5,present:[5,14,20,21,22,25,40,43,44,46],preserv:[13,19,22,24,28,42,47],preserve_rng_st:3,pressur:[1,25],pretrain:[17,22,25,36,46],pretrained_backbon:46,pretti:[19,43],prevent:[4,8,13,14,21,22,23,38,43],previou:[14,22,35,42,43],previous:[1,19,26,28,42],prim:19,primari:5,primarili:[15,42],primit:[14,19,27],print:[1,13,17,19,22,23,27,28,29,36,37,41,42,43,44],printable_graph:36,printer:19,prior:[4,5,26,37],prioriti:[4,8],pro:[35,41],prob:15,probabl:[13,21,22,23,29,35,36,41,42,43,47],problem:[4,14,21,22,30,32,33,35,43],problemat:[4,19],proce:28,procedur:[19,44],proceed:8,process:[1,7,8,14,15,17,19,21,22,23,27,28,31,32,33,35,38,39,44,46],process_group:22,process_id:22,processgroup:14,prod:[22,36,42,43],prod_:[22,43],prod_d:22,produc:[4,7,8,13,19,21,22,23,26,28,31,35,36,38,43],producer_info:31,product:[1,14,15,19,22,23,31,42,43,47],prof:1,profil:[2,43],program:[1,2,8,13,14,19,25,28,30,31,32,35,41],programm:19,progress:[8,17,20,37,46],project:[4,17,34],projector:41,promot:22,prompt:35,prone:[21,32],propag:[15,21,36,38,42],proper:[22,28,35],properli:[4,22,32,40,43],properti:[1,13,15,19,22,23,28,37,40,49],proport:[22,47],proportion:[22,23],propos:[5,6,37],protobuf:36,protocol
:[13,35,43],prototyp:40,prove:21,proven:[4,22],provid:[1,4,5,7,13,14,15,17,19,21,22,23,28,36,37,38,39,40,41,42,43,46,47,49],pseudo:43,pseudoinvers:15,pseudorandom:33,psi:43,pth:[17,19,20],publish:4,puhrsch:6,pull:5,purchas:5,pure:19,purg:41,purge_step:41,purpos:[14,22,42,43],push:[4,5,8],pushcallback:31,put:[4,13,17,21,22,28,32,37,42,43,44],put_:42,pybind11:[7,19],pyc:31,pycapsul:16,pyplot:41,python2:[14,43],python3:[14,43],python:[1,2,5,7,8,13,14,17,21,22,23,25,26,27,28,29,30,31,32,36,38,42,43,47],pytorch:[0,1,2,7,8,13,15,17,19,20,22,26,27,28,30,31,32,35,37,40,41,42,43,44,46,49],pytorch_jit:19,q_scale:42,q_zero_point:42,qmnist:45,qscheme:42,qtensor:42,quad:22,quadrat:30,qualiti:[4,44],quantiti:37,quantiz:[1,42],quasirandom:43,queri:[8,13,22,28],question:[4,18],queu:[8,28],queue:[4,21,41],queue_2:21,quick:[1,4],quickli:4,quit:[4,30],qw_i:22,r_t:22,racket:46,rais:[1,4,15,19,21,25,28,42,43,47],raise_except:1,ram:[41,43],rand:[1,19,23,36,41,42,43,46],rand_lik:43,randint:[22,23,38,41,42,43,47],randint_lik:43,randn:[1,19,22,23,25,26,28,29,36,38,40,41,42,43],randn_lik:[36,43],random:[15,17,22,23,33,36,41,42,44,46,47],random_:[22,23,42,43],random_devic:43,random_offset:44,random_split:13,randomaffin:47,randomappli:47,randomchoic:47,randomcrop:[44,47],randomeras:47,randomgrayscal:47,randomhorizontalflip:47,randomli:[1,13,22,23,31,44,47],randomord:47,randomperspect:47,randomresizedcrop:47,randomrot:47,randomsampl:13,randomsizedcrop:47,randomverticalflip:47,randperm:43,rang:[1,8,13,14,15,19,22,23,30,32,36,37,41,42,43,44,46,47,48],range_pop:8,range_push:8,rangl:15,rank:[13,14,15,22,32,43],rapidli:30,rare:4,rate:[15,22,31,41,46],rather:[1,3,7,19,23,26,36,41,42,43,48],ratio:[15,22,47],raw:[19,22,36],rbrace:43,rceil:43,rcond:43,rdinat:[38,43],reach:[4,5,13,32,37],reachabl:14,read:[13,14,19,26,28,31,36,37,42,43],readabl:[0,13,36],readi:[4,7,22,43],readlin:[19,43],real:[13,15,17,22,43,47],real_vector:15,realiti:2,realli:[1,4,25,43],realloc:43,realvector:15,rearrang:[22,23],reason:[13,14,19,23,25,36,40],rebas:4,reblitz:6,rebuild:4,rebuilt:4,recal:[22,29,41,46],receiv:[1,4,13,14,15,21,22,32],recent:[1,4],recip:22,reciproc:[36,42,43],reciprocal_:42,recogn:13,recognit:[44,46],recommend:[1,13,14,17,19,21,22,24,25,27,28,29,32,36,42,43],recomput:[3,22,37],reconstruct:[8,22],record:[1,8,19,22,25,36,41,42,43],record_ev:8,record_shap:1,recordfunct:31,recov:[22,43],recreat:25,rectangl:47,rectifi:[22,23,24],recurr:[14,19,28,37],recurs:[15,19,22],recv:14,redistribut:35,reduc:[1,8,14,21,22,23,35,37,38,42,43],reduce_add:8,reduce_multigpu:14,reduce_op:14,reducelronplateau:37,reduceop:14,reduct:[14,22,23],redund:[14,43],reevalu:37,refactor:[4,34,35],refcount:[21,32],refer:[8,13,14,15,17,18,21,22,23,25,29,30,32,41,42,43,45,46],referenc:[19,25,43],reflect:[19,22,23,30,42,43,47],reflection_pad:36,reflectionpad2d:23,reflector:43,refriger:46,regard:[19,22,23,43],region:[15,19,21,22,23,28,43,47],regist:[1,15,21,22,29,31,36,42,43],register_backward_hook:22,register_buff:[19,22,29],register_custom_op_symbol:36,register_forward_hook:22,register_forward_pre_hook:22,register_hook:[1,22,42],register_kl:15,register_packag:43,register_paramet:[22,29],registr:22,regress:[4,22,46],regular:[1,2,22,31,36,37],regularli:4,reimplement:22,reinforc:15,reiniti:17,reinterpret:[15,42],reinterpreted_batch_ndim:15,rel:[1,5,7,15,22,27,28,31,37,43],relat:[4,5,13],relationship:1,relative_path_to_checkpoint:17,relative_to:35,relax:[15,19],relaxed_bernoulli:15,relaxed_categor:15,releas:[4,8,14,17,21,22,23,28,33,35,36,41,43],relev:[5,42],reli:[5,13,22,33],
reload:17,relu1:22,relu2:22,relu:[19,24,36],relu_:23,rem:35,remain:[1,15,21,30,43,44],remaind:[42,43],remainder_:42,remap:[19,20,43],rememb:[30,32],remot:[13,14,46],remov:[1,5,14,19,22,23,42,43],removablehandl:22,render:[4,41],renorm:[22,23,42,43],renorm_:42,rep:36,repackag:30,reparameter:[15,22],reparametr:[15,23],reparametriz:15,repeat:[15,23,36,42,43,47],repeat_interleav:[42,43],repeatedli:[28,38,43],repetit:43,repl:1,replac:[7,13,19,22,25,31,32,35,36,42,43],replic:[13,22,23],replica:[13,14,22],replication_pad:36,replicationpad2d:23,repo:[4,17,35,37,46],repo_nam:17,repo_own:17,report:[1,2,5,28,46],repositori:[5,17,29,32],repr:43,repres:[1,8,13,15,16,19,22,25,29,31,36,37,38,40,43,49],represent:[19,22,36,38,42,49],reproduc:[4,9,10,11,17,18,22,23,37,42,43],request:[5,14,25,28],requir:[1,3,5,7,13,14,15,17,19,21,22,23,25,27,28,29,30,31,32,36,37,41,42,43,44,46],require_grad:1,require_grad_:42,requires_grad:[1,15,22,23,29,38,42,43],requires_grad_:[1,22,23,38,42,43],rerun:3,res:43,resampl:47,rescal:[22,23,47],research:[4,17],reset:[8,22,36,43],reset_max_memory_alloc:8,reset_max_memory_cach:8,reshap:[22,36,38,41,42,43,47],reshape_a:[36,42],reshuffl:13,resid:[14,22,28,42,43],residu:[43,46],resili:37,resiz:[22,23,39,42,43,46,47],resize_:[1,19,39,42,43],resize_as_:[1,42],resizeas_:38,resized_crop:47,resnet101:46,resnet152:46,resnet18:[17,19,20,25,46],resnet34:46,resnet50:[17,41,46],resnet:[17,19,36,41],resnext101_32x8d:46,resnext50_32x4d:46,resolut:[22,23],resolv:[4,5,15,19,22,35,36],resourc:[13,21,44],respect:[1,14,15,22,37,39,42,43,44,47],respond:4,respons:[2,4,5,14,15,22,23,28],rest:[4,13,38],restart:[21,37,41],restor:[3,34,43],restrict:[13,19,22],restructur:4,result:[1,2,4,7,8,13,14,15,19,22,23,24,25,26,27,28,29,30,33,36,37,38,40,41,42,43,46,47],result_avg:47,resum:[37,41],retain:[1,21,32,42,43],retain_grad:[1,42],retain_graph:[1,42],rethink:46,retreiv:3,retriev:[1,13,22,23,31],return_count:[42,43],return_indic:[22,23],return_invers:[42,43],return_typ:43,reus:[1,14,25,47],reveal:38,revers:[15,19,22,25,42,43,47],revert:[5,22],review:5,reward:15,rewrit:25,rfft:[42,43],rfloor:[22,23,43],rgb:[22,41,46,47],rgba:47,rho:37,riba:22,richard:[6,15],richardson:6,right:[4,14,17,21,22,23,36,37,43,47],right_ankl:46,right_ear:46,right_elbow:46,right_ey:46,right_hip:46,right_kne:46,right_should:46,right_wrist:46,risk:4,riski:4,rmsprop:37,rng:[3,8,13,30,33,43],rnn:[19,30,36,41],robin:14,robust:21,roll:[42,43],roof:1,root:[25,38,43,44],ross:22,rot90:[42,43],rotat:[15,43,47],rough:4,roughli:[13,43],round:[14,36,42,43],round_:42,roundtrip:4,routin:43,row:[13,23,38,41,42,43,48],row_limit:1,rpn:46,rprop:37,rrelu:36,rrelu_:23,rsampl:15,rsqrt:[42,43],rsqrt_:42,rst:4,rsub:36,rtol:[1,19,42,43],rule:[1,14,15,19,22,25,26,42,43],run:[1,2,3,4,13,14,19,21,22,25,27,28,30,31,32,33,35,36,37,41,43],run_14h:41,run_fn:[1,3],runnabl:41,running_mean:[22,23],running_var:[22,23],runtim:[1,3,7,14,21,32],runtimeerror:[1,19,26,35,36,42,43],runtimewarn:15,rv0:19,rv1:19,rvert:43,rvert_p:[22,23],s_min:22,s_n:22,sacrif:46,safe:[8,19,22,31],safest:[7,38],sai:[4,19,30,36,42,44],sam:[5,6],same:[1,4,7,8,13,14,15,17,19,21,22,23,25,26,27,28,30,31,32,33,36,38,39,41,42,43,46,47,48],sampl:[13,15,22,23,24,31,33,41,42,44,47],sample_input_cpu:19,sample_input_gpu:19,sample_n:15,sample_r:41,sample_shap:15,sandwich:46,sane:43,satisfi:[1,12,15,22,37,42,43],satur:[28,47],saturation_factor:47,save:[1,3,4,14,19,20,22,25,32,36,37,41,42,43,44,48],save_for_backward:[1,29],save_imag:48,saved_tensor:[1,25,29],saved_weight:42,sax:24,sbd:45,sbdataset:44,sbu:45,sbucapt
ionedphotodataset:44,scalar:[1,19,22,23,24,36,37,38,41,42,43],scalar_valu:41,scale:[4,13,15,18,22,23,24,30,37,42,43,46,47,48],scale_each:48,scale_factor:[22,23],scale_fn:37,scale_grad_by_freq:[22,23],scale_mod:37,scale_tril:15,scatter:[8,14,22,30,36,42],scatter_:[36,42],scatter_add:[36,42],scatter_add_:[33,42],scatter_list:14,scenario:[13,28,36],scene:41,schedul:[31,37],schema:19,scheme:42,schmidtm:37,sci_mod:43,scientif:43,scipi:[23,41,44],scissor:46,scope:[4,19,22,30,36],score:[22,46],scrambl:43,scratch:[4,25],script:[2,13,14,17,22,27,31],script_method:[19,36],scripted_fn:19,scripted_modul:19,scriptmodul:[19,36],scrutini:4,search:[4,19,46],seat:5,sebastian:6,second:[1,3,7,19,22,23,30,33,34,35,38,41,43,47],section:[4,13,15,19,21,22,29,32,41,42],see:[1,2,3,4,5,7,8,9,10,11,13,15,17,19,20,21,22,23,25,28,29,30,31,32,35,36,38,40,42,43,46,47,48],seed:[8,13,30,33,43,44],seed_al:8,seek:[19,43],seem:[4,36,47],seen:[1,15,22,37,42,43],segfault:21,segment:[3,44,45,47],select:[8,11,12,13,19,21,22,23,27,28,36,42,43,44,46,47],self:[1,13,19,22,25,26,27,29,30,36,37,39,42,43],self_cpu_time_tot:1,selu:36,semant:[5,8,18,19,36,43,44,45,47],semi:[22,24],semidefinit:43,send:[4,8,13,14,21,32,35,43],sender:14,sens:[2,15,43],sensit:[22,36],sent:[8,14,21,32,43],separ:[1,7,13,14,17,19,22,23,27,37,41,43,48],seq:[1,22,42,43],seq_len:22,sequenc:[1,8,13,15,19,22,23,28,30,36,37,42,43,47],sequenti:[3,13,21,36],sequentialsampl:13,sequnc:22,seri:22,serial:[13,18,19,20,28,31,32],serializ:[19,36],seriou:[21,34],serr:44,serv:[4,14],server:[4,13],sess:36,set:[0,1,3,5,7,8,11,13,14,15,17,19,20,21,22,23,24,26,27,28,29,30,31,32,33,35,36,37,41,42,43,44,46,47],set_:[1,42],set_default_dtyp:43,set_default_tensor_typ:43,set_detect_anomali:1,set_devic:[8,22,40],set_dir:17,set_flush_denorm:43,set_grad_en:[1,43],set_image_backend:45,set_num_interop_thread:[27,43],set_num_thread:[27,43],set_printopt:43,set_rng_stat:[8,43],set_rng_state_al:8,set_sharing_strategi:21,set_start_method:32,set_stat:43,set_trac:19,set_train:36,setapiusagehandl:31,setapiusagelogg:31,setexportmoduleextrafileshook:31,setsamplingprob:31,setup:7,setuptool:7,sever:[14,19,22,23,27,28,31,37,43,47],sgd:[13,22,25,37],sgdr:37,sgn:43,sha256:20,shadow:47,shall:22,shallow:22,shamelessli:43,shape:[1,8,15,19,22,23,25,26,30,36,38,41,42,43,44,46,47,48],shape_as_tensor:36,shard:[4,13],share:[1,4,7,8,13,15,16,22,27,32,35,36,39,42,43],share_memori:32,share_memory_:[21,39,42],shared_memori:21,sharedfil:14,shazeer:22,shear:47,sheep:46,shell:7,shen:6,shi:22,shift:[22,42,43,47,48],ship:27,shippabl:4,shm_open:21,shorter:36,shortest:22,shorttensor:[40,42],should:[1,2,3,4,5,7,8,13,14,15,17,19,20,21,22,23,28,29,30,32,33,35,36,37,38,41,42,43,44,46,47],shouldn:38,shout:36,show:[0,2,4,13,14,17,27,28,37,41],showcas:[5,22,28,32],shown:[8,19,29,30],shrinkag:[22,23],shuffl:[13,41,44],shufflenet_v2_x0_5:46,shufflenet_v2_x1_0:46,shufflenet_v2_x1_5:46,shufflenet_v2_x2_0:46,shufflenetv2:46,shut:13,side:[1,7,17,19,22,23,36,37,43,47],sigma:[15,22,23,42],sigmoid:[15,24,36,42,43],sigmoid_:42,sigmoidtransform:15,sign:[4,15,36,40,42,43,46],sign_:42,signal:[21,22,23,32,43],signal_2d:22,signal_4d:22,signal_ndim:[42,43],signal_s:[42,43],signatur:[1,13,22,42,43],signific:[1,25,28,37],significantli:22,silent:[8,19,22,43],sim:[22,23,43],similar:[4,13,19,21,22,23,27,29,38,42,43,44,49],similarli:[4,19,22,30,36,43],simon:6,simpl:[17,19,22,23,29,30,31,33,36],simplecustombatch:13,simplequeu:32,simpler:29,simplest:22,simplex:15,simpli:[1,7,13,15,19,22,25,38],simplifi:[19,22,37],simultan:25,sin:[7,36,41,42,43],sin_:42,sin_
add:7,sinc:[4,8,13,14,15,19,22,23,29,30,31,35,36,37,38,43,47],sine:43,singl:[7,14,15,17,19,21,22,23,25,27,28,29,32,36,37,39,40,42,43,47],singleton:[15,22,26,42,43],singular:43,sinh:[42,43],sinh_:42,sink:46,site:4,situat:[15,21,32],size:[1,4,8,13,14,15,19,22,23,25,26,28,29,30,31,36,37,38,39,41,42,43,44,46,47,48],size_averag:[22,23],sizedim:42,sizeof:39,skateboard:46,skew:[1,2],ski:46,skip:[29,37],sky:44,slack:4,slice:[19,22,23,36,42],slide:[22,23,43],slightli:[5,13,17,46],slogdet:[42,43],slope:[22,24],slow:[32,41],slower:[2,14,23,46],small:[1,4,5,8,13,14,15,19,22,23,28,29,30,43],smaller:[13,37,42,43,47],smallest:[38,43,49],smart:29,smessmer:6,smi:[8,28,30],smnt:44,smoke:44,smooth:[22,36,37],smoother:17,smoothl1loss:23,snd_tensor:41,snedecor:15,snippet:17,snow:44,snowboard:46,snowi:44,sobol:43,soboleng:43,sobolengin:43,socket:21,sofa:46,soft:[22,23,46],softmarginloss:23,softmax:[15,36],softmaxtransform:15,softplu:36,softshrinkag:22,softwar:37,solid:47,solut:[4,24,32,43],solv:[4,35,42,43],solver:43,some:[1,3,4,5,8,11,14,15,17,19,21,22,23,25,28,29,30,31,32,33,34,35,36,37,38,41,42,43,44,46,47],some_dict:19,someon:4,someth:[4,19,21,35,43],sometim:[4,19,21,22,23,30,32,43],somewher:31,sophist:37,sort:[1,22,30,42,43],sort_bi:1,sorted_indic:22,soumith:[5,6],sound:41,sourc:[0,1,2,3,7,8,13,14,15,17,19,21,22,23,24,30,31,33,36,37,38,39,41,42,43,44,45,46,47,48],space:[13,15,19,22,23,43,47],spadd:38,span:[8,22,42],spars:[1,18,24,37,40,42,43],sparse_:24,sparse_coo:[38,40,42,43],sparse_coo_tensor:[38,42,43],sparse_dim:[38,42,43],sparse_grad:43,sparse_mask:[38,42],sparseadam:[22,37],sparseaddmmbackward:38,sparsedim:42,sparsefloattensor:38,sparsetensor:[1,38,42,43],sparsiti:24,spatia:23,spatial:[22,23],spatio:22,spawn:[13,22,32,35],spawncontext:21,speak:[38,43],special:[4,22,29,31,41,43],specif:[1,3,4,5,7,8,14,15,17,19,20,22,28,32,33,34,36,37,41,42,43],specifi:[1,7,8,13,14,15,17,19,20,22,23,28,29,31,35,36,37,38,39,41,42,43,44,45,47,48],specifii:36,spectral:22,speed:[4,22,27,28,30,33,43],spend:[2,4],spent:[1,2,14],sphinx:4,split:[4,13,19,22,23,36,42,43,44],split_siz:[42,43],split_size_or_sect:43,spmm:38,sponsorship:5,spoon:46,sport:46,spotri:43,spread:[8,28],sqrt:[22,24,36,38,42,43],sqrt_:42,squar:[22,23,37,38,43,47],squeez:[29,36,38,42,43],squeeze_:42,squeezenet1_0:46,squeezenet1_1:46,src:[8,14,22,42,43],src_key_padding_mask:22,src_mask:22,src_tensor:14,src_vocab:22,srinivasan:6,ssa:19,sse3:43,ssnl:6,sspaddmm:38,sspmm:38,stabil:[22,37,43],stabl:[15,22,35,36,43],stack:[8,13,15,22,28,36,43,47],stacktransform:15,stage:4,stagnat:37,stai:[4,22,32],stand:19,standalon:19,standard:[4,15,19,22,24,27,32,36,42,43,44,47],star:22,start:[1,2,5,8,13,14,21,22,23,26,28,30,32,35,36,37,42,43],start_dim:[42,43],startpoint:47,startup:[2,27],stash:[1,3,29],stat:22,state:[1,3,8,13,15,19,22,28,32,35,37,43],state_dict:[17,20,22,32,34,36,37],statement:[25,29,32,36],staticmethod:[1,29],statist:[8,15,22,30],statu:[21,43],std:[7,24,31,35,42,43,46,47],std_mean:43,stddev:15,stderr:[20,46],stdin:1,stdout:37,step:[2,5,7,13,14,15,19,22,23,28,30,32,33,35,41,42,43,44],step_between_clip:44,step_siz:37,step_size_down:37,step_size_up:37,steplr:37,stft:[42,43],stick:15,stickbreakingtransform:15,still:[1,14,15,19,21,22,28,30,35,37,43],stirl:[22,23],stl10:45,stl10_binari:44,stochast:[13,15,22,37],stop:[8,15,22,37,43,46],storag:[1,8,18,19,20,21,22,25,28,32,40,42,43],storage_offset:[42,43],storage_typ:42,storageshar:35,store:[1,3,7,14,17,19,22,30,31,38,41,42,43,44],store_tru:28,str:[1,7,14,19,21,22,23,37,39,41,42,44,47],straight:23,strategi:[4,13,14,19,
22],stream:[13,44],strict:[19,22],strictli:[5,13,22,25],stride:[1,22,23,36,40,41,42,43],strike:4,string:[0,1,7,8,14,17,19,20,22,23,31,36,39,40,41,42,43,44,45],stringio:[19,43],strip:[23,36],strip_doc_str:36,strive:4,strong:5,strong_wolf:37,strongli:[5,17,22,27],struct:31,structur:[4,5,13,19,22,28,29,32,34,35,36,41,42,43],student:15,studio:35,style:[19,43],styliz:22,sub:[19,22,36,38,42,43],sub_:[38,42],subclass:[1,7,13,15,19,22,29,42,44],subdir:44,subfold:7,subgradi:37,subgraph:22,subject:43,submatrix:15,submit:8,submodul:[19,22,36],subpackag:46,subprocess:[13,30,32],subsequ:[4,7,19,22],subset:[13,14,19,22,36,46],subsetrandomsampl:13,subspac:[22,42,43],substanti:5,substitut:40,subsystem:4,subtl:[4,22],subtleti:[13,22,30],subtli:37,subtract:[23,42,47],subtyp:19,succe:[14,35],succeed:43,success:[5,15,43],successfulli:[21,22,43],succinct:17,suffici:[7,15,17,36,43],suffix:[41,42],sugar:19,suggest:[5,17,19,22,30],suhan:6,suit:[19,36],suitabl:[13,15,37],suitcas:46,suitibl:41,sum:[1,8,13,14,15,23,28,29,36,37,38,42,43],sum_:[22,43],sum_i:22,sum_j:[22,23,43],sum_pair:19,sum_to_s:42,summar:[2,43],summari:[1,41,43,46],summarywrit:41,summat:43,sunset:[5,6],suo:6,superresolut:36,supervis:22,suppli:[3,4,7],support:[1,4,5,7,8,13,14,15,17,19,21,22,23,25,26,27,32,35,37,38,40,41,42,43,44,45],suppos:[13,38,43,47],sure:[1,4,13,14,19,22,25,30,35,36,37,41,43],surfboard:46,surg:43,surpass:24,surpris:17,surrog:15,surround:19,sutskev:37,svd:[42,43,47],svhn:45,svi:15,swap:[22,23,42,43],symbol:[35,36],symbolic_fn:36,symbolic_foo_forward:36,symbolic_help:36,symbolic_nam:36,symbolic_opset10:36,symbolic_opset9:36,symbolic_opset:36,symeig:[42,43],symmetr:[43,47],symmetri:43,sync:22,sync_bn_modul:22,sync_bn_network:22,synchron:[2,8,22,27,28,32],syntact:5,syntax:19,sys:17,system:[4,7,8,19,22,25,28,31,35,43],t4d:23,t_max:37,tabl:[1,14,19,22,23,46],tag:[1,4,14,17,31,41,43],tag_nam:17,tag_scalar_dict:41,taiwan:41,take:[1,2,4,5,7,8,13,15,17,19,21,22,23,27,29,30,33,35,36,40,41,42,43,44],taken:[15,22,23,28,30,31,36,43,44],talk:31,tall:42,tan:[36,41,42,43],tan_:42,tangent:43,tanh:[24,36,42,43],tanh_:42,tanx:41,tape:4,tar:44,tarbal:44,target:[22,23,32,37,41,42,44,46,47],target_length:[22,23],target_n:22,target_transform:44,target_typ:44,task:[1,4,22,27,35,46,47],tau:[23,43],tbb:27,tdr:35,team:[4,5],technic:[4,5,30],techniqu:22,teddi:46,tell:[1,4,19,42,43],temperatur:[15,23],tempor:[22,23],temporari:[7,22,30],temporarili:36,ten:[19,41],ten_crop:47,tencrop:47,tend:4,teng:6,tenni:46,tensor1:[42,43],tensor2:[42,43],tensor:[3,4,7,8,13,14,15,16,18,19,22,23,24,25,26,27,28,29,30,32,33,35,36,37,38,39,41,44,45,46,48],tensor_a:43,tensor_b:43,tensor_list:14,tensorboard:18,tensordataset:13,tensordot:43,tensorflow:[15,41],term:[5,15,22,23,30,37,42,43],termin:[21,37],terminolog:22,test10k:44,test50k:44,test:[7,19,21,28,29,41,43,44,46,47],text:[4,15,22,23,24,41,42,43,47],text_str:41,texttt:[42,43],tgt:[13,22],tgt_key_padding_mask:22,tgt_mask:22,tgt_vocab:22,thalloc:35,than:[1,2,3,5,7,8,13,14,17,19,22,23,24,25,26,28,29,30,32,33,36,37,38,41,42,43,44,45,46,47,48],thank:[15,29],thc:35,thc_state:35,thcstate:35,thcudacheck:35,thcudatensor:35,thcudatensor_cadd:35,thcudatensor_fil:35,thcudatensor_issamesizea:35,thcudatensor_resizea:35,the_model:34,thei:[1,3,4,5,8,13,14,15,19,21,22,23,28,29,32,35,36,37,38,42,43,44,46,47],them:[1,3,4,13,14,17,19,21,22,23,25,26,29,30,31,35,37,38,41,42,43,44],themodelclass:34,themselv:[1,43],therebi:13,therefor:[1,3,13,14,15,19,22,23,30,31,37,38,42,43,47],theta:[15,23],thi:[1,2,3,4,5,7,8,9,10,11,13,14,15,17,19,21,22,23,24,25,
27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49],thin:43,thing:[1,4,23,25,30,32,38],think:4,third:[15,22,43],thoma:6,those:[1,2,8,13,19,22,23,28,37,43],though:[14,19,32],thrash:14,thread:[1,8,13,14,18,19,22,31,32,36,43],three:[14,19,22,36,37,41,44],threej:41,threshold:[36,37,41,43,46],threshold_:23,threshold_mod:37,through:[1,3,4,5,13,15,17,19,21,22,23,30,35,42,43,49],throughout:22,thrown:[42,43],thtensor:42,thtensorrandom:43,thu:[1,13,14,15,19,22,23,30,31,36,42,43],thumb:14,tie:[15,46],tile:42,time:[1,2,4,7,8,13,14,15,19,21,22,23,24,25,27,30,31,32,35,36,37,38,41,42,43,46,47],timedelta:14,timelin:[1,2],timeout:[13,14,21],tini:[42,49],tip:4,tl_flip:47,tmp:[1,7],to_dens:38,to_dlpack:16,to_grayscal:47,to_mkldnn:42,to_pil_imag:47,to_spars:[38,42],to_tensor:47,toaster:46,todens:38,togeth:[13,14,15,22,30,31,41,43,47],toilet:46,token:17,tol:43,toler:[1,19,37,43],tolerance_chang:37,tolerance_grad:37,tolist:[39,42],too:[4,22,23,30,32,35,38],tool:[1,2,5,17,19,35],toothbrush:46,top:[1,13,15,21,22,23,29,43,44,46,47],topic:[5,31],topilimag:47,topk:[36,42,43],topolog:1,torch:[5,11,12,18,19,25,26,27,28,30,31,32,33,34,35,44,45,46],torch_14808_1591070686:35,torch_extens:7,torch_extensions_dir:7,torch_hom:[17,20],torch_model_zoo:46,torch_shm_manag:21,torchscript:[18,36],torchvis:[17,18,19,25,36,41],toronto:22,total:[1,2,4,13,17,22,23,37,43],total_averag:1,total_count:15,total_length:[22,30],total_loss:30,totensor:[41,44,47],touch:[4,36],toward:[5,36,43],tr_flip:47,trace:[1,4,13,25,28,42,43],trace_modul:19,trace_nam:1,traceabl:19,traceback:[1,21],traced_bar:19,traced_cpu:19,traced_fn:19,traced_foo:19,traced_gpu:19,tracer:36,tracerwarn:19,track:[1,3,8,21,22,28,29,30,31,42],track_running_stat:22,tracker:[4,5],trade:[3,22],traffic:46,trail:[22,24,26,29],train2017:46,train:[8,13,14,17,19,22,23,24,25,30,36,37,41,44,46,47],train_batch:37,train_extra:44,train_load:28,train_nov:44,trainabl:37,trainload:41,trainset:41,trainval:44,tranform:15,transb:36,transfer:[13,14,21,28],transform:[13,18,23,25,41,43,44,45,46],transform_input:46,transform_to:15,transformation_matrix:47,transformed_distribut:15,transformer_decod:22,transformer_encod:22,transformer_model:22,transit:19,translat:47,transpos:[22,23,25,36,38,42,43],transpose_:[1,38,42],transposed_data:13,trapezoid:43,trapz:43,travers:[22,29],treat:[15,19,22,23,37,40,42,43],tree:[4,19,22,44],tri:[4,19,21,22,42],triag:5,trial:15,triangl:41,triangular2:37,triangular:[15,23,37,43],triangular_solv:[42,43],trick:[15,22,23,31,46],tricki:25,trigger:[1,4,5,31,42,43],tril:[42,43],tril_:42,tril_indic:43,trilinear:[22,23],trim:43,tripl:23,triplet:22,triplet_loss:22,tripletmarginloss:23,triu:[42,43],triu_:42,triu_indic:43,trivial:43,trou:22,troubleshoot:4,truck:46,truli:36,trunc:[42,43],trunc_:42,truncat:[30,43],truth:[41,46],tseq:15,tune:[14,37],tup:19,tupl:[1,3,8,13,21,22,23,29,36,37,38,41,42,43,44,47,48],tuple_or_list:19,turn:[7,13,19,22,36],tutori:[5,29,31,36],tvmonitor:46,twice:[30,46],two:[1,2,7,8,13,14,15,17,19,22,23,25,26,27,28,29,33,34,35,37,38,41,42,43,44,46],twse:41,txhxwxc:44,txt:19,type:[1,7,8,14,15,17,18,22,23,27,28,31,35,36,38,39,40,41,42,43,44,46,47],type_a:[36,42],type_p:15,type_q:15,typic:[7,13,15,19,22,27,49],typo:4,ubc:37,ucf101:45,ucf:44,uint8:[40,41,42,43,47,49],uint8_t:42,uint8tensor:46,ultim:[5,7],umbrella:46,unabl:[4,37],unbalanc:22,unbatch:43,unbias:[42,43],unbind:[36,42,43],unchang:[22,42,43,47],uncoalesc:[38,43],uncondition:17,unconstrain:15,undefin:[14,19,28,42,43],under:[1,2,13,21,22,23,25,28,32,35,41,43],underli:[8,15,19,23,30,42,43],un
derscor:[17,38,42],understand:[4,5,22,24,25,41],understood:43,undertak:4,underwai:1,undesir:[11,22,23],undetermin:33,unequ:22,unexpect:[14,19,22],unexpected_kei:22,unexpectedli:42,unfold:[19,36,42],unfortun:[1,3,5,22],unicodedecodeerror:43,uniform:[22,24,42,43],uniform_:[24,29,42,43],uniformli:[15,43,47],uniniti:[42,43],uniqu:[14,19,20,42,43],unique_consecut:[42,43],unit:[22,23,43],unit_interv:15,unitari:43,unitriangular:[42,43],univari:15,univers:19,unix:[13,21],unlabel:44,unless:[1,2,4,13,14,22,25,28,42,43],unlik:[4,19,21,22,32,42],unmask:22,unnecessari:28,unnorm:[22,23],unnot:21,unoccupi:8,unord:22,unpack:[22,29,30,43],unpack_data:43,unpack_pivot:43,unpickl:[13,43],unpool:22,unpooled_output:22,unreduc:22,unrel:4,unresolv:35,unrol:[19,36],unseg:22,unsign:[40,42],unsort:22,unsorted_indic:22,unspecifi:[14,42,43],unsqueez:[22,29,36,41,42,43],unsqueeze_:42,unstabl:[15,23,43],unsuccess:14,unsupport:19,until:[4,8,14,21,23,25,28,30,41],untouch:13,untrack:19,untrain:36,unus:[8,22,28],unused_argument1:22,unused_argument2:22,unusu:4,upcal:19,upcom:14,updat:[1,5,17,19,22,32,35,36,37,41,42],upgrad:37,upon:[13,21],upper:[15,22,23,24,37,42,43,47],upper_bound:15,uppercas:14,ups:4,upsample_nearest1d:36,upsample_nearest2d:36,upsample_nearest3d:36,upsample_trilinear:23,upscal:22,upscale_factor:[22,23],upstream:35,url:[1,14,17,20],usa:41,usag:[1,2,4,8,13,15,25,30,36,41,42,43],use:[1,3,4,5,7,8,13,15,17,19,21,22,23,24,25,27,28,29,30,31,32,33,35,36,41,42,43,44,46],use_cuda:1,use_gpu:19,use_input_stat:23,use_mkldnn:27,use_openmp:27,use_tbb:27,used:[1,2,4,7,8,12,13,14,15,17,19,20,22,23,24,27,28,29,31,32,34,35,36,37,38,40,41,42,43,44,45,46,47,48],useful:[1,4,13,15,17,19,22,23,25,27,29,31,37,43,47],user:[1,3,8,13,14,15,17,21,22,28,31,32,36,41,42,43,47],userwarn:[26,36],uses:[1,2,8,13,14,17,19,22,23,27,28,29,30,31,35,37,43,44,45],using:[1,3,4,5,7,9,10,11,13,14,15,17,18,19,21,22,23,24,25,27,28,29,30,31,32,33,35,36,37,41,42,43,44,46,47],usp:45,usual:[1,4,7,13,19,22,27,30,31,36,41,42,43],uszkoreit:22,util:[4,8,18,26,27,28,29,30,31,36,37,44,45,46],v100:[12,22,46],v_1:22,v_2:22,val2017:46,val:[24,42,44],val_loss:37,valid:[1,14,15,19,22,36,37,43,44],valid_fil:44,validate_arg:15,valu:[1,3,4,5,7,13,14,15,17,20,21,22,23,24,25,27,28,29,30,33,36,37,38,41,42,43,44,46,47,48],valueerror:22,var1:37,var2:37,var_mean:43,vari:[22,37],variabl:[3,7,8,15,17,20,22,27,28,30,33,35,36,37,41,42,43,46],variabletyp:36,varianc:[15,22,24,33,37,43],variant:[31,37,43],variat:15,variou:[3,5,7,13,21,32,34,37],vase:46,vaswani:22,vc2017:35,vdim:22,vec1:[42,43],vec2:[42,43],vec:[22,42,43],vector:[1,15,22,23,41,42,43,44,47],vehicl:5,vein:19,veloc:37,verbos:[7,36,37,41],veri:[1,2,4,19,21,22,25,29,30,32,35,36,37,46],verifi:[7,19,20,29,36],verify_ninja_avail:7,versa:[22,39,42,43],version:[3,15,17,19,22,23,25,26,28,29,31,35,36,37,42,43,44,47],versu:[4,22],vert:[22,23],vertex:41,vertic:[41,47],vertical_flip:47,vertices_tensor:41,vflip:47,vgg11:46,vgg11_bn:46,vgg13:46,vgg13_bn:46,vgg16:46,vgg16_bn:46,vgg19:46,vgg19_bn:46,vgg:36,via:[1,4,7,8,13,14,15,19,21,22,24,30,32,36,37,40,43],vice:[22,39,42,43],vid_tensor:41,video:[22,41,44],videoclip:44,viehmann:6,view:[1,4,5,13,19,21,22,23,26,36,40,42,43,44,47],view_a:42,violat:5,virtual:36,vishwak:6,vishwakftw:6,visibl:[8,14,22,46],vision:[5,17,45,46],visual:[1,22,35,41],vitali:6,vitalyfedyunin:6,voc2012:44,voc:[45,46],vocdetect:44,vocsegment:44,volumetr:[22,23],vs2017:35,vs2017_runtim:35,vw_i:22,vychisl:43,w_hf:22,w_hg:22,w_hi:22,w_hn:22,w_ho:22,w_hr:22,w_hz:22,w_if:22,w_ig:22,w_ii:22,w_in:22,w_io:22,w_ir:22,w_iz:22,
w_j:22,w_n:22,w_y:27,w_z:27,wai:[1,3,4,5,7,13,14,15,19,21,22,23,29,30,32,33,34,35,36,37,38,42,43,44,46],wait:[1,8,14,21,22,27,37],wait_ev:8,wait_stream:[8,28],walltim:41,wang:6,want:[4,8,13,14,17,19,22,23,25,28,29,36,37,38,42,43],warm:37,warmup:1,warn:[26,36,43],wasn:43,wast:4,weaker:15,weight:[13,17,19,23,24,25,29,30,36,37,41,42,43,46],weight_decai:37,weight_g:22,weight_hh:22,weight_hh_l:22,weight_ih:22,weight_ih_l:22,weight_u:22,weight_v:22,weighted_kernel_sum:19,weightedrandomsampl:13,weird:[22,46],welcom:4,well:[1,4,7,13,14,19,22,23,25,32,33,36,38,41,43,46],were:[1,14,19,22,29,36,38,42,43],what:[1,3,4,5,15,17,19,22,25,29,36,37,38,44],whatev:[42,43],wheel:35,when:[1,2,4,5,7,8,9,10,11,13,14,15,17,19,21,22,23,25,26,28,29,30,31,32,33,34,35,36,37,38,41,42,43,44,46,47],whenev:[5,21,22,23],where:[1,2,4,5,7,13,14,15,19,20,21,22,23,24,25,26,28,33,36,37,38,40,41,42,43,44,46,48],wherev:5,whether:[1,4,5,7,13,14,15,17,20,22,23,28,29,36,37,38,39,41,42,43,44],which:[1,2,4,5,7,8,13,15,17,19,20,21,22,23,25,26,27,28,30,31,33,35,36,37,38,39,40,41,42,43,44,46,47,49],whilst:[15,28],white:47,whiten:47,who:4,whole:[13,14,22,32],whose:[13,15,22,25,36,41,43,44],why:[4,36],wide:27,wide_resnet101_2:46,wide_resnet50_2:46,width:[15,22,23,36,43,47],wikipedia:23,willing:5,win:43,win_length:[42,43],window:[13,18,22,23,42,43],window_length:43,wine:46,wip:4,wise:[14,15,22,23,27,43],wish:19,wit:19,with_cuda:[7,35],with_replac:43,within:[1,3,5,8,13,14,15,19,22,27,28,31,36,41,43],without:[1,3,4,5,8,13,15,17,19,21,22,23,26,28,32,36,41,42,43,46,47,49],won:[3,17,22,23,25,29,36,43],woodburi:15,word:[1,14,19,22,23,30],word_language_model:36,work:[1,3,4,5,7,8,14,15,17,19,21,22,23,25,27,28,29,32,35,38,42,43],worker:[4,13,14,22,44],worker_id:13,worker_info:13,worker_init_fn:[13,30],workflow:17,workground:35,workload:[13,14,31],workspac:[7,36],world:[14,22],world_siz:[14,22],worth:17,would:[1,3,5,13,14,15,19,22,26,27,28,36,38,42,43],wrap:[1,13,19,22,35,37,42],wrapper:[8,14,19,21,22,29],write:[4,5,19,23,25,28,30,36,37,38,41,42,43],writer:41,written:[1,19,22,36,37,39,41,43],wrong:[32,35,37],wrote:4,www:[22,37,41,44],x86:43,x86_x64:35,x_0:43,x_1:[22,23,43],x_2:[22,23,43],x_3:43,x_clone:21,x_cpu:28,x_cpu_long:28,x_gpu:28,x_i:[22,23,43],x_j:[22,23],x_n:22,x_t:22,xavier_normal_:24,xavier_uniform_:24,xcosx:41,xdg_cache_hom:[17,20],xing:43,xml:44,xsinx:41,xxx:44,xxy:44,xxz:44,y_cpu:28,y_cpu_long:28,y_gpu:28,y_hard:23,y_i:[22,43],y_n:22,y_soft:23,yang:[5,6],ycbcr:47,year:44,yes:4,yet:[8,43],yf225:6,yield:[13,22,43],yinghai:6,you:[1,2,3,4,5,7,8,11,13,14,15,17,19,21,22,23,25,26,28,29,30,31,32,33,35,36,37,38,40,41,42,43,44,46,47],your:[1,2,4,7,8,13,14,15,19,21,22,25,26,28,29,30,31,33,35,36,37,38,41,42,43,46,47],yourself:[32,35],z_t:22,zach:6,zdevito:6,zebra:46,zero:[1,8,15,19,21,22,23,24,28,35,36,38,41,42,43,47],zero_:[1,23,38,42],zero_grad:[22,30,32,37],zero_infin:[22,23],zero_point:42,zeros_:24,zeros_lik:[28,36,43],zhang:6,zhong:47,zip:[13,31],zipf:22,zou3519:6,zou:6},titles:["torch.__config__","Automatic differentiation package - torch.autograd","torch.utils.bottleneck","torch.utils.checkpoint","PyTorch Contribution Guide","PyTorch Governance","PyTorch Governance | Persons of Interest","torch.utils.cpp_extension","torch.cuda","<no title>","<no title>","<no title>","<no title>","torch.utils.data","Distributed communication package - torch.distributed","Probability distributions - torch.distributions","torch.utils.dlpack","torch.hub","PyTorch documentation","TorchScript","torch.utils.model_zoo","Multiprocessing package - 
torch.multiprocessing","torch.nn","torch.nn.functional","torch.nn.init","Autograd mechanics","Broadcasting semantics","CPU threading and TorchScript inference","CUDA semantics","Extending PyTorch","Frequently Asked Questions","Features for large-scale deployments","Multiprocessing best practices","Reproducibility","Serialization semantics","Windows FAQ","torch.onnx","torch.optim","torch.sparse","torch.Storage","Tensor Attributes","torch.utils.tensorboard","torch.Tensor","torch","torchvision.datasets","torchvision","torchvision.models","torchvision.transforms","torchvision.utils","Type Info"],titleterms:{"case":[4,19],"default":[13,19],"function":[1,5,14,15,19,22,23,36,38,47],"import":[17,35],"new":4,"return":30,Adding:[4,29,36],One:35,Ops:43,The:4,Use:[19,28],__config__:0,about:4,access:19,activ:[22,23],adaptive_avg_pool1d:23,adaptive_avg_pool2d:23,adaptive_avg_pool3d:23,adaptive_max_pool1d:23,adaptive_max_pool2d:23,adaptive_max_pool3d:23,adaptiveavgpool1d:22,adaptiveavgpool2d:22,adaptiveavgpool3d:22,adaptivelogsoftmaxwithloss:22,adaptivemaxpool1d:22,adaptivemaxpool2d:22,adaptivemaxpool3d:22,adjust:37,affine_grid:23,agnost:28,alexnet:[36,46],algorithm:37,alpha_dropout:23,alphadropout:22,amd:6,anomali:1,api:[27,31],approach:34,arithmet:19,ask:[19,30,36],asynchron:[14,28,32],aten:36,attach:31,attribut:[19,40],autograd:[1,6,25,29],automat:[1,13,19],avg_pool1d:23,avg_pool2d:23,avg_pool3d:23,avgpool1d:22,avgpool2d:22,avgpool3d:22,avoid:[4,32],backend:14,backward:[25,26],basic:14,batch:13,batch_norm:23,batchnorm1d:22,batchnorm2d:22,batchnorm3d:22,bceloss:22,bcewithlogitsloss:22,behavior:13,bernoulli:15,best:[28,32,34],beta:15,bilinear:[22,23],binary_cross_entropi:23,binary_cross_entropy_with_logit:23,binomi:15,bla:43,bottleneck:2,broadcast:26,broken:35,buffer:[28,32],bug:4,build:[4,6,27,31,35],builtin:19,c10:6,cach:[17,28],call:19,caption:44,cast:19,categor:15,cauchi:15,celu:[22,23],cffi:35,chang:5,channel:35,characterist:46,check:[1,19,25],checkpoint:3,chi2:15,choos:14,cifar:44,cityscap:44,classif:46,claus:35,clip_grad_norm_:22,clip_grad_value_:22,closur:37,cnn:46,coco:44,code:[4,19,28],codebas:4,collate_fn:13,collect:[8,14],come:14,common:[4,14,31],commun:[8,14],comparison:[19,43],compat:26,compon:35,comput:[1,43],consider:31,constant:19,constantpad1d:22,constantpad2d:22,constantpad3d:22,constraint:15,construct:[19,37],contain:22,contribut:4,controversi:5,conv1d:[22,23],conv2d:[22,23],conv3d:[22,23],conv_transpose1d:23,conv_transpose2d:23,conv_transpose3d:23,convers:47,convolut:[22,23,46],convtranspose1d:22,convtranspose2d:22,convtranspose3d:22,core:5,correct:[1,25],cosine_embedding_loss:23,cosine_similar:23,cosineembeddingloss:22,cosinesimilar:22,cpp:35,cpp_extens:7,cpu:[6,27],creat:19,creation:43,cross_entropi:23,crossentropyloss:22,ctc_loss:23,ctcloss:22,cuda:[6,8,21,28,30,32,35],cudnn:33,cufft:28,custom:[29,36],data:[13,30],data_parallel:23,dataload:6,dataparallel:[22,23,28],dataset:[13,44],datasetfold:44,deadlock:32,debug:19,decis:5,deeplabv3:46,defin:19,densenet:46,deploy:31,deprec:1,deriv:15,descriptor:21,detect:[1,44,46],develop:[4,5],devic:[28,40],dict:19,differenti:1,dirichlet:15,disabl:[1,13,19,43],discuss:4,distanc:[22,23],distribut:[6,14,15,22,23],distributeddataparallel:22,diverg:15,dlpack:16,doc:4,document:[4,18],doesn:30,down:35,download:17,driver:35,dropout2d:[22,23],dropout3d:[22,23],dropout:[22,23],dtype:40,edg:19,elu:[22,23],embed:[22,23],embedding_bag:23,embeddingbag:22,emnist:44,encod:25,end:36,engin:6,entrypoint:17,environ:[14,31],error:[30,35],event:8,exampl:36,exclud:25
,execut:28,exponenti:15,exponentialfamili:15,express:19,extend:29,extens:[8,29,31,35],fakedata:44,faq:[5,35],fashion:44,faster:46,featur:[4,31],fight:32,file:[14,21],file_descriptor:21,file_system:21,finfo:49,fishersnedecor:15,fix:4,flatten:22,fleet:31,flickr:44,fold:[22,23],found:35,fractionalmaxpool2d:22,freed:30,frequent:[19,30,36],from:[17,25,35,36],fulli:46,gamma:15,gelu:23,gener:[6,8,26,43,47],geometr:15,get:4,glu:23,googlenet:46,govern:[5,6],gpu:[14,22,23,30],gradient:[1,43],graph:19,grid_sampl:23,group:14,groupnorm:22,gru:22,grucel:22,guid:[4,5],gumbel:15,gumbel_softmax:23,halfcauchi:15,halfnorm:15,hardshrink:[22,23],hardtanh:[22,23],hinge_embedding_loss:23,hingeembeddingloss:22,hip:6,histori:25,hmdb51:44,hogwild:32,how:[17,25,37],hub:17,ident:[22,30],iinfo:49,imag:47,imagefold:44,imagenet:44,implement:[4,17],improv:4,incept:46,includ:35,independ:15,index:43,indic:18,infer:27,info:49,init:24,initi:14,inspect:19,instal:35,instanc:46,instance_norm:23,instancenorm1d:22,instancenorm2d:22,instancenorm3d:22,instead:28,interest:6,interfac:14,interpol:23,interpret:19,ipc:35,isn:30,issu:4,iter:13,jit:[6,19],join:43,kei:[5,35],keypoint:46,kinet:44,kl_div:23,kldivloss:22,kmnist:44,known:17,l1_loss:23,l1loss:22,languag:19,lapack:43,laplac:15,larg:31,launch:14,layer:22,layer_norm:23,layernorm:22,layout:40,leaky_relu:23,leakyrelu:22,learn:37,level:6,limit:[17,36],linear:[22,23],list:19,liter:19,load:[13,17],loader:30,local:[1,43],local_response_norm:23,localresponsenorm:22,log:31,log_softmax:23,logic:[17,19],logitrelaxedbernoulli:15,lognorm:15,logsigmoid:[22,23],logsoftmax:22,lookup:19,loss:[22,23],lowrankmultivariatenorm:15,lp_pool1d:23,lp_pool2d:23,lppool1d:22,lppool2d:22,lstm:22,lstmcell:22,lsun:44,maintain:[5,6],make:[4,5],manag:[8,21,28],map:13,margin_ranking_loss:23,marginrankingloss:22,mask:46,math:43,max_pool1d:23,max_pool2d:23,max_pool3d:23,max_unpool1d:23,max_unpool2d:23,max_unpool3d:23,maxpool1d:22,maxpool2d:22,maxpool3d:22,maxunpool1d:22,maxunpool2d:22,maxunpool3d:22,mechan:25,memori:[8,13,28,30],metadata:31,method:19,mistak:4,mix:19,mkldnn:6,mnasnet:46,mnist:44,mobilenet:46,model:[17,30,31,34,46],model_zoo:20,moder:5,modul:[6,19,22,29],moduledict:22,modulelist:22,more:4,mse_loss:23,mseloss:22,multi:[13,14,22,23],multi_margin_loss:23,multiheadattent:22,multilabel_margin_loss:23,multilabel_soft_margin_loss:23,multilabelmarginloss:22,multilabelsoftmarginloss:22,multimarginloss:22,multinomi:15,multiprocess:[6,21,28,32,35],multivariatenorm:15,mutat:43,nccl:14,negativebinomi:15,network:[14,30,46],nll_loss:23,nllloss:22,non:[13,22,23,36],nonlinear:22,normal:[15,22,23],notic:17,number:[8,30],numer:1,numpi:33,nvidia:8,nvtx:8,object:46,one_hot:23,onehotcategor:15,onlin:4,onnx:[6,36],open:4,oper:[1,14,19,25,31,35,36,43],optim:37,option:[19,27,35,37],order:13,other:[14,22,43],out:30,overview:4,pack_padded_sequ:22,pack_sequ:22,packag:[1,14,21,35],packedsequ:22,pad:[22,23],pad_packed_sequ:22,pad_sequ:22,pairwise_dist:23,pairwisedist:22,parallel:[30,43],paramet:[19,22,37],parameterdict:22,parameterlist:22,parameters_to_vector:22,pareto:15,particip:4,pass:32,pathwis:15,pdist:23,peopl:5,per:37,perform:6,person:[6,46],philosophi:5,phototour:44,pil:47,pin:[13,28],pipe:35,pixel_shuffl:23,pixelshuffl:22,place:[1,25,26,43],plan:28,platform:13,point:[14,31],pointwis:43,poisson:15,poisson_nll_loss:23,poissonnllloss:22,pool:[22,23],ppc:6,practic:[28,32,34],prelu:[22,23],probabl:15,process:[4,5,13],profil:[1,31],project:5,promot:4,properli:30,propos:4,protect:35,publish:17,pull:4,python:[4,19,35],pytorch:[4,5
,6,14,18,29,33,36],qmnist:44,quasi:43,question:[19,30,36],queue:32,random:[8,13,30,43],rate:37,readabl:4,recommend:34,recurr:[22,30],reduct:43,refer:19,refin:19,reflectionpad1d:22,reflectionpad2d:22,registri:15,relaxedbernoulli:15,relaxedonehotcategor:15,relu6:[22,23],relu:[22,23],remove_spectral_norm:22,remove_weight_norm:22,replicationpad1d:22,replicationpad2d:22,replicationpad3d:22,report:[4,30],reproduc:33,request:4,requires_grad:25,resnet:46,resnext:46,resolut:19,reus:32,review:4,rng:6,rnn:22,rnncell:22,robust:4,rocm:6,rrelu:[22,23],run:17,runtim:[27,30,46],sampl:43,sampler:13,save:[17,31,34],sbd:44,sbu:44,scale:31,score:15,script:[19,35,36],segment:46,selu:[22,23],semant:[26,28,34,46],sequenti:22,serial:[34,43],share:[14,21],shufflenet:46,shut:35,sigmoid:[22,23],simd:6,singl:13,slice:43,smooth_l1_loss:23,smoothl1loss:22,soft_margin_loss:23,softmarginloss:22,softmax2d:22,softmax:[22,23],softmin:[22,23],softplu:[22,23],softshrink:[22,23],softsign:[22,23],sourc:[4,35],spars:[22,23,38],spawn:[14,21],specif:13,spectral:43,spectral_norm:22,speed:35,squeezenet:46,start:4,statement:19,step:37,stl10:44,storag:39,strategi:21,stream:[8,28],studentt:15,style:13,subgraph:25,submit:4,subprocess:21,subscript:19,sum:22,support:36,svhn:44,syncbatchnorm:22,synchron:14,system:[14,21],tabl:18,take:37,tanh:[22,23],tanhshrink:[22,23],tcp:14,tenet:5,tensor:[1,21,40,42,43,47],tensorboard:41,ternari:19,test:4,thread:27,threshold:[22,23],through:32,tip:32,tool:8,torch:[0,1,2,3,6,7,8,13,14,15,16,17,20,21,22,23,24,29,36,37,38,39,40,41,42,43,47,49],torchscript:[19,27,31],torchvis:[44,45,46,47,48],trace:[19,36],tracer:19,train:32,transform:[15,22,47],transformeddistribut:15,transformerdecod:22,transformerdecoderlay:22,transformerencod:22,transformerencoderlay:22,triag:4,triplet_margin_loss:23,tripletmarginloss:22,tupl:19,tutori:4,type:[13,19,49],ucf101:44,uncontroversi:5,unfold:[22,23],uniform:15,upsampl:[22,23],upsample_bilinear:23,upsample_nearest:23,upsamplingbilinear2d:22,upsamplingnearest2d:22,usag:[31,35],use:[14,37],user:19,usp:44,util:[2,3,7,13,14,16,20,22,41,43,48],valu:19,variabl:[1,14,19],vector_to_paramet:22,vgg:46,vision:[22,23],voc:44,warn:19,weibul:15,weight:22,weight_norm:22,where:17,which:14,why:35,wide:[31,46],win:35,window:[6,35],without:35,work:[13,30],worker:30,write:29,xla:6,zeropad2d:22}}) \ No newline at end of file diff --git a/docs/stable/sparse.html b/docs/stable/sparse.html new file mode 100644 index 000000000000..d2d694af9eb1 --- /dev/null +++ b/docs/stable/sparse.html @@ -0,0 +1,912 @@ + + + + + + + + + + + + torch.sparse — PyTorch master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

    torch.sparse

Warning

This API is currently experimental and may change in the near future.

Torch supports sparse tensors in COO(rdinate) format, which can efficiently store and process tensors for which the majority of elements are zeros.

A sparse tensor is represented as a pair of dense tensors: a tensor of values and a 2D tensor of indices. A sparse tensor can be constructed by providing these two tensors, as well as the size of the sparse tensor (which cannot be inferred from these tensors!). Suppose we want to define a sparse tensor with the entry 3 at location (0, 2), entry 4 at location (1, 0), and entry 5 at location (1, 2). We would then write:

>>> i = torch.LongTensor([[0, 1, 1],
                          [2, 0, 2]])
>>> v = torch.FloatTensor([3, 4, 5])
>>> torch.sparse.FloatTensor(i, v, torch.Size([2,3])).to_dense()
 0  0  3
 4  0  5
[torch.FloatTensor of size 2x3]

Note that the input to LongTensor is NOT a list of index tuples. If you want to write your indices this way, you should transpose before passing them to the sparse constructor:

>>> i = torch.LongTensor([[0, 2], [1, 0], [1, 2]])
>>> v = torch.FloatTensor([3,      4,      5    ])
>>> torch.sparse.FloatTensor(i.t(), v, torch.Size([2,3])).to_dense()
 0  0  3
 4  0  5
[torch.FloatTensor of size 2x3]

You can also construct hybrid sparse tensors, where only the first n dimensions are sparse, and the rest of the dimensions are dense.

>>> i = torch.LongTensor([[2, 4]])
>>> v = torch.FloatTensor([[1, 3], [5, 7]])
>>> torch.sparse.FloatTensor(i, v).to_dense()
 0  0
 0  0
 1  3
 0  0
 5  7
[torch.FloatTensor of size 5x2]

An empty sparse tensor can be constructed by specifying its size:

>>> torch.sparse.FloatTensor(2, 3)
SparseFloatTensor of size 2x3 with indices:
[torch.LongTensor with no dimension]
and values:
[torch.FloatTensor with no dimension]

SparseTensor has the following invariants:

1. sparse_dim + dense_dim = len(SparseTensor.shape)
2. SparseTensor._indices().shape = (sparse_dim, nnz)
3. SparseTensor._values().shape = (nnz, SparseTensor.shape[sparse_dim:])

Since SparseTensor._indices() is always a 2D tensor, the smallest sparse_dim is 1. Therefore, the representation of a SparseTensor of sparse_dim = 0 is simply a dense tensor.
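As a concrete check of these invariants, consider the hybrid tensor from above (a sketch; sparse_dim() and dense_dim() are the public accessors for the two counts):

>>> i = torch.LongTensor([[2, 4]])
>>> v = torch.FloatTensor([[1, 3], [5, 7]])
>>> s = torch.sparse.FloatTensor(i, v, torch.Size([5, 2]))
>>> s.sparse_dim() + s.dense_dim() == len(s.shape)   # invariant 1
True
>>> s._indices().shape                               # (sparse_dim, nnz) = (1, 2)
torch.Size([1, 2])
>>> s._values().shape                                # (nnz, *shape[sparse_dim:]) = (2, 2)
torch.Size([2, 2])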

Note

Our sparse tensor format permits uncoalesced sparse tensors, where there may be duplicate coordinates in the indices; in this case, the interpretation is that the value at that index is the sum of all duplicate value entries. Uncoalesced tensors permit us to implement certain operators more efficiently.

For the most part, you shouldn't have to care whether or not a sparse tensor is coalesced, as most operations will work identically given a coalesced or uncoalesced sparse tensor. However, there are two cases in which you may need to care.

First, if you repeatedly perform an operation that can produce duplicate entries (e.g., torch.sparse.FloatTensor.add()), you should occasionally coalesce your sparse tensors to prevent them from growing too large.
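A minimal sketch of that growth and the fix (the printed values assume this exact construction):

>>> i = torch.LongTensor([[0, 0], [2, 2]])   # the coordinate (0, 2) appears twice
>>> v = torch.FloatTensor([3, 4])
>>> s = torch.sparse.FloatTensor(i, v, torch.Size([2, 3]))
>>> s._nnz()                 # two stored entries for one logical element
2
>>> s = s.coalesce()         # sums the duplicates into a single entry
>>> s._nnz()
1
>>> s._values()
tensor([7.])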

Second, some operators will produce different values depending on whether or not they are coalesced (e.g., torch.sparse.FloatTensor._values() and torch.sparse.FloatTensor._indices(), as well as torch.Tensor.sparse_mask()). These operators are prefixed by an underscore to indicate that they reveal internal implementation details and should be used with care, since code that works with coalesced sparse tensors may not work with uncoalesced sparse tensors; generally speaking, it is safest to explicitly coalesce before working with these operators.

For example, suppose that we wanted to implement an operator by operating directly on torch.sparse.FloatTensor._values(). Multiplication by a scalar can be implemented in the obvious way, as multiplication distributes over addition; however, square root cannot be implemented directly, since sqrt(a + b) != sqrt(a) + sqrt(b) (which is what would be computed if you were given an uncoalesced tensor).
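The following sketch makes that concrete: element-wise sqrt on uncoalesced _values() computes sqrt(a) + sqrt(b) for the duplicated element, not sqrt(a + b):

>>> i = torch.LongTensor([[0, 0], [2, 2]])   # (0, 2) stored twice: 9 and 16
>>> v = torch.FloatTensor([9., 16.])
>>> s = torch.sparse.FloatTensor(i, v, torch.Size([2, 3]))
>>> s._values().sqrt()                   # wrong: the logical element would sum to 3 + 4 = 7
tensor([3., 4.])
>>> s.coalesce()._values().sqrt()        # right: sqrt(9 + 16) = 5
tensor([5.])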

class torch.sparse.FloatTensor

    add()
    add_()
    clone()
    dim()
    div()
    div_()
    get_device()
    hspmm()
    mm()
    mul()
    mul_()
    narrow_copy()
    resizeAs_()
    size()
    spadd()
    spmm()
    sspaddmm()
    sspmm()
    sub()
    sub_()
    t_()
    toDense()
    transpose()
    transpose_()
    zero_()
    coalesce()
    is_coalesced()
    _indices()
    _values()
    _nnz()

    Functions

torch.sparse.addmm(mat, mat1, mat2, beta=1, alpha=1)  [source]

This function does the exact same thing as torch.addmm() in the forward pass, except that it supports backward for the sparse matrix mat1. mat1 needs to have sparse_dim = 2. Note that the gradient of mat1 is a coalesced sparse tensor.

Parameters

    • mat (Tensor) – a dense matrix to be added
    • mat1 (SparseTensor) – a sparse matrix to be multiplied
    • mat2 (Tensor) – a dense matrix to be multiplied
    • beta (Number, optional) – multiplier for mat (β)
    • alpha (Number, optional) – multiplier for mat1 @ mat2 (α)
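The page gives no example for addmm; a minimal sketch of its use (random inputs, so the actual numbers will vary) might look like:

>>> mat = torch.randn(2, 2)                                     # dense matrix to be added
>>> mat1 = torch.randn(2, 3).to_sparse().requires_grad_(True)   # sparse, sparse_dim = 2
>>> mat2 = torch.randn(3, 2, requires_grad=True)                # dense matrix
>>> out = torch.sparse.addmm(mat, mat1, mat2, beta=1, alpha=1)  # mat + mat1 @ mat2
>>> out.shape
torch.Size([2, 2])
>>> out.sum().backward()                 # backward is supported for sparse mat1
>>> mat1.grad.is_coalesced()             # the gradient of mat1 comes back coalesced
True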
torch.sparse.mm(mat1, mat2)  [source]

Performs a matrix multiplication of the sparse matrix mat1 and the dense matrix mat2. Similar to torch.mm(), if mat1 is an (n × m) tensor and mat2 is an (m × p) tensor, out will be an (n × p) dense tensor. mat1 needs to have sparse_dim = 2. This function also supports backward for both matrices. Note that the gradient of mat1 is a coalesced sparse tensor.

Parameters

    • mat1 (SparseTensor) – the first sparse matrix to be multiplied
    • mat2 (Tensor) – the second dense matrix to be multiplied

    Example:

>>> a = torch.randn(2, 3).to_sparse().requires_grad_(True)
>>> a
tensor(indices=tensor([[0, 0, 0, 1, 1, 1],
                       [0, 1, 2, 0, 1, 2]]),
       values=tensor([ 1.5901,  0.0183, -0.6146,  1.8061, -0.0112,  0.6302]),
       size=(2, 3), nnz=6, layout=torch.sparse_coo, requires_grad=True)

>>> b = torch.randn(3, 2, requires_grad=True)
>>> b
tensor([[-0.6479,  0.7874],
        [-1.2056,  0.5641],
        [-1.1716, -0.9923]], requires_grad=True)

>>> y = torch.sparse.mm(a, b)
>>> y
tensor([[-0.3323,  1.8723],
        [-1.8951,  0.7904]], grad_fn=<SparseAddmmBackward>)
>>> y.sum().backward()
>>> a.grad
tensor(indices=tensor([[0, 0, 0, 1, 1, 1],
                       [0, 1, 2, 0, 1, 2]]),
       values=tensor([ 0.1394, -0.6415, -2.1639,  0.1394, -0.6415, -2.1639]),
       size=(2, 3), nnz=6, layout=torch.sparse_coo)
torch.sparse.sum(input, dim=None, dtype=None)  [source]

Returns the sum of each row of the SparseTensor input in the given dimensions dim. If dim is a list of dimensions, reduce over all of them. When summing over all of the sparse_dim, this method returns a dense Tensor instead of a SparseTensor.

All summed dims are squeezed (see torch.squeeze()), resulting in an output tensor having dim fewer dimensions than input.

During backward, only gradients at the nnz locations of input will propagate back. Note that the gradient of input is coalesced.

Parameters

    • input (Tensor) – the input SparseTensor
    • dim (int or tuple of ints) – a dimension or a list of dimensions to reduce. Default: reduce over all dims.
    • dtype (torch.dtype, optional) – the desired data type of the returned Tensor. Default: dtype of input.

    Example:

>>> nnz = 3
>>> dims = [5, 5, 2, 3]
>>> I = torch.cat([torch.randint(0, dims[0], size=(nnz,)),
                   torch.randint(0, dims[1], size=(nnz,))], 0).reshape(2, nnz)
>>> V = torch.randn(nnz, dims[2], dims[3])
>>> size = torch.Size(dims)
>>> S = torch.sparse_coo_tensor(I, V, size)
>>> S
tensor(indices=tensor([[2, 0, 3],
                       [2, 4, 1]]),
       values=tensor([[[-0.6438, -1.6467,  1.4004],
                       [ 0.3411,  0.0918, -0.2312]],

                      [[ 0.5348,  0.0634, -2.0494],
                       [-0.7125, -1.0646,  2.1844]],

                      [[ 0.1276,  0.1874, -0.6334],
                       [-1.9682, -0.5340,  0.7483]]]),
       size=(5, 5, 2, 3), nnz=3, layout=torch.sparse_coo)

# when summing over only part of the sparse_dims, return a SparseTensor
>>> torch.sparse.sum(S, [1, 3])
tensor(indices=tensor([[0, 2, 3]]),
       values=tensor([[-1.4512,  0.4073],
                      [-0.8901,  0.2017],
                      [-0.3183, -1.7539]]),
       size=(5, 2), nnz=3, layout=torch.sparse_coo)

# when summing over all sparse dims, return a dense Tensor
# with summed dims squeezed
>>> torch.sparse.sum(S, [0, 1, 3])
tensor([-2.6596, -1.1450])
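To illustrate the backward behavior described above, a small sketch (gradients appear only at the nnz locations of input; the printed layout may vary slightly by version):

>>> S = torch.sparse_coo_tensor(torch.LongTensor([[0, 2]]),
...                             torch.FloatTensor([1., 2.]),
...                             torch.Size([4]), requires_grad=True)
>>> torch.sparse.sum(S).backward()       # sum over all dims -> dense scalar
>>> S.grad                               # ones at indices 0 and 2 only
tensor(indices=tensor([[0, 2]]),
       values=tensor([1., 1.]),
       size=(4,), nnz=2, layout=torch.sparse_coo)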
\ No newline at end of file
diff --git a/docs/stable/storage.html b/docs/stable/storage.html
new file mode 100644
index 000000000000..6e6d3de52776
--- /dev/null
+++ b/docs/stable/storage.html
@@ -0,0 +1,765 @@
torch.Storage — PyTorch master documentation

    torch.Storage

A torch.Storage is a contiguous, one-dimensional array of a single data type.

Every torch.Tensor has a corresponding storage of the same data type.
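For instance (a sketch; the storage is the flat array backing the tensor, in row-major order for a contiguous tensor):

>>> t = torch.FloatTensor([[1, 2], [3, 4]])
>>> t.storage()
 1.0
 2.0
 3.0
 4.0
[torch.FloatStorage of size 4]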

class torch.FloatStorage  [source]
    bfloat16()
        Casts this storage to bfloat16 type

    bool()
        Casts this storage to bool type

    byte()
        Casts this storage to byte type

    char()
        Casts this storage to char type

    clone()
        Returns a copy of this storage

    copy_()

    cpu()
        Returns a CPU copy of this storage if it's not already on the CPU

    cuda(device=None, non_blocking=False, **kwargs)
        Returns a copy of this object in CUDA memory.

        If this object is already in CUDA memory and on the correct device, then no copy is performed and the original object is returned.

        Parameters
            • device (int) – The destination GPU id. Defaults to the current device.
            • non_blocking (bool) – If True and the source is in pinned memory, the copy will be asynchronous with respect to the host. Otherwise, the argument has no effect.
            • **kwargs – For compatibility, may contain the key async in place of the non_blocking argument.

    data_ptr()

    device

    double()
        Casts this storage to double type

    dtype

    element_size()

    fill_()

    float()
        Casts this storage to float type

    static from_buffer()

    static from_file(filename, shared=False, size=0) → Storage
        If shared is True, then memory is shared between all processes. All changes are written to the file. If shared is False, then the changes on the storage do not affect the file.

        size is the number of elements in the storage. If shared is False, then the file must contain at least size * sizeof(Type) bytes (Type is the type of storage). If shared is True, the file will be created if needed.

        Parameters
            • filename (str) – file name to map
            • shared (bool) – whether to share memory
            • size (int) – number of elements in the storage
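A minimal sketch of from_file (the file name here is hypothetical; with shared=False the file must already hold at least size * sizeof(float) bytes):

>>> # map 10 floats from an existing binary file into a storage
>>> s = torch.FloatStorage.from_file('tensors.bin', shared=False, size=10)
>>> torch.FloatTensor(s).shape           # wrap the storage in a tensor view
torch.Size([10])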
    half()
        Casts this storage to half type

    int()
        Casts this storage to int type

    is_cuda = False

    is_pinned()

    is_shared()

    is_sparse = False

    long()
        Casts this storage to long type

    new()

    pin_memory()
        Copies the storage to pinned memory, if it's not already pinned.

    resize_()

    share_memory_()
        Moves the storage to shared memory.

        This is a no-op for storages already in shared memory and for CUDA storages, which do not need to be moved for sharing across processes. Storages in shared memory cannot be resized.

        Returns: self

    short()
        Casts this storage to short type

    size()

    tolist()
        Returns a list containing the elements of this storage

    type(dtype=None, non_blocking=False, **kwargs)
        Returns the type if dtype is not provided, else casts this object to the specified type.

        If this is already of the correct type, no copy is performed and the original object is returned.

        Parameters
            • dtype (type or string) – The desired type
            • non_blocking (bool) – If True, and the source is in pinned memory and the destination is on the GPU or vice versa, the copy is performed asynchronously with respect to the host. Otherwise, the argument has no effect.
            • **kwargs – For compatibility, may contain the key async in place of the non_blocking argument. The async arg is deprecated.
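For example (a sketch of both forms of type(); the printed representation may vary slightly by version):

>>> s = torch.FloatTensor([1., 2., 3.]).storage()
>>> s.type()                     # no dtype given: returns the type string
'torch.FloatStorage'
>>> s.type(torch.IntStorage)     # dtype given: casts to an IntStorage
 1
 2
 3
[torch.IntStorage of size 3]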
\ No newline at end of file
diff --git a/docs/stable/tensor_attributes.html b/docs/stable/tensor_attributes.html
new file mode 100644
index 000000000000..b12f7afd7b7d
--- /dev/null
+++ b/docs/stable/tensor_attributes.html
@@ -0,0 +1,698 @@
Tensor Attributes — PyTorch master documentation

    Tensor Attributes

Each torch.Tensor has a torch.dtype, torch.device, and torch.layout.

torch.dtype

class torch.dtype

A torch.dtype is an object that represents the data type of a torch.Tensor. PyTorch has nine different data types:

Data type                  dtype                           Tensor types
32-bit floating point      torch.float32 or torch.float    torch.*.FloatTensor
64-bit floating point      torch.float64 or torch.double   torch.*.DoubleTensor
16-bit floating point      torch.float16 or torch.half     torch.*.HalfTensor
8-bit integer (unsigned)   torch.uint8                     torch.*.ByteTensor
8-bit integer (signed)     torch.int8                      torch.*.CharTensor
16-bit integer (signed)    torch.int16 or torch.short      torch.*.ShortTensor
32-bit integer (signed)    torch.int32 or torch.int        torch.*.IntTensor
64-bit integer (signed)    torch.int64 or torch.long       torch.*.LongTensor
Boolean                    torch.bool                      torch.*.BoolTensor

    To find out if a torch.dtype is a floating point data type, the property is_floating_point +can be used, which returns True if the data type is a floating point data type.

torch.device

class torch.device

A torch.device is an object representing the device on which a torch.Tensor is or will be allocated.

The torch.device contains a device type ('cpu' or 'cuda') and an optional device ordinal for the device type. If the device ordinal is not present, this object will always represent the current device for the device type, even after torch.cuda.set_device() is called; e.g., a torch.Tensor constructed with device 'cuda' is equivalent to 'cuda:X' where X is the result of torch.cuda.current_device().

A torch.Tensor’s device can be accessed via the Tensor.device property.

A torch.device can be constructed via a string, or via a string and a device ordinal.

Via a string:

>>> torch.device('cuda:0')
device(type='cuda', index=0)

>>> torch.device('cpu')
device(type='cpu')

>>> torch.device('cuda')  # current cuda device
device(type='cuda')

Via a string and device ordinal:

>>> torch.device('cuda', 0)
device(type='cuda', index=0)

>>> torch.device('cpu', 0)
device(type='cpu', index=0)

Note

The torch.device argument in functions can generally be substituted with a string. This allows for fast prototyping of code.

>>> # Example of a function that takes in a torch.device
>>> cuda1 = torch.device('cuda:1')
>>> torch.randn((2,3), device=cuda1)

>>> # You can substitute the torch.device with a string
>>> torch.randn((2,3), device='cuda:1')

Note

For legacy reasons, a device can be constructed via a single device ordinal, which is treated as a cuda device. This matches Tensor.get_device(), which returns an ordinal for cuda tensors and is not supported for cpu tensors.

>>> torch.device(1)
device(type='cuda', index=1)

Note

Methods which take a device will generally accept a (properly formatted) string or a (legacy) integer device ordinal, i.e. the following are all equivalent:

>>> torch.randn((2,3), device=torch.device('cuda:1'))
>>> torch.randn((2,3), device='cuda:1')
>>> torch.randn((2,3), device=1)  # legacy

torch.layout

class torch.layout

A torch.layout is an object that represents the memory layout of a torch.Tensor. Currently, we support torch.strided (dense Tensors) and have experimental support for torch.sparse_coo (sparse COO Tensors).

torch.strided represents dense Tensors and is the memory layout that is most commonly used. Each strided tensor has an associated torch.Storage, which holds its data. These tensors provide a multi-dimensional, strided view of a storage. Strides are a list of integers: the k-th stride represents the jump in memory necessary to go from one element to the next in the k-th dimension of the Tensor. This concept makes it possible to perform many tensor operations efficiently.

Example:

>>> x = torch.Tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
>>> x.stride()
(5, 1)

>>> x.t().stride()
(1, 5)
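
To make the stride arithmetic concrete, here is a minimal sketch (it assumes only the Tensor.storage() and Tensor.storage_offset() accessors): the element at index (i, j) of a 2-D strided tensor lives at storage_offset + i*stride[0] + j*stride[1] in its storage.

>>> x = torch.arange(10.).reshape(2, 5)
>>> s0, s1 = x.stride()        # (5, 1) for a contiguous 2x5 tensor
>>> i, j = 1, 3
>>> x.storage()[x.storage_offset() + i * s0 + j * s1] == x[i, j].item()
True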

    For more information on torch.sparse_coo tensors, see torch.sparse.

\ No newline at end of file
diff --git a/docs/stable/tensorboard.html b/docs/stable/tensorboard.html
new file mode 100644
index 000000000000..fdf7a6d63eff
--- /dev/null
+++ b/docs/stable/tensorboard.html
@@ -0,0 +1,1101 @@

torch.utils.tensorboard — PyTorch master documentation

    torch.utils.tensorboard

Before going further, more details on TensorBoard can be found at https://www.tensorflow.org/tensorboard/

Once you’ve installed TensorBoard, these utilities let you log PyTorch models and metrics into a directory for visualization within the TensorBoard UI. Scalars, images, histograms, graphs, and embedding visualizations are all supported for PyTorch models and tensors as well as Caffe2 nets and blobs.

The SummaryWriter class is your main entry point to log data for consumption and visualization by TensorBoard. For example:

import torch
import torchvision
from torch.utils.tensorboard import SummaryWriter
from torchvision import datasets, transforms

# Writer will output to ./runs/ directory by default
writer = SummaryWriter()

transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])
trainset = datasets.MNIST('mnist_train', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
model = torchvision.models.resnet50(False)
# Have ResNet model take in grayscale rather than RGB
model.conv1 = torch.nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
images, labels = next(iter(trainloader))

grid = torchvision.utils.make_grid(images)
writer.add_image('images', grid, 0)
writer.add_graph(model, images)
writer.close()

This can then be visualized with TensorBoard, which should be installable and runnable with:

pip install tb-nightly  # Until 1.14 moves to the release channel
tensorboard --logdir=runs

Lots of information can be logged for one experiment. To avoid cluttering the UI and to get better result clustering, we can group plots by naming them hierarchically. For example, “Loss/train” and “Loss/test” will be grouped together, while “Accuracy/train” and “Accuracy/test” will be grouped separately in the TensorBoard interface.

from torch.utils.tensorboard import SummaryWriter
import numpy as np

writer = SummaryWriter()

for n_iter in range(100):
    writer.add_scalar('Loss/train', np.random.random(), n_iter)
    writer.add_scalar('Loss/test', np.random.random(), n_iter)
    writer.add_scalar('Accuracy/train', np.random.random(), n_iter)
    writer.add_scalar('Accuracy/test', np.random.random(), n_iter)

Expected result:

[image: _images/hier_tags.png]
class torch.utils.tensorboard.writer.SummaryWriter(log_dir=None, comment='', purge_step=None, max_queue=10, flush_secs=120, filename_suffix='')[source]

Writes entries directly to event files in the log_dir to be consumed by TensorBoard.

The SummaryWriter class provides a high-level API to create an event file in a given directory and add summaries and events to it. The class updates the file contents asynchronously. This allows a training program to call methods to add data to the file directly from the training loop, without slowing down training.
__init__(log_dir=None, comment='', purge_step=None, max_queue=10, flush_secs=120, filename_suffix='')[source]

Creates a SummaryWriter that will write out events and summaries to the event file.

Parameters
    • log_dir (string) – Save directory location. Default is runs/CURRENT_DATETIME_HOSTNAME, which changes after each run. Use a hierarchical folder structure to compare between runs easily, e.g. pass in ‘runs/exp1’, ‘runs/exp2’, etc. for each new experiment to compare across them.
    • comment (string) – Comment log_dir suffix appended to the default log_dir. If log_dir is assigned, this argument has no effect.
    • purge_step (int) – When logging crashes at step \(T+X\) and restarts at step \(T\), any events whose global_step is larger than or equal to \(T\) will be purged and hidden from TensorBoard. Note that crashed and resumed experiments should have the same log_dir.
    • max_queue (int) – Size of the queue for pending events and summaries before one of the ‘add’ calls forces a flush to disk. Default is ten items.
    • flush_secs (int) – How often, in seconds, to flush the pending events and summaries to disk. Default is every two minutes.
    • filename_suffix (string) – Suffix added to all event filenames in the log_dir directory. More details on filename construction in tensorboard.summary.writer.event_file_writer.EventFileWriter.

Examples:

from torch.utils.tensorboard import SummaryWriter

# create a summary writer with automatically generated folder name.
writer = SummaryWriter()
# folder location: runs/May04_22-14-54_s-MacBook-Pro.local/

# create a summary writer using the specified folder name.
writer = SummaryWriter("my_experiment")
# folder location: my_experiment

# create a summary writer with comment appended.
writer = SummaryWriter(comment="LR_0.1_BATCH_16")
# folder location: runs/May04_22-14-54_s-MacBook-Pro.localLR_0.1_BATCH_16/
add_scalar(tag, scalar_value, global_step=None, walltime=None)[source]

Add scalar data to summary.

Parameters
    • tag (string) – Data identifier
    • scalar_value (float or string/blobname) – Value to save
    • global_step (int) – Global step value to record
    • walltime (float) – Optional override of the default walltime (time.time()), seconds after epoch of event

Examples:

from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()
x = range(100)
for i in x:
    writer.add_scalar('y=2x', i * 2, i)
writer.close()

Expected result:

[image: _images/add_scalar.png]
add_scalars(main_tag, tag_scalar_dict, global_step=None, walltime=None)[source]

Adds many scalar values to summary.

Note that this function also keeps logged scalars in memory. In extreme cases this can blow up your RAM.

Parameters
    • main_tag (string) – The parent name for the tags
    • tag_scalar_dict (dict) – Key-value pair storing the tag and corresponding values
    • global_step (int) – Global step value to record
    • walltime (float) – Optional override of the default walltime (time.time()), seconds after epoch of event

Examples:

from torch.utils.tensorboard import SummaryWriter
import numpy as np

writer = SummaryWriter()
r = 5
for i in range(100):
    writer.add_scalars('run_14h', {'xsinx': i * np.sin(i / r),
                                   'xcosx': i * np.cos(i / r),
                                   'tanx': np.tan(i / r)}, i)
writer.close()
# This call adds three values to the same scalar plot with the tag
# 'run_14h' in TensorBoard's scalar section.

Expected result:

[image: _images/add_scalars.png]
add_histogram(tag, values, global_step=None, bins='tensorflow', walltime=None, max_bins=None)[source]

Add histogram to summary.

Parameters
    • tag (string) – Data identifier
    • values (torch.Tensor, numpy.array, or string/blobname) – Values to build histogram
    • global_step (int) – Global step value to record
    • bins (string) – One of {‘tensorflow’, ‘auto’, ‘fd’, …}. This determines how the bins are made. You can find other options in: https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html
    • walltime (float) – Optional override of the default walltime (time.time()), seconds after epoch of event

Examples:

from torch.utils.tensorboard import SummaryWriter
import numpy as np
writer = SummaryWriter()
for i in range(10):
    x = np.random.random(1000)
    writer.add_histogram('distribution centers', x + i, i)
writer.close()

Expected result:

[image: _images/add_histogram.png]
add_image(tag, img_tensor, global_step=None, walltime=None, dataformats='CHW')[source]

Add image data to summary.

Note that this requires the pillow package.

Parameters
    • tag (string) – Data identifier
    • img_tensor (torch.Tensor, numpy.array, or string/blobname) – Image data
    • global_step (int) – Global step value to record
    • walltime (float) – Optional override of the default walltime (time.time()), seconds after epoch of event

Shape:

    img_tensor: Default is \((3, H, W)\). You can use torchvision.utils.make_grid() to convert a batch of tensors into 3xHxW format, or call add_images and let us do the job. Tensors of shape \((1, H, W)\), \((H, W)\), or \((H, W, 3)\) are also suitable as long as the corresponding dataformats argument is passed, e.g. CHW, HWC, HW.

Examples:

from torch.utils.tensorboard import SummaryWriter
import numpy as np
img = np.zeros((3, 100, 100))
img[0] = np.arange(0, 10000).reshape(100, 100) / 10000
img[1] = 1 - np.arange(0, 10000).reshape(100, 100) / 10000

img_HWC = np.zeros((100, 100, 3))
img_HWC[:, :, 0] = np.arange(0, 10000).reshape(100, 100) / 10000
img_HWC[:, :, 1] = 1 - np.arange(0, 10000).reshape(100, 100) / 10000

writer = SummaryWriter()
writer.add_image('my_image', img, 0)

# If you have non-default dimension setting, set the dataformats argument.
writer.add_image('my_image_HWC', img_HWC, 0, dataformats='HWC')
writer.close()

Expected result:

[image: _images/add_image.png]
add_images(tag, img_tensor, global_step=None, walltime=None, dataformats='NCHW')[source]

Add batched image data to summary.

Note that this requires the pillow package.

Parameters
    • tag (string) – Data identifier
    • img_tensor (torch.Tensor, numpy.array, or string/blobname) – Image data
    • global_step (int) – Global step value to record
    • walltime (float) – Optional override of the default walltime (time.time()), seconds after epoch of event
    • dataformats (string) – Image data format specification of the form NCHW, NHWC, CHW, HWC, HW, WH, etc.

Shape:

    img_tensor: Default is \((N, 3, H, W)\). If dataformats is specified, other shapes will be accepted, e.g. NCHW or NHWC.

Examples:

from torch.utils.tensorboard import SummaryWriter
import numpy as np

img_batch = np.zeros((16, 3, 100, 100))
for i in range(16):
    img_batch[i, 0] = np.arange(0, 10000).reshape(100, 100) / 10000 / 16 * i
    img_batch[i, 1] = (1 - np.arange(0, 10000).reshape(100, 100) / 10000) / 16 * i

writer = SummaryWriter()
writer.add_images('my_image_batch', img_batch, 0)
writer.close()

Expected result:

[image: _images/add_images.png]
add_figure(tag, figure, global_step=None, close=True, walltime=None)[source]

Renders a matplotlib figure into an image and adds it to summary.

Note that this requires the matplotlib package.

Parameters
    • tag (string) – Data identifier
    • figure (matplotlib.pyplot.figure) – Figure or a list of figures
    • global_step (int) – Global step value to record
    • close (bool) – Flag to automatically close the figure
    • walltime (float) – Optional override of the default walltime (time.time()), seconds after epoch of event
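
No example is given above, so here is a minimal sketch (it assumes matplotlib is installed; the tag 'my_figure' is illustrative):

import matplotlib.pyplot as plt
from torch.utils.tensorboard import SummaryWriter

fig = plt.figure()
plt.plot([0, 1, 2], [0, 1, 4])  # any matplotlib figure works

writer = SummaryWriter()
writer.add_figure('my_figure', fig, 0)  # figure is rendered to an image and logged
writer.close()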
add_video(tag, vid_tensor, global_step=None, fps=4, walltime=None)[source]

Add video data to summary.

Note that this requires the moviepy package.

Parameters
    • tag (string) – Data identifier
    • vid_tensor (torch.Tensor) – Video data
    • global_step (int) – Global step value to record
    • fps (float or int) – Frames per second
    • walltime (float) – Optional override of the default walltime (time.time()), seconds after epoch of event

Shape:

    vid_tensor: \((N, T, C, H, W)\). The values should lie in [0, 255] for type uint8 or [0, 1] for type float.
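
A minimal sketch under the shape convention above (random frames; the tag is illustrative, and moviepy must be installed):

import torch
from torch.utils.tensorboard import SummaryWriter

# one batch of 16 RGB frames of size 32x32, uint8 values in [0, 255]
vid = torch.randint(0, 256, (1, 16, 3, 32, 32), dtype=torch.uint8)

writer = SummaryWriter()
writer.add_video('my_video', vid, 0, fps=4)
writer.close()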
add_audio(tag, snd_tensor, global_step=None, sample_rate=44100, walltime=None)[source]

Add audio data to summary.

Parameters
    • tag (string) – Data identifier
    • snd_tensor (torch.Tensor) – Sound data
    • global_step (int) – Global step value to record
    • sample_rate (int) – sample rate in Hz
    • walltime (float) – Optional override of the default walltime (time.time()), seconds after epoch of event

Shape:

    snd_tensor: \((1, L)\). The values should lie between [-1, 1].
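
Likewise, a short sketch for add_audio (one second of a 440 Hz sine tone; the names are illustrative):

import math
import torch
from torch.utils.tensorboard import SummaryWriter

sample_rate = 44100
t = torch.arange(sample_rate, dtype=torch.float32) / sample_rate  # 1 second
snd = torch.sin(2 * math.pi * 440.0 * t).unsqueeze(0)  # shape (1, L), values in [-1, 1]

writer = SummaryWriter()
writer.add_audio('tone_440hz', snd, 0, sample_rate=sample_rate)
writer.close()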
add_text(tag, text_string, global_step=None, walltime=None)[source]

Add text data to summary.

Parameters
    • tag (string) – Data identifier
    • text_string (string) – String to save
    • global_step (int) – Global step value to record
    • walltime (float) – Optional override of the default walltime (time.time()), seconds after epoch of event

Examples:

writer.add_text('lstm', 'This is an lstm', 0)
writer.add_text('rnn', 'This is an rnn', 10)
add_graph(model, input_to_model=None, verbose=False)[source]

Add graph data to summary.

Parameters
    • model (torch.nn.Module) – Model to draw.
    • input_to_model (torch.Tensor or list of torch.Tensor) – A variable or a tuple of variables to be fed.
    • verbose (bool) – Whether to print graph structure in console.
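
A minimal sketch (resnet18 and the dummy input shape are illustrative choices, not requirements):

import torch
import torchvision
from torch.utils.tensorboard import SummaryWriter

model = torchvision.models.resnet18(False)
dummy_input = torch.randn(1, 3, 224, 224)  # one example batch used to trace the graph

writer = SummaryWriter()
writer.add_graph(model, dummy_input)
writer.close()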
add_embedding(mat, metadata=None, label_img=None, global_step=None, tag='default', metadata_header=None)[source]

Add embedding projector data to summary.

Parameters
    • mat (torch.Tensor or numpy.array) – A matrix where each row is the feature vector of a data point
    • metadata (list) – A list of labels; each element will be converted to a string
    • label_img (torch.Tensor) – Images corresponding to each data point
    • global_step (int) – Global step value to record
    • tag (string) – Name for the embedding

Shape:

    mat: \((N, D)\), where N is the number of data points and D is the feature dimension

    label_img: \((N, C, H, W)\)

Examples:

import keyword
import torch
from torch.utils.tensorboard import SummaryWriter

meta = []
while len(meta) < 100:
    meta = meta + keyword.kwlist  # get some strings
meta = meta[:100]

for i, v in enumerate(meta):
    meta[i] = v + str(i)

label_img = torch.rand(100, 3, 10, 32)
for i in range(100):
    label_img[i] *= i / 100.0

writer = SummaryWriter()
writer.add_embedding(torch.randn(100, 5), metadata=meta, label_img=label_img)
writer.add_embedding(torch.randn(100, 5), label_img=label_img)
writer.add_embedding(torch.randn(100, 5), metadata=meta)
add_pr_curve(tag, labels, predictions, global_step=None, num_thresholds=127, weights=None, walltime=None)[source]

Adds a precision-recall curve. Plotting a precision-recall curve lets you understand your model’s performance under different threshold settings. With this function, you provide the ground truth labeling (T/F) and prediction confidence (usually the output of your model) for each target. The TensorBoard UI will let you choose the threshold interactively.

Parameters
    • tag (string) – Data identifier
    • labels (torch.Tensor, numpy.array, or string/blobname) – Ground truth data. Binary label for each element.
    • predictions (torch.Tensor, numpy.array, or string/blobname) – The probability that an element is classified as true. Values should be in [0, 1].
    • global_step (int) – Global step value to record
    • num_thresholds (int) – Number of thresholds used to draw the curve.
    • walltime (float) – Optional override of the default walltime (time.time()), seconds after epoch of event

Examples:

from torch.utils.tensorboard import SummaryWriter
import numpy as np
labels = np.random.randint(2, size=100)  # binary label
predictions = np.random.rand(100)
writer = SummaryWriter()
writer.add_pr_curve('pr_curve', labels, predictions, 0)
writer.close()
add_custom_scalars(layout)[source]

Creates a special chart by collecting chart tags in ‘scalars’. Note that this function can only be called once for each SummaryWriter() object. Because it only provides metadata to TensorBoard, the function can be called before or after the training loop.

Parameters
    layout (dict) – {categoryName: charts}, where charts is also a dictionary {chartName: ListOfProperties}. The first element in ListOfProperties is the chart’s type (one of Multiline or Margin) and the second element should be a list containing the tags you have used in the add_scalar function, which will be collected into the new chart.

Examples:

layout = {'Taiwan': {'twse': ['Multiline', ['twse/0050', 'twse/2330']]},
          'USA': {'dow': ['Margin', ['dow/aaa', 'dow/bbb', 'dow/ccc']],
                  'nasdaq': ['Margin', ['nasdaq/aaa', 'nasdaq/bbb', 'nasdaq/ccc']]}}

writer.add_custom_scalars(layout)
add_mesh(tag, vertices, colors=None, faces=None, config_dict=None, global_step=None, walltime=None)[source]

Add meshes or 3D point clouds to TensorBoard. The visualization is based on Three.js, so it allows users to interact with the rendered object. Besides the basic definitions such as vertices and faces, users can further provide camera parameters, lighting conditions, etc. Please check https://threejs.org/docs/index.html#manual/en/introduction/Creating-a-scene for advanced usage. Note that currently this depends on tb-nightly to show.

Parameters
    • tag (string) – Data identifier
    • vertices (torch.Tensor) – List of the 3D coordinates of vertices.
    • colors (torch.Tensor) – Colors for each vertex
    • faces (torch.Tensor) – Indices of vertices within each triangle. (Optional)
    • config_dict – Dictionary with ThreeJS classes names and configuration.
    • global_step (int) – Global step value to record
    • walltime (float) – Optional override of the default walltime (time.time()), seconds after epoch of event

Shape:

    vertices: \((B, N, 3)\). (batch, number_of_vertices, channels)

    colors: \((B, N, 3)\). The values should lie in [0, 255] for type uint8 or [0, 1] for type float.

    faces: \((B, N, 3)\). The values should lie in [0, number_of_vertices] for type uint8.

Examples:

import torch
from torch.utils.tensorboard import SummaryWriter

vertices_tensor = torch.as_tensor([
    [1, 1, 1],
    [-1, -1, 1],
    [1, -1, -1],
    [-1, 1, -1],
], dtype=torch.float).unsqueeze(0)
colors_tensor = torch.as_tensor([
    [255, 0, 0],
    [0, 255, 0],
    [0, 0, 255],
    [255, 0, 255],
], dtype=torch.int).unsqueeze(0)
faces_tensor = torch.as_tensor([
    [0, 2, 3],
    [0, 3, 1],
    [0, 1, 2],
    [1, 3, 2],
], dtype=torch.int).unsqueeze(0)

writer = SummaryWriter()
writer.add_mesh('my_mesh', vertices=vertices_tensor, colors=colors_tensor, faces=faces_tensor)

writer.close()
flush()[source]

Flushes the event file to disk. Call this method to make sure that all pending events have been written to disk.

close()[source]
\ No newline at end of file
diff --git a/docs/stable/tensors.html b/docs/stable/tensors.html
new file mode 100644
index 000000000000..b6943078004b
--- /dev/null
+++ b/docs/stable/tensors.html
@@ -0,0 +1,3923 @@

torch.Tensor — PyTorch master documentation

    torch.Tensor

A torch.Tensor is a multi-dimensional matrix containing elements of a single data type.

Torch defines nine CPU tensor types and nine GPU tensor types:

Data type                   dtype                           CPU tensor           GPU tensor
32-bit floating point       torch.float32 or torch.float    torch.FloatTensor    torch.cuda.FloatTensor
64-bit floating point       torch.float64 or torch.double   torch.DoubleTensor   torch.cuda.DoubleTensor
16-bit floating point       torch.float16 or torch.half     torch.HalfTensor     torch.cuda.HalfTensor
8-bit integer (unsigned)    torch.uint8                     torch.ByteTensor     torch.cuda.ByteTensor
8-bit integer (signed)      torch.int8                      torch.CharTensor     torch.cuda.CharTensor
16-bit integer (signed)     torch.int16 or torch.short      torch.ShortTensor    torch.cuda.ShortTensor
32-bit integer (signed)     torch.int32 or torch.int        torch.IntTensor      torch.cuda.IntTensor
64-bit integer (signed)     torch.int64 or torch.long       torch.LongTensor     torch.cuda.LongTensor
Boolean                     torch.bool                      torch.BoolTensor     torch.cuda.BoolTensor

torch.Tensor is an alias for the default tensor type (torch.FloatTensor).

A tensor can be constructed from a Python list or sequence using the torch.tensor() constructor:

>>> torch.tensor([[1., -1.], [1., -1.]])
tensor([[ 1.0000, -1.0000],
        [ 1.0000, -1.0000]])
>>> torch.tensor(np.array([[1, 2, 3], [4, 5, 6]]))
tensor([[ 1,  2,  3],
        [ 4,  5,  6]])

Warning

torch.tensor() always copies data. If you have a Tensor data and just want to change its requires_grad flag, use requires_grad_() or detach() to avoid a copy. If you have a numpy array and want to avoid a copy, use torch.as_tensor().

A tensor of specific data type can be constructed by passing a torch.dtype and/or a torch.device to a constructor or tensor creation op:

>>> torch.zeros([2, 4], dtype=torch.int32)
tensor([[ 0,  0,  0,  0],
        [ 0,  0,  0,  0]], dtype=torch.int32)
>>> cuda0 = torch.device('cuda:0')
>>> torch.ones([2, 4], dtype=torch.float64, device=cuda0)
tensor([[ 1.0000,  1.0000,  1.0000,  1.0000],
        [ 1.0000,  1.0000,  1.0000,  1.0000]], dtype=torch.float64, device='cuda:0')

The contents of a tensor can be accessed and modified using Python’s indexing and slicing notation:

>>> x = torch.tensor([[1, 2, 3], [4, 5, 6]])
>>> print(x[1][2])
tensor(6)
>>> x[0][1] = 8
>>> print(x)
tensor([[ 1,  8,  3],
        [ 4,  5,  6]])

Use torch.Tensor.item() to get a Python number from a tensor containing a single value:

>>> x = torch.tensor([[1]])
>>> x
tensor([[ 1]])
>>> x.item()
1
>>> x = torch.tensor(2.5)
>>> x
tensor(2.5000)
>>> x.item()
2.5

A tensor can be created with requires_grad=True so that torch.autograd records operations on it for automatic differentiation.

>>> x = torch.tensor([[1., -1.], [1., 1.]], requires_grad=True)
>>> out = x.pow(2).sum()
>>> out.backward()
>>> x.grad
tensor([[ 2.0000, -2.0000],
        [ 2.0000,  2.0000]])

Each tensor has an associated torch.Storage, which holds its data. The tensor class provides a multi-dimensional, strided view of a storage and defines numeric operations on it.

Note

For more information on the torch.dtype, torch.device, and torch.layout attributes of a torch.Tensor, see Tensor Attributes.

Note

Methods which mutate a tensor are marked with an underscore suffix. For example, torch.FloatTensor.abs_() computes the absolute value in-place and returns the modified tensor, while torch.FloatTensor.abs() computes the result in a new tensor.

Note

To change an existing tensor’s torch.device and/or torch.dtype, consider using the to() method on the tensor.

Warning

The current implementation of torch.Tensor introduces memory overhead, and thus it might lead to unexpectedly high memory usage in applications with many tiny tensors. If this is your case, consider using one large structure.
class torch.Tensor

There are a few main ways to create a tensor, depending on your use case.

    • To create a tensor with pre-existing data, use torch.tensor().
    • To create a tensor with specific size, use torch.* tensor creation ops (see Creation Ops).
    • To create a tensor with the same size (and similar types) as another tensor, use torch.*_like tensor creation ops (see Creation Ops).
    • To create a tensor with similar type but different size as another tensor, use tensor.new_* creation ops.
new_tensor(data, dtype=None, device=None, requires_grad=False) → Tensor

Returns a new Tensor with data as the tensor data. By default, the returned Tensor has the same torch.dtype and torch.device as this tensor.

Warning

new_tensor() always copies data. If you have a Tensor data and want to avoid a copy, use torch.Tensor.requires_grad_() or torch.Tensor.detach(). If you have a numpy array and want to avoid a copy, use torch.from_numpy().

Warning

When data is a tensor x, new_tensor() reads out ‘the data’ from whatever it is passed, and constructs a leaf variable. Therefore tensor.new_tensor(x) is equivalent to x.clone().detach() and tensor.new_tensor(x, requires_grad=True) is equivalent to x.clone().detach().requires_grad_(True). The equivalents using clone() and detach() are recommended.

Parameters
    • data (array_like) – The returned Tensor copies data.
    • dtype (torch.dtype, optional) – the desired type of returned tensor. Default: if None, same torch.dtype as this tensor.
    • device (torch.device, optional) – the desired device of returned tensor. Default: if None, same torch.device as this tensor.
    • requires_grad (bool, optional) – If autograd should record operations on the returned tensor. Default: False.

Example:

>>> tensor = torch.ones((2,), dtype=torch.int8)
>>> data = [[0, 1], [2, 3]]
>>> tensor.new_tensor(data)
tensor([[ 0,  1],
        [ 2,  3]], dtype=torch.int8)
new_full(size, fill_value, dtype=None, device=None, requires_grad=False) → Tensor

Returns a Tensor of size size filled with fill_value. By default, the returned Tensor has the same torch.dtype and torch.device as this tensor.

Parameters
    • fill_value (scalar) – the number to fill the output tensor with.
    • dtype (torch.dtype, optional) – the desired type of returned tensor. Default: if None, same torch.dtype as this tensor.
    • device (torch.device, optional) – the desired device of returned tensor. Default: if None, same torch.device as this tensor.
    • requires_grad (bool, optional) – If autograd should record operations on the returned tensor. Default: False.

Example:

>>> tensor = torch.ones((2,), dtype=torch.float64)
>>> tensor.new_full((3, 4), 3.141592)
tensor([[ 3.1416,  3.1416,  3.1416,  3.1416],
        [ 3.1416,  3.1416,  3.1416,  3.1416],
        [ 3.1416,  3.1416,  3.1416,  3.1416]], dtype=torch.float64)
new_empty(size, dtype=None, device=None, requires_grad=False) → Tensor

Returns a Tensor of size size filled with uninitialized data. By default, the returned Tensor has the same torch.dtype and torch.device as this tensor.

Parameters
    • dtype (torch.dtype, optional) – the desired type of returned tensor. Default: if None, same torch.dtype as this tensor.
    • device (torch.device, optional) – the desired device of returned tensor. Default: if None, same torch.device as this tensor.
    • requires_grad (bool, optional) – If autograd should record operations on the returned tensor. Default: False.

Example:

>>> tensor = torch.ones(())
>>> tensor.new_empty((2, 3))
tensor([[ 5.8182e-18,  4.5765e-41, -1.0545e+30],
        [ 3.0949e-41,  4.4842e-44,  0.0000e+00]])
new_ones(size, dtype=None, device=None, requires_grad=False) → Tensor

Returns a Tensor of size size filled with 1. By default, the returned Tensor has the same torch.dtype and torch.device as this tensor.

Parameters
    • size (int...) – a list, tuple, or torch.Size of integers defining the shape of the output tensor.
    • dtype (torch.dtype, optional) – the desired type of returned tensor. Default: if None, same torch.dtype as this tensor.
    • device (torch.device, optional) – the desired device of returned tensor. Default: if None, same torch.device as this tensor.
    • requires_grad (bool, optional) – If autograd should record operations on the returned tensor. Default: False.

Example:

>>> tensor = torch.tensor((), dtype=torch.int32)
>>> tensor.new_ones((2, 3))
tensor([[ 1,  1,  1],
        [ 1,  1,  1]], dtype=torch.int32)
new_zeros(size, dtype=None, device=None, requires_grad=False) → Tensor

Returns a Tensor of size size filled with 0. By default, the returned Tensor has the same torch.dtype and torch.device as this tensor.

Parameters
    • size (int...) – a list, tuple, or torch.Size of integers defining the shape of the output tensor.
    • dtype (torch.dtype, optional) – the desired type of returned tensor. Default: if None, same torch.dtype as this tensor.
    • device (torch.device, optional) – the desired device of returned tensor. Default: if None, same torch.device as this tensor.
    • requires_grad (bool, optional) – If autograd should record operations on the returned tensor. Default: False.

Example:

>>> tensor = torch.tensor((), dtype=torch.float64)
>>> tensor.new_zeros((2, 3))
tensor([[ 0.,  0.,  0.],
        [ 0.,  0.,  0.]], dtype=torch.float64)
is_cuda

Is True if the Tensor is stored on the GPU, False otherwise.

device

Is the torch.device where this Tensor is.

grad

This attribute is None by default and becomes a Tensor the first time a call to backward() computes gradients for self. The attribute will then contain the gradients computed, and future calls to backward() will accumulate (add) gradients into it.

ndim

Alias for dim()

T

Is this Tensor with its dimensions reversed.

If n is the number of dimensions in x, x.T is equivalent to x.permute(n-1, n-2, ..., 0).
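
For instance (a minimal sketch of the dimension reversal described above):

>>> x = torch.randn(2, 3, 4)
>>> x.T.shape
torch.Size([4, 3, 2])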
abs() → Tensor

See torch.abs()

abs_() → Tensor

In-place version of abs()

acos() → Tensor

See torch.acos()

acos_() → Tensor

In-place version of acos()

add(value) → Tensor
add(value=1, other) → Tensor

See torch.add()

add_(value) → Tensor
add_(value=1, other) → Tensor

In-place version of add()

addbmm(beta=1, alpha=1, batch1, batch2) → Tensor

See torch.addbmm()

addbmm_(beta=1, alpha=1, batch1, batch2) → Tensor

In-place version of addbmm()

addcdiv(value=1, tensor1, tensor2) → Tensor

See torch.addcdiv()

addcdiv_(value=1, tensor1, tensor2) → Tensor

In-place version of addcdiv()

addcmul(value=1, tensor1, tensor2) → Tensor

See torch.addcmul()

addcmul_(value=1, tensor1, tensor2) → Tensor

In-place version of addcmul()

addmm(beta=1, alpha=1, mat1, mat2) → Tensor

See torch.addmm()

addmm_(beta=1, alpha=1, mat1, mat2) → Tensor

In-place version of addmm()

addmv(beta=1, alpha=1, mat, vec) → Tensor

See torch.addmv()

addmv_(beta=1, alpha=1, mat, vec) → Tensor

In-place version of addmv()

addr(beta=1, alpha=1, vec1, vec2) → Tensor

See torch.addr()

addr_(beta=1, alpha=1, vec1, vec2) → Tensor

In-place version of addr()

allclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) → Tensor

See torch.allclose()

apply_(callable) → Tensor

Applies the function callable to each element in the tensor, replacing each element with the value returned by callable.

Note

This function only works with CPU tensors and should not be used in code sections that require high performance.
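
A small sketch of apply_ (the values are illustrative):

>>> x = torch.tensor([1., 2., 3.])
>>> x.apply_(lambda v: v * v)  # slow Python-level loop; prefer vectorized ops like x.pow(2)
tensor([1., 4., 9.])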
argmax(dim=None, keepdim=False) → LongTensor

See torch.argmax()

argmin(dim=None, keepdim=False) → LongTensor

See torch.argmin()

argsort(dim=-1, descending=False) → LongTensor

See torch.argsort()

asin() → Tensor

See torch.asin()

asin_() → Tensor

In-place version of asin()

as_strided(size, stride, storage_offset=0) → Tensor

See torch.as_strided()

atan() → Tensor

See torch.atan()

atan2(other) → Tensor

See torch.atan2()

atan2_(other) → Tensor

In-place version of atan2()

atan_() → Tensor

In-place version of atan()
backward(gradient=None, retain_graph=None, create_graph=False)[source]

Computes the gradient of current tensor w.r.t. graph leaves.

The graph is differentiated using the chain rule. If the tensor is non-scalar (i.e. its data has more than one element) and requires gradient, the function additionally requires specifying gradient. It should be a tensor of matching type and location, that contains the gradient of the differentiated function w.r.t. self.

This function accumulates gradients in the leaves - you might need to zero them before calling it.

Parameters
    • gradient (Tensor or None) – Gradient w.r.t. the tensor. If it is a tensor, it will be automatically converted to a Tensor that does not require grad unless create_graph is True. None values can be specified for scalar Tensors or ones that don’t require grad. If a None value would be acceptable then this argument is optional.
    • retain_graph (bool, optional) – If False, the graph used to compute the grads will be freed. Note that in nearly all cases setting this option to True is not needed and often can be worked around in a much more efficient way. Defaults to the value of create_graph.
    • create_graph (bool, optional) – If True, graph of the derivative will be constructed, allowing to compute higher order derivative products. Defaults to False.
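
As a quick illustration of the gradient argument for non-scalar tensors (a minimal sketch):

>>> x = torch.tensor([1., 2., 3.], requires_grad=True)
>>> y = x * 2
>>> y.backward(torch.ones_like(y))  # non-scalar output needs an explicit gradient
>>> x.grad
tensor([2., 2., 2.])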
baddbmm(beta=1, alpha=1, batch1, batch2) → Tensor

See torch.baddbmm()

baddbmm_(beta=1, alpha=1, batch1, batch2) → Tensor

In-place version of baddbmm()

bernoulli(*, generator=None) → Tensor

Returns a result tensor where each \(\texttt{result[i]}\) is independently sampled from \(\text{Bernoulli}(\texttt{self[i]})\). self must have floating point dtype, and the result will have the same dtype.

See torch.bernoulli()

bernoulli_()

bernoulli_(p=0.5, *, generator=None) → Tensor

    Fills each location of self with an independent sample from \(\text{Bernoulli}(\texttt{p})\). self can have integral dtype.

bernoulli_(p_tensor, *, generator=None) → Tensor

    p_tensor should be a tensor containing probabilities to be used for drawing the binary random number.

    The \(\text{i}^{th}\) element of the self tensor will be set to a value sampled from \(\text{Bernoulli}(\texttt{p\_tensor[i]})\).

    self can have integral dtype, but p_tensor must have floating point dtype.

See also bernoulli() and torch.bernoulli()

bfloat16() → Tensor

self.bfloat16() is equivalent to self.to(torch.bfloat16). See to().

bincount(weights=None, minlength=0) → Tensor

See torch.bincount()

bitwise_not() → Tensor

See torch.bitwise_not()

bitwise_not_() → Tensor

In-place version of bitwise_not()

bmm(batch2) → Tensor

See torch.bmm()

bool() → Tensor

self.bool() is equivalent to self.to(torch.bool). See to().

byte() → Tensor

self.byte() is equivalent to self.to(torch.uint8). See to().

cauchy_(median=0, sigma=1, *, generator=None) → Tensor

Fills the tensor with numbers drawn from the Cauchy distribution:

\[f(x) = \dfrac{1}{\pi} \dfrac{\sigma}{(x - \text{median})^2 + \sigma^2}\]
ceil() → Tensor

See torch.ceil()

ceil_() → Tensor

In-place version of ceil()

char() → Tensor

self.char() is equivalent to self.to(torch.int8). See to().

cholesky(upper=False) → Tensor

See torch.cholesky()

cholesky_inverse(upper=False) → Tensor

See torch.cholesky_inverse()

cholesky_solve(input2, upper=False) → Tensor

See torch.cholesky_solve()

chunk(chunks, dim=0) → List of Tensors

See torch.chunk()

clamp(min, max) → Tensor

See torch.clamp()

clamp_(min, max) → Tensor

In-place version of clamp()

clone() → Tensor

Returns a copy of the self tensor. The copy has the same size and data type as self.

Note

Unlike copy_(), this function is recorded in the computation graph. Gradients propagating to the cloned tensor will propagate to the original tensor.

contiguous() → Tensor

Returns a contiguous tensor containing the same data as the self tensor. If the self tensor is contiguous, this function returns the self tensor.

copy_(src, non_blocking=False) → Tensor

Copies the elements from src into the self tensor and returns self.

The src tensor must be broadcastable with the self tensor. It may be of a different data type or reside on a different device.

Parameters
    • src (Tensor) – the source tensor to copy from
    • non_blocking (bool) – if True and this copy is between CPU and GPU, the copy may occur asynchronously with respect to the host. For other cases, this argument has no effect.
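
A short sketch of copy_ across dtypes (the values are illustrative):

>>> x = torch.zeros(2, 2)
>>> y = torch.tensor([[1, 2], [3, 4]])  # different dtype; copy_ converts on the fly
>>> x.copy_(y)
tensor([[1., 2.],
        [3., 4.]])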
cos() → Tensor

See torch.cos()

cos_() → Tensor

In-place version of cos()

cosh() → Tensor

See torch.cosh()

cosh_() → Tensor

In-place version of cosh()

cpu() → Tensor

Returns a copy of this object in CPU memory.

If this object is already in CPU memory and on the correct device, then no copy is performed and the original object is returned.

cross(other, dim=-1) → Tensor

See torch.cross()

cuda(device=None, non_blocking=False) → Tensor

Returns a copy of this object in CUDA memory.

If this object is already in CUDA memory and on the correct device, then no copy is performed and the original object is returned.

Parameters
    • device (torch.device) – The destination GPU device. Defaults to the current CUDA device.
    • non_blocking (bool) – If True and the source is in pinned memory, the copy will be asynchronous with respect to the host. Otherwise, the argument has no effect. Default: False.

cumprod(dim, dtype=None) → Tensor

See torch.cumprod()

cumsum(dim, dtype=None) → Tensor

See torch.cumsum()

data_ptr() → int

Returns the address of the first element of the self tensor.

dequantize() → Tensor

Given a quantized Tensor, dequantize it and return the dequantized float Tensor.

det() → Tensor

See torch.det()

dense_dim() → int

If self is a sparse COO tensor (i.e., with torch.sparse_coo layout), this returns the number of dense dimensions. Otherwise, this throws an error.

See also Tensor.sparse_dim().
detach()

Returns a new Tensor, detached from the current graph.

The result will never require gradient.

Note

The returned Tensor shares the same storage with the original one. In-place modifications on either of them will be seen, and may trigger errors in correctness checks. IMPORTANT NOTE: Previously, in-place size / stride / storage changes (such as resize_ / resize_as_ / set_ / transpose_) to the returned tensor also updated the original tensor. Now, these in-place changes will not update the original tensor anymore, and will instead trigger an error. For sparse tensors: in-place indices / values changes (such as zero_ / copy_ / add_) to the returned tensor will not update the original tensor anymore, and will instead trigger an error.

detach_()

Detaches the Tensor from the graph that created it, making it a leaf. Views cannot be detached in-place.
diag(diagonal=0) → Tensor

See torch.diag()

diag_embed(offset=0, dim1=-2, dim2=-1) → Tensor

See torch.diag_embed()

diagflat(offset=0) → Tensor

See torch.diagflat()

diagonal(offset=0, dim1=0, dim2=1) → Tensor

See torch.diagonal()

fill_diagonal_(fill_value, wrap=False) → Tensor

Fills the main diagonal of a tensor that has at least 2 dimensions. When dims > 2, all dimensions of the input must be of equal length. This function modifies the input tensor in-place, and returns the input tensor.

Parameters
    • fill_value (Scalar) – the fill value
    • wrap (bool) – whether the diagonal is ‘wrapped’ after N columns for tall matrices.

Example:

>>> a = torch.zeros(3, 3)
>>> a.fill_diagonal_(5)
tensor([[5., 0., 0.],
        [0., 5., 0.],
        [0., 0., 5.]])
>>> b = torch.zeros(7, 3)
>>> b.fill_diagonal_(5)
tensor([[5., 0., 0.],
        [0., 5., 0.],
        [0., 0., 5.],
        [0., 0., 0.],
        [0., 0., 0.],
        [0., 0., 0.],
        [0., 0., 0.]])
>>> c = torch.zeros(7, 3)
>>> c.fill_diagonal_(5, wrap=True)
tensor([[5., 0., 0.],
        [0., 5., 0.],
        [0., 0., 5.],
        [0., 0., 0.],
        [5., 0., 0.],
        [0., 5., 0.],
        [0., 0., 5.]])
digamma() → Tensor

See torch.digamma()

digamma_() → Tensor

In-place version of digamma()

dim() → int

Returns the number of dimensions of the self tensor.

dist(other, p=2) → Tensor

See torch.dist()

div(value) → Tensor

See torch.div()

div_(value) → Tensor

In-place version of div()

dot(tensor2) → Tensor

See torch.dot()

double() → Tensor

self.double() is equivalent to self.to(torch.float64). See to().

eig(eigenvectors=False) -> (Tensor, Tensor)

See torch.eig()

element_size() → int

Returns the size in bytes of an individual element.

Example:

>>> torch.tensor([]).element_size()
4
>>> torch.tensor([], dtype=torch.uint8).element_size()
1
eq(other) → Tensor

See torch.eq()

eq_(other) → Tensor

In-place version of eq()

equal(other) → bool

See torch.equal()

erf() → Tensor

See torch.erf()

erf_() → Tensor

In-place version of erf()

erfc() → Tensor

See torch.erfc()

erfc_() → Tensor

In-place version of erfc()

erfinv() → Tensor

See torch.erfinv()

erfinv_() → Tensor

In-place version of erfinv()

exp() → Tensor

See torch.exp()

exp_() → Tensor

In-place version of exp()

expm1() → Tensor

See torch.expm1()

expm1_() → Tensor

In-place version of expm1()
expand(*sizes) → Tensor

Returns a new view of the self tensor with singleton dimensions expanded to a larger size.

Passing -1 as the size for a dimension means not changing the size of that dimension.

A tensor can also be expanded to a larger number of dimensions, and the new ones will be appended at the front. For the new dimensions, the size cannot be set to -1.

Expanding a tensor does not allocate new memory, but only creates a new view on the existing tensor where a dimension of size one is expanded to a larger size by setting the stride to 0. Any dimension of size 1 can be expanded to an arbitrary value without allocating new memory.

Parameters
    *sizes (torch.Size or int...) – the desired expanded size

Warning

More than one element of an expanded tensor may refer to a single memory location. As a result, in-place operations (especially ones that are vectorized) may result in incorrect behavior. If you need to write to the tensors, please clone them first.

Example:

>>> x = torch.tensor([[1], [2], [3]])
>>> x.size()
torch.Size([3, 1])
>>> x.expand(3, 4)
tensor([[ 1,  1,  1,  1],
        [ 2,  2,  2,  2],
        [ 3,  3,  3,  3]])
>>> x.expand(-1, 4)   # -1 means not changing the size of that dimension
tensor([[ 1,  1,  1,  1],
        [ 2,  2,  2,  2],
        [ 3,  3,  3,  3]])
expand_as(other) → Tensor

Expand this tensor to the same size as other. self.expand_as(other) is equivalent to self.expand(other.size()).

Please see expand() for more information about expand.

Parameters
    other (torch.Tensor) – The result tensor has the same size as other.

exponential_(lambd=1, *, generator=None) → Tensor

Fills the self tensor with elements drawn from the exponential distribution:

\[f(x) = \lambda e^{-\lambda x}\]

fft(signal_ndim, normalized=False) → Tensor

See torch.fft()

fill_(value) → Tensor

Fills the self tensor with the specified value.
flatten(input, start_dim=0, end_dim=-1) → Tensor

See torch.flatten()

flip(dims) → Tensor

See torch.flip()

float() → Tensor

self.float() is equivalent to self.to(torch.float32). See to().

floor() → Tensor

See torch.floor()

floor_() → Tensor

In-place version of floor()

fmod(divisor) → Tensor

See torch.fmod()

fmod_(divisor) → Tensor

In-place version of fmod()

frac() → Tensor

See torch.frac()

frac_() → Tensor

In-place version of frac()

gather(dim, index) → Tensor

See torch.gather()
    +
    +ge(other) → Tensor
    +

    See torch.ge()

    +
    + +
    +
    +ge_(other) → Tensor
    +

    In-place version of ge()

    +
    + +
    +
    +gels(A)[source]
    +

    See torch.lstsq()

    +
    + +
    +
    +geometric_(p, *, generator=None) → Tensor
    +

    Fills self tensor with elements drawn from the geometric distribution:

    +
+\[f(X=k) = (1 - p)^{k - 1} p\]
    +
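
A minimal sketch (draws are random; every value is an integer k >= 1):

+
>>> t = torch.empty(5)
+>>> t.geometric_(0.5)
+tensor([1., 3., 1., 2., 1.])
+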
    + +
    +
    +geqrf() -> (Tensor, Tensor)
    +

    See torch.geqrf()

    +
    + +
    +
    +ger(vec2) → Tensor
    +

    See torch.ger()

    +
    + +
    +
    +get_device() -> Device ordinal (Integer)
    +

    For CUDA tensors, this function returns the device ordinal of the GPU on which the tensor resides. +For CPU tensors, an error is thrown.

    +

    Example:

    +
    >>> x = torch.randn(3, 4, 5, device='cuda:0')
    +>>> x.get_device()
    +0
    +>>> x.cpu().get_device()  # RuntimeError: get_device is not implemented for type torch.FloatTensor
    +
    +
    +
    + +
    +
    +gt(other) → Tensor
    +

    See torch.gt()

    +
    + +
    +
    +gt_(other) → Tensor
    +

    In-place version of gt()

    +
    + +
    +
    +half() → Tensor
    +

    self.half() is equivalent to self.to(torch.float16). See to().

    +
    + +
    +
    +hardshrink(lambd=0.5) → Tensor
    +

    See torch.nn.functional.hardshrink()

    +
    + +
    +
    +histc(bins=100, min=0, max=0) → Tensor
    +

    See torch.histc()

    +
    + +
    +
    +ifft(signal_ndim, normalized=False) → Tensor
    +

    See torch.ifft()

    +
    + +
    +
    +index_add_(dim, index, tensor) → Tensor
    +

    Accumulate the elements of tensor into the self tensor by adding +to the indices in the order given in index. For example, if dim == 0 +and index[i] == j, then the ith row of tensor is added to the +jth row of self.

    +

    The dimth dimension of tensor must have the same size as the +length of index (which must be a vector), and all other dimensions must +match self, or an error will be raised.

    +
    +

    Note

    +

    When using the CUDA backend, this operation may induce nondeterministic +behaviour that is not easily switched off. +Please see the notes on Reproducibility for background.

    +
    +
    +
    Parameters
    +
      +
    • dim (int) – dimension along which to index

    • +
    • index (LongTensor) – indices of tensor to select from

    • +
    • tensor (Tensor) – the tensor containing values to add

    • +
    +
    +
    +

    Example:

    +
    >>> x = torch.ones(5, 3)
    +>>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
    +>>> index = torch.tensor([0, 4, 2])
    +>>> x.index_add_(0, index, t)
    +tensor([[  2.,   3.,   4.],
    +        [  1.,   1.,   1.],
    +        [  8.,   9.,  10.],
    +        [  1.,   1.,   1.],
    +        [  5.,   6.,   7.]])
    +
    +
    +
    + +
    +
    +index_add(dim, index, tensor) → Tensor
    +

    Out-of-place version of torch.Tensor.index_add_()

    +
    + +
    +
    +index_copy_(dim, index, tensor) → Tensor
    +

    Copies the elements of tensor into the self tensor by selecting +the indices in the order given in index. For example, if dim == 0 +and index[i] == j, then the ith row of tensor is copied to the +jth row of self.

    +

    The dimth dimension of tensor must have the same size as the +length of index (which must be a vector), and all other dimensions must +match self, or an error will be raised.

    +
    +
    Parameters
    +
      +
    • dim (int) – dimension along which to index

    • +
    • index (LongTensor) – indices of tensor to select from

    • +
    • tensor (Tensor) – the tensor containing values to copy

    • +
    +
    +
    +

    Example:

    +
    >>> x = torch.zeros(5, 3)
    +>>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
    +>>> index = torch.tensor([0, 4, 2])
    +>>> x.index_copy_(0, index, t)
    +tensor([[ 1.,  2.,  3.],
    +        [ 0.,  0.,  0.],
    +        [ 7.,  8.,  9.],
    +        [ 0.,  0.,  0.],
    +        [ 4.,  5.,  6.]])
    +
    +
    +
    + +
    +
    +index_copy(dim, index, tensor) → Tensor
    +

    Out-of-place version of torch.Tensor.index_copy_()

    +
    + +
    +
    +index_fill_(dim, index, val) → Tensor
    +

    Fills the elements of the self tensor with value val by +selecting the indices in the order given in index.

    +
    +
    Parameters
    +
      +
    • dim (int) – dimension along which to index

    • +
    • index (LongTensor) – indices of self tensor to fill in

    • +
    • val (float) – the value to fill with

    • +
    +
    +
    +
    +
Example:
    >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
    +>>> index = torch.tensor([0, 2])
    +>>> x.index_fill_(1, index, -1)
    +tensor([[-1.,  2., -1.],
    +        [-1.,  5., -1.],
    +        [-1.,  8., -1.]])
    +
    +
    +
    +
    +
    + +
    +
    +index_fill(dim, index, value) → Tensor
    +

    Out-of-place version of torch.Tensor.index_fill_()

    +
    + +
    +
    +index_put_(indices, value, accumulate=False) → Tensor
    +

    Puts values from the tensor value into the tensor self using +the indices specified in indices (which is a tuple of Tensors). The +expression tensor.index_put_(indices, value) is equivalent to +tensor[indices] = value. Returns self.

    +

If accumulate is True, the elements in value are added to self. If accumulate is False, the behavior is undefined if indices contain duplicate elements.

    +
    +
    Parameters
    +
      +
    • indices (tuple of LongTensor) – tensors used to index into self.

    • +
    • value (Tensor) – tensor of same dtype as self.

    • +
    • accumulate (bool) – whether to accumulate into self

    • +
    +
    +
    +
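
A minimal sketch of the equivalence with advanced indexing:

+
>>> x = torch.zeros(3, 3)
+>>> indices = (torch.tensor([0, 2]), torch.tensor([1, 2]))
+>>> x.index_put_(indices, torch.tensor([1., 2.]))  # same as x[indices] = value
+tensor([[0., 1., 0.],
+        [0., 0., 0.],
+        [0., 0., 2.]])
+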
    + +
    +
    +index_put(indices, value, accumulate=False) → Tensor
    +

Out-of-place version of index_put_()

    +
    + +
    +
    +index_select(dim, index) → Tensor
    +

    See torch.index_select()

    +
    + +
    +
    +indices() → Tensor
    +

    If self is a sparse COO tensor (i.e., with torch.sparse_coo layout), +this returns a view of the contained indices tensor. Otherwise, this throws an +error.

    +

    See also Tensor.values().

    +
    +

    Note

    +

    This method can only be called on a coalesced sparse tensor. See +Tensor.coalesce() for details.

    +
    +
    + +
    +
    +int() → Tensor
    +

    self.int() is equivalent to self.to(torch.int32). See to().

    +
    + +
    +
    +int_repr() → Tensor
    +

Given a quantized Tensor, self.int_repr() returns a CPU Tensor with uint8 as its data type, storing the underlying uint8 values of the given Tensor.

    +
    + +
    +
    +inverse() → Tensor
    +

    See torch.inverse()

    +
    + +
    +
    +irfft(signal_ndim, normalized=False, onesided=True, signal_sizes=None) → Tensor
    +

    See torch.irfft()

    +
    + +
    +
    +is_contiguous() → bool
    +

    Returns True if self tensor is contiguous in memory in C order.

    +
    + +
    +
    +is_floating_point() → bool
    +

    Returns True if the data type of self is a floating point data type.

    +
    + +
    +
+is_leaf
    +

All Tensors that have requires_grad set to False are leaf Tensors by convention.

+

For Tensors that have requires_grad set to True, they are leaf Tensors only if they were created by the user. This means that they are not the result of an operation, and so grad_fn is None.

    +

    Only leaf Tensors will have their grad populated during a call to backward(). +To get grad populated for non-leaf Tensors, you can use retain_grad().

    +

    Example:

    +
    >>> a = torch.rand(10, requires_grad=True)
    +>>> a.is_leaf
    +True
    +>>> b = torch.rand(10, requires_grad=True).cuda()
    +>>> b.is_leaf
    +False
    +# b was created by the operation that cast a cpu Tensor into a cuda Tensor
    +>>> c = torch.rand(10, requires_grad=True) + 2
    +>>> c.is_leaf
    +False
    +# c was created by the addition operation
    +>>> d = torch.rand(10).cuda()
    +>>> d.is_leaf
    +True
    +# d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)
    +>>> e = torch.rand(10).cuda().requires_grad_()
    +>>> e.is_leaf
    +True
    +# e requires gradients and has no operations creating it
    +>>> f = torch.rand(10, requires_grad=True, device="cuda")
    +>>> f.is_leaf
    +True
    +# f requires grad, has no operation creating it
    +
    +
    +
    + +
    +
    +is_pinned()[source]
    +

Returns True if this tensor resides in pinned memory.

    +
    + +
    +
    +is_set_to(tensor) → bool
    +

    Returns True if this object refers to the same THTensor object from the +Torch C API as the given tensor.

    +
    + +
    +
    +is_shared()[source]
    +

    Checks if tensor is in shared memory.

    +

    This is always True for CUDA tensors.

    +
    + +
    +
    +is_signed() → bool
    +

    Returns True if the data type of self is a signed data type.

    +
    + +
    +
+is_sparse
+

Is True if the Tensor uses sparse storage layout, False otherwise.

+
    + +
    +
    +item() → number
    +

    Returns the value of this tensor as a standard Python number. This only works +for tensors with one element. For other cases, see tolist().

    +

    This operation is not differentiable.

    +

    Example:

    +
    >>> x = torch.tensor([1.0])
    +>>> x.item()
    +1.0
    +
    +
    +
    + +
    +
    +kthvalue(k, dim=None, keepdim=False) -> (Tensor, LongTensor)
    +

    See torch.kthvalue()

    +
    + +
    +
    +le(other) → Tensor
    +

    See torch.le()

    +
    + +
    +
    +le_(other) → Tensor
    +

    In-place version of le()

    +
    + +
    +
    +lerp(end, weight) → Tensor
    +

    See torch.lerp()

    +
    + +
    +
    +lerp_(end, weight) → Tensor
    +

    In-place version of lerp()

    +
    + +
    +
    +log() → Tensor
    +

    See torch.log()

    +
    + +
    +
    +log_() → Tensor
    +

    In-place version of log()

    +
    + +
    +
    +logdet() → Tensor
    +

    See torch.logdet()

    +
    + +
    +
    +log10() → Tensor
    +

    See torch.log10()

    +
    + +
    +
    +log10_() → Tensor
    +

    In-place version of log10()

    +
    + +
    +
    +log1p() → Tensor
    +

    See torch.log1p()

    +
    + +
    +
    +log1p_() → Tensor
    +

    In-place version of log1p()

    +
    + +
    +
    +log2() → Tensor
    +

    See torch.log2()

    +
    + +
    +
    +log2_() → Tensor
    +

    In-place version of log2()

    +
    + +
    +
    +log_normal_(mean=1, std=2, *, generator=None)
    +

Fills self tensor with numbers sampled from the log-normal distribution parameterized by the given mean \(\mu\) and standard deviation \(\sigma\). Note that mean and std are the mean and standard deviation of the underlying normal distribution, and not of the returned distribution:

    +
    +\[f(x) = \dfrac{1}{x \sigma \sqrt{2\pi}}\ e^{-\frac{(\ln x - \mu)^2}{2\sigma^2}}\]
    +
    + +
    +
    +logsumexp(dim, keepdim=False) → Tensor
    +

    See torch.logsumexp()

    +
    + +
    +
    +long() → Tensor
    +

    self.long() is equivalent to self.to(torch.int64). See to().

    +
    + +
    +
    +lstsq(A) -> (Tensor, Tensor)
    +

    See torch.lstsq()

    +
    + +
    +
    +lt(other) → Tensor
    +

    See torch.lt()

    +
    + +
    +
    +lt_(other) → Tensor
    +

    In-place version of lt()

    +
    + +
    +
    +lu(pivot=True, get_infos=False)[source]
    +

    See torch.lu()

    +
    + +
    +
    +lu_solve(LU_data, LU_pivots) → Tensor
    +

    See torch.lu_solve()

    +
    + +
    +
    +map_(tensor, callable)
    +

    Applies callable for each element in self tensor and the given +tensor and stores the results in self tensor. self tensor and +the given tensor must be broadcastable.

    +

    The callable should have the signature:

    +
    def callable(a, b) -> number
    +
    +
    +
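
A minimal sketch with a hypothetical callable that adds its two arguments (map_ is a slow, element-by-element loop and is mainly useful for debugging):

+
>>> a = torch.tensor([1., 2., 3.])
+>>> b = torch.tensor([10., 20., 30.])
+>>> a.map_(b, lambda x, y: x + y)
+tensor([11., 22., 33.])
+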
    + +
    +
    +masked_scatter_(mask, source)
    +

Copies elements from source into self tensor at positions where the mask is one. The shape of mask must be broadcastable with the shape of the underlying tensor. source should have at least as many elements as the number of ones in mask.

    +
    +
    Parameters
    +
      +
    • mask (ByteTensor) – the binary mask

    • +
    • source (Tensor) – the tensor to copy from

    • +
    +
    +
    +
    +

    Note

    +

    The mask operates on the self tensor, not on the given +source tensor.

    +
    +
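
A minimal sketch: elements of source are consumed in order at the positions where mask is one:

+
>>> t = torch.zeros(5)
+>>> mask = torch.tensor([1, 0, 1, 0, 1], dtype=torch.uint8)
+>>> t.masked_scatter_(mask, torch.tensor([10., 20., 30.]))
+tensor([10.,  0., 20.,  0., 30.])
+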
    + +
    +
    +masked_scatter(mask, tensor) → Tensor
    +

    Out-of-place version of torch.Tensor.masked_scatter_()

    +
    + +
    +
    +masked_fill_(mask, value)
    +

    Fills elements of self tensor with value where mask is +one. The shape of mask must be +broadcastable with the shape of the underlying +tensor.

    +
    +
    Parameters
    +
      +
    • mask (ByteTensor) – the binary mask

    • +
    • value (float) – the value to fill in with

    • +
    +
    +
    +
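
A minimal sketch:

+
>>> t = torch.zeros(2, 3)
+>>> mask = torch.tensor([[1, 0, 1], [0, 1, 0]], dtype=torch.uint8)
+>>> t.masked_fill_(mask, -1.)
+tensor([[-1.,  0., -1.],
+        [ 0., -1.,  0.]])
+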
    + +
    +
    +masked_fill(mask, value) → Tensor
    +

    Out-of-place version of torch.Tensor.masked_fill_()

    +
    + +
    +
    +masked_select(mask) → Tensor
    +

    See torch.masked_select()

    +
    + +
    +
    +matmul(tensor2) → Tensor
    +

    See torch.matmul()

    +
    + +
    +
    +matrix_power(n) → Tensor
    +

    See torch.matrix_power()

    +
    + +
    +
    +max(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
    +

    See torch.max()

    +
    + +
    +
+mean(dim=None, keepdim=False) → Tensor
    +

    See torch.mean()

    +
    + +
    +
    +median(dim=None, keepdim=False) -> (Tensor, LongTensor)
    +

    See torch.median()

    +
    + +
    +
    +min(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
    +

    See torch.min()

    +
    + +
    +
    +mm(mat2) → Tensor
    +

    See torch.mm()

    +
    + +
    +
    +mode(dim=None, keepdim=False) -> (Tensor, LongTensor)
    +

    See torch.mode()

    +
    + +
    +
    +mul(value) → Tensor
    +

    See torch.mul()

    +
    + +
    +
    +mul_(value)
    +

    In-place version of mul()

    +
    + +
    +
    +multinomial(num_samples, replacement=False, *, generator=None) → Tensor
    +

    See torch.multinomial()

    +
    + +
    +
    +mv(vec) → Tensor
    +

    See torch.mv()

    +
    + +
    +
    +mvlgamma(p) → Tensor
    +

    See torch.mvlgamma()

    +
    + +
    +
    +mvlgamma_(p) → Tensor
    +

    In-place version of mvlgamma()

    +
    + +
    +
    +narrow(dimension, start, length) → Tensor
    +

    See torch.narrow()

    +

    Example:

    +
    >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    +>>> x.narrow(0, 0, 2)
    +tensor([[ 1,  2,  3],
    +        [ 4,  5,  6]])
    +>>> x.narrow(1, 1, 2)
    +tensor([[ 2,  3],
    +        [ 5,  6],
    +        [ 8,  9]])
    +
    +
    +
    + +
    +
    +narrow_copy(dimension, start, length) → Tensor
    +

Same as Tensor.narrow() except this returns a copy rather than shared storage. This is primarily for sparse tensors, which do not have a shared-storage narrow method. Calling narrow_copy with dimension > self.sparse_dim() will return a copy with the relevant dense dimension narrowed, and self.shape updated accordingly.

    +
    + +
    +
    +ndimension() → int
    +

    Alias for dim()

    +
    + +
    +
    +ne(other) → Tensor
    +

    See torch.ne()

    +
    + +
    +
    +ne_(other) → Tensor
    +

    In-place version of ne()

    +
    + +
    +
    +neg() → Tensor
    +

    See torch.neg()

    +
    + +
    +
    +neg_() → Tensor
    +

    In-place version of neg()

    +
    + +
    +
    +nelement() → int
    +

    Alias for numel()

    +
    + +
    +
    +nonzero() → LongTensor
    +

    See torch.nonzero()

    +
    + +
    +
    +norm(p='fro', dim=None, keepdim=False, dtype=None)[source]
    +

    See torch.norm()

    +
    + +
    +
    +normal_(mean=0, std=1, *, generator=None) → Tensor
    +

Fills self tensor with elements sampled from the normal distribution parameterized by mean and std.

    +
    + +
    +
    +numel() → int
    +

    See torch.numel()

    +
    + +
    +
    +numpy() → numpy.ndarray
    +

    Returns self tensor as a NumPy ndarray. This tensor and the +returned ndarray share the same underlying storage. Changes to +self tensor will be reflected in the ndarray and vice versa.

    +
    + +
    +
    +orgqr(input2) → Tensor
    +

    See torch.orgqr()

    +
    + +
    +
    +ormqr(input2, input3, left=True, transpose=False) → Tensor
    +

    See torch.ormqr()

    +
    + +
    +
    +permute(*dims) → Tensor
    +

    Permute the dimensions of this tensor.

    +
    +
    Parameters
    +

    *dims (int...) – The desired ordering of dimensions

    +
    +
    +

Example:

    +
    >>> x = torch.randn(2, 3, 5)
    +>>> x.size()
    +torch.Size([2, 3, 5])
    +>>> x.permute(2, 0, 1).size()
    +torch.Size([5, 2, 3])
    +
    +
    +
    + +
    +
    +pin_memory() → Tensor
    +

    Copies the tensor to pinned memory, if it’s not already pinned.

    +
    + +
    +
    +pinverse() → Tensor
    +

    See torch.pinverse()

    +
    + +
    +
    +pow(exponent) → Tensor
    +

    See torch.pow()

    +
    + +
    +
    +pow_(exponent) → Tensor
    +

    In-place version of pow()

    +
    + +
    +
    +prod(dim=None, keepdim=False, dtype=None) → Tensor
    +

    See torch.prod()

    +
    + +
    +
    +put_(indices, tensor, accumulate=False) → Tensor
    +

    Copies the elements from tensor into the positions specified by +indices. For the purpose of indexing, the self tensor is treated as if +it were a 1-D tensor.

    +

    If accumulate is True, the elements in tensor are added to +self. If accumulate is False, the behavior is undefined if indices +contain duplicate elements.

    +
    +
    Parameters
    +
      +
    • indices (LongTensor) – the indices into self

    • +
    • tensor (Tensor) – the tensor containing values to copy from

    • +
    • accumulate (bool) – whether to accumulate into self

    • +
    +
    +
    +

    Example:

    +
    >>> src = torch.tensor([[4, 3, 5],
    +                        [6, 7, 8]])
    +>>> src.put_(torch.tensor([1, 3]), torch.tensor([9, 10]))
    +tensor([[  4,   9,   5],
    +        [ 10,   7,   8]])
    +
    +
    +
    + +
    +
    +qr(some=True) -> (Tensor, Tensor)
    +

    See torch.qr()

    +
    + +
    +
    +qscheme() → torch.qscheme
    +

    Returns the quantization scheme of a given QTensor.

    +
    + +
    +
    +q_scale() → float
    +

    Given a Tensor quantized by linear(affine) quantization, +returns the scale of the underlying quantizer().

    +
    + +
    +
    +q_zero_point() → int
    +

    Given a Tensor quantized by linear(affine) quantization, +returns the zero_point of the underlying quantizer().

    +
    + +
    +
    +random_(from=0, to=None, *, generator=None) → Tensor
    +

    Fills self tensor with numbers sampled from the discrete uniform +distribution over [from, to - 1]. If not specified, the values are usually +only bounded by self tensor’s data type. However, for floating point +types, if unspecified, range will be [0, 2^mantissa] to ensure that every +value is representable. For example, torch.tensor(1, dtype=torch.double).random_() +will be uniform in [0, 2^53].

    +
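
A minimal sketch (draws are random; every value lies in [0, 9]):

+
>>> t = torch.empty(4, dtype=torch.long)
+>>> t.random_(0, 10)
+tensor([4, 9, 1, 7])
+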
    + +
    +
    +reciprocal() → Tensor
    +

    See torch.reciprocal()

    +
    + +
    +
    +reciprocal_() → Tensor
    +

    In-place version of reciprocal()

    +
    + +
    +
    +register_hook(hook)[source]
    +

    Registers a backward hook.

    +

    The hook will be called every time a gradient with respect to the +Tensor is computed. The hook should have the following signature:

    +
    hook(grad) -> Tensor or None
    +
    +
    +

    The hook should not modify its argument, but it can optionally return +a new gradient which will be used in place of grad.

    +

    This function returns a handle with a method handle.remove() +that removes the hook from the module.

    +

    Example:

    +
    >>> v = torch.tensor([0., 0., 0.], requires_grad=True)
    +>>> h = v.register_hook(lambda grad: grad * 2)  # double the gradient
    +>>> v.backward(torch.tensor([1., 2., 3.]))
+>>> v.grad
+tensor([2., 4., 6.])
    +
    +>>> h.remove()  # removes the hook
    +
    +
    +
    + +
    +
    +remainder(divisor) → Tensor
    +

    See torch.remainder()

    +
    + +
    +
    +remainder_(divisor) → Tensor
    +

    In-place version of remainder()

    +
    + +
    +
    +renorm(p, dim, maxnorm) → Tensor
    +

    See torch.renorm()

    +
    + +
    +
    +renorm_(p, dim, maxnorm) → Tensor
    +

    In-place version of renorm()

    +
    + +
    +
    +repeat(*sizes) → Tensor
    +

    Repeats this tensor along the specified dimensions.

    +

    Unlike expand(), this function copies the tensor’s data.

    +
    +

    Warning

    +

    torch.repeat() behaves differently from +numpy.repeat, +but is more similar to +numpy.tile. +For the operator similar to numpy.repeat, see torch.repeat_interleave().

    +
    +
    +
    Parameters
    +

    sizes (torch.Size or int...) – The number of times to repeat this tensor along each +dimension

    +
    +
    +

    Example:

    +
    >>> x = torch.tensor([1, 2, 3])
    +>>> x.repeat(4, 2)
    +tensor([[ 1,  2,  3,  1,  2,  3],
    +        [ 1,  2,  3,  1,  2,  3],
    +        [ 1,  2,  3,  1,  2,  3],
    +        [ 1,  2,  3,  1,  2,  3]])
    +>>> x.repeat(4, 2, 1).size()
    +torch.Size([4, 2, 3])
    +
    +
    +
    + +
    +
    +repeat_interleave(repeats, dim=None) → Tensor
    +

    See torch.repeat_interleave().

    +
    + +
    +
+requires_grad
    +

    Is True if gradients need to be computed for this Tensor, False otherwise.

    +
    +

    Note

    +

The fact that gradients need to be computed for a Tensor does not mean that the grad attribute will be populated; see is_leaf for more details.

    +
    +
    + +
    +
    +requires_grad_(requires_grad=True) → Tensor
    +

    Change if autograd should record operations on this tensor: sets this tensor’s +requires_grad attribute in-place. Returns this tensor.

    +

requires_grad_()’s main use case is to tell autograd to begin recording operations on a Tensor tensor. If tensor has requires_grad=False (because it was obtained through a DataLoader, or required preprocessing or initialization), tensor.requires_grad_() makes it so that autograd will begin to record operations on tensor.

    +
    +
    Parameters
    +

    requires_grad (bool) – If autograd should record operations on this tensor. +Default: True.

    +
    +
    +

    Example:

    +
    >>> # Let's say we want to preprocess some saved weights and use
    +>>> # the result as new weights.
    +>>> saved_weights = [0.1, 0.2, 0.3, 0.25]
    +>>> loaded_weights = torch.tensor(saved_weights)
    +>>> weights = preprocess(loaded_weights)  # some function
    +>>> weights
    +tensor([-0.5503,  0.4926, -2.1158, -0.8303])
    +
    +>>> # Now, start to record operations done to weights
    +>>> weights.requires_grad_()
    +>>> out = weights.pow(2).sum()
    +>>> out.backward()
    +>>> weights.grad
    +tensor([-1.1007,  0.9853, -4.2316, -1.6606])
    +
    +
    +
    + +
    +
    +reshape(*shape) → Tensor
    +

    Returns a tensor with the same data and number of elements as self +but with the specified shape. This method returns a view if shape is +compatible with the current shape. See torch.Tensor.view() on when it is +possible to return a view.

    +

    See torch.reshape()

    +
    +
    Parameters
    +

    shape (tuple of python:ints or int...) – the desired shape

    +
    +
    +
    + +
    +
    +reshape_as(other) → Tensor
    +

Returns this tensor reshaped to the same shape as other. self.reshape_as(other) is equivalent to self.reshape(other.sizes()). This method returns a view if other.sizes() is compatible with the current shape. See torch.Tensor.view() on when it is possible to return a view.

    +

    Please see reshape() for more information about reshape.

    +
    +
    Parameters
    +

    other (torch.Tensor) – The result tensor has the same shape +as other.

    +
    +
    +
    + +
    +
    +resize_(*sizes) → Tensor
    +

    Resizes self tensor to the specified size. If the number of elements is +larger than the current storage size, then the underlying storage is resized +to fit the new number of elements. If the number of elements is smaller, the +underlying storage is not changed. Existing elements are preserved but any new +memory is uninitialized.

    +
    +

    Warning

    +

    This is a low-level method. The storage is reinterpreted as C-contiguous, +ignoring the current strides (unless the target size equals the current +size, in which case the tensor is left unchanged). For most purposes, you +will instead want to use view(), which checks for +contiguity, or reshape(), which copies data if needed. To +change the size in-place with custom strides, see set_().

    +
    +
    +
    Parameters
    +

    sizes (torch.Size or int...) – the desired size

    +
    +
    +

    Example:

    +
    >>> x = torch.tensor([[1, 2], [3, 4], [5, 6]])
    +>>> x.resize_(2, 2)
    +tensor([[ 1,  2],
    +        [ 3,  4]])
    +
    +
    +
    + +
    +
    +resize_as_(tensor) → Tensor
    +

    Resizes the self tensor to be the same size as the specified +tensor. This is equivalent to self.resize_(tensor.size()).

    +
    + +
    +
    +retain_grad()[source]
    +

    Enables .grad attribute for non-leaf Tensors.

    +
    + +
    +
    +rfft(signal_ndim, normalized=False, onesided=True) → Tensor
    +

    See torch.rfft()

    +
    + +
    +
    +roll(shifts, dims) → Tensor
    +

    See torch.roll()

    +
    + +
    +
    +rot90(k, dims) → Tensor
    +

    See torch.rot90()

    +
    + +
    +
    +round() → Tensor
    +

    See torch.round()

    +
    + +
    +
    +round_() → Tensor
    +

    In-place version of round()

    +
    + +
    +
    +rsqrt() → Tensor
    +

    See torch.rsqrt()

    +
    + +
    +
    +rsqrt_() → Tensor
    +

    In-place version of rsqrt()

    +
    + +
    +
    +scatter(dim, index, source) → Tensor
    +

    Out-of-place version of torch.Tensor.scatter_()

    +
    + +
    +
    +scatter_(dim, index, src) → Tensor
    +

    Writes all values from the tensor src into self at the indices +specified in the index tensor. For each value in src, its output +index is specified by its index in src for dimension != dim and by +the corresponding value in index for dimension = dim.

    +

    For a 3-D tensor, self is updated as:

    +
    self[index[i][j][k]][j][k] = src[i][j][k]  # if dim == 0
    +self[i][index[i][j][k]][k] = src[i][j][k]  # if dim == 1
    +self[i][j][index[i][j][k]] = src[i][j][k]  # if dim == 2
    +
    +
    +

    This is the reverse operation of the manner described in gather().

    +

    self, index and src (if it is a Tensor) should have same +number of dimensions. It is also required that index.size(d) <= src.size(d) +for all dimensions d, and that index.size(d) <= self.size(d) for all +dimensions d != dim.

    +

    Moreover, as for gather(), the values of index must be +between 0 and self.size(dim) - 1 inclusive, and all values in a row +along the specified dimension dim must be unique.

    +
    +
    Parameters
    +
      +
    • dim (int) – the axis along which to index

    • +
    • index (LongTensor) – the indices of elements to scatter, +can be either empty or the same size of src. +When empty, the operation returns identity

    • +
• src (Tensor) – the source element(s) to scatter, in case value is not specified

• +
• value (float) – the source element(s) to scatter, in case src is not specified

    • +
    +
    +
    +

    Example:

    +
    >>> x = torch.rand(2, 5)
    +>>> x
    +tensor([[ 0.3992,  0.2908,  0.9044,  0.4850,  0.6004],
    +        [ 0.5735,  0.9006,  0.6797,  0.4152,  0.1732]])
    +>>> torch.zeros(3, 5).scatter_(0, torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]]), x)
    +tensor([[ 0.3992,  0.9006,  0.6797,  0.4850,  0.6004],
    +        [ 0.0000,  0.2908,  0.0000,  0.4152,  0.0000],
    +        [ 0.5735,  0.0000,  0.9044,  0.0000,  0.1732]])
    +
    +>>> z = torch.zeros(2, 4).scatter_(1, torch.tensor([[2], [3]]), 1.23)
    +>>> z
    +tensor([[ 0.0000,  0.0000,  1.2300,  0.0000],
    +        [ 0.0000,  0.0000,  0.0000,  1.2300]])
    +
    +
    +
    + +
    +
    +scatter_add_(dim, index, other) → Tensor
    +

    Adds all values from the tensor other into self at the indices +specified in the index tensor in a similar fashion as +scatter_(). For each value in other, it is added to +an index in self which is specified by its index in other +for dimension != dim and by the corresponding value in index for +dimension = dim.

    +

    For a 3-D tensor, self is updated as:

    +
    self[index[i][j][k]][j][k] += other[i][j][k]  # if dim == 0
    +self[i][index[i][j][k]][k] += other[i][j][k]  # if dim == 1
    +self[i][j][index[i][j][k]] += other[i][j][k]  # if dim == 2
    +
    +
    +

    self, index and other should have same number of +dimensions. It is also required that index.size(d) <= other.size(d) for all +dimensions d, and that index.size(d) <= self.size(d) for all dimensions +d != dim.

    +

    Moreover, as for gather(), the values of index must be +between 0 and self.size(dim) - 1 inclusive, and all values in a row along +the specified dimension dim must be unique.

    +
    +

    Note

    +

    When using the CUDA backend, this operation may induce nondeterministic +behaviour that is not easily switched off. +Please see the notes on Reproducibility for background.

    +
    +
    +
    Parameters
    +
      +
    • dim (int) – the axis along which to index

    • +
    • index (LongTensor) – the indices of elements to scatter and add, +can be either empty or the same size of src. +When empty, the operation returns identity.

    • +
    • other (Tensor) – the source elements to scatter and add

    • +
    +
    +
    +

    Example:

    +
    >>> x = torch.rand(2, 5)
    +>>> x
    +tensor([[0.7404, 0.0427, 0.6480, 0.3806, 0.8328],
    +        [0.7953, 0.2009, 0.9154, 0.6782, 0.9620]])
    +>>> torch.ones(3, 5).scatter_add_(0, torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]]), x)
    +tensor([[1.7404, 1.2009, 1.9154, 1.3806, 1.8328],
    +        [1.0000, 1.0427, 1.0000, 1.6782, 1.0000],
    +        [1.7953, 1.0000, 1.6480, 1.0000, 1.9620]])
    +
    +
    +
    + +
    +
    +scatter_add(dim, index, source) → Tensor
    +

    Out-of-place version of torch.Tensor.scatter_add_()

    +
    + +
    +
    +select(dim, index) → Tensor
    +

    Slices the self tensor along the selected dimension at the given index. +This function returns a tensor with the given dimension removed.

    +
    +
    Parameters
    +
      +
    • dim (int) – the dimension to slice

    • +
    • index (int) – the index to select with

    • +
    +
    +
    +
    +

    Note

    +

    select() is equivalent to slicing. For example, +tensor.select(0, index) is equivalent to tensor[index] and +tensor.select(2, index) is equivalent to tensor[:,:,index].

    +
    +
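
A minimal sketch of the equivalence with slicing:

+
>>> x = torch.randn(3, 4, 5)
+>>> x.select(1, 2).size()  # the selected dimension is removed
+torch.Size([3, 5])
+>>> torch.equal(x.select(0, 1), x[1])
+True
+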
    + +
    +
    +set_(source=None, storage_offset=0, size=None, stride=None) → Tensor
    +

    Sets the underlying storage, size, and strides. If source is a tensor, +self tensor will share the same storage and have the same size and +strides as source. Changes to elements in one tensor will be reflected +in the other.

    +

    If source is a Storage, the method sets the underlying +storage, offset, size, and stride.

    +
    +
    Parameters
    +
      +
    • source (Tensor or Storage) – the tensor or storage to use

    • +
    • storage_offset (int, optional) – the offset in the storage

    • +
    • size (torch.Size, optional) – the desired size. Defaults to the size of the source.

    • +
    • stride (tuple, optional) – the desired stride. Defaults to C-contiguous strides.

    • +
    +
    +
    +
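
A minimal sketch showing the shared storage:

+
>>> src = torch.tensor([1., 2., 3.])
+>>> t = torch.tensor([0.])
+>>> t.set_(src)
+tensor([1., 2., 3.])
+>>> src[0] = 9.  # visible through t, since storage is shared
+>>> t
+tensor([9., 2., 3.])
+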
    + +
    +
    +share_memory_()[source]
    +

    Moves the underlying storage to shared memory.

    +

    This is a no-op if the underlying storage is already in shared memory +and for CUDA tensors. Tensors in shared memory cannot be resized.

    +
    + +
    +
    +short() → Tensor
    +

    self.short() is equivalent to self.to(torch.int16). See to().

    +
    + +
    +
    +sigmoid() → Tensor
    +

    See torch.sigmoid()

    +
    + +
    +
    +sigmoid_() → Tensor
    +

    In-place version of sigmoid()

    +
    + +
    +
    +sign() → Tensor
    +

    See torch.sign()

    +
    + +
    +
    +sign_() → Tensor
    +

    In-place version of sign()

    +
    + +
    +
    +sin() → Tensor
    +

    See torch.sin()

    +
    + +
    +
    +sin_() → Tensor
    +

    In-place version of sin()

    +
    + +
    +
    +sinh() → Tensor
    +

    See torch.sinh()

    +
    + +
    +
    +sinh_() → Tensor
    +

    In-place version of sinh()

    +
    + +
    +
    +size() → torch.Size
    +

    Returns the size of the self tensor. The returned value is a subclass of +tuple.

    +

    Example:

    +
    >>> torch.empty(3, 4, 5).size()
    +torch.Size([3, 4, 5])
    +
    +
    +
    + +
    +
    +slogdet() -> (Tensor, Tensor)
    +

    See torch.slogdet()

    +
    + +
    +
+solve(A) -> (Tensor, Tensor)
    +

    See torch.solve()

    +
    + +
    +
    +sort(dim=-1, descending=False) -> (Tensor, LongTensor)
    +

    See torch.sort()

    +
    + +
    +
    +split(split_size, dim=0)[source]
    +

    See torch.split()

    +
    + +
    +
+sparse_mask(mask) → Tensor
    +

Returns a new SparseTensor with values from the strided tensor self filtered by the indices of the sparse tensor mask; the values of mask are ignored. self and mask must have the same shape.

    +
    +
    Parameters
    +
      +
• mask (SparseTensor) – a SparseTensor whose indices determine which values of self are kept

    • +
    +
    +
    +

    Example:

    +
    >>> nnz = 5
    +>>> dims = [5, 5, 2, 2]
    +>>> I = torch.cat([torch.randint(0, dims[0], size=(nnz,)),
    +                   torch.randint(0, dims[1], size=(nnz,))], 0).reshape(2, nnz)
    +>>> V = torch.randn(nnz, dims[2], dims[3])
    +>>> size = torch.Size(dims)
    +>>> S = torch.sparse_coo_tensor(I, V, size).coalesce()
    +>>> D = torch.randn(dims)
    +>>> D.sparse_mask(S)
    +tensor(indices=tensor([[0, 0, 0, 2],
    +                       [0, 1, 4, 3]]),
    +       values=tensor([[[ 1.6550,  0.2397],
    +                       [-0.1611, -0.0779]],
    +
    +                      [[ 0.2326, -1.0558],
    +                       [ 1.4711,  1.9678]],
    +
    +                      [[-0.5138, -0.0411],
    +                       [ 1.9417,  0.5158]],
    +
    +                      [[ 0.0793,  0.0036],
    +                       [-0.2569, -0.1055]]]),
    +       size=(5, 5, 2, 2), nnz=4, layout=torch.sparse_coo)
    +
    +
    +
    + +
    +
    +sparse_dim() → int
    +

    If self is a sparse COO tensor (i.e., with torch.sparse_coo layout), +this returns the number of sparse dimensions. Otherwise, this throws an error.

    +

    See also Tensor.dense_dim().

    +
    + +
    +
    +sqrt() → Tensor
    +

    See torch.sqrt()

    +
    + +
    +
    +sqrt_() → Tensor
    +

    In-place version of sqrt()

    +
    + +
    +
    +squeeze(dim=None) → Tensor
    +

    See torch.squeeze()

    +
    + +
    +
    +squeeze_(dim=None) → Tensor
    +

    In-place version of squeeze()

    +
    + +
    +
    +std(dim=None, unbiased=True, keepdim=False) → Tensor
    +

    See torch.std()

    +
    + +
    +
    +stft(n_fft, hop_length=None, win_length=None, window=None, center=True, pad_mode='reflect', normalized=False, onesided=True)[source]
    +

    See torch.stft()

    +
    +

    Warning

    +

This function changed its signature at version 0.4.1. Calling it with the previous signature may cause an error or return an incorrect result.

    +
    +
    + +
    +
    +storage() → torch.Storage
    +

    Returns the underlying storage.

    +
    + +
    +
    +storage_offset() → int
    +

    Returns self tensor’s offset in the underlying storage in terms of +number of storage elements (not bytes).

    +

    Example:

    +
    >>> x = torch.tensor([1, 2, 3, 4, 5])
    +>>> x.storage_offset()
    +0
    +>>> x[3:].storage_offset()
    +3
    +
    +
    +
    + +
    +
    +storage_type() → type
    +

    Returns the type of the underlying storage.

    +
    + +
    +
    +stride(dim) → tuple or int
    +

    Returns the stride of self tensor.

    +

    Stride is the jump necessary to go from one element to the next one in the +specified dimension dim. A tuple of all strides is returned when no +argument is passed in. Otherwise, an integer value is returned as the stride in +the particular dimension dim.

    +
    +
    Parameters
    +

    dim (int, optional) – the desired dimension in which stride is required

    +
    +
    +

    Example:

    +
    >>> x = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
    +>>> x.stride()
    +(5, 1)
+>>> x.stride(0)
    +5
    +>>> x.stride(-1)
    +1
    +
    +
    +
    + +
    +
    +sub(value, other) → Tensor
    +

    Subtracts a scalar or tensor from self tensor. If both value and +other are specified, each element of other is scaled by +value before being used.

    +

    When other is a tensor, the shape of other must be +broadcastable with the shape of the underlying +tensor.

    +
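
A minimal sketch following the (value, other) signature above; the scaled form computes self - value * other:

+
>>> a = torch.tensor([4., 4.])
+>>> b = torch.tensor([1., 2.])
+>>> a.sub(b)
+tensor([3., 2.])
+>>> a.sub(2, b)  # a - 2 * b
+tensor([2., 0.])
+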
    + +
    +
    +sub_(x) → Tensor
    +

    In-place version of sub()

    +
    + +
    +
    +sum(dim=None, keepdim=False, dtype=None) → Tensor
    +

    See torch.sum()

    +
    + +
    +
    +sum_to_size(*size) → Tensor
    +

    Sum this tensor to size. +size must be broadcastable to this tensor size.

    +
    +
    Parameters
    +

    size (int...) – a sequence of integers defining the shape of the output tensor.

    +
    +
    +
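
A minimal sketch, summing a (2, 3) tensor down to broadcast-compatible shapes:

+
>>> x = torch.ones(2, 3)
+>>> x.sum_to_size(1, 3)
+tensor([[2., 2., 2.]])
+>>> x.sum_to_size(2, 1)
+tensor([[3.],
+        [3.]])
+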
    + +
    +
    +svd(some=True, compute_uv=True) -> (Tensor, Tensor, Tensor)
    +

    See torch.svd()

    +
    + +
    +
    +symeig(eigenvectors=False, upper=True) -> (Tensor, Tensor)
    +

    See torch.symeig()

    +
    + +
    +
    +t() → Tensor
    +

    See torch.t()

    +
    + +
    +
    +t_() → Tensor
    +

    In-place version of t()

    +
    + +
    +
    +to(*args, **kwargs) → Tensor
    +

    Performs Tensor dtype and/or device conversion. A torch.dtype and torch.device are +inferred from the arguments of self.to(*args, **kwargs).

    +
    +

    Note

    +

    If the self Tensor already +has the correct torch.dtype and torch.device, then self is returned. +Otherwise, the returned tensor is a copy of self with the desired +torch.dtype and torch.device.

    +
    +

    Here are the ways to call to:

    +
    +
    +to(dtype, non_blocking=False, copy=False) → Tensor
    +

    Returns a Tensor with the specified dtype

    +
    + +
    +
    +to(device=None, dtype=None, non_blocking=False, copy=False) → Tensor
    +

    Returns a Tensor with the specified device and (optional) +dtype. If dtype is None it is inferred to be self.dtype. +When non_blocking, tries to convert asynchronously with respect to +the host if possible, e.g., converting a CPU Tensor with pinned memory to a +CUDA Tensor. +When copy is set, a new Tensor is created even when the Tensor +already matches the desired conversion.

    +
    + +
    +
    +to(other, non_blocking=False, copy=False) → Tensor
    +

    Returns a Tensor with same torch.dtype and torch.device as +the Tensor other. When non_blocking, tries to convert +asynchronously with respect to the host if possible, e.g., converting a CPU +Tensor with pinned memory to a CUDA Tensor. +When copy is set, a new Tensor is created even when the Tensor +already matches the desired conversion.

    +
    + +

    Example:

    +
    >>> tensor = torch.randn(2, 2)  # Initially dtype=float32, device=cpu
    +>>> tensor.to(torch.float64)
    +tensor([[-0.5044,  0.0005],
    +        [ 0.3310, -0.0584]], dtype=torch.float64)
    +
    +>>> cuda0 = torch.device('cuda:0')
    +>>> tensor.to(cuda0)
    +tensor([[-0.5044,  0.0005],
    +        [ 0.3310, -0.0584]], device='cuda:0')
    +
    +>>> tensor.to(cuda0, dtype=torch.float64)
    +tensor([[-0.5044,  0.0005],
    +        [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
    +
    +>>> other = torch.randn((), dtype=torch.float64, device=cuda0)
    +>>> tensor.to(other, non_blocking=True)
    +tensor([[-0.5044,  0.0005],
    +        [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
    +
    +
    +
    + +
    +
    +to_mkldnn() → Tensor
    +

    Returns a copy of the tensor in torch.mkldnn layout.

    +
    + +
    +
    +take(indices) → Tensor
    +

    See torch.take()

    +
    + +
    +
    +tan() → Tensor
    +

    See torch.tan()

    +
    + +
    +
    +tan_() → Tensor
    +

    In-place version of tan()

    +
    + +
    +
    +tanh() → Tensor
    +

    See torch.tanh()

    +
    + +
    +
    +tanh_() → Tensor
    +

    In-place version of tanh()

    +
    + +
    +
    +tolist()
    +

tolist() → list or number

    +

    Returns the tensor as a (nested) list. For scalars, a standard +Python number is returned, just like with item(). +Tensors are automatically moved to the CPU first if necessary.

    +

    This operation is not differentiable.

    +

    Examples:

    +
    >>> a = torch.randn(2, 2)
    +>>> a.tolist()
    +[[0.012766935862600803, 0.5415473580360413],
    + [-0.08909505605697632, 0.7729271650314331]]
    +>>> a[0,0].tolist()
    +0.012766935862600803
    +
    +
    +
    + +
    +
    +topk(k, dim=None, largest=True, sorted=True) -> (Tensor, LongTensor)
    +

    See torch.topk()

    +
    + +
    +
    +to_sparse(sparseDims) → Tensor
    +

    Returns a sparse copy of the tensor. PyTorch supports sparse tensors in +coordinate format.

    +
    +
    Parameters
    +

    sparseDims (int, optional) – the number of sparse dimensions to include in the new sparse tensor

    +
    +
    +

    Example:

    +
    >>> d = torch.tensor([[0, 0, 0], [9, 0, 10], [0, 0, 0]])
    +>>> d
    +tensor([[ 0,  0,  0],
    +        [ 9,  0, 10],
    +        [ 0,  0,  0]])
    +>>> d.to_sparse()
    +tensor(indices=tensor([[1, 1],
    +                       [0, 2]]),
    +       values=tensor([ 9, 10]),
    +       size=(3, 3), nnz=2, layout=torch.sparse_coo)
    +>>> d.to_sparse(1)
    +tensor(indices=tensor([[1]]),
    +       values=tensor([[ 9,  0, 10]]),
    +       size=(3, 3), nnz=1, layout=torch.sparse_coo)
    +
    +
    +
    + +
    +
    +trace() → Tensor
    +

    See torch.trace()

    +
    + +
    +
    +transpose(dim0, dim1) → Tensor
    +

    See torch.transpose()

    +
    + +
    +
    +transpose_(dim0, dim1) → Tensor
    +

    In-place version of transpose()

    +
    + +
    +
    +triangular_solve(A, upper=True, transpose=False, unitriangular=False) -> (Tensor, Tensor)
    +

    See torch.triangular_solve()

    +
    + +
    +
    +tril(k=0) → Tensor
    +

    See torch.tril()

    +
    + +
    +
    +tril_(k=0) → Tensor
    +

    In-place version of tril()

    +
    + +
    +
    +triu(k=0) → Tensor
    +

    See torch.triu()

    +
    + +
    +
    +triu_(k=0) → Tensor
    +

    In-place version of triu()

    +
    + +
    +
    +trunc() → Tensor
    +

    See torch.trunc()

    +
    + +
    +
    +trunc_() → Tensor
    +

    In-place version of trunc()

    +
    + +
    +
    +type(dtype=None, non_blocking=False, **kwargs) → str or Tensor
    +

    Returns the type if dtype is not provided, else casts this object to +the specified type.

    +

    If this is already of the correct type, no copy is performed and the +original object is returned.

    +
    +
    Parameters
    +
      +
    • dtype (type or string) – The desired type

    • +
    • non_blocking (bool) – If True, and the source is in pinned memory +and destination is on the GPU or vice versa, the copy is performed +asynchronously with respect to the host. Otherwise, the argument +has no effect.

    • +
    • **kwargs – For compatibility, may contain the key async in place of +the non_blocking argument. The async arg is deprecated.

    • +
    +
    +
    +
    + +
    +
    +type_as(tensor) → Tensor
    +

    Returns this tensor cast to the type of the given tensor.

    +

    This is a no-op if the tensor is already of the correct type. This is +equivalent to self.type(tensor.type())

    +
    +
    Parameters
    +

    tensor (Tensor) – the tensor which has the desired type

    +
    +
    +
    + +
    +
    +unbind(dim=0) → seq
    +

    See torch.unbind()

    +
    + +
    +
    +unfold(dimension, size, step) → Tensor
    +

    Returns a tensor which contains all slices of size size from +self tensor in the dimension dimension.

    +

    Step between two slices is given by step.

    +

    If sizedim is the size of dimension dimension for self, the size of +dimension dimension in the returned tensor will be +(sizedim - size) / step + 1.

    +

    An additional dimension of size size is appended in the returned tensor.

    +
    +
    Parameters
    +
      +
    • dimension (int) – dimension in which unfolding happens

    • +
    • size (int) – the size of each slice that is unfolded

    • +
    • step (int) – the step between each slice

    • +
    +
    +
    +

    Example:

    +
    >>> x = torch.arange(1., 8)
    +>>> x
    +tensor([ 1.,  2.,  3.,  4.,  5.,  6.,  7.])
    +>>> x.unfold(0, 2, 1)
    +tensor([[ 1.,  2.],
    +        [ 2.,  3.],
    +        [ 3.,  4.],
    +        [ 4.,  5.],
    +        [ 5.,  6.],
    +        [ 6.,  7.]])
    +>>> x.unfold(0, 2, 2)
    +tensor([[ 1.,  2.],
    +        [ 3.,  4.],
    +        [ 5.,  6.]])
    +
    +
    +
    + +
    +
    +uniform_(from=0, to=1) → Tensor
    +

    Fills self tensor with numbers sampled from the continuous uniform +distribution:

    +
+\[P(x) = \dfrac{1}{\text{to} - \text{from}}\]
    +
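
A minimal sketch (samples are random, so the printed values are placeholders):

+
>>> t = torch.empty(3)
+>>> t.uniform_(0, 1)
+tensor([0.5036, 0.1320, 0.9507])
+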
    + +
    +
    +unique(sorted=True, return_inverse=False, return_counts=False, dim=None)[source]
    +

    Returns the unique elements of the input tensor.

    +

    See torch.unique()

    +
    + +
    +
    +unique_consecutive(return_inverse=False, return_counts=False, dim=None)[source]
    +

    Eliminates all but the first element from every consecutive group of equivalent elements.

    +

    See torch.unique_consecutive()

    +
    + +
    +
    +unsqueeze(dim) → Tensor
    +

    See torch.unsqueeze()

    +
    + +
    +
    +unsqueeze_(dim) → Tensor
    +

    In-place version of unsqueeze()

    +
    + +
    +
    +values() → Tensor
    +

    If self is a sparse COO tensor (i.e., with torch.sparse_coo layout), +this returns a view of the contained values tensor. Otherwise, this throws an +error.

    +

    See also Tensor.indices().

    +
    +

    Note

    +

    This method can only be called on a coalesced sparse tensor. See +Tensor.coalesce() for details.

    +
    +
    + +
    +
    +var(dim=None, unbiased=True, keepdim=False) → Tensor
    +

    See torch.var()

    +
    + +
    +
    +view(*shape) → Tensor
    +

    Returns a new tensor with the same data as the self tensor but of a +different shape.

    +

    The returned tensor shares the same data and must have the same number +of elements, but may have a different size. For a tensor to be viewed, the new +view size must be compatible with its original size and stride, i.e., each new +view dimension must either be a subspace of an original dimension, or only span +across original dimensions \(d, d+1, \dots, d+k\) that satisfy the following +contiguity-like condition that \(\forall i = 0, \dots, k-1\),

    +
    +\[\text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]\]
    +

    Otherwise, contiguous() needs to be called before the tensor can be +viewed. See also: reshape(), which returns a view if the shapes are +compatible, and copies (equivalent to calling contiguous()) otherwise.

    +
    +
    Parameters
    +

    shape (torch.Size or int...) – the desired size

    +
    +
    +

    Example:

    +
    >>> x = torch.randn(4, 4)
    +>>> x.size()
    +torch.Size([4, 4])
    +>>> y = x.view(16)
    +>>> y.size()
    +torch.Size([16])
    +>>> z = x.view(-1, 8)  # the size -1 is inferred from other dimensions
    +>>> z.size()
    +torch.Size([2, 8])
    +
    +>>> a = torch.randn(1, 2, 3, 4)
    +>>> a.size()
    +torch.Size([1, 2, 3, 4])
    +>>> b = a.transpose(1, 2)  # Swaps 2nd and 3rd dimension
    +>>> b.size()
    +torch.Size([1, 3, 2, 4])
    +>>> c = a.view(1, 3, 2, 4)  # Does not change tensor layout in memory
    +>>> c.size()
    +torch.Size([1, 3, 2, 4])
    +>>> torch.equal(b, c)
    +False
    +
    +
    +
    + +
    +
    +view_as(other) → Tensor
    +

    View this tensor as the same size as other. +self.view_as(other) is equivalent to self.view(other.size()).

    +

    Please see view() for more information about view.

    +
    +
    Parameters
    +

    other (torch.Tensor) – The result tensor has the same size +as other.

    +
    +
    +
    + +
    +
    +where(condition, y) → Tensor
    +

    self.where(condition, y) is equivalent to torch.where(condition, self, y). +See torch.where()

    +
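
A minimal sketch:

+
>>> x = torch.tensor([-1., 2., -3.])
+>>> x.where(x > 0, torch.zeros_like(x))
+tensor([0., 2., 0.])
+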
    + +
    +
    +zero_() → Tensor
    +

    Fills self tensor with zeros.

    +
    + +
    + +
    +
    +class torch.ByteTensor
    +

    The following methods are unique to torch.ByteTensor.

    +
    +
    +all()
    +
    +
    +all() → bool
    +
    + +

    Returns True if all elements in the tensor are non-zero, False otherwise.

    +

    Example:

    +
    >>> a = torch.randn(1, 3).byte() % 2
    +>>> a
    +tensor([[1, 0, 0]], dtype=torch.uint8)
    +>>> a.all()
    +tensor(0, dtype=torch.uint8)
    +
    +
    +
    +
    +all(dim, keepdim=False, out=None) → Tensor
    +
    + +

    Returns True if all elements in each row of the tensor in the given +dimension dim are non-zero, False otherwise.

    +

    If keepdim is True, the output tensor is of the same size as +input except in the dimension dim where it is of size 1. +Otherwise, dim is squeezed (see torch.squeeze()), resulting +in the output tensor having 1 fewer dimension than input.

    +
    +
    Parameters
    +
      +
    • dim (int) – the dimension to reduce

    • +
    • keepdim (bool) – whether the output tensor has dim retained or not

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> a = torch.randn(4, 2).byte() % 2
    +>>> a
    +tensor([[0, 0],
    +        [0, 0],
    +        [0, 1],
    +        [1, 1]], dtype=torch.uint8)
    +>>> a.all(dim=1)
    +tensor([0, 0, 0, 1], dtype=torch.uint8)
    +
    +
    +
    + +
    +
    +any()
    +
    +
    +any() → bool
    +
    + +

    Returns True if any elements in the tensor are non-zero, False otherwise.

    +

    Example:

    +
    >>> a = torch.randn(1, 3).byte() % 2
    +>>> a
    +tensor([[0, 0, 1]], dtype=torch.uint8)
    +>>> a.any()
    +tensor(1, dtype=torch.uint8)
    +
    +
    +
    +
    +any(dim, keepdim=False, out=None) → Tensor
    +
    + +

    Returns True if any elements in each row of the tensor in the given +dimension dim are non-zero, False otherwise.

    +

    If keepdim is True, the output tensor is of the same size as +input except in the dimension dim where it is of size 1. +Otherwise, dim is squeezed (see torch.squeeze()), resulting +in the output tensor having 1 fewer dimension than input.

    +
    +
    Parameters
    +
      +
    • dim (int) – the dimension to reduce

    • +
    • keepdim (bool) – whether the output tensor has dim retained or not

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> a = torch.randn(4, 2).byte() % 2
    +>>> a
    +tensor([[1, 0],
    +        [0, 0],
    +        [0, 1],
    +        [0, 0]], dtype=torch.uint8)
    +>>> a.any(dim=1)
    +tensor([1, 0, 1, 0], dtype=torch.uint8)
    +
    +
    +
    + +
    + +
\ No newline at end of file
diff --git a/docs/stable/torch.html b/docs/stable/torch.html
new file mode 100644
index 000000000000..5114ed429bea
--- /dev/null
+++ b/docs/stable/torch.html
@@ -0,0 +1,9393 @@
torch — PyTorch master documentation
    +

    torch

    +

The torch package contains data structures for multi-dimensional tensors and defines mathematical operations over them. Additionally, it provides many utilities for efficient serialization of Tensors and arbitrary types, and other useful utilities.

    +

It has a CUDA counterpart that enables you to run your tensor computations on an NVIDIA GPU with compute capability >= 3.0.

    +
    +

    Tensors

    +
    +
    +torch.is_tensor(obj)[source]
    +

    Returns True if obj is a PyTorch tensor.

    +
    +
    Parameters
    +

    obj (Object) – Object to test

    +
    +
    +
    + +
    +
    +torch.is_storage(obj)[source]
    +

    Returns True if obj is a PyTorch storage object.

    +
    +
    Parameters
    +

    obj (Object) – Object to test

    +
    +
    +
    + +
    +
    +torch.is_floating_point(input) -> (bool)
    +

Returns True if the data type of input is a floating point data type, i.e., one of torch.float64, torch.float32, and torch.float16.

    +
    +
    Parameters
    +

    input (Tensor) – the PyTorch tensor to test

    +
    +
    +
    + +
    +
    +torch.set_default_dtype(d)[source]
    +

    Sets the default floating point dtype to d. This type will be +used as default floating point type for type inference in +torch.tensor().

    +

    The default floating point dtype is initially torch.float32.

    +
    +
    Parameters
    +

    d (torch.dtype) – the floating point dtype to make the default

    +
    +
    +

    Example:

    +
    >>> torch.tensor([1.2, 3]).dtype           # initial default for floating point is torch.float32
    +torch.float32
    +>>> torch.set_default_dtype(torch.float64)
    +>>> torch.tensor([1.2, 3]).dtype           # a new floating point tensor
    +torch.float64
    +
    +
    +
    + +
    +
    +torch.get_default_dtype() → torch.dtype
    +

    Get the current default floating point torch.dtype.

    +

    Example:

    +
    >>> torch.get_default_dtype()  # initial default for floating point is torch.float32
    +torch.float32
    +>>> torch.set_default_dtype(torch.float64)
    +>>> torch.get_default_dtype()  # default is now changed to torch.float64
    +torch.float64
    +>>> torch.set_default_tensor_type(torch.FloatTensor)  # setting tensor type also affects this
    +>>> torch.get_default_dtype()  # changed to torch.float32, the dtype for torch.FloatTensor
    +torch.float32
    +
    +
    +
    + +
    +
    +torch.set_default_tensor_type(t)[source]
    +

    Sets the default torch.Tensor type to floating point tensor type +t. This type will also be used as default floating point type for +type inference in torch.tensor().

    +

    The default floating point tensor type is initially torch.FloatTensor.

    +
    +
    Parameters
    +

    t (type or string) – the floating point tensor type or its name

    +
    +
    +

    Example:

    +
    >>> torch.tensor([1.2, 3]).dtype    # initial default for floating point is torch.float32
    +torch.float32
    +>>> torch.set_default_tensor_type(torch.DoubleTensor)
    +>>> torch.tensor([1.2, 3]).dtype    # a new floating point tensor
    +torch.float64
    +
    +
    +
    + +
    +
    +torch.numel(input) → int
    +

    Returns the total number of elements in the input tensor.

    +
    +
    Parameters
    +

    input (Tensor) – the input tensor

    +
    +
    +

    Example:

    +
    >>> a = torch.randn(1, 2, 3, 4, 5)
    +>>> torch.numel(a)
    +120
    +>>> a = torch.zeros(4,4)
    +>>> torch.numel(a)
    +16
    +
    +
    +
    + +
    +
    +torch.set_printoptions(precision=None, threshold=None, edgeitems=None, linewidth=None, profile=None, sci_mode=None)[source]
    +

Set options for printing. Items shamelessly taken from NumPy.

    +
    +
    Parameters
    +
      +
    • precision – Number of digits of precision for floating point output +(default = 4).

    • +
    • threshold – Total number of array elements which trigger summarization +rather than full repr (default = 1000).

    • +
    • edgeitems – Number of array items in summary at beginning and end of +each dimension (default = 3).

    • +
    • linewidth – The number of characters per line for the purpose of +inserting line breaks (default = 80). Thresholded matrices will +ignore this parameter.

    • +
    • profile – Sane defaults for pretty printing. Can override with any of +the above options. (any one of default, short, full)

    • +
    • sci_mode – Enable (True) or disable (False) scientific notation. If +None (default) is specified, the value is defined by _Formatter

    • +
    +
    +
    +
    + +
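
Example (an illustrative sketch; the printed value below follows from precision=2):

>>> torch.set_printoptions(precision=2)
>>> torch.tensor([1.23456789])
tensor([1.23])
>>> torch.set_printoptions(profile='default')  # restore the default options
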
    +
    +torch.set_flush_denormal(mode) → bool
    +

    Disables denormal floating numbers on CPU.

    +

    Returns True if your system supports flushing denormal numbers and it +successfully configures flush denormal mode. set_flush_denormal() +is only supported on x86 architectures supporting SSE3.

    +
    +
    Parameters
    +

    mode (bool) – Controls whether to enable flush denormal mode or not

    +
    +
    +

    Example:

    +
    >>> torch.set_flush_denormal(True)
    +True
    +>>> torch.tensor([1e-323], dtype=torch.float64)
    +tensor([ 0.], dtype=torch.float64)
    +>>> torch.set_flush_denormal(False)
    +True
    +>>> torch.tensor([1e-323], dtype=torch.float64)
    +tensor(9.88131e-324 *
    +       [ 1.0000], dtype=torch.float64)
    +
    +
    +
    + +
    +

    Creation Ops

    +
    +

    Note

    +

Random sampling creation ops are listed under Random sampling and include: torch.rand(), torch.rand_like(), torch.randn(), torch.randn_like(), torch.randint(), torch.randint_like(), and torch.randperm(). You may also use torch.empty() with the In-place random sampling methods to create torch.Tensor objects with values sampled from a broader range of distributions.

    +
    +
    +
    +torch.tensor(data, dtype=None, device=None, requires_grad=False, pin_memory=False) → Tensor
    +

    Constructs a tensor with data.

    +
    +

    Warning

    +

    torch.tensor() always copies data. If you have a Tensor +data and want to avoid a copy, use torch.Tensor.requires_grad_() +or torch.Tensor.detach(). +If you have a NumPy ndarray and want to avoid a copy, use +torch.as_tensor().

    +
    +
    +

    Warning

    +

    When data is a tensor x, torch.tensor() reads out ‘the data’ from whatever it is passed, +and constructs a leaf variable. Therefore torch.tensor(x) is equivalent to x.clone().detach() +and torch.tensor(x, requires_grad=True) is equivalent to x.clone().detach().requires_grad_(True). +The equivalents using clone() and detach() are recommended.

    +
    +
    +
    Parameters
    +
      +
    • data (array_like) – Initial data for the tensor. Can be a list, tuple, +NumPy ndarray, scalar, and other types.

    • +
    • dtype (torch.dtype, optional) – the desired data type of returned tensor. +Default: if None, infers data type from data.

    • +
    • device (torch.device, optional) – the desired device of returned tensor. +Default: if None, uses the current device for the default tensor type +(see torch.set_default_tensor_type()). device will be the CPU +for CPU tensor types and the current CUDA device for CUDA tensor types.

    • +
    • requires_grad (bool, optional) – If autograd should record operations on the +returned tensor. Default: False.

    • +
    • pin_memory (bool, optional) – If set, returned tensor would be allocated in +the pinned memory. Works only for CPU tensors. Default: False.

    • +
    +
    +
    +

    Example:

    +
    >>> torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])
    +tensor([[ 0.1000,  1.2000],
    +        [ 2.2000,  3.1000],
    +        [ 4.9000,  5.2000]])
    +
    +>>> torch.tensor([0, 1])  # Type inference on data
    +tensor([ 0,  1])
    +
    +>>> torch.tensor([[0.11111, 0.222222, 0.3333333]],
    +                 dtype=torch.float64,
    +                 device=torch.device('cuda:0'))  # creates a torch.cuda.DoubleTensor
    +tensor([[ 0.1111,  0.2222,  0.3333]], dtype=torch.float64, device='cuda:0')
    +
    +>>> torch.tensor(3.14159)  # Create a scalar (zero-dimensional tensor)
    +tensor(3.1416)
    +
    +>>> torch.tensor([])  # Create an empty tensor (of size (0,))
    +tensor([])
    +
    +
    +
    + +
    +
    +torch.sparse_coo_tensor(indices, values, size=None, dtype=None, device=None, requires_grad=False) → Tensor
    +

Constructs a sparse tensor in COO(rdinate) format with non-zero elements at the given indices and with the given values. A sparse tensor can be uncoalesced; in that case, there are duplicate coordinates in the indices, and the value at that index is the sum of all duplicate value entries: see torch.sparse.

    +
    +
    Parameters
    +
      +
    • indices (array_like) – Initial data for the tensor. Can be a list, tuple, +NumPy ndarray, scalar, and other types. Will be cast to a torch.LongTensor +internally. The indices are the coordinates of the non-zero values in the matrix, and thus +should be two-dimensional where the first dimension is the number of tensor dimensions and +the second dimension is the number of non-zero values.

    • +
    • values (array_like) – Initial values for the tensor. Can be a list, tuple, +NumPy ndarray, scalar, and other types.

    • +
    • size (list, tuple, or torch.Size, optional) – Size of the sparse tensor. If not +provided the size will be inferred as the minimum size big enough to hold all non-zero +elements.

    • +
    • dtype (torch.dtype, optional) – the desired data type of returned tensor. +Default: if None, infers data type from values.

    • +
    • device (torch.device, optional) – the desired device of returned tensor. +Default: if None, uses the current device for the default tensor type +(see torch.set_default_tensor_type()). device will be the CPU +for CPU tensor types and the current CUDA device for CUDA tensor types.

    • +
    • requires_grad (bool, optional) – If autograd should record operations on the +returned tensor. Default: False.

    • +
    +
    +
    +

    Example:

    +
    >>> i = torch.tensor([[0, 1, 1],
    +                      [2, 0, 2]])
    +>>> v = torch.tensor([3, 4, 5], dtype=torch.float32)
    +>>> torch.sparse_coo_tensor(i, v, [2, 4])
    +tensor(indices=tensor([[0, 1, 1],
    +                       [2, 0, 2]]),
    +       values=tensor([3., 4., 5.]),
    +       size=(2, 4), nnz=3, layout=torch.sparse_coo)
    +
    +>>> torch.sparse_coo_tensor(i, v)  # Shape inference
    +tensor(indices=tensor([[0, 1, 1],
    +                       [2, 0, 2]]),
    +       values=tensor([3., 4., 5.]),
    +       size=(2, 3), nnz=3, layout=torch.sparse_coo)
    +
    +>>> torch.sparse_coo_tensor(i, v, [2, 4],
    +                            dtype=torch.float64,
    +                            device=torch.device('cuda:0'))
    +tensor(indices=tensor([[0, 1, 1],
    +                       [2, 0, 2]]),
    +       values=tensor([3., 4., 5.]),
    +       device='cuda:0', size=(2, 4), nnz=3, dtype=torch.float64,
    +       layout=torch.sparse_coo)
    +
    +# Create an empty sparse tensor with the following invariants:
    +#   1. sparse_dim + dense_dim = len(SparseTensor.shape)
    +#   2. SparseTensor._indices().shape = (sparse_dim, nnz)
    +#   3. SparseTensor._values().shape = (nnz, SparseTensor.shape[sparse_dim:])
    +#
    +# For instance, to create an empty sparse tensor with nnz = 0, dense_dim = 0 and
    +# sparse_dim = 1 (hence indices is a 2D tensor of shape = (1, 0))
    +>>> S = torch.sparse_coo_tensor(torch.empty([1, 0]), [], [1])
    +tensor(indices=tensor([], size=(1, 0)),
    +       values=tensor([], size=(0,)),
    +       size=(1,), nnz=0, layout=torch.sparse_coo)
    +
    +# and to create an empty sparse tensor with nnz = 0, dense_dim = 1 and
    +# sparse_dim = 1
    +>>> S = torch.sparse_coo_tensor(torch.empty([1, 0]), torch.empty([0, 2]), [1, 2])
    +tensor(indices=tensor([], size=(1, 0)),
    +       values=tensor([], size=(0, 2)),
    +       size=(1, 2), nnz=0, layout=torch.sparse_coo)
    +
    +
    +
    + +
    +
    +torch.as_tensor(data, dtype=None, device=None) → Tensor
    +

Converts the data into a torch.Tensor. If the data is already a Tensor with the same dtype and device, no copy is performed; otherwise a new Tensor is returned, with the computational graph retained if the data Tensor has requires_grad=True. Similarly, if the data is an ndarray of the corresponding dtype and the device is the CPU, no copy is performed.

    +
    +
    Parameters
    +
      +
    • data (array_like) – Initial data for the tensor. Can be a list, tuple, +NumPy ndarray, scalar, and other types.

    • +
    • dtype (torch.dtype, optional) – the desired data type of returned tensor. +Default: if None, infers data type from data.

    • +
    • device (torch.device, optional) – the desired device of returned tensor. +Default: if None, uses the current device for the default tensor type +(see torch.set_default_tensor_type()). device will be the CPU +for CPU tensor types and the current CUDA device for CUDA tensor types.

    • +
    +
    +
    +

    Example:

    +
    >>> a = numpy.array([1, 2, 3])
    +>>> t = torch.as_tensor(a)
    +>>> t
    +tensor([ 1,  2,  3])
    +>>> t[0] = -1
    +>>> a
    +array([-1,  2,  3])
    +
    +>>> a = numpy.array([1, 2, 3])
    +>>> t = torch.as_tensor(a, device=torch.device('cuda'))
    +>>> t
    +tensor([ 1,  2,  3])
    +>>> t[0] = -1
    +>>> a
    +array([1,  2,  3])
    +
    +
    +
    + +
    +
    +torch.as_strided(input, size, stride, storage_offset=0) → Tensor
    +

    Create a view of an existing torch.Tensor input with specified +size, stride and storage_offset.

    +
    +

    Warning

    +

    More than one element of a created tensor may refer to a single memory +location. As a result, in-place operations (especially ones that are +vectorized) may result in incorrect behavior. If you need to write to +the tensors, please clone them first.

    +

    Many PyTorch functions, which return a view of a tensor, are internally +implemented with this function. Those functions, like +torch.Tensor.expand(), are easier to read and are therefore more +advisable to use.

    +
    +
    +
    Parameters
    +
      +
    • input (Tensor) – the input tensor

    • +
    • size (tuple or ints) – the shape of the output tensor

    • +
    • stride (tuple or ints) – the stride of the output tensor

    • +
    • storage_offset (int, optional) – the offset in the underlying storage of the output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> x = torch.randn(3, 3)
    +>>> x
    +tensor([[ 0.9039,  0.6291,  1.0795],
    +        [ 0.1586,  2.1939, -0.4900],
    +        [-0.1909, -0.7503,  1.9355]])
    +>>> t = torch.as_strided(x, (2, 2), (1, 2))
    +>>> t
    +tensor([[0.9039, 1.0795],
    +        [0.6291, 0.1586]])
    +>>> t = torch.as_strided(x, (2, 2), (1, 2), 1)
    +tensor([[0.6291, 0.1586],
    +        [1.0795, 2.1939]])
    +
    +
    +
    + +
    +
    +torch.from_numpy(ndarray) → Tensor
    +

    Creates a Tensor from a numpy.ndarray.

    +

    The returned tensor and ndarray share the same memory. Modifications to +the tensor will be reflected in the ndarray and vice versa. The returned +tensor is not resizable.

    +

    It currently accepts ndarray with dtypes of numpy.float64, +numpy.float32, numpy.float16, numpy.int64, numpy.int32, +numpy.int16, numpy.int8, numpy.uint8, and numpy.bool.

    +

    Example:

    +
    >>> a = numpy.array([1, 2, 3])
    +>>> t = torch.from_numpy(a)
    +>>> t
    +tensor([ 1,  2,  3])
    +>>> t[0] = -1
    +>>> a
    +array([-1,  2,  3])
    +
    +
    +
    + +
    +
    +torch.zeros(*size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor
    +

    Returns a tensor filled with the scalar value 0, with the shape defined +by the variable argument size.

    +
    +
    Parameters
    +
      +
    • size (int...) – a sequence of integers defining the shape of the output tensor. +Can be a variable number of arguments or a collection like a list or tuple.

    • +
    • out (Tensor, optional) – the output tensor

    • +
    • dtype (torch.dtype, optional) – the desired data type of returned tensor. +Default: if None, uses a global default (see torch.set_default_tensor_type()).

    • +
    • layout (torch.layout, optional) – the desired layout of returned Tensor. +Default: torch.strided.

    • +
    • device (torch.device, optional) – the desired device of returned tensor. +Default: if None, uses the current device for the default tensor type +(see torch.set_default_tensor_type()). device will be the CPU +for CPU tensor types and the current CUDA device for CUDA tensor types.

    • +
    • requires_grad (bool, optional) – If autograd should record operations on the +returned tensor. Default: False.

    • +
    +
    +
    +

    Example:

    +
    >>> torch.zeros(2, 3)
    +tensor([[ 0.,  0.,  0.],
    +        [ 0.,  0.,  0.]])
    +
    +>>> torch.zeros(5)
    +tensor([ 0.,  0.,  0.,  0.,  0.])
    +
    +
    +
    + +
    +
    +torch.zeros_like(input, dtype=None, layout=None, device=None, requires_grad=False) → Tensor
    +

    Returns a tensor filled with the scalar value 0, with the same size as +input. torch.zeros_like(input) is equivalent to +torch.zeros(input.size(), dtype=input.dtype, layout=input.layout, device=input.device).

    +
    +

    Warning

    +

    As of 0.4, this function does not support an out keyword. As an alternative, +the old torch.zeros_like(input, out=output) is equivalent to +torch.zeros(input.size(), out=output).

    +
    +
    +
    Parameters
    +
      +
    • input (Tensor) – the size of input will determine size of the output tensor

    • +
    • dtype (torch.dtype, optional) – the desired data type of returned Tensor. +Default: if None, defaults to the dtype of input.

    • +
    • layout (torch.layout, optional) – the desired layout of returned tensor. +Default: if None, defaults to the layout of input.

    • +
    • device (torch.device, optional) – the desired device of returned tensor. +Default: if None, defaults to the device of input.

    • +
    • requires_grad (bool, optional) – If autograd should record operations on the +returned tensor. Default: False.

    • +
    +
    +
    +

    Example:

    +
    >>> input = torch.empty(2, 3)
    +>>> torch.zeros_like(input)
    +tensor([[ 0.,  0.,  0.],
    +        [ 0.,  0.,  0.]])
    +
    +
    +
    + +
    +
    +torch.ones(*size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor
    +

    Returns a tensor filled with the scalar value 1, with the shape defined +by the variable argument size.

    +
    +
    Parameters
    +
      +
    • size (int...) – a sequence of integers defining the shape of the output tensor. +Can be a variable number of arguments or a collection like a list or tuple.

    • +
    • out (Tensor, optional) – the output tensor

    • +
    • dtype (torch.dtype, optional) – the desired data type of returned tensor. +Default: if None, uses a global default (see torch.set_default_tensor_type()).

    • +
    • layout (torch.layout, optional) – the desired layout of returned Tensor. +Default: torch.strided.

    • +
    • device (torch.device, optional) – the desired device of returned tensor. +Default: if None, uses the current device for the default tensor type +(see torch.set_default_tensor_type()). device will be the CPU +for CPU tensor types and the current CUDA device for CUDA tensor types.

    • +
    • requires_grad (bool, optional) – If autograd should record operations on the +returned tensor. Default: False.

    • +
    +
    +
    +

    Example:

    +
    >>> torch.ones(2, 3)
    +tensor([[ 1.,  1.,  1.],
    +        [ 1.,  1.,  1.]])
    +
    +>>> torch.ones(5)
    +tensor([ 1.,  1.,  1.,  1.,  1.])
    +
    +
    +
    + +
    +
    +torch.ones_like(input, dtype=None, layout=None, device=None, requires_grad=False) → Tensor
    +

    Returns a tensor filled with the scalar value 1, with the same size as +input. torch.ones_like(input) is equivalent to +torch.ones(input.size(), dtype=input.dtype, layout=input.layout, device=input.device).

    +
    +

    Warning

    +

    As of 0.4, this function does not support an out keyword. As an alternative, +the old torch.ones_like(input, out=output) is equivalent to +torch.ones(input.size(), out=output).

    +
    +
    +
    Parameters
    +
      +
    • input (Tensor) – the size of input will determine size of the output tensor

    • +
    • dtype (torch.dtype, optional) – the desired data type of returned Tensor. +Default: if None, defaults to the dtype of input.

    • +
    • layout (torch.layout, optional) – the desired layout of returned tensor. +Default: if None, defaults to the layout of input.

    • +
    • device (torch.device, optional) – the desired device of returned tensor. +Default: if None, defaults to the device of input.

    • +
    • requires_grad (bool, optional) – If autograd should record operations on the +returned tensor. Default: False.

    • +
    +
    +
    +

    Example:

    +
    >>> input = torch.empty(2, 3)
    +>>> torch.ones_like(input)
    +tensor([[ 1.,  1.,  1.],
    +        [ 1.,  1.,  1.]])
    +
    +
    +
    + +
    +
    +torch.arange(start=0, end, step=1, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor
    +

    Returns a 1-D tensor of size \(\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil\) +with values from the interval [start, end) taken with common difference +step beginning from start.

    +

    Note that non-integer step is subject to floating point rounding errors when +comparing against end; to avoid inconsistency, we advise adding a small epsilon to end +in such cases.

    +
+\[\text{out}_{i+1} = \text{out}_{i} + \text{step}\]
    +
    +
    Parameters
    +
      +
    • start (Number) – the starting value for the set of points. Default: 0.

    • +
    • end (Number) – the ending value for the set of points

    • +
    • step (Number) – the gap between each pair of adjacent points. Default: 1.

    • +
    • out (Tensor, optional) – the output tensor

    • +
• dtype (torch.dtype, optional) – the desired data type of returned tensor. Default: if None, uses a global default (see torch.set_default_tensor_type()). If dtype is not given, infer the data type from the other input arguments. If any of start, end, or step are floating-point, the dtype is inferred to be the default dtype, see get_default_dtype(). Otherwise, the dtype is inferred to be torch.int64.

    • +
    • layout (torch.layout, optional) – the desired layout of returned Tensor. +Default: torch.strided.

    • +
    • device (torch.device, optional) – the desired device of returned tensor. +Default: if None, uses the current device for the default tensor type +(see torch.set_default_tensor_type()). device will be the CPU +for CPU tensor types and the current CUDA device for CUDA tensor types.

    • +
    • requires_grad (bool, optional) – If autograd should record operations on the +returned tensor. Default: False.

    • +
    +
    +
    +

    Example:

    +
    >>> torch.arange(5)
    +tensor([ 0,  1,  2,  3,  4])
    +>>> torch.arange(1, 4)
    +tensor([ 1,  2,  3])
    +>>> torch.arange(1, 2.5, 0.5)
    +tensor([ 1.0000,  1.5000,  2.0000])
    +
    +
    +
    + +
    +
    +torch.range(start=0, end, step=1, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor
    +

    Returns a 1-D tensor of size \(\left\lfloor \frac{\text{end} - \text{start}}{\text{step}} \right\rfloor + 1\) +with values from start to end with step step. Step is +the gap between two values in the tensor.

    +
+\[\text{out}_{i+1} = \text{out}_{i} + \text{step}\]
    +
    +

    Warning

    +

    This function is deprecated in favor of torch.arange().

    +
    +
    +
    Parameters
    +
      +
    • start (float) – the starting value for the set of points. Default: 0.

    • +
    • end (float) – the ending value for the set of points

    • +
    • step (float) – the gap between each pair of adjacent points. Default: 1.

    • +
    • out (Tensor, optional) – the output tensor

    • +
• dtype (torch.dtype, optional) – the desired data type of returned tensor. Default: if None, uses a global default (see torch.set_default_tensor_type()). If dtype is not given, infer the data type from the other input arguments. If any of start, end, or step are floating-point, the dtype is inferred to be the default dtype, see get_default_dtype(). Otherwise, the dtype is inferred to be torch.int64.

    • +
    • layout (torch.layout, optional) – the desired layout of returned Tensor. +Default: torch.strided.

    • +
    • device (torch.device, optional) – the desired device of returned tensor. +Default: if None, uses the current device for the default tensor type +(see torch.set_default_tensor_type()). device will be the CPU +for CPU tensor types and the current CUDA device for CUDA tensor types.

    • +
    • requires_grad (bool, optional) – If autograd should record operations on the +returned tensor. Default: False.

    • +
    +
    +
    +

    Example:

    +
    >>> torch.range(1, 4)
    +tensor([ 1.,  2.,  3.,  4.])
    +>>> torch.range(1, 4, 0.5)
    +tensor([ 1.0000,  1.5000,  2.0000,  2.5000,  3.0000,  3.5000,  4.0000])
    +
    +
    +
    + +
    +
    +torch.linspace(start, end, steps=100, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor
    +

    Returns a one-dimensional tensor of steps +equally spaced points between start and end.

    +

    The output tensor is 1-D of size steps.

    +
    +
    Parameters
    +
      +
    • start (float) – the starting value for the set of points

    • +
    • end (float) – the ending value for the set of points

    • +
    • steps (int) – number of points to sample between start +and end. Default: 100.

    • +
    • out (Tensor, optional) – the output tensor

    • +
    • dtype (torch.dtype, optional) – the desired data type of returned tensor. +Default: if None, uses a global default (see torch.set_default_tensor_type()).

    • +
    • layout (torch.layout, optional) – the desired layout of returned Tensor. +Default: torch.strided.

    • +
    • device (torch.device, optional) – the desired device of returned tensor. +Default: if None, uses the current device for the default tensor type +(see torch.set_default_tensor_type()). device will be the CPU +for CPU tensor types and the current CUDA device for CUDA tensor types.

    • +
    • requires_grad (bool, optional) – If autograd should record operations on the +returned tensor. Default: False.

    • +
    +
    +
    +

    Example:

    +
    >>> torch.linspace(3, 10, steps=5)
    +tensor([  3.0000,   4.7500,   6.5000,   8.2500,  10.0000])
    +>>> torch.linspace(-10, 10, steps=5)
    +tensor([-10.,  -5.,   0.,   5.,  10.])
    +>>> torch.linspace(start=-10, end=10, steps=5)
    +tensor([-10.,  -5.,   0.,   5.,  10.])
    +>>> torch.linspace(start=-10, end=10, steps=1)
    +tensor([-10.])
    +
    +
    +
    + +
    +
    +torch.logspace(start, end, steps=100, base=10.0, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor
    +

    Returns a one-dimensional tensor of steps points +logarithmically spaced with base base between +\({\text{base}}^{\text{start}}\) and \({\text{base}}^{\text{end}}\).

    +

    The output tensor is 1-D of size steps.

    +
    +
    Parameters
    +
      +
    • start (float) – the starting value for the set of points

    • +
    • end (float) – the ending value for the set of points

    • +
    • steps (int) – number of points to sample between start +and end. Default: 100.

    • +
    • base (float) – base of the logarithm function. Default: 10.0.

    • +
    • out (Tensor, optional) – the output tensor

    • +
    • dtype (torch.dtype, optional) – the desired data type of returned tensor. +Default: if None, uses a global default (see torch.set_default_tensor_type()).

    • +
    • layout (torch.layout, optional) – the desired layout of returned Tensor. +Default: torch.strided.

    • +
    • device (torch.device, optional) – the desired device of returned tensor. +Default: if None, uses the current device for the default tensor type +(see torch.set_default_tensor_type()). device will be the CPU +for CPU tensor types and the current CUDA device for CUDA tensor types.

    • +
    • requires_grad (bool, optional) – If autograd should record operations on the +returned tensor. Default: False.

    • +
    +
    +
    +

    Example:

    +
    >>> torch.logspace(start=-10, end=10, steps=5)
    +tensor([ 1.0000e-10,  1.0000e-05,  1.0000e+00,  1.0000e+05,  1.0000e+10])
    +>>> torch.logspace(start=0.1, end=1.0, steps=5)
    +tensor([  1.2589,   2.1135,   3.5481,   5.9566,  10.0000])
    +>>> torch.logspace(start=0.1, end=1.0, steps=1)
    +tensor([1.2589])
    +>>> torch.logspace(start=2, end=2, steps=1, base=2)
    +tensor([4.0])
    +
    +
    +
    + +
    +
    +torch.eye(n, m=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor
    +

    Returns a 2-D tensor with ones on the diagonal and zeros elsewhere.

    +
    +
    Parameters
    +
      +
    • n (int) – the number of rows

    • +
    • m (int, optional) – the number of columns with default being n

    • +
    • out (Tensor, optional) – the output tensor

    • +
    • dtype (torch.dtype, optional) – the desired data type of returned tensor. +Default: if None, uses a global default (see torch.set_default_tensor_type()).

    • +
    • layout (torch.layout, optional) – the desired layout of returned Tensor. +Default: torch.strided.

    • +
    • device (torch.device, optional) – the desired device of returned tensor. +Default: if None, uses the current device for the default tensor type +(see torch.set_default_tensor_type()). device will be the CPU +for CPU tensor types and the current CUDA device for CUDA tensor types.

    • +
    • requires_grad (bool, optional) – If autograd should record operations on the +returned tensor. Default: False.

    • +
    +
    +
    Returns
    +

    A 2-D tensor with ones on the diagonal and zeros elsewhere

    +
    +
    Return type
    +

    Tensor

    +
    +
    +

    Example:

    +
    >>> torch.eye(3)
    +tensor([[ 1.,  0.,  0.],
    +        [ 0.,  1.,  0.],
    +        [ 0.,  0.,  1.]])
    +
    +
    +
    + +
    +
    +torch.empty(*size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) → Tensor
    +

    Returns a tensor filled with uninitialized data. The shape of the tensor is +defined by the variable argument size.

    +
    +
    Parameters
    +
      +
    • size (int...) – a sequence of integers defining the shape of the output tensor. +Can be a variable number of arguments or a collection like a list or tuple.

    • +
    • out (Tensor, optional) – the output tensor

    • +
    • dtype (torch.dtype, optional) – the desired data type of returned tensor. +Default: if None, uses a global default (see torch.set_default_tensor_type()).

    • +
    • layout (torch.layout, optional) – the desired layout of returned Tensor. +Default: torch.strided.

    • +
    • device (torch.device, optional) – the desired device of returned tensor. +Default: if None, uses the current device for the default tensor type +(see torch.set_default_tensor_type()). device will be the CPU +for CPU tensor types and the current CUDA device for CUDA tensor types.

    • +
    • requires_grad (bool, optional) – If autograd should record operations on the +returned tensor. Default: False.

    • +
    • pin_memory (bool, optional) – If set, returned tensor would be allocated in +the pinned memory. Works only for CPU tensors. Default: False.

    • +
    +
    +
    +

    Example:

    +
    >>> torch.empty(2, 3)
    +tensor(1.00000e-08 *
    +       [[ 6.3984,  0.0000,  0.0000],
    +        [ 0.0000,  0.0000,  0.0000]])
    +
    +
    +
    + +
    +
    +torch.empty_like(input, dtype=None, layout=None, device=None, requires_grad=False) → Tensor
    +

    Returns an uninitialized tensor with the same size as input. +torch.empty_like(input) is equivalent to +torch.empty(input.size(), dtype=input.dtype, layout=input.layout, device=input.device).

    +
    +
    Parameters
    +
      +
    • input (Tensor) – the size of input will determine size of the output tensor

    • +
    • dtype (torch.dtype, optional) – the desired data type of returned Tensor. +Default: if None, defaults to the dtype of input.

    • +
    • layout (torch.layout, optional) – the desired layout of returned tensor. +Default: if None, defaults to the layout of input.

    • +
    • device (torch.device, optional) – the desired device of returned tensor. +Default: if None, defaults to the device of input.

    • +
    • requires_grad (bool, optional) – If autograd should record operations on the +returned tensor. Default: False.

    • +
    +
    +
    +

    Example:

    +
    >>> torch.empty((2,3), dtype=torch.int64)
    +tensor([[ 9.4064e+13,  2.8000e+01,  9.3493e+13],
    +        [ 7.5751e+18,  7.1428e+18,  7.5955e+18]])
    +
    +
    +
    + +
    +
    +torch.full(size, fill_value, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor
    +

    Returns a tensor of size size filled with fill_value.

    +
    +
    Parameters
    +
      +
    • size (int...) – a list, tuple, or torch.Size of integers defining the +shape of the output tensor.

    • +
    • fill_value – the number to fill the output tensor with.

    • +
    • out (Tensor, optional) – the output tensor

    • +
    • dtype (torch.dtype, optional) – the desired data type of returned tensor. +Default: if None, uses a global default (see torch.set_default_tensor_type()).

    • +
    • layout (torch.layout, optional) – the desired layout of returned Tensor. +Default: torch.strided.

    • +
    • device (torch.device, optional) – the desired device of returned tensor. +Default: if None, uses the current device for the default tensor type +(see torch.set_default_tensor_type()). device will be the CPU +for CPU tensor types and the current CUDA device for CUDA tensor types.

    • +
    • requires_grad (bool, optional) – If autograd should record operations on the +returned tensor. Default: False.

    • +
    +
    +
    +

    Example:

    +
    >>> torch.full((2, 3), 3.141592)
    +tensor([[ 3.1416,  3.1416,  3.1416],
    +        [ 3.1416,  3.1416,  3.1416]])
    +
    +
    +
    + +
    +
    +torch.full_like(input, fill_value, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor
    +

    Returns a tensor with the same size as input filled with fill_value. +torch.full_like(input, fill_value) is equivalent to +torch.full(input.size(), fill_value, dtype=input.dtype, layout=input.layout, device=input.device).

    +
    +
    Parameters
    +
      +
    • input (Tensor) – the size of input will determine size of the output tensor

    • +
    • fill_value – the number to fill the output tensor with.

    • +
    • dtype (torch.dtype, optional) – the desired data type of returned Tensor. +Default: if None, defaults to the dtype of input.

    • +
    • layout (torch.layout, optional) – the desired layout of returned tensor. +Default: if None, defaults to the layout of input.

    • +
    • device (torch.device, optional) – the desired device of returned tensor. +Default: if None, defaults to the device of input.

    • +
    • requires_grad (bool, optional) – If autograd should record operations on the +returned tensor. Default: False.

    • +
    +
    +
    +
    + +
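
Example (an illustrative sketch):

>>> x = torch.zeros(2, 3)
>>> torch.full_like(x, 7.)  # same size, dtype, and device as x
tensor([[7., 7., 7.],
        [7., 7., 7.]])
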
    +
    +

    Indexing, Slicing, Joining, Mutating Ops

    +
    +
    +torch.cat(tensors, dim=0, out=None) → Tensor
    +

Concatenates the given sequence of tensors in the given dimension. All tensors must either have the same shape (except in the concatenating dimension) or be empty.

    +

    torch.cat() can be seen as an inverse operation for torch.split() +and torch.chunk().

    +

    torch.cat() can be best understood via examples.

    +
    +
    Parameters
    +
      +
    • tensors (sequence of Tensors) – any python sequence of tensors of the same type. +Non-empty tensors provided must have the same shape, except in the +cat dimension.

    • +
    • dim (int, optional) – the dimension over which the tensors are concatenated

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> x = torch.randn(2, 3)
    +>>> x
    +tensor([[ 0.6580, -1.0969, -0.4614],
    +        [-0.1034, -0.5790,  0.1497]])
    +>>> torch.cat((x, x, x), 0)
    +tensor([[ 0.6580, -1.0969, -0.4614],
    +        [-0.1034, -0.5790,  0.1497],
    +        [ 0.6580, -1.0969, -0.4614],
    +        [-0.1034, -0.5790,  0.1497],
    +        [ 0.6580, -1.0969, -0.4614],
    +        [-0.1034, -0.5790,  0.1497]])
    +>>> torch.cat((x, x, x), 1)
    +tensor([[ 0.6580, -1.0969, -0.4614,  0.6580, -1.0969, -0.4614,  0.6580,
    +         -1.0969, -0.4614],
    +        [-0.1034, -0.5790,  0.1497, -0.1034, -0.5790,  0.1497, -0.1034,
    +         -0.5790,  0.1497]])
    +
    +
    +
    + +
    +
    +torch.chunk(input, chunks, dim=0) → List of Tensors
    +

    Splits a tensor into a specific number of chunks.

    +

    Last chunk will be smaller if the tensor size along the given dimension +dim is not divisible by chunks.

    +
    +
    Parameters
    +
      +
    • input (Tensor) – the tensor to split

    • +
    • chunks (int) – number of chunks to return

    • +
    • dim (int) – dimension along which to split the tensor

    • +
    +
    +
    +
    + +
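
Example (an illustrative sketch):

>>> x = torch.arange(5)
>>> torch.chunk(x, 2)  # 5 is not divisible by 2, so the last chunk is smaller
(tensor([0, 1, 2]), tensor([3, 4]))
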
    +
    +torch.gather(input, dim, index, out=None, sparse_grad=False) → Tensor
    +

    Gathers values along an axis specified by dim.

    +

    For a 3-D tensor the output is specified by:

    +
    out[i][j][k] = input[index[i][j][k]][j][k]  # if dim == 0
    +out[i][j][k] = input[i][index[i][j][k]][k]  # if dim == 1
    +out[i][j][k] = input[i][j][index[i][j][k]]  # if dim == 2
    +
    +
    +

    If input is an n-dimensional tensor with size +\((x_0, x_1..., x_{i-1}, x_i, x_{i+1}, ..., x_{n-1})\) +and dim = i, then index must be an \(n\)-dimensional tensor with +size \((x_0, x_1, ..., x_{i-1}, y, x_{i+1}, ..., x_{n-1})\) where \(y \geq 1\) +and out will have the same size as index.

    +
    +
    Parameters
    +
      +
    • input (Tensor) – the source tensor

    • +
    • dim (int) – the axis along which to index

    • +
    • index (LongTensor) – the indices of elements to gather

    • +
    • out (Tensor, optional) – the destination tensor

    • +
    • sparse_grad (bool,optional) – If True, gradient w.r.t. input will be a sparse tensor.

    • +
    +
    +
    +

    Example:

    +
    >>> t = torch.tensor([[1,2],[3,4]])
    +>>> torch.gather(t, 1, torch.tensor([[0,0],[1,0]]))
    +tensor([[ 1,  1],
    +        [ 4,  3]])
    +
    +
    +
    + +
    +
    +torch.index_select(input, dim, index, out=None) → Tensor
    +

    Returns a new tensor which indexes the input tensor along dimension +dim using the entries in index which is a LongTensor.

    +

    The returned tensor has the same number of dimensions as the original tensor +(input). The dimth dimension has the same size as the length +of index; other dimensions have the same size as in the original tensor.

    +
    +

    Note

    +

    The returned tensor does not use the same storage as the original +tensor. If out has a different shape than expected, we +silently change it to the correct shape, reallocating the underlying +storage if necessary.

    +
    +
    +
    Parameters
    +
      +
    • input (Tensor) – the input tensor

    • +
    • dim (int) – the dimension in which we index

    • +
    • index (LongTensor) – the 1-D tensor containing the indices to index

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> x = torch.randn(3, 4)
    +>>> x
    +tensor([[ 0.1427,  0.0231, -0.5414, -1.0009],
    +        [-0.4664,  0.2647, -0.1228, -1.1068],
    +        [-1.1734, -0.6571,  0.7230, -0.6004]])
    +>>> indices = torch.tensor([0, 2])
    +>>> torch.index_select(x, 0, indices)
    +tensor([[ 0.1427,  0.0231, -0.5414, -1.0009],
    +        [-1.1734, -0.6571,  0.7230, -0.6004]])
    +>>> torch.index_select(x, 1, indices)
    +tensor([[ 0.1427, -0.5414],
    +        [-0.4664, -0.1228],
    +        [-1.1734,  0.7230]])
    +
    +
    +
    + +
    +
    +torch.masked_select(input, mask, out=None) → Tensor
    +

    Returns a new 1-D tensor which indexes the input tensor according to +the binary mask mask which is a ByteTensor.

    +

    The shapes of the mask tensor and the input tensor don’t need +to match, but they must be broadcastable.

    +
    +

    Note

    +

    The returned tensor does not use the same storage +as the original tensor

    +
    +
    +
    Parameters
    +
      +
    • input (Tensor) – the input data

    • +
    • mask (ByteTensor) – the tensor containing the binary mask to index with

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> x = torch.randn(3, 4)
    +>>> x
    +tensor([[ 0.3552, -2.3825, -0.8297,  0.3477],
    +        [-1.2035,  1.2252,  0.5002,  0.6248],
    +        [ 0.1307, -2.0608,  0.1244,  2.0139]])
    +>>> mask = x.ge(0.5)
    +>>> mask
    +tensor([[ 0,  0,  0,  0],
    +        [ 0,  1,  1,  1],
    +        [ 0,  0,  0,  1]], dtype=torch.uint8)
    +>>> torch.masked_select(x, mask)
    +tensor([ 1.2252,  0.5002,  0.6248,  2.0139])
    +
    +
    +
    + +
    +
    +torch.narrow(input, dim, start, length) → Tensor
    +

Returns a new tensor that is a narrowed version of the input tensor. The dimension dim spans the elements from start to start + length. The returned tensor and the input tensor share the same underlying storage.

    +
    +
    Parameters
    +
      +
    • input (Tensor) – the tensor to narrow

    • +
    • dim (int) – the dimension along which to narrow

    • +
• start (int) – the index at which the narrowed dimension starts

    • +
• length (int) – the number of elements in the narrowed dimension

    • +
    +
    +
    +

    Example:

    +
    >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    +>>> torch.narrow(x, 0, 0, 2)
    +tensor([[ 1,  2,  3],
    +        [ 4,  5,  6]])
    +>>> torch.narrow(x, 1, 1, 2)
    +tensor([[ 2,  3],
    +        [ 5,  6],
    +        [ 8,  9]])
    +
    +
    +
    + +
    +
    +torch.nonzero(input, *, out=None, as_tuple=False) → LongTensor or tuple of LongTensors
    +

    When as_tuple is false or unspecified:

    +

    Returns a tensor containing the indices of all non-zero elements of +input. Each row in the result contains the indices of a non-zero +element in input. The result is sorted lexicographically, with +the last index changing the fastest (C-style).

    +

    If input has n dimensions, then the resulting indices tensor +out is of size \((z \times n)\), where \(z\) is the total number of +non-zero elements in the input tensor.

    +

    When as_tuple is true:

    +

Returns a tuple of 1-D tensors, one for each dimension in input, each containing the indices (in that dimension) of all non-zero elements of input.

    +

    If input has n dimensions, then the resulting tuple contains n tensors +of size z, where z is the total number of +non-zero elements in the input tensor.

    +

    As a special case, when input has zero dimensions and a nonzero scalar +value, it is treated as a one-dimensional tensor with one element.

    +
    +
    Parameters
    +
      +
    • input (Tensor) – the input tensor

    • +
    • out (LongTensor, optional) – the output tensor containing indices

    • +
    +
    +
    Returns
    +

    If as_tuple is false, the output +tensor containing indices. If as_tuple is true, one 1-D tensor for +each dimension, containing the indices of each nonzero element along that +dimension.

    +
    +
    Return type
    +

    LongTensor or tuple of LongTensor

    +
    +
    +

    Example:

    +
    >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]))
    +tensor([[ 0],
    +        [ 1],
    +        [ 2],
    +        [ 4]])
    +>>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
    +                                [0.0, 0.4, 0.0, 0.0],
    +                                [0.0, 0.0, 1.2, 0.0],
    +                                [0.0, 0.0, 0.0,-0.4]]))
    +tensor([[ 0,  0],
    +        [ 1,  1],
    +        [ 2,  2],
    +        [ 3,  3]])
    +>>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]), as_tuple=True)
    +(tensor([0, 1, 2, 4]),)
    +>>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
    +                                [0.0, 0.4, 0.0, 0.0],
    +                                [0.0, 0.0, 1.2, 0.0],
    +                                [0.0, 0.0, 0.0,-0.4]]), as_tuple=True)
    +(tensor([0, 1, 2, 3]), tensor([0, 1, 2, 3]))
    +>>> torch.nonzero(torch.tensor(5), as_tuple=True)
    +(tensor([0]),)
    +
    +
    +
    + +
    +
    +torch.reshape(input, shape) → Tensor
    +

    Returns a tensor with the same data and number of elements as input, +but with the specified shape. When possible, the returned tensor will be a view +of input. Otherwise, it will be a copy. Contiguous inputs and inputs +with compatible strides can be reshaped without copying, but you should not +depend on the copying vs. viewing behavior.

    +

    See torch.Tensor.view() on when it is possible to return a view.

    +

    A single dimension may be -1, in which case it’s inferred from the remaining +dimensions and the number of elements in input.

    +
    +
    Parameters
    +
      +
    • input (Tensor) – the tensor to be reshaped

    • +
• shape (tuple of ints) – the new shape

    • +
    +
    +
    +

    Example:

    +
    >>> a = torch.arange(4.)
    +>>> torch.reshape(a, (2, 2))
    +tensor([[ 0.,  1.],
    +        [ 2.,  3.]])
    +>>> b = torch.tensor([[0, 1], [2, 3]])
    +>>> torch.reshape(b, (-1,))
    +tensor([ 0,  1,  2,  3])
    +
    +
    +
    + +
    +
    +torch.split(tensor, split_size_or_sections, dim=0)[source]
    +

    Splits the tensor into chunks.

    +

    If split_size_or_sections is an integer type, then tensor will +be split into equally sized chunks (if possible). Last chunk will be smaller if +the tensor size along the given dimension dim is not divisible by +split_size.

    +

    If split_size_or_sections is a list, then tensor will be split +into len(split_size_or_sections) chunks with sizes in dim according +to split_size_or_sections.

    +
    +
    Parameters
    +
      +
    • tensor (Tensor) – tensor to split.

    • +
    • split_size_or_sections (int) or (list(int)) – size of a single chunk or +list of sizes for each chunk

    • +
    • dim (int) – dimension along which to split the tensor.

    • +
    +
    +
    +
    + +
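
Example (an illustrative sketch):

>>> a = torch.arange(10)
>>> torch.split(a, 4)        # equal chunks of size 4; the last chunk is smaller
(tensor([0, 1, 2, 3]), tensor([4, 5, 6, 7]), tensor([8, 9]))
>>> torch.split(a, [3, 7])   # explicit section sizes along dim 0
(tensor([0, 1, 2]), tensor([3, 4, 5, 6, 7, 8, 9]))
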
    +
    +torch.squeeze(input, dim=None, out=None) → Tensor
    +

    Returns a tensor with all the dimensions of input of size 1 removed.

    +

    For example, if input is of shape: +\((A \times 1 \times B \times C \times 1 \times D)\) then the out tensor +will be of shape: \((A \times B \times C \times D)\).

    +

    When dim is given, a squeeze operation is done only in the given +dimension. If input is of shape: \((A \times 1 \times B)\), +squeeze(input, 0) leaves the tensor unchanged, but squeeze(input, 1) +will squeeze the tensor to the shape \((A \times B)\).

    +
    +

    Note

    +

    The returned tensor shares the storage with the input tensor, +so changing the contents of one will change the contents of the other.

    +
    +
    +
    Parameters
    +
      +
    • input (Tensor) – the input tensor

    • +
    • dim (int, optional) – if given, the input will be squeezed only in +this dimension

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> x = torch.zeros(2, 1, 2, 1, 2)
    +>>> x.size()
    +torch.Size([2, 1, 2, 1, 2])
    +>>> y = torch.squeeze(x)
    +>>> y.size()
    +torch.Size([2, 2, 2])
    +>>> y = torch.squeeze(x, 0)
    +>>> y.size()
    +torch.Size([2, 1, 2, 1, 2])
    +>>> y = torch.squeeze(x, 1)
    +>>> y.size()
    +torch.Size([2, 2, 1, 2])
    +
    +
    +
    + +
    +
    +torch.stack(tensors, dim=0, out=None) → Tensor
    +

    Concatenates sequence of tensors along a new dimension.

    +

    All tensors need to be of the same size.

    +
    +
    Parameters
    +
      +
    • tensors (sequence of Tensors) – sequence of tensors to concatenate

    • +
    • dim (int) – dimension to insert. Has to be between 0 and the number +of dimensions of concatenated tensors (inclusive)

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +
    + +
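
Example (an illustrative sketch):

>>> a = torch.tensor([1, 2])
>>> b = torch.tensor([3, 4])
>>> torch.stack((a, b), dim=0)  # new leading dimension
tensor([[1, 2],
        [3, 4]])
>>> torch.stack((a, b), dim=1)  # new trailing dimension
tensor([[1, 3],
        [2, 4]])
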
    +
    +torch.t(input) → Tensor
    +

Expects input to be a tensor with at most 2 dimensions, and transposes dimensions 0 and 1.

    +

0-D and 1-D tensors are returned as is. For a 2-D tensor, this can be seen as a short-hand function for transpose(input, 0, 1).

    +
    +
    Parameters
    +

    input (Tensor) – the input tensor

    +
    +
    +

    Example:

    +
    >>> x = torch.randn(())
    +>>> x
    +tensor(0.1995)
    +>>> torch.t(x)
    +tensor(0.1995)
    +>>> x = torch.randn(3)
    +>>> x
    +tensor([ 2.4320, -0.4608,  0.7702])
    +>>> torch.t(x)
+tensor([ 2.4320, -0.4608,  0.7702])
    +>>> x = torch.randn(2, 3)
    +>>> x
    +tensor([[ 0.4875,  0.9158, -0.5872],
    +        [ 0.3938, -0.6929,  0.6932]])
    +>>> torch.t(x)
    +tensor([[ 0.4875,  0.3938],
    +        [ 0.9158, -0.6929],
    +        [-0.5872,  0.6932]])
    +
    +
    +
    + +
    +
    +torch.take(input, index) → Tensor
    +

    Returns a new tensor with the elements of input at the given indices. +The input tensor is treated as if it were viewed as a 1-D tensor. The result +takes the same shape as the indices.

    +
    +
    Parameters
    +
      +
    • input (Tensor) – the input tensor

    • +
    • index (LongTensor) – the indices into tensor

    • +
    +
    +
    +

    Example:

    +
    >>> src = torch.tensor([[4, 3, 5],
    +                        [6, 7, 8]])
    +>>> torch.take(src, torch.tensor([0, 2, 5]))
    +tensor([ 4,  5,  8])
    +
    +
    +
    + +
    +
    +torch.transpose(input, dim0, dim1) → Tensor
    +

    Returns a tensor that is a transposed version of input. +The given dimensions dim0 and dim1 are swapped.

    +

The resulting out tensor shares its underlying storage with the input tensor, so changing the content of one would change the content of the other.

    +
    +
    Parameters
    +
      +
    • input (Tensor) – the input tensor

    • +
    • dim0 (int) – the first dimension to be transposed

    • +
    • dim1 (int) – the second dimension to be transposed

    • +
    +
    +
    +

    Example:

    +
    >>> x = torch.randn(2, 3)
    +>>> x
    +tensor([[ 1.0028, -0.9893,  0.5809],
    +        [-0.1669,  0.7299,  0.4942]])
    +>>> torch.transpose(x, 0, 1)
    +tensor([[ 1.0028, -0.1669],
    +        [-0.9893,  0.7299],
    +        [ 0.5809,  0.4942]])
    +
    +
    +
    + +
    +
    +torch.unbind(input, dim=0) → seq
    +

    Removes a tensor dimension.

    +

    Returns a tuple of all slices along a given dimension, already without it.

    +
    +
    Parameters
    +
      +
    • input (Tensor) – the tensor to unbind

    • +
    • dim (int) – dimension to remove

    • +
    +
    +
    +

    Example:

    +
>>> torch.unbind(torch.tensor([[1, 2, 3],
+...                            [4, 5, 6],
+...                            [7, 8, 9]]))
    +(tensor([1, 2, 3]), tensor([4, 5, 6]), tensor([7, 8, 9]))
    +
    +
    +
    + +
    +
    +torch.unsqueeze(input, dim, out=None) → Tensor
    +

    Returns a new tensor with a dimension of size one inserted at the +specified position.

    +

    The returned tensor shares the same underlying data with this tensor.

    +

    A dim value within the range [-input.dim() - 1, input.dim() + 1) +can be used. Negative dim will correspond to unsqueeze() +applied at dim = dim + input.dim() + 1.

    +
    +
    Parameters
    +
      +
    • input (Tensor) – the input tensor

    • +
    • dim (int) – the index at which to insert the singleton dimension

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> x = torch.tensor([1, 2, 3, 4])
    +>>> torch.unsqueeze(x, 0)
    +tensor([[ 1,  2,  3,  4]])
    +>>> torch.unsqueeze(x, 1)
    +tensor([[ 1],
    +        [ 2],
    +        [ 3],
    +        [ 4]])
    +
    +
    +
    + +
    +
    +torch.where()
    +
    +
    +torch.where(condition, input, other) → Tensor
    +
    + +

    Return a tensor of elements selected from either input or other, depending on condition.

    +

    The operation is defined as:

    +
+\[\text{out}_i = \begin{cases} \text{input}_i & \text{if } \text{condition}_i \\ \text{other}_i & \text{otherwise} \end{cases}\]
    +
    +

    Note

    +

    The tensors condition, input, other must be broadcastable.

    +
    +
    +
    Parameters
    +
      +
    • condition (ByteTensor) – When True (nonzero), yield input, otherwise yield other

    • +
    • input (Tensor) – values selected at indices where condition is True

    • +
    • other (Tensor) – values selected at indices where condition is False

    • +
    +
    +
    Returns
    +

    A tensor of shape equal to the broadcasted shape of condition, input, other

    +
    +
    Return type
    +

    Tensor

    +
    +
    +

    Example:

    +
    >>> x = torch.randn(3, 2)
    +>>> y = torch.ones(3, 2)
    +>>> x
    +tensor([[-0.4620,  0.3139],
    +        [ 0.3898, -0.7197],
    +        [ 0.0478, -0.1657]])
    +>>> torch.where(x > 0, x, y)
    +tensor([[ 1.0000,  0.3139],
    +        [ 0.3898,  1.0000],
    +        [ 0.0478,  1.0000]])
    +
    +
    +
    +
    +torch.where(condition) → tuple of LongTensor
    +
    + +

    torch.where(condition) is identical to +torch.nonzero(condition, as_tuple=True).

    +
    +

    Note

    +

    See also torch.nonzero().

    +
    +
    + +
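
Example (an illustrative sketch of the single-argument overload, using a ByteTensor condition as described above):

>>> torch.where(torch.tensor([1, 0, 1], dtype=torch.uint8))
(tensor([0, 2]),)
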
    +
    +
    +

    Generators

    +
    +
    +class torch._C.Generator(device='cpu') → Generator
    +

    Creates and returns a generator object which manages the state of the algorithm that +produces pseudo random numbers. Used as a keyword argument in many In-place random sampling +functions.

    +
    +
    Parameters
    +

    device (torch.device, optional) – the desired device for the generator.

    +
    +
    Returns
    +

A torch.Generator object.

    +
    +
    Return type
    +

    Generator

    +
    +
    +

    Example:

    +
    >>> g_cpu = torch.Generator()
    +>>> g_cuda = torch.Generator(device='cuda')
    +
    +
    +
    +
    +device
    +

    Generator.device -> device

    +

    Gets the current device of the generator.

    +

    Example:

    +
    >>> g_cpu = torch.Generator()
    +>>> g_cpu.device
    +device(type='cpu')
    +
    +
    +
    + +
    +
    +get_state() → Tensor
    +

    Returns the Generator state as a torch.ByteTensor.

    +
    +
    Returns
    +

    A torch.ByteTensor which contains all the necessary bits +to restore a Generator to a specific point in time.

    +
    +
    Return type
    +

    Tensor

    +
    +
    +

    Example:

    +
    >>> g_cpu = torch.Generator()
    +>>> g_cpu.get_state()
    +
    +
    +
    + +
    +
    +initial_seed() → int
    +

    Returns the initial seed for generating random numbers.

    +

    Example:

    +
    >>> g_cpu = torch.Generator()
    +>>> g_cpu.initial_seed()
    +2147483647
    +
    +
    +
    + +
    +
    +manual_seed(seed) → Generator
    +

    Sets the seed for generating random numbers. Returns a torch.Generator object. +It is recommended to set a large seed, i.e. a number that has a good balance of 0 +and 1 bits. Avoid having many 0 bits in the seed.

    +
    +
    Parameters
    +

    seed (int) – The desired seed.

    +
    +
    Returns
    +

A torch.Generator object.

    +
    +
    Return type
    +

    Generator

    +
    +
    +

    Example:

    +
    >>> g_cpu = torch.Generator()
    +>>> g_cpu.manual_seed(2147483647)
    +
    +
    +
    + +
    +
    +seed() → int
    +

    Gets a non-deterministic random number from std::random_device or the current +time and uses it to seed a Generator.

    +

    Example:

    +
    >>> g_cpu = torch.Generator()
    +>>> g_cpu.seed()
    +1516516984916
    +
    +
    +
    + +
    +
    +set_state(new_state) → void
    +

    Sets the Generator state.

    +
    +
    Parameters
    +

    new_state (torch.ByteTensor) – The desired state.

    +
    +
    +

    Example:

    +
    >>> g_cpu = torch.Generator()
    +>>> g_cpu_other = torch.Generator()
    +>>> g_cpu.set_state(g_cpu_other.get_state())
    +
    +
    +
    + +
    + +
    +
    +

    Random sampling

    +
    +
    +torch.seed()[source]
    +

    Sets the seed for generating random numbers to a non-deterministic +random number. Returns a 64 bit number used to seed the RNG.

    +
    + +
    +
    +torch.manual_seed(seed)[source]
    +

    Sets the seed for generating random numbers. Returns a +torch.Generator object.

    +
    +
    Parameters
    +

    seed (int) – The desired seed.

    +
    +
    +
    + +
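
Example (an illustrative sketch; the exact random values are platform-dependent, but re-seeding reproduces them):

>>> _ = torch.manual_seed(42)
>>> a = torch.rand(3)
>>> _ = torch.manual_seed(42)
>>> b = torch.rand(3)
>>> torch.equal(a, b)  # re-seeding replays the same draws
True
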
    +
    +torch.initial_seed()[source]
    +

    Returns the initial seed for generating random numbers as a +Python long.

    +
    + +
    +
    +torch.get_rng_state()[source]
    +

    Returns the random number generator state as a torch.ByteTensor.

    +
    + +
    +
    +torch.set_rng_state(new_state)[source]
    +

    Sets the random number generator state.

    +
    +
    Parameters
    +

    new_state (torch.ByteTensor) – The desired state

    +
    +
    +
    + +
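
Example (an illustrative sketch; saving and restoring the RNG state replays the same draws):

>>> state = torch.get_rng_state()
>>> a = torch.rand(3)
>>> torch.set_rng_state(state)
>>> b = torch.rand(3)
>>> torch.equal(a, b)
True
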
    +
    +torch.default_generator Returns the default CPU torch.Generator
    +
    + +
    +
    +torch.bernoulli(input, *, generator=None, out=None) → Tensor
    +

    Draws binary random numbers (0 or 1) from a Bernoulli distribution.

    +

    The input tensor should be a tensor containing probabilities +to be used for drawing the binary random number. +Hence, all values in input have to be in the range: +\(0 \leq \text{input}_i \leq 1\).

    +

    The \(\text{i}^{th}\) element of the output tensor will draw a +value \(1\) according to the \(\text{i}^{th}\) probability value given +in input.

    +
+\[\text{out}_{i} \sim \mathrm{Bernoulli}(p = \text{input}_{i})\]
    +

The returned out tensor only has values 0 or 1 and is of the same
shape as input.

out can have integral dtype, but input must have floating
point dtype.

Parameters

• input (Tensor) – the input tensor of probability values for the Bernoulli distribution

• generator (torch.Generator, optional) – a pseudorandom number generator for sampling

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.empty(3, 3).uniform_(0, 1)  # generate a uniform random matrix with range [0, 1]
>>> a
tensor([[ 0.1737,  0.0950,  0.3609],
        [ 0.7148,  0.0289,  0.2676],
        [ 0.9456,  0.8937,  0.7202]])
>>> torch.bernoulli(a)
tensor([[ 1.,  0.,  0.],
        [ 0.,  0.,  0.],
        [ 1.,  1.,  1.]])

>>> a = torch.ones(3, 3) # probability of drawing "1" is 1
>>> torch.bernoulli(a)
tensor([[ 1.,  1.,  1.],
        [ 1.,  1.,  1.],
        [ 1.,  1.,  1.]])
>>> a = torch.zeros(3, 3) # probability of drawing "1" is 0
>>> torch.bernoulli(a)
tensor([[ 0.,  0.,  0.],
        [ 0.,  0.,  0.],
        [ 0.,  0.,  0.]])

torch.multinomial(input, num_samples, replacement=False, out=None) → LongTensor

Returns a tensor where each row contains num_samples indices sampled
from the multinomial probability distribution located in the corresponding row
of tensor input.

Note

The rows of input do not need to sum to one (in which case we use
the values as weights), but must be non-negative, finite and have
a non-zero sum.

Indices are ordered from left to right according to when each was sampled
(first samples are placed in first column).

If input is a vector, out is a vector of size num_samples.

If input is a matrix with m rows, out is a matrix of shape
\((m \times \text{num\_samples})\).

If replacement is True, samples are drawn with replacement.

If not, they are drawn without replacement, which means that when a
sample index is drawn for a row, it cannot be drawn again for that row.

Note

When drawn without replacement, num_samples must be lower than
the number of non-zero elements in input (or the min number of non-zero
elements in each row of input if it is a matrix).

Parameters

• input (Tensor) – the input tensor containing probabilities

• num_samples (int) – number of samples to draw

• replacement (bool, optional) – whether to draw with replacement or not

• out (Tensor, optional) – the output tensor

    Example:

>>> weights = torch.tensor([0, 10, 3, 0], dtype=torch.float) # create a tensor of weights
>>> torch.multinomial(weights, 2)
tensor([1, 2])
>>> torch.multinomial(weights, 4) # ERROR!
RuntimeError: invalid argument 2: invalid multinomial distribution (with replacement=False,
not enough non-negative category to sample) at ../aten/src/TH/generic/THTensorRandom.cpp:320
>>> torch.multinomial(weights, 4, replacement=True)
tensor([ 2,  1,  1,  1])

torch.normal()

torch.normal(mean, std, out=None) → Tensor

Returns a tensor of random numbers drawn from separate normal distributions
whose mean and standard deviation are given.

The mean is a tensor with the mean of
each output element’s normal distribution.

The std is a tensor with the standard deviation of
each output element’s normal distribution.

The shapes of mean and std don’t need to match, but the
total number of elements in each tensor needs to be the same.

Note

When the shapes do not match, the shape of mean
is used as the shape for the returned output tensor (see the sketch after the example below).

Parameters

• mean (Tensor) – the tensor of per-element means

• std (Tensor) – the tensor of per-element standard deviations

• out (Tensor, optional) – the output tensor

    Example:

>>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1))
tensor([  1.0425,   3.5672,   2.7969,   4.2925,   4.7229,   6.2134,
          8.0505,   8.1408,   9.0563,  10.0566])
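
A quick check of the shape rule from the note above (a minimal sketch; the shapes differ but both tensors hold six elements):

>>> import torch
>>> mean = torch.zeros(2, 3)
>>> std = torch.ones(6)
>>> torch.normal(mean, std).shape   # output takes the shape of mean
torch.Size([2, 3])
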
torch.normal(mean=0.0, std, out=None) → Tensor

Similar to the function above, but the means are shared among all drawn
elements.

Parameters

• mean (float, optional) – the mean for all distributions

• std (Tensor) – the tensor of per-element standard deviations

• out (Tensor, optional) – the output tensor

    Example:

>>> torch.normal(mean=0.5, std=torch.arange(1., 6.))
tensor([-1.2793, -1.0732, -2.0687,  5.1177, -1.2303])

torch.normal(mean, std=1.0, out=None) → Tensor

Similar to the function above, but the standard deviations are shared among
all drawn elements.

Parameters

• mean (Tensor) – the tensor of per-element means

• std (float, optional) – the standard deviation for all distributions

• out (Tensor, optional) – the output tensor

    Example:

>>> torch.normal(mean=torch.arange(1., 6.))
tensor([ 1.1552,  2.6148,  2.6535,  5.8318,  4.2361])

torch.normal(mean, std, size, *, out=None) → Tensor

Similar to the function above, but the means and standard deviations are shared
among all drawn elements. The resulting tensor has size given by size.

Parameters

• mean (float) – the mean for all distributions

• std (float) – the standard deviation for all distributions

• size (int...) – a sequence of integers defining the shape of the output tensor.

• out (Tensor, optional) – the output tensor

    Example:

>>> torch.normal(2, 3, size=(1, 4))
tensor([[-1.3987, -1.9544,  3.6048,  0.7909]])

torch.rand(*size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor

Returns a tensor filled with random numbers from a uniform distribution
on the interval \([0, 1)\).

The shape of the tensor is defined by the variable argument size.

Parameters

• size (int...) – a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.

• out (Tensor, optional) – the output tensor

• dtype (torch.dtype, optional) – the desired data type of returned tensor.
Default: if None, uses a global default (see torch.set_default_tensor_type()).

• layout (torch.layout, optional) – the desired layout of returned Tensor.
Default: torch.strided.

• device (torch.device, optional) – the desired device of returned tensor.
Default: if None, uses the current device for the default tensor type
(see torch.set_default_tensor_type()). device will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.

• requires_grad (bool, optional) – If autograd should record operations on the
returned tensor. Default: False.

    Example:

>>> torch.rand(4)
tensor([ 0.5204,  0.2503,  0.3525,  0.5673])
>>> torch.rand(2, 3)
tensor([[ 0.8237,  0.5781,  0.6879],
        [ 0.3816,  0.7249,  0.0998]])

torch.rand_like(input, dtype=None, layout=None, device=None, requires_grad=False) → Tensor

Returns a tensor with the same size as input that is filled with
random numbers from a uniform distribution on the interval \([0, 1)\).
torch.rand_like(input) is equivalent to
torch.rand(input.size(), dtype=input.dtype, layout=input.layout, device=input.device).

Parameters

• input (Tensor) – the size of input will determine size of the output tensor

• dtype (torch.dtype, optional) – the desired data type of returned Tensor.
Default: if None, defaults to the dtype of input.

• layout (torch.layout, optional) – the desired layout of returned tensor.
Default: if None, defaults to the layout of input.

• device (torch.device, optional) – the desired device of returned tensor.
Default: if None, defaults to the device of input.

• requires_grad (bool, optional) – If autograd should record operations on the
returned tensor. Default: False.
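
A minimal sketch of typical usage (the sampled values vary; the shape and dtype checks are deterministic):

>>> import torch
>>> x = torch.empty(2, 3, dtype=torch.float64)
>>> r = torch.rand_like(x)          # uniform samples with x's shape, dtype and device
>>> r.shape, r.dtype
(torch.Size([2, 3]), torch.float64)
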
torch.randint(low=0, high, size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor

Returns a tensor filled with random integers generated uniformly
between low (inclusive) and high (exclusive).

The shape of the tensor is defined by the variable argument size.

Parameters

• low (int, optional) – Lowest integer to be drawn from the distribution. Default: 0.

• high (int) – One above the highest integer to be drawn from the distribution.

• size (tuple) – a tuple defining the shape of the output tensor.

• out (Tensor, optional) – the output tensor

• dtype (torch.dtype, optional) – the desired data type of returned tensor.
Default: if None, uses a global default (see torch.set_default_tensor_type()).

• layout (torch.layout, optional) – the desired layout of returned Tensor.
Default: torch.strided.

• device (torch.device, optional) – the desired device of returned tensor.
Default: if None, uses the current device for the default tensor type
(see torch.set_default_tensor_type()). device will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.

• requires_grad (bool, optional) – If autograd should record operations on the
returned tensor. Default: False.

    Example:

>>> torch.randint(3, 5, (3,))
tensor([4, 3, 4])

>>> torch.randint(10, (2, 2))
tensor([[0, 2],
        [5, 5]])

>>> torch.randint(3, 10, (2, 2))
tensor([[4, 5],
        [6, 7]])

torch.randint_like(input, low=0, high, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor

Returns a tensor with the same shape as Tensor input filled with
random integers generated uniformly between low (inclusive) and
high (exclusive).

Parameters

• input (Tensor) – the size of input will determine size of the output tensor

• low (int, optional) – Lowest integer to be drawn from the distribution. Default: 0.

• high (int) – One above the highest integer to be drawn from the distribution.

• dtype (torch.dtype, optional) – the desired data type of returned Tensor.
Default: if None, defaults to the dtype of input.

• layout (torch.layout, optional) – the desired layout of returned tensor.
Default: if None, defaults to the layout of input.

• device (torch.device, optional) – the desired device of returned tensor.
Default: if None, defaults to the device of input.

• requires_grad (bool, optional) – If autograd should record operations on the
returned tensor. Default: False.
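
A minimal sketch of typical usage (the drawn integers vary; the range and shape checks are deterministic):

>>> import torch
>>> x = torch.zeros(2, 2, dtype=torch.int64)
>>> y = torch.randint_like(x, 3, 10)    # integers in [3, 10), shaped like x
>>> y.shape == x.shape and y.dtype == x.dtype
True
>>> bool((y >= 3).all() and (y < 10).all())
True
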
torch.randn(*size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor

Returns a tensor filled with random numbers from a normal distribution
with mean 0 and variance 1 (also called the standard normal
distribution).

\[\text{out}_{i} \sim \mathcal{N}(0, 1)\]

The shape of the tensor is defined by the variable argument size.

Parameters

• size (int...) – a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.

• out (Tensor, optional) – the output tensor

• dtype (torch.dtype, optional) – the desired data type of returned tensor.
Default: if None, uses a global default (see torch.set_default_tensor_type()).

• layout (torch.layout, optional) – the desired layout of returned Tensor.
Default: torch.strided.

• device (torch.device, optional) – the desired device of returned tensor.
Default: if None, uses the current device for the default tensor type
(see torch.set_default_tensor_type()). device will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.

• requires_grad (bool, optional) – If autograd should record operations on the
returned tensor. Default: False.

    Example:

>>> torch.randn(4)
tensor([-2.1436,  0.9966,  2.3426, -0.6366])
>>> torch.randn(2, 3)
tensor([[ 1.5954,  2.8929, -1.0923],
        [ 1.1719, -0.4709, -0.1996]])

torch.randn_like(input, dtype=None, layout=None, device=None, requires_grad=False) → Tensor

Returns a tensor with the same size as input that is filled with
random numbers from a normal distribution with mean 0 and variance 1.
torch.randn_like(input) is equivalent to
torch.randn(input.size(), dtype=input.dtype, layout=input.layout, device=input.device).

Parameters

• input (Tensor) – the size of input will determine size of the output tensor

• dtype (torch.dtype, optional) – the desired data type of returned Tensor.
Default: if None, defaults to the dtype of input.

• layout (torch.layout, optional) – the desired layout of returned tensor.
Default: if None, defaults to the layout of input.

• device (torch.device, optional) – the desired device of returned tensor.
Default: if None, defaults to the device of input.

• requires_grad (bool, optional) – If autograd should record operations on the
returned tensor. Default: False.
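
A minimal sketch of typical usage (the sampled values vary; the shape check is deterministic):

>>> import torch
>>> x = torch.empty(2, 2)
>>> torch.randn_like(x).shape       # standard-normal samples shaped like x
torch.Size([2, 2])
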
torch.randperm(n, out=None, dtype=torch.int64, layout=torch.strided, device=None, requires_grad=False) → LongTensor

Returns a random permutation of integers from 0 to n - 1.

Parameters

• n (int) – the upper bound (exclusive)

• out (Tensor, optional) – the output tensor

• dtype (torch.dtype, optional) – the desired data type of returned tensor.
Default: torch.int64.

• layout (torch.layout, optional) – the desired layout of returned Tensor.
Default: torch.strided.

• device (torch.device, optional) – the desired device of returned tensor.
Default: if None, uses the current device for the default tensor type
(see torch.set_default_tensor_type()). device will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.

• requires_grad (bool, optional) – If autograd should record operations on the
returned tensor. Default: False.

    Example:

>>> torch.randperm(4)
tensor([2, 1, 0, 3])

    In-place random sampling

There are a few more in-place random sampling functions defined on Tensors as well, such as torch.Tensor.uniform_() and torch.Tensor.normal_(). Click through to refer to their documentation; a brief sketch follows below.
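
A brief sketch of in-place sampling (the printed values are illustrative draws, not fixed outputs):

>>> import torch
>>> t = torch.empty(3)
>>> t.uniform_(0, 1)                # fills t in place with U(0, 1) samples
tensor([0.4388, 0.6387, 0.5247])
>>> t.normal_(mean=0.0, std=1.0)    # refills t in place with N(0, 1) samples
tensor([ 0.2940, -0.7228,  1.2458])
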
Quasi-random sampling

class torch.quasirandom.SobolEngine(dimension, scramble=False, seed=None)[source]

The torch.quasirandom.SobolEngine is an engine for generating
(scrambled) Sobol sequences. Sobol sequences are an example of low
discrepancy quasi-random sequences.

This implementation of an engine for Sobol sequences is capable of
sampling sequences up to a maximum dimension of 1111. It uses direction
numbers to generate these sequences, and these numbers have been adapted
from here.

References

• Art B. Owen. Scrambling Sobol and Niederreiter-Xing points.
Journal of Complexity, 14(4):466-489, December 1998.

• I. M. Sobol. The distribution of points in a cube and the accurate
evaluation of integrals.
Zh. Vychisl. Mat. i Mat. Phys., 7:784-802, 1967.

Parameters

• dimension (Int) – The dimensionality of the sequence to be drawn

• scramble (bool, optional) – Setting this to True will produce
scrambled Sobol sequences. Scrambling is
capable of producing better Sobol
sequences. Default: False.

• seed (Int, optional) – This is the seed for the scrambling. The seed
of the random number generator is set to this,
if specified. Default: None

    Examples:

>>> soboleng = torch.quasirandom.SobolEngine(dimension=5)
>>> soboleng.draw(3)
tensor([[0.5000, 0.5000, 0.5000, 0.5000, 0.5000],
        [0.7500, 0.2500, 0.7500, 0.2500, 0.7500],
        [0.2500, 0.7500, 0.2500, 0.7500, 0.2500]])

draw(n=1, out=None, dtype=torch.float32)[source]

Function to draw a sequence of n points from a Sobol sequence.
Note that the samples are dependent on the previous samples. The size
of the result is \((n, dimension)\).

Parameters

• n (Int, optional) – The length of sequence of points to draw.
Default: 1

• out (Tensor, optional) – The output tensor

• dtype (torch.dtype, optional) – the desired data type of the
returned tensor.
Default: torch.float32

fast_forward(n)[source]

Function to fast-forward the state of the SobolEngine by
n steps. This is equivalent to drawing n samples
without using the samples.

Parameters

n (Int) – The number of steps to fast-forward by.
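
A minimal sketch of the equivalence described above (unscrambled Sobol engines are deterministic, so the check is exact):

>>> import torch
>>> e1 = torch.quasirandom.SobolEngine(dimension=2)
>>> e2 = torch.quasirandom.SobolEngine(dimension=2)
>>> _ = e1.draw(3)          # draw and discard three points
>>> _ = e2.fast_forward(3)  # skip the same three points without drawing
>>> torch.equal(e1.draw(1), e2.draw(1))
True
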
reset()[source]

Function to reset the SobolEngine to base state.

Serialization

torch.save(obj, f, pickle_module=<module 'pickle' from '/opt/conda/lib/python3.6/pickle.py'>, pickle_protocol=2)[source]

    Saves an object to a disk file.

See also: Recommended approach for saving a model

Parameters

• obj – saved object

• f – a file-like object (has to implement write and flush) or a string
containing a file name

• pickle_module – module used for pickling metadata and objects

• pickle_protocol – can be specified to override the default protocol

Warning

If you are using Python 2, torch.save() does NOT support StringIO.StringIO
as a valid file-like object. This is because the write method should return
the number of bytes written; StringIO.write() does not do this.

Please use something like io.BytesIO instead.

    Example

>>> # Save to file
>>> x = torch.tensor([0, 1, 2, 3, 4])
>>> torch.save(x, 'tensor.pt')
>>> # Save to io.BytesIO buffer
>>> buffer = io.BytesIO()
>>> torch.save(x, buffer)

torch.load(f, map_location=None, pickle_module=<module 'pickle' from '/opt/conda/lib/python3.6/pickle.py'>, **pickle_load_args)[source]

Loads an object saved with torch.save() from a file.

torch.load() uses Python’s unpickling facilities but treats storages,
which underlie tensors, specially. They are first deserialized on the
CPU and are then moved to the device they were saved from. If this fails
(e.g. because the run time system doesn’t have certain devices), an exception
is raised. However, storages can be dynamically remapped to an alternative
set of devices using the map_location argument.

If map_location is a callable, it will be called once for each serialized
storage with two arguments: storage and location. The storage argument
will be the initial deserialization of the storage, residing on the CPU.
Each serialized storage has a location tag associated with it which
identifies the device it was saved from, and this tag is the second
argument passed to map_location. The builtin location tags are 'cpu'
for CPU tensors and 'cuda:device_id' (e.g. 'cuda:2') for CUDA tensors.
map_location should return either None or a storage. If
map_location returns a storage, it will be used as the final deserialized
object, already moved to the right device. Otherwise, torch.load() will
fall back to the default behavior, as if map_location wasn’t specified.

If map_location is a torch.device object or a string containing
a device tag, it indicates the location where all tensors should be loaded.

Otherwise, if map_location is a dict, it will be used to remap location tags
appearing in the file (keys), to ones that specify where to put the
storages (values).

User extensions can register their own location tags and tagging and
deserialization methods using torch.serialization.register_package().

Parameters

• f – a file-like object (has to implement read(), readline(), tell(), and seek()),
or a string containing a file name

• map_location – a function, torch.device, string or a dict specifying how to remap storage
locations

• pickle_module – module used for unpickling metadata and objects (has to
match the pickle_module used to serialize file)

• pickle_load_args – optional keyword arguments passed over to
pickle_module.load() and pickle_module.Unpickler(), e.g.,
encoding=....

Note

When you call torch.load() on a file which contains GPU tensors, those tensors
will be loaded to GPU by default. You can call torch.load(.., map_location='cpu')
and then load_state_dict() to avoid GPU RAM surge when loading a model checkpoint.

Note

In Python 3, when loading files saved by Python 2, you may encounter
UnicodeDecodeError: 'ascii' codec can't decode byte 0x.... This is
caused by the difference in handling of byte strings between Python 2 and
Python 3. You may use the extra encoding keyword argument to specify how
these objects should be loaded, e.g., encoding='latin1' decodes them
to strings using latin1 encoding, and encoding='bytes' keeps them
as byte arrays which can be decoded later with byte_array.decode(...).

    Example

>>> torch.load('tensors.pt')
# Load all tensors onto the CPU
>>> torch.load('tensors.pt', map_location=torch.device('cpu'))
# Load all tensors onto the CPU, using a function
>>> torch.load('tensors.pt', map_location=lambda storage, loc: storage)
# Load all tensors onto GPU 1
>>> torch.load('tensors.pt', map_location=lambda storage, loc: storage.cuda(1))
# Map tensors from GPU 1 to GPU 0
>>> torch.load('tensors.pt', map_location={'cuda:1':'cuda:0'})
# Load tensor from io.BytesIO object
>>> with open('tensor.pt', 'rb') as f:
        buffer = io.BytesIO(f.read())
>>> torch.load(buffer)
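
And a sketch of the encoding workaround from the second note above (the file name is hypothetical):

>>> torch.load('legacy_py2_checkpoint.pt', encoding='latin1')
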
Parallelism

torch.get_num_threads() → int

Returns the number of threads used for parallelizing CPU operations.

torch.set_num_threads(int)

Sets the number of threads used for parallelizing CPU operations.

WARNING: To ensure that the correct number of threads is used, set_num_threads
must be called before running eager, JIT or autograd code.
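
A minimal sketch, assuming it runs before any eager, JIT or autograd work has started:

>>> import torch
>>> torch.set_num_threads(4)
>>> torch.get_num_threads()
4
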
torch.get_num_interop_threads() → int

Returns the number of threads used for inter-op parallelism on CPU
(e.g. in JIT interpreter).

torch.set_num_interop_threads(int)

Sets the number of threads used for interop parallelism
(e.g. in JIT interpreter) on CPU.

WARNING: Can only be called once and before any inter-op parallel work
is started (e.g. JIT execution).

Locally disabling gradient computation

The context managers torch.no_grad(), torch.enable_grad(), and
torch.set_grad_enabled() are helpful for locally disabling and enabling
gradient computation. See Locally disabling gradient computation for more details on
their usage. These context managers are thread local, so they won’t
work if you send work to another thread using the threading
module, etc.

Examples:

>>> x = torch.zeros(1, requires_grad=True)
>>> with torch.no_grad():
...     y = x * 2
>>> y.requires_grad
False

>>> is_train = False
>>> with torch.set_grad_enabled(is_train):
...     y = x * 2
>>> y.requires_grad
False

>>> torch.set_grad_enabled(True)  # this can also be used as a function
>>> y = x * 2
>>> y.requires_grad
True

>>> torch.set_grad_enabled(False)
>>> y = x * 2
>>> y.requires_grad
False

    Math operations

Pointwise Ops

torch.abs(input, out=None) → Tensor

    Computes the element-wise absolute value of the given input tensor.

\[\text{out}_{i} = |\text{input}_{i}|\]

Parameters

• input (Tensor) – the input tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> torch.abs(torch.tensor([-1, -2, 3]))
tensor([ 1,  2,  3])

torch.acos(input, out=None) → Tensor

Returns a new tensor with the arccosine of the elements of input.

\[\text{out}_{i} = \cos^{-1}(\text{input}_{i})\]

Parameters

• input (Tensor) – the input tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(4)
>>> a
tensor([ 0.3348, -0.5889,  0.2005, -0.1584])
>>> torch.acos(a)
tensor([ 1.2294,  2.2004,  1.3690,  1.7298])

torch.add()

torch.add(input, other, out=None)

Adds the scalar other to each element of the input input
and returns a new resulting tensor.

\[\text{out} = \text{input} + \text{other}\]

If input is of type FloatTensor or DoubleTensor, other must be
a real number, otherwise it should be an integer.

Parameters

• input (Tensor) – the input tensor

• other (Number) – the number to be added to each element of input

Keyword Arguments

out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(4)
>>> a
tensor([ 0.0202,  1.0985,  1.3506, -0.6056])
>>> torch.add(a, 20)
tensor([ 20.0202,  21.0985,  21.3506,  19.3944])

torch.add(input, alpha=1, other, out=None)

Each element of the tensor other is multiplied by the scalar
alpha and added to each element of the tensor input.
The resulting tensor is returned.

The shapes of input and other must be
broadcastable.

\[\text{out} = \text{input} + \text{alpha} \times \text{other}\]

If other is of type FloatTensor or DoubleTensor, alpha must be
a real number, otherwise it should be an integer.

Parameters

• input (Tensor) – the first input tensor

• alpha (Number) – the scalar multiplier for other

• other (Tensor) – the second input tensor

Keyword Arguments

out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(4)
>>> a
tensor([-0.9732, -0.3497,  0.6245,  0.4022])
>>> b = torch.randn(4, 1)
>>> b
tensor([[ 0.3743],
        [-1.7724],
        [-0.5811],
        [-0.8017]])
>>> torch.add(a, 10, b)
tensor([[  2.7695,   3.3930,   4.3672,   4.1450],
        [-18.6971, -18.0736, -17.0994, -17.3216],
        [ -6.7845,  -6.1610,  -5.1868,  -5.4090],
        [ -8.9902,  -8.3667,  -7.3925,  -7.6147]])

torch.addcdiv(input, value=1, tensor1, tensor2, out=None) → Tensor

Performs the element-wise division of tensor1 by tensor2,
multiplies the result by the scalar value and adds it to input.

\[\text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i}\]

The shapes of input, tensor1, and tensor2 must be
broadcastable.

For inputs of type FloatTensor or DoubleTensor, value must be
a real number, otherwise an integer.

Parameters

• input (Tensor) – the tensor to be added

• value (Number, optional) – multiplier for \(\text{tensor1} / \text{tensor2}\)

• tensor1 (Tensor) – the numerator tensor

• tensor2 (Tensor) – the denominator tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> t = torch.randn(1, 3)
>>> t1 = torch.randn(3, 1)
>>> t2 = torch.randn(1, 3)
>>> torch.addcdiv(t, 0.1, t1, t2)
tensor([[-0.2312, -3.6496,  0.1312],
        [-1.0428,  3.4292, -0.1030],
        [-0.5369, -0.9829,  0.0430]])

torch.addcmul(input, value=1, tensor1, tensor2, out=None) → Tensor

Performs the element-wise multiplication of tensor1
by tensor2, multiplies the result by the scalar value
and adds it to input.

\[\text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i\]

The shapes of input, tensor1, and tensor2 must be
broadcastable.

For inputs of type FloatTensor or DoubleTensor, value must be
a real number, otherwise an integer.

Parameters

• input (Tensor) – the tensor to be added

• value (Number, optional) – multiplier for \(\text{tensor1} \times \text{tensor2}\)

• tensor1 (Tensor) – the tensor to be multiplied

• tensor2 (Tensor) – the tensor to be multiplied

• out (Tensor, optional) – the output tensor

    Example:

>>> t = torch.randn(1, 3)
>>> t1 = torch.randn(3, 1)
>>> t2 = torch.randn(1, 3)
>>> torch.addcmul(t, 0.1, t1, t2)
tensor([[-0.8635, -0.6391,  1.6174],
        [-0.7617, -0.5879,  1.7388],
        [-0.8353, -0.6249,  1.6511]])

torch.asin(input, out=None) → Tensor

Returns a new tensor with the arcsine of the elements of input.

\[\text{out}_{i} = \sin^{-1}(\text{input}_{i})\]

Parameters

• input (Tensor) – the input tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(4)
>>> a
tensor([-0.5962,  1.4985, -0.4396,  1.4525])
>>> torch.asin(a)
tensor([-0.6387,     nan, -0.4552,     nan])

torch.atan(input, out=None) → Tensor

Returns a new tensor with the arctangent of the elements of input.

\[\text{out}_{i} = \tan^{-1}(\text{input}_{i})\]

Parameters

• input (Tensor) – the input tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(4)
>>> a
tensor([ 0.2341,  0.2539, -0.6256, -0.6448])
>>> torch.atan(a)
tensor([ 0.2299,  0.2487, -0.5591, -0.5727])

torch.atan2(input, other, out=None) → Tensor

Returns a new tensor with the arctangent of the elements of input
and other.

The shapes of input and other must be
broadcastable.

Parameters

• input (Tensor) – the first input tensor

• other (Tensor) – the second input tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(4)
>>> a
tensor([ 0.9041,  0.0196, -0.3108, -2.4423])
>>> torch.atan2(a, torch.randn(4))
tensor([ 0.9833,  0.0811, -1.9743, -1.4151])

torch.ceil(input, out=None) → Tensor

Returns a new tensor with the ceil of the elements of input,
the smallest integer greater than or equal to each element.

\[\text{out}_{i} = \left\lceil \text{input}_{i} \right\rceil\]

Parameters

• input (Tensor) – the input tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(4)
>>> a
tensor([-0.6341, -1.4208, -1.0900,  0.5826])
>>> torch.ceil(a)
tensor([-0., -1., -1.,  1.])

torch.clamp(input, min, max, out=None) → Tensor

Clamp all elements in input into the range [ min, max ] and return
a resulting tensor:

\[y_i = \begin{cases}
    \text{min} & \text{if } x_i < \text{min} \\
    x_i & \text{if } \text{min} \leq x_i \leq \text{max} \\
    \text{max} & \text{if } x_i > \text{max}
\end{cases}\]

If input is of type FloatTensor or DoubleTensor, args min
and max must be real numbers, otherwise they should be integers.

Parameters

• input (Tensor) – the input tensor

• min (Number) – lower-bound of the range to be clamped to

• max (Number) – upper-bound of the range to be clamped to

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(4)
>>> a
tensor([-1.7120,  0.1734, -0.0478, -0.0922])
>>> torch.clamp(a, min=-0.5, max=0.5)
tensor([-0.5000,  0.1734, -0.0478, -0.0922])

torch.clamp(input, *, min, out=None) → Tensor

Clamps all elements in input to be larger than or equal to min.

If input is of type FloatTensor or DoubleTensor, value
should be a real number, otherwise it should be an integer.

Parameters

• input (Tensor) – the input tensor

• value (Number) – minimal value of each element in the output

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(4)
>>> a
tensor([-0.0299, -2.3184,  2.1593, -0.8883])
>>> torch.clamp(a, min=0.5)
tensor([ 0.5000,  0.5000,  2.1593,  0.5000])

torch.clamp(input, *, max, out=None) → Tensor

Clamps all elements in input to be smaller than or equal to max.

If input is of type FloatTensor or DoubleTensor, value
should be a real number, otherwise it should be an integer.

Parameters

• input (Tensor) – the input tensor

• value (Number) – maximal value of each element in the output

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(4)
>>> a
tensor([ 0.7753, -0.4702, -0.4599,  1.1899])
>>> torch.clamp(a, max=0.5)
tensor([ 0.5000, -0.4702, -0.4599,  0.5000])

torch.cos(input, out=None) → Tensor

Returns a new tensor with the cosine of the elements of input.

\[\text{out}_{i} = \cos(\text{input}_{i})\]

Parameters

• input (Tensor) – the input tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(4)
>>> a
tensor([ 1.4309,  1.2706, -0.8562,  0.9796])
>>> torch.cos(a)
tensor([ 0.1395,  0.2957,  0.6553,  0.5574])

torch.cosh(input, out=None) → Tensor

Returns a new tensor with the hyperbolic cosine of the elements of
input.

\[\text{out}_{i} = \cosh(\text{input}_{i})\]

Parameters

• input (Tensor) – the input tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(4)
>>> a
tensor([ 0.1632,  1.1835, -0.6979, -0.7325])
>>> torch.cosh(a)
tensor([ 1.0133,  1.7860,  1.2536,  1.2805])

torch.div()

torch.div(input, other, out=None) → Tensor

Divides each element of the input input with the scalar other
and returns a new resulting tensor.

\[\text{out}_i = \frac{\text{input}_i}{\text{other}}\]

If input is of type FloatTensor or DoubleTensor, other
should be a real number, otherwise it should be an integer.

Parameters

• input (Tensor) – the input tensor

• other (Number) – the number to divide each element of input by

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(5)
>>> a
tensor([ 0.3810,  1.2774, -0.2972, -0.3719,  0.4637])
>>> torch.div(a, 0.5)
tensor([ 0.7620,  2.5548, -0.5944, -0.7439,  0.9275])

torch.div(input, other, out=None) → Tensor

Each element of the tensor input is divided by each element
of the tensor other. The resulting tensor is returned. The shapes of
input and other must be
broadcastable.

\[\text{out}_i = \frac{\text{input}_i}{\text{other}_i}\]

Parameters

• input (Tensor) – the numerator tensor

• other (Tensor) – the denominator tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(4, 4)
>>> a
tensor([[-0.3711, -1.9353, -0.4605, -0.2917],
        [ 0.1815, -1.0111,  0.9805, -1.5923],
        [ 0.1062,  1.4581,  0.7759, -1.2344],
        [-0.1830, -0.0313,  1.1908, -1.4757]])
>>> b = torch.randn(4)
>>> b
tensor([ 0.8032,  0.2930, -0.8113, -0.2308])
>>> torch.div(a, b)
tensor([[-0.4620, -6.6051,  0.5676,  1.2637],
        [ 0.2260, -3.4507, -1.2086,  6.8988],
        [ 0.1322,  4.9764, -0.9564,  5.3480],
        [-0.2278, -0.1068, -1.4678,  6.3936]])

torch.digamma(input, out=None) → Tensor

Computes the logarithmic derivative of the gamma function on input.

\[\psi(x) = \frac{d}{dx} \ln\left(\Gamma\left(x\right)\right) = \frac{\Gamma'(x)}{\Gamma(x)}\]

Parameters

input (Tensor) – the tensor to compute the digamma function on

    Example:

>>> a = torch.tensor([1, 0.5])
>>> torch.digamma(a)
tensor([-0.5772, -1.9635])

torch.erf(input, out=None) → Tensor

Computes the error function of each element. The error function is defined as follows:

\[\mathrm{erf}(x) = \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt\]

Parameters

• input (Tensor) – the input tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> torch.erf(torch.tensor([0, -1., 10.]))
tensor([ 0.0000, -0.8427,  1.0000])

torch.erfc(input, out=None) → Tensor

Computes the complementary error function of each element of input.
The complementary error function is defined as follows:

\[\mathrm{erfc}(x) = 1 - \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt\]

Parameters

• input (Tensor) – the input tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> torch.erfc(torch.tensor([0, -1., 10.]))
tensor([ 1.0000, 1.8427,  0.0000])

torch.erfinv(input, out=None) → Tensor

Computes the inverse error function of each element of input.
The inverse error function is defined in the range \((-1, 1)\) as:

\[\mathrm{erfinv}(\mathrm{erf}(x)) = x\]

Parameters

• input (Tensor) – the input tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> torch.erfinv(torch.tensor([0, 0.5, -1.]))
tensor([ 0.0000,  0.4769,    -inf])

torch.exp(input, out=None) → Tensor

Returns a new tensor with the exponential of the elements
of the input tensor input.

\[y_{i} = e^{x_{i}}\]

Parameters

• input (Tensor) – the input tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> torch.exp(torch.tensor([0, math.log(2.)]))
tensor([ 1.,  2.])

torch.expm1(input, out=None) → Tensor

Returns a new tensor with the exponential of the elements minus 1
of input.

\[y_{i} = e^{x_{i}} - 1\]

Parameters

• input (Tensor) – the input tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> torch.expm1(torch.tensor([0, math.log(2.)]))
tensor([ 0.,  1.])

torch.floor(input, out=None) → Tensor

Returns a new tensor with the floor of the elements of input,
the largest integer less than or equal to each element.

\[\text{out}_{i} = \left\lfloor \text{input}_{i} \right\rfloor\]

Parameters

• input (Tensor) – the input tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(4)
>>> a
tensor([-0.8166,  1.5308, -0.2530, -0.2091])
>>> torch.floor(a)
tensor([-1.,  1., -1., -1.])

torch.fmod(input, other, out=None) → Tensor

Computes the element-wise remainder of division.

The dividend and divisor may contain both integer and floating point
numbers. The remainder has the same sign as the dividend input.

When other is a tensor, the shapes of input and
other must be broadcastable.

Parameters

• input (Tensor) – the dividend

• other (Tensor or float) – the divisor, which may be either a number or a tensor of the same shape as the dividend

• out (Tensor, optional) – the output tensor

    Example:

>>> torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
tensor([-1., -0., -1.,  1.,  0.,  1.])
>>> torch.fmod(torch.tensor([1., 2, 3, 4, 5]), 1.5)
tensor([ 1.0000,  0.5000,  0.0000,  1.0000,  0.5000])

torch.frac(input, out=None) → Tensor

Computes the fractional portion of each element in input.

\[\text{out}_{i} = \text{input}_{i} - \left\lfloor \text{input}_{i} \right\rfloor\]

    Example:

>>> torch.frac(torch.tensor([1, 2.5, -3.2]))
tensor([ 0.0000,  0.5000, -0.2000])

torch.lerp(input, end, weight, out=None)

Does a linear interpolation of two tensors start (given by input) and end based
on a scalar or tensor weight and returns the resulting out tensor.

\[\text{out}_i = \text{start}_i + \text{weight}_i \times (\text{end}_i - \text{start}_i)\]

The shapes of start and end must be
broadcastable. If weight is a tensor, then
the shapes of weight, start, and end must be broadcastable.

Parameters

• input (Tensor) – the tensor with the starting points

• end (Tensor) – the tensor with the ending points

• weight (float or tensor) – the weight for the interpolation formula

• out (Tensor, optional) – the output tensor

    Example:

>>> start = torch.arange(1., 5.)
>>> end = torch.empty(4).fill_(10)
>>> start
tensor([ 1.,  2.,  3.,  4.])
>>> end
tensor([ 10.,  10.,  10.,  10.])
>>> torch.lerp(start, end, 0.5)
tensor([ 5.5000,  6.0000,  6.5000,  7.0000])
>>> torch.lerp(start, end, torch.full_like(start, 0.5))
tensor([ 5.5000,  6.0000,  6.5000,  7.0000])

torch.log(input, out=None) → Tensor

Returns a new tensor with the natural logarithm of the elements
of input.

\[y_{i} = \log_{e} (x_{i})\]

Parameters

• input (Tensor) – the input tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(5)
>>> a
tensor([-0.7168, -0.5471, -0.8933, -1.4428, -0.1190])
>>> torch.log(a)
tensor([ nan,  nan,  nan,  nan,  nan])

torch.log10(input, out=None) → Tensor

Returns a new tensor with the logarithm to the base 10 of the elements
of input.

\[y_{i} = \log_{10} (x_{i})\]

Parameters

• input (Tensor) – the input tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.rand(5)
>>> a
tensor([ 0.5224,  0.9354,  0.7257,  0.1301,  0.2251])
>>> torch.log10(a)
tensor([-0.2820, -0.0290, -0.1392, -0.8857, -0.6476])

torch.log1p(input, out=None) → Tensor

Returns a new tensor with the natural logarithm of (1 + input).

\[y_i = \log_{e} (x_i + 1)\]

Note

This function is more accurate than torch.log() for small
values of input.

Parameters

• input (Tensor) – the input tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(5)
>>> a
tensor([-1.0090, -0.9923,  1.0249, -0.5372,  0.2492])
>>> torch.log1p(a)
tensor([    nan, -4.8653,  0.7055, -0.7705,  0.2225])

torch.log2(input, out=None) → Tensor

Returns a new tensor with the logarithm to the base 2 of the elements
of input.

\[y_{i} = \log_{2} (x_{i})\]

Parameters

• input (Tensor) – the input tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.rand(5)
>>> a
tensor([ 0.8419,  0.8003,  0.9971,  0.5287,  0.0490])
>>> torch.log2(a)
tensor([-0.2483, -0.3213, -0.0042, -0.9196, -4.3504])

torch.mul()

torch.mul(input, other, out=None)

Multiplies each element of the input input with the scalar
other and returns a new resulting tensor.

\[\text{out}_i = \text{other} \times \text{input}_i\]

If input is of type FloatTensor or DoubleTensor, other
should be a real number, otherwise it should be an integer.

Parameters

• input (Tensor) – the input tensor

• other (Number) – the number to be multiplied to each element of input

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(3)
>>> a
tensor([ 0.2015, -0.4255,  2.6087])
>>> torch.mul(a, 100)
tensor([  20.1494,  -42.5491,  260.8663])

torch.mul(input, other, out=None)

Each element of the tensor input is multiplied by the corresponding
element of the Tensor other. The resulting tensor is returned.

The shapes of input and other must be
broadcastable.

\[\text{out}_i = \text{input}_i \times \text{other}_i\]

Parameters

• input (Tensor) – the first multiplicand tensor

• other (Tensor) – the second multiplicand tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(4, 1)
>>> a
tensor([[ 1.1207],
        [-0.3137],
        [ 0.0700],
        [ 0.8378]])
>>> b = torch.randn(1, 4)
>>> b
tensor([[ 0.5146,  0.1216, -0.5244,  2.2382]])
>>> torch.mul(a, b)
tensor([[ 0.5767,  0.1363, -0.5877,  2.5083],
        [-0.1614, -0.0382,  0.1645, -0.7021],
        [ 0.0360,  0.0085, -0.0367,  0.1567],
        [ 0.4312,  0.1019, -0.4394,  1.8753]])

torch.mvlgamma(input, p) → Tensor

Computes the multivariate log-gamma function ([reference]) with dimension \(p\) element-wise, given by

\[\log(\Gamma_{p}(a)) = C + \displaystyle \sum_{i=1}^{p} \log\left(\Gamma\left(a - \frac{i - 1}{2}\right)\right)\]

where \(C = \log(\pi) \times \frac{p (p - 1)}{4}\) and \(\Gamma(\cdot)\) is the Gamma function.

If any of the elements are less than or equal to \(\frac{p - 1}{2}\), then an error
is thrown.

Parameters

• input (Tensor) – the tensor to compute the multivariate log-gamma function

• p (int) – the number of dimensions

    Example:

>>> a = torch.empty(2, 3).uniform_(1, 2)
>>> a
tensor([[1.6835, 1.8474, 1.1929],
        [1.0475, 1.7162, 1.4180]])
>>> torch.mvlgamma(a, 2)
tensor([[0.3928, 0.4007, 0.7586],
        [1.0311, 0.3901, 0.5049]])

torch.neg(input, out=None) → Tensor

Returns a new tensor with the negative of the elements of input.

\[\text{out} = -1 \times \text{input}\]

Parameters

• input (Tensor) – the input tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(5)
>>> a
tensor([ 0.0090, -0.2262, -0.0682, -0.2866,  0.3940])
>>> torch.neg(a)
tensor([-0.0090,  0.2262,  0.0682,  0.2866, -0.3940])

torch.pow()

torch.pow(input, exponent, out=None) → Tensor

Takes the power of each element in input with exponent and
returns a tensor with the result.

exponent can be either a single float number or a Tensor
with the same number of elements as input.

When exponent is a scalar value, the operation applied is:

\[\text{out}_i = x_i ^ \text{exponent}\]

When exponent is a tensor, the operation applied is:

\[\text{out}_i = x_i ^ {\text{exponent}_i}\]

When exponent is a tensor, the shapes of input
and exponent must be broadcastable.

Parameters

• input (Tensor) – the input tensor

• exponent (float or tensor) – the exponent value

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(4)
>>> a
tensor([ 0.4331,  1.2475,  0.6834, -0.2791])
>>> torch.pow(a, 2)
tensor([ 0.1875,  1.5561,  0.4670,  0.0779])
>>> exp = torch.arange(1., 5.)

>>> a = torch.arange(1., 5.)
>>> a
tensor([ 1.,  2.,  3.,  4.])
>>> exp
tensor([ 1.,  2.,  3.,  4.])
>>> torch.pow(a, exp)
tensor([   1.,    4.,   27.,  256.])

torch.pow(self, exponent, out=None) → Tensor

self is a scalar float value, and exponent is a tensor.
The returned tensor out is of the same shape as exponent.

The operation applied is:

\[\text{out}_i = \text{self} ^ {\text{exponent}_i}\]

Parameters

• self (float) – the scalar base value for the power operation

• exponent (Tensor) – the exponent tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> exp = torch.arange(1., 5.)
>>> base = 2
>>> torch.pow(base, exp)
tensor([  2.,   4.,   8.,  16.])

torch.reciprocal(input, out=None) → Tensor

Returns a new tensor with the reciprocal of the elements of input.

\[\text{out}_{i} = \frac{1}{\text{input}_{i}}\]

Parameters

• input (Tensor) – the input tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(4)
>>> a
tensor([-0.4595, -2.1219, -1.4314,  0.7298])
>>> torch.reciprocal(a)
tensor([-2.1763, -0.4713, -0.6986,  1.3702])

torch.remainder(input, other, out=None) → Tensor

Computes the element-wise remainder of division.

The divisor and dividend may contain both integer and floating point
numbers. The remainder has the same sign as the divisor.

When other is a tensor, the shapes of input and
other must be broadcastable.

Parameters

• input (Tensor) – the dividend

• other (Tensor or float) – the divisor that may be either a number or a
Tensor of the same shape as the dividend

• out (Tensor, optional) – the output tensor

    Example:

>>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
tensor([ 1.,  0.,  1.,  1.,  0.,  1.])
>>> torch.remainder(torch.tensor([1., 2, 3, 4, 5]), 1.5)
tensor([ 1.0000,  0.5000,  0.0000,  1.0000,  0.5000])

See also

torch.fmod(), which computes the element-wise remainder of
division equivalently to the C library function fmod().

torch.round(input, out=None) → Tensor

Returns a new tensor with each of the elements of input rounded
to the closest integer.

Parameters

• input (Tensor) – the input tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(4)
>>> a
tensor([ 0.9920,  0.6077,  0.9734, -1.0362])
>>> torch.round(a)
tensor([ 1.,  1.,  1., -1.])

torch.rsqrt(input, out=None) → Tensor

Returns a new tensor with the reciprocal of the square-root of each of
the elements of input.

\[\text{out}_{i} = \frac{1}{\sqrt{\text{input}_{i}}}\]

Parameters

• input (Tensor) – the input tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(4)
>>> a
tensor([-0.0370,  0.2970,  1.5420, -0.9105])
>>> torch.rsqrt(a)
tensor([    nan,  1.8351,  0.8053,     nan])

torch.sigmoid(input, out=None) → Tensor

Returns a new tensor with the sigmoid of the elements of input.

\[\text{out}_{i} = \frac{1}{1 + e^{-\text{input}_{i}}}\]

Parameters

• input (Tensor) – the input tensor

• out (Tensor, optional) – the output tensor

    Example:

>>> a = torch.randn(4)
>>> a
tensor([ 0.9213,  1.0887, -0.8858, -1.7683])
>>> torch.sigmoid(a)
tensor([ 0.7153,  0.7481,  0.2920,  0.1458])

torch.sign(input, out=None) → Tensor

Returns a new tensor with the signs of the elements of input.

\[\text{out}_{i} = \operatorname{sgn}(\text{input}_{i})\]

Parameters

• input (Tensor) – the input tensor

• out (Tensor, optional) – the output tensor

    Example:

    +
    >>> a = torch.tensor([0.7, -1.2, 0., 2.3])
    +>>> a
    +tensor([ 0.7000, -1.2000,  0.0000,  2.3000])
    +>>> torch.sign(a)
    +tensor([ 1., -1.,  0.,  1.])
    +
    +
    +
    + +
    +
    +torch.sin(input, out=None) → Tensor
    +

    Returns a new tensor with the sine of the elements of input.

    +
    +\[\text{out}_{i} = \sin(\text{input}_{i}) + +\]
    +
    +
    Parameters
    +
      +
    • input (Tensor) – the input tensor

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> a = torch.randn(4)
    +>>> a
    +tensor([-0.5461,  0.1347, -2.7266, -0.2746])
    +>>> torch.sin(a)
    +tensor([-0.5194,  0.1343, -0.4032, -0.2711])
    +
    +
    +
    + +
    +
    +torch.sinh(input, out=None) → Tensor
    +

    Returns a new tensor with the hyperbolic sine of the elements of +input.

    +
    +\[\text{out}_{i} = \sinh(\text{input}_{i}) + +\]
    +
    +
    Parameters
    +
      +
    • input (Tensor) – the input tensor

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> a = torch.randn(4)
    +>>> a
    +tensor([ 0.5380, -0.8632, -0.1265,  0.9399])
    +>>> torch.sinh(a)
    +tensor([ 0.5644, -0.9744, -0.1268,  1.0845])
    +
    +
    +
    + +
    +
    +torch.sqrt(input, out=None) → Tensor
    +

    Returns a new tensor with the square-root of the elements of input.

    +
    +\[\text{out}_{i} = \sqrt{\text{input}_{i}} + +\]
    +
    +
    Parameters
    +
      +
    • input (Tensor) – the input tensor

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> a = torch.randn(4)
    +>>> a
    +tensor([-2.0755,  1.0226,  0.0831,  0.4806])
    +>>> torch.sqrt(a)
    +tensor([    nan,  1.0112,  0.2883,  0.6933])
    +
    +
    +
    + +
    +
    +torch.tan(input, out=None) → Tensor
    +

    Returns a new tensor with the tangent of the elements of input.

    +
    +\[\text{out}_{i} = \tan(\text{input}_{i}) + +\]
    +
    +
    Parameters
    +
      +
    • input (Tensor) – the input tensor

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> a = torch.randn(4)
    +>>> a
    +tensor([-1.2027, -1.7687,  0.4412, -1.3856])
    +>>> torch.tan(a)
    +tensor([-2.5930,  4.9859,  0.4722, -5.3366])
    +
    +
    +
    + +
    +
    +torch.tanh(input, out=None) → Tensor
    +

    Returns a new tensor with the hyperbolic tangent of the elements +of input.

    +
    +\[\text{out}_{i} = \tanh(\text{input}_{i}) + +\]
    +
    +
    Parameters
    +
      +
    • input (Tensor) – the input tensor

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> a = torch.randn(4)
    +>>> a
    +tensor([ 0.8986, -0.7279,  1.1745,  0.2611])
    +>>> torch.tanh(a)
    +tensor([ 0.7156, -0.6218,  0.8257,  0.2553])
    +
    +
    +
    + +
    +
    +torch.trunc(input, out=None) → Tensor
    +

    Returns a new tensor with the truncated integer values of +the elements of input.

    +
    +
    Parameters
    +
      +
    • input (Tensor) – the input tensor

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> a = torch.randn(4)
    +>>> a
    +tensor([ 3.4742,  0.5466, -0.8008, -0.9079])
    +>>> torch.trunc(a)
    +tensor([ 3.,  0., -0., -0.])
    +
    +
    +
    + +
Reduction Ops

torch.argmax()

torch.argmax(input) → LongTensor

Returns the indices of the maximum value of all elements in the input tensor.

This is the second value returned by torch.max(). See its documentation for the exact semantics of this method.

Parameters

input (Tensor) – the input tensor

Example:

>>> a = torch.randn(4, 4)
>>> a
tensor([[ 1.3398,  0.2663, -0.2686,  0.2450],
        [-0.7401, -0.8805, -0.3402, -1.1936],
        [ 0.4907, -1.3948, -1.0691, -0.3132],
        [-1.6092,  0.5419, -0.2993,  0.3195]])
>>> torch.argmax(a)
tensor(0)

torch.argmax(input, dim, keepdim=False) → LongTensor

Returns the indices of the maximum values of a tensor across a dimension.

This is the second value returned by torch.max(). See its documentation for the exact semantics of this method.

Parameters

• input (Tensor) – the input tensor

• dim (int) – the dimension to reduce. If None, the argmax of the flattened input is returned.

• keepdim (bool) – whether the output tensors have dim retained or not. Ignored if dim=None.

Example:

>>> a = torch.randn(4, 4)
>>> a
tensor([[ 1.3398,  0.2663, -0.2686,  0.2450],
        [-0.7401, -0.8805, -0.3402, -1.1936],
        [ 0.4907, -1.3948, -1.0691, -0.3132],
        [-1.6092,  0.5419, -0.2993,  0.3195]])
>>> torch.argmax(a, dim=1)
tensor([ 0,  2,  0,  1])

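As a sanity check (a sketch, not part of the original reference), the relationship to torch.max() can be verified directly, since torch.max(input, dim) returns a (values, indices) namedtuple:

>>> a = torch.randn(4, 4)
>>> # argmax along a dimension agrees with the indices field of torch.max
>>> torch.equal(torch.argmax(a, dim=1), torch.max(a, dim=1).indices)
True
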
torch.argmin()

torch.argmin(input) → LongTensor

Returns the indices of the minimum value of all elements in the input tensor.

This is the second value returned by torch.min(). See its documentation for the exact semantics of this method.

Parameters

input (Tensor) – the input tensor

Example:

>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.1139,  0.2254, -0.1381,  0.3687],
        [ 1.0100, -1.1975, -0.0102, -0.4732],
        [-0.9240,  0.1207, -0.7506, -1.0213],
        [ 1.7809, -1.2960,  0.9384,  0.1438]])
>>> torch.argmin(a)
tensor(13)

torch.argmin(input, dim, keepdim=False, out=None) → LongTensor

Returns the indices of the minimum values of a tensor across a dimension.

This is the second value returned by torch.min(). See its documentation for the exact semantics of this method.

Parameters

• input (Tensor) – the input tensor

• dim (int) – the dimension to reduce. If None, the argmin of the flattened input is returned.

• keepdim (bool) – whether the output tensors have dim retained or not. Ignored if dim=None.

Example:

>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.1139,  0.2254, -0.1381,  0.3687],
        [ 1.0100, -1.1975, -0.0102, -0.4732],
        [-0.9240,  0.1207, -0.7506, -1.0213],
        [ 1.7809, -1.2960,  0.9384,  0.1438]])
>>> torch.argmin(a, dim=1)
tensor([ 2,  1,  3,  1])

torch.cumprod(input, dim, out=None, dtype=None) → Tensor

Returns the cumulative product of elements of input in the dimension dim.

For example, if input is a vector of size N, the result will also be a vector of size N, with elements

\[y_i = x_1 \times x_2 \times x_3 \times \dots \times x_i\]

Parameters

• input (Tensor) – the input tensor

• dim (int) – the dimension to do the operation over

• dtype (torch.dtype, optional) – the desired data type of returned tensor. If specified, the input tensor is cast to dtype before the operation is performed. This is useful for preventing data type overflows. Default: None.

• out (Tensor, optional) – the output tensor

Example:

>>> a = torch.randn(10)
>>> a
tensor([ 0.6001,  0.2069, -0.1919,  0.9792,  0.6727,  1.0062,  0.4126,
        -0.2129, -0.4206,  0.1968])
>>> torch.cumprod(a, dim=0)
tensor([ 0.6001,  0.1241, -0.0238, -0.0233, -0.0157, -0.0158, -0.0065,
         0.0014, -0.0006, -0.0001])

>>> a[5] = 0.0
>>> torch.cumprod(a, dim=0)
tensor([ 0.6001,  0.1241, -0.0238, -0.0233, -0.0157, -0.0000, -0.0000,
         0.0000, -0.0000, -0.0000])

torch.cumsum(input, dim, out=None, dtype=None) → Tensor

Returns the cumulative sum of elements of input in the dimension dim.

For example, if input is a vector of size N, the result will also be a vector of size N, with elements

\[y_i = x_1 + x_2 + x_3 + \dots + x_i\]

Parameters

• input (Tensor) – the input tensor

• dim (int) – the dimension to do the operation over

• dtype (torch.dtype, optional) – the desired data type of returned tensor. If specified, the input tensor is cast to dtype before the operation is performed. This is useful for preventing data type overflows. Default: None.

• out (Tensor, optional) – the output tensor

Example:

>>> a = torch.randn(10)
>>> a
tensor([-0.8286, -0.4890,  0.5155,  0.8443,  0.1865, -0.1752, -2.0595,
         0.1850, -1.1571, -0.4243])
>>> torch.cumsum(a, dim=0)
tensor([-0.8286, -1.3175, -0.8020,  0.0423,  0.2289,  0.0537, -2.0058,
        -1.8209, -2.9780, -3.4022])

torch.dist(input, other, p=2) → Tensor

Returns the p-norm of (input - other).

The shapes of input and other must be broadcastable.

Parameters

• input (Tensor) – the input tensor

• other (Tensor) – the right-hand-side input tensor

• p (float, optional) – the norm to be computed

Example:

>>> x = torch.randn(4)
>>> x
tensor([-1.5393, -0.8675,  0.5916,  1.6321])
>>> y = torch.randn(4)
>>> y
tensor([ 0.0967, -1.0511,  0.6295,  0.8360])
>>> torch.dist(x, y, 3.5)
tensor(1.6727)
>>> torch.dist(x, y, 3)
tensor(1.6973)
>>> torch.dist(x, y, 0)
tensor(inf)
>>> torch.dist(x, y, 1)
tensor(2.6537)

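For positive p, the result agrees with computing the p-norm of the difference by hand; a minimal check (illustrative, reusing x and y from above):

>>> p = 3.5
>>> # dist(x, y, p) == (sum over |x - y|**p) ** (1/p)
>>> torch.allclose(torch.dist(x, y, p), (x - y).abs().pow(p).sum().pow(1 / p))
True
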
torch.logsumexp(input, dim, keepdim=False, out=None)

Returns the log of summed exponentials of each row of the input tensor in the given dimension dim. The computation is numerically stabilized.

For summation index \(j\) given by dim and other indices \(i\), the result is

\[\text{logsumexp}(x)_{i} = \log \sum_j \exp(x_{ij})\]

If keepdim is True, the output tensor is of the same size as input except in the dimension(s) dim where it is of size 1. Otherwise, dim is squeezed (see torch.squeeze()), resulting in the output tensor having 1 (or len(dim)) fewer dimension(s).

Parameters

• input (Tensor) – the input tensor

• dim (int or tuple of python:ints) – the dimension or dimensions to reduce

• keepdim (bool) – whether the output tensor has dim retained or not

• out (Tensor, optional) – the output tensor

Example:

>>> a = torch.randn(3, 3)
>>> torch.logsumexp(a, 1)
tensor([ 0.8442,  1.4322,  0.8711])

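The numerical stabilization mentioned above is the usual max-subtraction trick; a minimal sketch of an equivalent computation (reusing a from the example above):

>>> m, _ = a.max(dim=1, keepdim=True)        # per-row maximum
>>> stable = m.squeeze(1) + (a - m).exp().sum(dim=1).log()
>>> torch.allclose(stable, torch.logsumexp(a, 1))
True
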
torch.mean()

torch.mean(input) → Tensor

Returns the mean value of all elements in the input tensor.

Parameters

input (Tensor) – the input tensor

Example:

>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.2294, -0.5481,  1.3288]])
>>> torch.mean(a)
tensor(0.3367)

torch.mean(input, dim, keepdim=False, out=None) → Tensor

Returns the mean value of each row of the input tensor in the given dimension dim. If dim is a list of dimensions, reduce over all of them.

If keepdim is True, the output tensor is of the same size as input except in the dimension(s) dim where it is of size 1. Otherwise, dim is squeezed (see torch.squeeze()), resulting in the output tensor having 1 (or len(dim)) fewer dimension(s).

Parameters

• input (Tensor) – the input tensor

• dim (int or tuple of python:ints) – the dimension or dimensions to reduce

• keepdim (bool) – whether the output tensor has dim retained or not

• out (Tensor) – the output tensor

Example:

>>> a = torch.randn(4, 4)
>>> a
tensor([[-0.3841,  0.6320,  0.4254, -0.7384],
        [-0.9644,  1.0131, -0.6549, -1.4279],
        [-0.2951, -1.3350, -0.7694,  0.5600],
        [ 1.0842, -0.9580,  0.3623,  0.2343]])
>>> torch.mean(a, 1)
tensor([-0.0163, -0.5085, -0.4599,  0.1807])
>>> torch.mean(a, 1, True)
tensor([[-0.0163],
        [-0.5085],
        [-0.4599],
        [ 0.1807]])

torch.median()

torch.median(input) → Tensor

Returns the median value of all elements in the input tensor.

Parameters

input (Tensor) – the input tensor

Example:

>>> a = torch.randn(1, 3)
>>> a
tensor([[ 1.5219, -1.5212,  0.2202]])
>>> torch.median(a)
tensor(0.2202)

torch.median(input, dim=-1, keepdim=False, values=None, indices=None) -> (Tensor, LongTensor)

Returns a namedtuple (values, indices) where values is the median value of each row of the input tensor in the given dimension dim, and indices is the index location of each median value found.

By default, dim is the last dimension of the input tensor.

If keepdim is True, the output tensors are of the same size as input except in the dimension dim where they are of size 1. Otherwise, dim is squeezed (see torch.squeeze()), resulting in the output tensors having 1 fewer dimension than input.

Parameters

• input (Tensor) – the input tensor

• dim (int) – the dimension to reduce

• keepdim (bool) – whether the output tensors have dim retained or not

• values (Tensor, optional) – the output tensor

• indices (Tensor, optional) – the output index tensor

Example:

>>> a = torch.randn(4, 5)
>>> a
tensor([[ 0.2505, -0.3982, -0.9948,  0.3518, -1.3131],
        [ 0.3180, -0.6993,  1.0436,  0.0438,  0.2270],
        [-0.2751,  0.7303,  0.2192,  0.3321,  0.2488],
        [ 1.0778, -1.9510,  0.7048,  0.4742, -0.7125]])
>>> torch.median(a, 1)
torch.return_types.median(values=tensor([-0.3982,  0.2270,  0.2488,  0.4742]), indices=tensor([1, 4, 4, 3]))

torch.mode(input, dim=-1, keepdim=False, values=None, indices=None) -> (Tensor, LongTensor)

Returns a namedtuple (values, indices) where values is the mode value of each row of the input tensor in the given dimension dim, i.e. a value which appears most often in that row, and indices is the index location of each mode value found.

By default, dim is the last dimension of the input tensor.

If keepdim is True, the output tensors are of the same size as input except in the dimension dim where they are of size 1. Otherwise, dim is squeezed (see torch.squeeze()), resulting in the output tensors having 1 fewer dimension than input.

Note

This function is not defined for torch.cuda.Tensor yet.

Parameters

• input (Tensor) – the input tensor

• dim (int) – the dimension to reduce

• keepdim (bool) – whether the output tensors have dim retained or not

• values (Tensor, optional) – the output tensor

• indices (Tensor, optional) – the output index tensor

Example:

>>> a = torch.randint(10, (5,))
>>> a
tensor([6, 5, 1, 0, 2])
>>> b = a + (torch.randn(50, 1) * 5).long()
>>> torch.mode(b, 0)
torch.return_types.mode(values=tensor([6, 5, 1, 0, 2]), indices=tensor([2, 2, 2, 2, 2]))

torch.norm(input, p='fro', dim=None, keepdim=False, out=None, dtype=None)[source]

Returns the matrix norm or vector norm of a given tensor.

Parameters

• input (Tensor) – the input tensor

• p (int, float, inf, -inf, 'fro', 'nuc', optional) –

  the order of norm. Default: 'fro'
  The following norms can be calculated:

  ord     matrix norm                     vector norm
  None    Frobenius norm                  2-norm
  'fro'   Frobenius norm                  –
  'nuc'   nuclear norm                    –
  Other   as vec norm when dim is None    sum(abs(x)**ord)**(1./ord)

• dim (int, 2-tuple of python:ints, 2-list of python:ints, optional) – If it is an int, vector norm will be calculated; if it is a 2-tuple of ints, matrix norm will be calculated. If the value is None, matrix norm will be calculated when the input tensor has exactly two dimensions, and vector norm will be calculated when the input tensor has only one dimension. If the input tensor has more than two dimensions, the vector norm will be applied to the last dimension.

• keepdim (bool, optional) – whether the output tensors have dim retained or not. Ignored if dim = None and out = None. Default: False

• out (Tensor, optional) – the output tensor. Ignored if dim = None and out = None.

• dtype (torch.dtype, optional) – the desired data type of the returned tensor. If specified, the input tensor is cast to dtype while performing the operation. Default: None.

Example:

>>> import torch
>>> a = torch.arange(9, dtype= torch.float) - 4
>>> b = a.reshape((3, 3))
>>> torch.norm(a)
tensor(7.7460)
>>> torch.norm(b)
tensor(7.7460)
>>> torch.norm(a, float('inf'))
tensor(4.)
>>> torch.norm(b, float('inf'))
tensor(4.)
>>> c = torch.tensor([[ 1, 2, 3],[-1, 1, 4]] , dtype= torch.float)
>>> torch.norm(c, dim=0)
tensor([1.4142, 2.2361, 5.0000])
>>> torch.norm(c, dim=1)
tensor([3.7417, 4.2426])
>>> torch.norm(c, p=1, dim=1)
tensor([6., 6.])
>>> d = torch.arange(8, dtype= torch.float).reshape(2,2,2)
>>> torch.norm(d, dim=(1,2))
tensor([ 3.7417, 11.2250])
>>> torch.norm(d[0, :, :]), torch.norm(d[1, :, :])
(tensor(3.7417), tensor(11.2250))

torch.prod()

torch.prod(input, dtype=None) → Tensor

Returns the product of all elements in the input tensor.

Parameters

• input (Tensor) – the input tensor

• dtype (torch.dtype, optional) – the desired data type of returned tensor. If specified, the input tensor is cast to dtype before the operation is performed. This is useful for preventing data type overflows. Default: None.

Example:

>>> a = torch.randn(1, 3)
>>> a
tensor([[-0.8020,  0.5428, -1.5854]])
>>> torch.prod(a)
tensor(0.6902)

torch.prod(input, dim, keepdim=False, dtype=None) → Tensor

Returns the product of each row of the input tensor in the given dimension dim.

If keepdim is True, the output tensor is of the same size as input except in the dimension dim where it is of size 1. Otherwise, dim is squeezed (see torch.squeeze()), resulting in the output tensor having 1 fewer dimension than input.

Parameters

• input (Tensor) – the input tensor

• dim (int) – the dimension to reduce

• keepdim (bool) – whether the output tensor has dim retained or not

• dtype (torch.dtype, optional) – the desired data type of returned tensor. If specified, the input tensor is cast to dtype before the operation is performed. This is useful for preventing data type overflows. Default: None.

Example:

>>> a = torch.randn(4, 2)
>>> a
tensor([[ 0.5261, -0.3837],
        [ 1.1857, -0.2498],
        [-1.1646,  0.0705],
        [ 1.1131, -1.0629]])
>>> torch.prod(a, 1)
tensor([-0.2018, -0.2962, -0.0821, -1.1831])

torch.std()

torch.std(input, unbiased=True) → Tensor

Returns the standard-deviation of all elements in the input tensor.

If unbiased is False, then the standard-deviation will be calculated via the biased estimator. Otherwise, Bessel’s correction will be used.

Parameters

• input (Tensor) – the input tensor

• unbiased (bool) – whether to use the unbiased estimation or not

Example:

>>> a = torch.randn(1, 3)
>>> a
tensor([[-0.8166, -1.3802, -0.3560]])
>>> torch.std(a)
tensor(0.5130)

torch.std(input, dim, keepdim=False, unbiased=True, out=None) → Tensor

Returns the standard-deviation of each row of the input tensor in the dimension dim. If dim is a list of dimensions, reduce over all of them.

If keepdim is True, the output tensor is of the same size as input except in the dimension(s) dim where it is of size 1. Otherwise, dim is squeezed (see torch.squeeze()), resulting in the output tensor having 1 (or len(dim)) fewer dimension(s).

If unbiased is False, then the standard-deviation will be calculated via the biased estimator. Otherwise, Bessel’s correction will be used.

Parameters

• input (Tensor) – the input tensor

• dim (int or tuple of python:ints) – the dimension or dimensions to reduce

• keepdim (bool) – whether the output tensor has dim retained or not

• unbiased (bool) – whether to use the unbiased estimation or not

• out (Tensor, optional) – the output tensor

Example:

>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.2035,  1.2959,  1.8101, -0.4644],
        [ 1.5027, -0.3270,  0.5905,  0.6538],
        [-1.5745,  1.3330, -0.5596, -0.6548],
        [ 0.1264, -0.5080,  1.6420,  0.1992]])
>>> torch.std(a, dim=1)
tensor([ 1.0311,  0.7477,  1.2204,  0.9087])

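To make the unbiased/biased distinction concrete: the biased estimator divides by N rather than N - 1. A quick check (illustrative):

>>> a = torch.randn(1, 3)
>>> biased = ((a - a.mean()) ** 2).mean().sqrt()  # divide by N, then take the root
>>> torch.allclose(torch.std(a, unbiased=False), biased)
True
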
torch.std_mean()

torch.std_mean(input, unbiased=True) -> (Tensor, Tensor)

Returns the standard-deviation and mean of all elements in the input tensor.

If unbiased is False, then the standard-deviation will be calculated via the biased estimator. Otherwise, Bessel’s correction will be used.

Parameters

• input (Tensor) – the input tensor

• unbiased (bool) – whether to use the unbiased estimation or not

Example:

>>> a = torch.randn(1, 3)
>>> a
tensor([[0.3364, 0.3591, 0.9462]])
>>> torch.std_mean(a)
(tensor(0.3457), tensor(0.5472))

torch.std_mean(input, dim, keepdim=False, unbiased=True) -> (Tensor, Tensor)

Returns the standard-deviation and mean of each row of the input tensor in the dimension dim. If dim is a list of dimensions, reduce over all of them.

If keepdim is True, the output tensor is of the same size as input except in the dimension(s) dim where it is of size 1. Otherwise, dim is squeezed (see torch.squeeze()), resulting in the output tensor having 1 (or len(dim)) fewer dimension(s).

If unbiased is False, then the standard-deviation will be calculated via the biased estimator. Otherwise, Bessel’s correction will be used.

Parameters

• input (Tensor) – the input tensor

• dim (int or tuple of python:ints) – the dimension or dimensions to reduce

• keepdim (bool) – whether the output tensor has dim retained or not

• unbiased (bool) – whether to use the unbiased estimation or not

Example:

>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.5648, -0.5984, -1.2676, -1.4471],
        [ 0.9267,  1.0612,  1.1050, -0.6014],
        [ 0.0154,  1.9301,  0.0125, -1.0904],
        [-1.9711, -0.7748, -1.3840,  0.5067]])
>>> torch.std_mean(a, 1)
(tensor([0.9110, 0.8197, 1.2552, 1.0608]), tensor([-0.6871,  0.6229,  0.2169, -0.9058]))

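torch.std_mean() computes both statistics in a single call; its outputs match calling torch.std() and torch.mean() separately (a sketch):

>>> a = torch.randn(4, 4)
>>> s, m = torch.std_mean(a, 1)
>>> torch.allclose(s, torch.std(a, 1)) and torch.allclose(m, torch.mean(a, 1))
True
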
torch.sum()

torch.sum(input, dtype=None) → Tensor

Returns the sum of all elements in the input tensor.

Parameters

• input (Tensor) – the input tensor

• dtype (torch.dtype, optional) – the desired data type of returned tensor. If specified, the input tensor is cast to dtype before the operation is performed. This is useful for preventing data type overflows. Default: None.

Example:

>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.1133, -0.9567,  0.2958]])
>>> torch.sum(a)
tensor(-0.5475)

torch.sum(input, dim, keepdim=False, dtype=None) → Tensor

Returns the sum of each row of the input tensor in the given dimension dim. If dim is a list of dimensions, reduce over all of them.

If keepdim is True, the output tensor is of the same size as input except in the dimension(s) dim where it is of size 1. Otherwise, dim is squeezed (see torch.squeeze()), resulting in the output tensor having 1 (or len(dim)) fewer dimension(s).

Parameters

• input (Tensor) – the input tensor

• dim (int or tuple of python:ints) – the dimension or dimensions to reduce

• keepdim (bool) – whether the output tensor has dim retained or not

• dtype (torch.dtype, optional) – the desired data type of returned tensor. If specified, the input tensor is cast to dtype before the operation is performed. This is useful for preventing data type overflows. Default: None.

Example:

>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.0569, -0.2475,  0.0737, -0.3429],
        [-0.2993,  0.9138,  0.9337, -1.6864],
        [ 0.1132,  0.7892, -0.1003,  0.5688],
        [ 0.3637, -0.9906, -0.4752, -1.5197]])
>>> torch.sum(a, 1)
tensor([-0.4598, -0.1381,  1.3708, -2.6217])
>>> b = torch.arange(4 * 5 * 6).view(4, 5, 6)
>>> torch.sum(b, (2, 1))
tensor([  435.,  1335.,  2235.,  3135.])

torch.unique(input, sorted=True, return_inverse=False, return_counts=False, dim=None)[source]

Returns the unique elements of the input tensor.

Parameters

• input (Tensor) – the input tensor

• sorted (bool) – Whether to sort the unique elements in ascending order before returning as output.

• return_inverse (bool) – Whether to also return the indices for where elements in the original input ended up in the returned unique list.

• return_counts (bool) – Whether to also return the counts for each unique element.

• dim (int) – the dimension to apply unique. If None, the unique of the flattened input is returned. Default: None

Returns

A tensor or a tuple of tensors containing

• output (Tensor): the output list of unique scalar elements.

• inverse_indices (Tensor): (optional) if return_inverse is True, there will be an additional returned tensor (same shape as input) representing the indices for where elements in the original input map to in the output; otherwise, this function will only return a single tensor.

• counts (Tensor): (optional) if return_counts is True, there will be an additional returned tensor (same shape as output or output.size(dim), if dim was specified) representing the number of occurrences for each unique value or tensor.

Return type

(Tensor, Tensor (optional), Tensor (optional))

Example:

>>> output = torch.unique(torch.tensor([1, 3, 2, 3], dtype=torch.long))
>>> output
tensor([ 2,  3,  1])

>>> output, inverse_indices = torch.unique(
        torch.tensor([1, 3, 2, 3], dtype=torch.long), sorted=True, return_inverse=True)
>>> output
tensor([ 1,  2,  3])
>>> inverse_indices
tensor([ 0,  2,  1,  2])

>>> output, inverse_indices = torch.unique(
        torch.tensor([[1, 3], [2, 3]], dtype=torch.long), sorted=True, return_inverse=True)
>>> output
tensor([ 1,  2,  3])
>>> inverse_indices
tensor([[ 0,  2],
        [ 1,  2]])

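return_counts works the same way as return_inverse; a short example on the same deterministic input (illustrative output):

>>> output, counts = torch.unique(
        torch.tensor([1, 3, 2, 3], dtype=torch.long), sorted=True, return_counts=True)
>>> output
tensor([ 1,  2,  3])
>>> counts
tensor([ 1,  1,  2])
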
torch.unique_consecutive(input, return_inverse=False, return_counts=False, dim=None)[source]

Eliminates all but the first element from every consecutive group of equivalent elements.

Note

This function is different from torch.unique() in the sense that this function only eliminates consecutive duplicate values. Its semantics are similar to std::unique in C++.

Parameters

• input (Tensor) – the input tensor

• return_inverse (bool) – Whether to also return the indices for where elements in the original input ended up in the returned unique list.

• return_counts (bool) – Whether to also return the counts for each unique element.

• dim (int) – the dimension to apply unique. If None, the unique of the flattened input is returned. Default: None

Returns

A tensor or a tuple of tensors containing

• output (Tensor): the output list of unique scalar elements.

• inverse_indices (Tensor): (optional) if return_inverse is True, there will be an additional returned tensor (same shape as input) representing the indices for where elements in the original input map to in the output; otherwise, this function will only return a single tensor.

• counts (Tensor): (optional) if return_counts is True, there will be an additional returned tensor (same shape as output or output.size(dim), if dim was specified) representing the number of occurrences for each unique value or tensor.

Return type

(Tensor, Tensor (optional), Tensor (optional))

Example:

>>> x = torch.tensor([1, 1, 2, 2, 3, 1, 1, 2])
>>> output = torch.unique_consecutive(x)
>>> output
tensor([1, 2, 3, 1, 2])

>>> output, inverse_indices = torch.unique_consecutive(x, return_inverse=True)
>>> output
tensor([1, 2, 3, 1, 2])
>>> inverse_indices
tensor([0, 0, 1, 1, 2, 3, 3, 4])

>>> output, counts = torch.unique_consecutive(x, return_counts=True)
>>> output
tensor([1, 2, 3, 1, 2])
>>> counts
tensor([2, 2, 1, 2, 1])

torch.var()

torch.var(input, unbiased=True) → Tensor

Returns the variance of all elements in the input tensor.

If unbiased is False, then the variance will be calculated via the biased estimator. Otherwise, Bessel’s correction will be used.

Parameters

• input (Tensor) – the input tensor

• unbiased (bool) – whether to use the unbiased estimation or not

Example:

>>> a = torch.randn(1, 3)
>>> a
tensor([[-0.3425, -1.2636, -0.4864]])
>>> torch.var(a)
tensor(0.2455)

torch.var(input, dim, keepdim=False, unbiased=True, out=None) → Tensor

Returns the variance of each row of the input tensor in the given dimension dim.

If keepdim is True, the output tensor is of the same size as input except in the dimension(s) dim where it is of size 1. Otherwise, dim is squeezed (see torch.squeeze()), resulting in the output tensor having 1 (or len(dim)) fewer dimension(s).

If unbiased is False, then the variance will be calculated via the biased estimator. Otherwise, Bessel’s correction will be used.

Parameters

• input (Tensor) – the input tensor

• dim (int or tuple of python:ints) – the dimension or dimensions to reduce

• keepdim (bool) – whether the output tensor has dim retained or not

• unbiased (bool) – whether to use the unbiased estimation or not

• out (Tensor, optional) – the output tensor

Example:

>>> a = torch.randn(4, 4)
>>> a
tensor([[-0.3567,  1.7385, -1.3042,  0.7423],
        [ 1.3436, -0.1015, -0.9834, -0.8438],
        [ 0.6056,  0.1089, -0.3112, -1.4085],
        [-0.7700,  0.6074, -0.1469,  0.7777]])
>>> torch.var(a, 1)
tensor([ 1.7444,  1.1363,  0.7356,  0.5112])

torch.var_mean()

torch.var_mean(input, unbiased=True) -> (Tensor, Tensor)

Returns the variance and mean of all elements in the input tensor.

If unbiased is False, then the variance will be calculated via the biased estimator. Otherwise, Bessel’s correction will be used.

Parameters

• input (Tensor) – the input tensor

• unbiased (bool) – whether to use the unbiased estimation or not

Example:

>>> a = torch.randn(1, 3)
>>> a
tensor([[0.0146, 0.4258, 0.2211]])
>>> torch.var_mean(a)
(tensor(0.0423), tensor(0.2205))

torch.var_mean(input, dim, keepdim=False, unbiased=True) -> (Tensor, Tensor)

Returns the variance and mean of each row of the input tensor in the given dimension dim.

If keepdim is True, the output tensor is of the same size as input except in the dimension(s) dim where it is of size 1. Otherwise, dim is squeezed (see torch.squeeze()), resulting in the output tensor having 1 (or len(dim)) fewer dimension(s).

If unbiased is False, then the variance will be calculated via the biased estimator. Otherwise, Bessel’s correction will be used.

Parameters

• input (Tensor) – the input tensor

• dim (int or tuple of python:ints) – the dimension or dimensions to reduce

• keepdim (bool) – whether the output tensor has dim retained or not

• unbiased (bool) – whether to use the unbiased estimation or not

Example:

>>> a = torch.randn(4, 4)
>>> a
tensor([[-1.5650,  2.0415, -0.1024, -0.5790],
        [ 0.2325, -2.6145, -1.6428, -0.3537],
        [-0.2159, -1.1069,  1.2882, -1.3265],
        [-0.6706, -1.5893,  0.6827,  1.6727]])
>>> torch.var_mean(a, 1)
(tensor([2.3174, 1.6403, 1.4092, 2.0791]), tensor([-0.0512, -1.0946, -0.3403,  0.0239]))

Comparison Ops

torch.allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) → bool

This function checks if all input and other satisfy the condition:

\[\lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert\]

elementwise, for all elements of input and other. The behaviour of this function is analogous to numpy.allclose.

Parameters

• input (Tensor) – first tensor to compare

• other (Tensor) – second tensor to compare

• atol (float, optional) – absolute tolerance. Default: 1e-08

• rtol (float, optional) – relative tolerance. Default: 1e-05

• equal_nan (bool, optional) – if True, then two NaN s will be compared as equal. Default: False

Example:

>>> torch.allclose(torch.tensor([10000., 1e-07]), torch.tensor([10000.1, 1e-08]))
False
>>> torch.allclose(torch.tensor([10000., 1e-08]), torch.tensor([10000.1, 1e-09]))
True
>>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]))
False
>>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]), equal_nan=True)
True

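The condition above can also be evaluated elementwise by hand; a minimal sketch reproducing the first comparison:

>>> input, other = torch.tensor([10000., 1e-07]), torch.tensor([10000.1, 1e-08])
>>> close = (input - other).abs() <= 1e-08 + 1e-05 * other.abs()
>>> bool(close.all())  # same verdict as torch.allclose(input, other)
False
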
torch.argsort(input, dim=-1, descending=False, out=None) → LongTensor

Returns the indices that sort a tensor along a given dimension in ascending order by value.

This is the second value returned by torch.sort(). See its documentation for the exact semantics of this method.

Parameters

• input (Tensor) – the input tensor

• dim (int, optional) – the dimension to sort along

• descending (bool, optional) – controls the sorting order (ascending or descending)

Example:

>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.0785,  1.5267, -0.8521,  0.4065],
        [ 0.1598,  0.0788, -0.0745, -1.2700],
        [ 1.2208,  1.0722, -0.7064,  1.2564],
        [ 0.0669, -0.2318, -0.8229, -0.9280]])
>>> torch.argsort(a, dim=1)
tensor([[2, 0, 3, 1],
        [3, 2, 1, 0],
        [2, 1, 0, 3],
        [3, 2, 1, 0]])

torch.eq(input, other, out=None) → Tensor

Computes element-wise equality.

The second argument can be a number or a tensor whose shape is broadcastable with the first argument.

Parameters

• input (Tensor) – the tensor to compare

• other (Tensor or float) – the tensor or value to compare

• out (Tensor, optional) – the output tensor. Must be a ByteTensor

Returns

A torch.ByteTensor containing a 1 at each location where comparison is true

Return type

Tensor

Example:

>>> torch.eq(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[ 1,  0],
        [ 0,  1]], dtype=torch.uint8)

torch.equal(input, other) → bool

True if two tensors have the same size and elements, False otherwise.

Example:

>>> torch.equal(torch.tensor([1, 2]), torch.tensor([1, 2]))
True

torch.ge(input, other, out=None) → Tensor

Computes \(\text{input} \geq \text{other}\) element-wise.

The second argument can be a number or a tensor whose shape is broadcastable with the first argument.

Parameters

• input (Tensor) – the tensor to compare

• other (Tensor or float) – the tensor or value to compare

• out (Tensor, optional) – the output tensor that must be a ByteTensor

Returns

A torch.ByteTensor containing a 1 at each location where comparison is true

Return type

Tensor

Example:

>>> torch.ge(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[ 1,  1],
        [ 0,  1]], dtype=torch.uint8)

torch.gt(input, other, out=None) → Tensor

Computes \(\text{input} > \text{other}\) element-wise.

The second argument can be a number or a tensor whose shape is broadcastable with the first argument.

Parameters

• input (Tensor) – the tensor to compare

• other (Tensor or float) – the tensor or value to compare

• out (Tensor, optional) – the output tensor that must be a ByteTensor

Returns

A torch.ByteTensor containing a 1 at each location where comparison is true

Return type

Tensor

Example:

>>> torch.gt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[ 0,  1],
        [ 0,  0]], dtype=torch.uint8)

torch.isfinite(tensor)[source]

Returns a new tensor with boolean elements representing if each element is finite or not.

Parameters

tensor (Tensor) – A tensor to check

Returns

A torch.ByteTensor containing a 1 at each location of finite elements and 0 otherwise

Return type

Tensor

Example:

>>> torch.isfinite(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
tensor([ 1,  0,  1,  0,  0], dtype=torch.uint8)

torch.isinf(tensor)[source]

Returns a new tensor with boolean elements representing if each element is +/-INF or not.

Parameters

tensor (Tensor) – A tensor to check

Returns

A torch.ByteTensor containing a 1 at each location of +/-INF elements and 0 otherwise

Return type

Tensor

Example:

>>> torch.isinf(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
tensor([ 0,  1,  0,  1,  0], dtype=torch.uint8)

torch.isnan(input)

Returns a new tensor with boolean elements representing if each element is NaN or not.

Parameters

input (Tensor) – A tensor to check

Returns

A torch.ByteTensor containing a 1 at each location of NaN elements.

Return type

Tensor

Example:

>>> torch.isnan(torch.tensor([1, float('nan'), 2]))
tensor([ 0,  1,  0], dtype=torch.uint8)

torch.kthvalue(input, k, dim=None, keepdim=False, out=None) -> (Tensor, LongTensor)

Returns a namedtuple (values, indices) where values is the k th smallest element of each row of the input tensor in the given dimension dim, and indices is the index location of each element found.

If dim is not given, the last dimension of the input is chosen.

If keepdim is True, both the values and indices tensors are the same size as input, except in the dimension dim where they are of size 1. Otherwise, dim is squeezed (see torch.squeeze()), resulting in both the values and indices tensors having 1 fewer dimension than the input tensor.

Parameters

• input (Tensor) – the input tensor

• k (int) – k for the k-th smallest element

• dim (int, optional) – the dimension to find the kth value along

• keepdim (bool) – whether the output tensors have dim retained or not

• out (tuple, optional) – the output tuple of (Tensor, LongTensor) can be optionally given to be used as output buffers

Example:

>>> x = torch.arange(1., 6.)
>>> x
tensor([ 1.,  2.,  3.,  4.,  5.])
>>> torch.kthvalue(x, 4)
torch.return_types.kthvalue(values=tensor(4.), indices=tensor(3))

>>> x=torch.arange(1.,7.).resize_(2,3)
>>> x
tensor([[ 1.,  2.,  3.],
        [ 4.,  5.,  6.]])
>>> torch.kthvalue(x, 2, 0, True)
torch.return_types.kthvalue(values=tensor([[4., 5., 6.]]), indices=tensor([[1, 1, 1]]))

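Equivalently (a sketch), the k-th smallest value is what sorting in ascending order and indexing at position k - 1 yields:

>>> x = torch.arange(1., 6.)
>>> torch.sort(x).values[4 - 1]  # same value as torch.kthvalue(x, 4)
tensor(4.)
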
torch.le(input, other, out=None) → Tensor

Computes \(\text{input} \leq \text{other}\) element-wise.

The second argument can be a number or a tensor whose shape is broadcastable with the first argument.

Parameters

• input (Tensor) – the tensor to compare

• other (Tensor or float) – the tensor or value to compare

• out (Tensor, optional) – the output tensor that must be a ByteTensor

Returns

A torch.ByteTensor containing a 1 at each location where comparison is true

Return type

Tensor

Example:

>>> torch.le(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[ 1,  0],
        [ 1,  1]], dtype=torch.uint8)

torch.lt(input, other, out=None) → Tensor

Computes \(\text{input} < \text{other}\) element-wise.

The second argument can be a number or a tensor whose shape is broadcastable with the first argument.

Parameters

• input (Tensor) – the tensor to compare

• other (Tensor or float) – the tensor or value to compare

• out (Tensor, optional) – the output tensor that must be a ByteTensor

Returns

A torch.ByteTensor containing a 1 at each location where comparison is true

Return type

Tensor

Example:

>>> torch.lt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[ 0,  0],
        [ 1,  0]], dtype=torch.uint8)

torch.max()

torch.max(input) → Tensor

Returns the maximum value of all elements in the input tensor.

Parameters

input (Tensor) – the input tensor

Example:

>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.6763,  0.7445, -2.2369]])
>>> torch.max(a)
tensor(0.7445)

torch.max(input, dim, keepdim=False, out=None) -> (Tensor, LongTensor)

Returns a namedtuple (values, indices) where values is the maximum value of each row of the input tensor in the given dimension dim, and indices is the index location of each maximum value found (argmax).

If keepdim is True, the output tensors are of the same size as input except in the dimension dim where they are of size 1. Otherwise, dim is squeezed (see torch.squeeze()), resulting in the output tensors having 1 fewer dimension than input.

Parameters

• input (Tensor) – the input tensor

• dim (int) – the dimension to reduce

• keepdim (bool, optional) – whether the output tensors have dim retained or not. Default: False.

• out (tuple, optional) – the result tuple of two output tensors (max, max_indices)

Example:

>>> a = torch.randn(4, 4)
>>> a
tensor([[-1.2360, -0.2942, -0.1222,  0.8475],
        [ 1.1949, -1.1127, -2.2379, -0.6702],
        [ 1.5717, -0.9207,  0.1297, -1.8768],
        [-0.6172,  1.0036, -0.6060, -0.2432]])
>>> torch.max(a, 1)
torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1]))

torch.max(input, other, out=None) → Tensor

Each element of the tensor input is compared with the corresponding element of the tensor other and an element-wise maximum is taken.

The shapes of input and other don’t need to match, but they must be broadcastable.

\[\text{out}_i = \max(\text{input}_i, \text{other}_i)\]

Note

When the shapes do not match, the shape of the returned output tensor follows the broadcasting rules.

Parameters

• input (Tensor) – the input tensor

• other (Tensor) – the second input tensor

• out (Tensor, optional) – the output tensor

Example:

>>> a = torch.randn(4)
>>> a
tensor([ 0.2942, -0.7416,  0.2653, -0.1584])
>>> b = torch.randn(4)
>>> b
tensor([ 0.8722, -1.7421, -0.4141, -0.5055])
>>> torch.max(a, b)
tensor([ 0.8722, -0.7416,  0.2653, -0.1584])

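When the shapes differ, the result follows the broadcasting rules; for instance (a sketch, shapes only):

>>> a = torch.randn(4, 1)
>>> b = torch.randn(1, 4)
>>> torch.max(a, b).shape  # (4, 1) and (1, 4) broadcast to (4, 4)
torch.Size([4, 4])
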
torch.min()

torch.min(input) → Tensor

Returns the minimum value of all elements in the input tensor.

Parameters

input (Tensor) – the input tensor

Example:

>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.6750,  1.0857,  1.7197]])
>>> torch.min(a)
tensor(0.6750)

torch.min(input, dim, keepdim=False, out=None) -> (Tensor, LongTensor)

Returns a namedtuple (values, indices) where values is the minimum value of each row of the input tensor in the given dimension dim, and indices is the index location of each minimum value found (argmin).

If keepdim is True, the output tensors are of the same size as input except in the dimension dim where they are of size 1. Otherwise, dim is squeezed (see torch.squeeze()), resulting in the output tensors having 1 fewer dimension than input.

Parameters

• input (Tensor) – the input tensor

• dim (int) – the dimension to reduce

• keepdim (bool) – whether the output tensors have dim retained or not

• out (tuple, optional) – the tuple of two output tensors (min, min_indices)

Example:

>>> a = torch.randn(4, 4)
>>> a
tensor([[-0.6248,  1.1334, -1.1899, -0.2803],
        [-1.4644, -0.2635, -0.3651,  0.6134],
        [ 0.2457,  0.0384,  1.0128,  0.7015],
        [-0.1153,  2.9849,  2.1458,  0.5788]])
>>> torch.min(a, 1)
torch.return_types.min(values=tensor([-1.1899, -1.4644,  0.0384, -0.1153]), indices=tensor([2, 0, 1, 0]))

torch.min(input, other, out=None) → Tensor

Each element of the tensor input is compared with the corresponding element of the tensor other and an element-wise minimum is taken. The resulting tensor is returned.

The shapes of input and other don’t need to match, but they must be broadcastable.

\[\text{out}_i = \min(\text{input}_i, \text{other}_i)\]

Note

When the shapes do not match, the shape of the returned output tensor follows the broadcasting rules.

Parameters

• input (Tensor) – the input tensor

• other (Tensor) – the second input tensor

• out (Tensor, optional) – the output tensor

Example:

>>> a = torch.randn(4)
>>> a
tensor([ 0.8137, -1.1740, -0.6460,  0.6308])
>>> b = torch.randn(4)
>>> b
tensor([-0.1369,  0.1555,  0.4019, -0.1929])
>>> torch.min(a, b)
tensor([-0.1369, -1.1740, -0.6460, -0.1929])

torch.ne(input, other, out=None) → Tensor

Computes \(\text{input} \neq \text{other}\) element-wise.

The second argument can be a number or a tensor whose shape is broadcastable with the first argument.

Parameters

• input (Tensor) – the tensor to compare

• other (Tensor or float) – the tensor or value to compare

• out (Tensor, optional) – the output tensor that must be a ByteTensor

Returns

A torch.ByteTensor containing a 1 at each location where comparison is true.

Return type

Tensor

Example:

>>> torch.ne(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[ 0,  1],
        [ 1,  0]], dtype=torch.uint8)

torch.sort(input, dim=-1, descending=False, out=None) -> (Tensor, LongTensor)

Sorts the elements of the input tensor along a given dimension in ascending order by value.

If dim is not given, the last dimension of the input is chosen.

If descending is True then the elements are sorted in descending order by value.

A namedtuple of (values, indices) is returned, where the values are the sorted values and indices are the indices of the elements in the original input tensor.

Parameters

• input (Tensor) – the input tensor

• dim (int, optional) – the dimension to sort along

• descending (bool, optional) – controls the sorting order (ascending or descending)

• out (tuple, optional) – the output tuple of (Tensor, LongTensor) that can be optionally given to be used as output buffers

Example:

>>> x = torch.randn(3, 4)
>>> sorted, indices = torch.sort(x)
>>> sorted
tensor([[-0.2162,  0.0608,  0.6719,  2.3332],
        [-0.5793,  0.0061,  0.6058,  0.9497],
        [-0.5071,  0.3343,  0.9553,  1.0960]])
>>> indices
tensor([[ 1,  0,  2,  3],
        [ 3,  1,  0,  2],
        [ 0,  3,  1,  2]])

>>> sorted, indices = torch.sort(x, 0)
>>> sorted
tensor([[-0.5071, -0.2162,  0.6719, -0.5793],
        [ 0.0608,  0.0061,  0.9497,  0.3343],
        [ 0.6058,  0.9553,  1.0960,  2.3332]])
>>> indices
tensor([[ 2,  0,  0,  1],
        [ 0,  1,  1,  2],
        [ 1,  2,  2,  0]])

torch.topk(input, k, dim=None, largest=True, sorted=True, out=None) -> (Tensor, LongTensor)

Returns the k largest elements of the given input tensor along a given dimension.

If dim is not given, the last dimension of the input is chosen.

If largest is False then the k smallest elements are returned.

A namedtuple of (values, indices) is returned, where the indices are the indices of the elements in the original input tensor.

If the boolean option sorted is True, the returned k elements are themselves sorted.

Parameters

• input (Tensor) – the input tensor

• k (int) – the k in “top-k”

• dim (int, optional) – the dimension to sort along

• largest (bool, optional) – controls whether to return largest or smallest elements

• sorted (bool, optional) – controls whether to return the elements in sorted order

• out (tuple, optional) – the output tuple of (Tensor, LongTensor) that can be optionally given to be used as output buffers

Example:

>>> x = torch.arange(1., 6.)
>>> x
tensor([ 1.,  2.,  3.,  4.,  5.])
>>> torch.topk(x, 3)
torch.return_types.topk(values=tensor([5., 4., 3.]), indices=tensor([4, 3, 2]))

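Up to the ordering of ties, torch.topk() is equivalent to sorting in descending order and slicing the first k entries, though it is typically cheaper for small k; a sketch reusing x from above:

>>> values, indices = torch.sort(x, descending=True)
>>> values[:3], indices[:3]
(tensor([5., 4., 3.]), tensor([4, 3, 2]))
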
Spectral Ops

torch.fft(input, signal_ndim, normalized=False) → Tensor

Complex-to-complex Discrete Fourier Transform

This method computes the complex-to-complex discrete Fourier transform. Ignoring the batch dimensions, it computes the following expression:

\[X[\omega_1, \dots, \omega_d] = \sum_{n_1=0}^{N_1-1} \dots \sum_{n_d=0}^{N_d-1} x[n_1, \dots, n_d] e^{-j\ 2 \pi \sum_{i=1}^{d} \frac{\omega_i n_i}{N_i}},\]

where \(d\) = signal_ndim is the number of dimensions for the signal, and \(N_i\) is the size of signal dimension \(i\).

This method supports 1D, 2D and 3D complex-to-complex transforms, indicated by signal_ndim. input must be a tensor with last dimension of size 2, representing the real and imaginary components of complex numbers, and should have at least signal_ndim + 1 dimensions with optionally arbitrary number of leading batch dimensions. If normalized is set to True, this normalizes the result by dividing it with \(\sqrt{\prod_{i=1}^{d} N_i}\) so that the operator is unitary.

Returns the real and the imaginary parts together as one tensor of the same shape of input.

The inverse of this function is ifft().

Note

For CUDA tensors, an LRU cache is used for cuFFT plans to speed up repeatedly running FFT methods on tensors of same geometry with same configuration. See cuFFT plan cache for more details on how to monitor and control the cache.

Warning

For CPU tensors, this method is currently only available with MKL. Use torch.backends.mkl.is_available() to check if MKL is installed.

Parameters

• input (Tensor) – the input tensor of at least signal_ndim + 1 dimensions

• signal_ndim (int) – the number of dimensions in each signal. signal_ndim can only be 1, 2 or 3

• normalized (bool, optional) – controls whether to return normalized results. Default: False

Returns

A tensor containing the complex-to-complex Fourier transform result

Return type

Tensor

Example:

>>> # unbatched 2D FFT
>>> x = torch.randn(4, 3, 2)
>>> torch.fft(x, 2)
tensor([[[-0.0876,  1.7835],
         [-2.0399, -2.9754],
         [ 4.4773, -5.0119]],

        [[-1.5716,  2.7631],
         [-3.8846,  5.2652],
         [ 0.2046, -0.7088]],

        [[ 1.9938, -0.5901],
         [ 6.5637,  6.4556],
         [ 2.9865,  4.9318]],

        [[ 7.0193,  1.1742],
         [-1.3717, -2.1084],
         [ 2.0289,  2.9357]]])
>>> # batched 1D FFT
>>> torch.fft(x, 1)
tensor([[[ 1.8385,  1.2827],
         [-0.1831,  1.6593],
         [ 2.4243,  0.5367]],

        [[-0.9176, -1.5543],
         [-3.9943, -2.9860],
         [ 1.2838, -2.9420]],

        [[-0.8854, -0.6860],
         [ 2.4450,  0.0808],
         [ 1.3076, -0.5768]],

        [[-0.1231,  2.7411],
         [-0.3075, -1.7295],
         [-0.5384, -2.0299]]])
>>> # arbitrary number of batch dimensions, 2D FFT
>>> x = torch.randn(3, 3, 5, 5, 2)
>>> y = torch.fft(x, 2)
>>> y.shape
torch.Size([3, 3, 5, 5, 2])

    + +
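As a sanity check of the normalization described above, a normalized fft() composed with a normalized ifft() recovers the input (a minimal sketch; like the ops themselves, it assumes an MKL-enabled CPU build or CUDA):

>>> x = torch.randn(4, 3, 2)
>>> y = torch.fft(x, 2, normalized=True)
>>> torch.allclose(torch.ifft(y, 2, normalized=True), x, atol=1e-6)
True
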
torch.ifft(input, signal_ndim, normalized=False) → Tensor

Complex-to-complex Inverse Discrete Fourier Transform

This method computes the complex-to-complex inverse discrete Fourier transform. Ignoring the batch dimensions, it computes the following expression:

\[X[\omega_1, \dots, \omega_d] =
    \frac{1}{\prod_{i=1}^d N_i} \sum_{n_1=0}^{N_1-1} \dots \sum_{n_d=0}^{N_d-1} x[n_1, \dots, n_d]
    e^{\ j\ 2 \pi \sum_{i=1}^{d} \frac{\omega_i n_i}{N_i}},\]

where \(d\) = signal_ndim is the number of dimensions for the signal, and \(N_i\) is the size of signal dimension \(i\).

The argument specifications are almost identical with fft(). However, if normalized is set to True, this instead returns the results multiplied by \(\sqrt{\prod_{i=1}^d N_i}\), to become a unitary operator. Therefore, to invert a fft(), the normalized argument should be set identically for fft().

Returns the real and the imaginary parts together as one tensor of the same shape as input.

The inverse of this function is fft().

Note

For CUDA tensors, an LRU cache is used for cuFFT plans to speed up repeatedly running FFT methods on tensors of the same geometry with the same configuration. See cuFFT plan cache for more details on how to monitor and control the cache.

Warning

For CPU tensors, this method is currently only available with MKL. Use torch.backends.mkl.is_available() to check if MKL is installed.

Parameters

• input (Tensor) – the input tensor of at least signal_ndim + 1 dimensions

• signal_ndim (int) – the number of dimensions in each signal. signal_ndim can only be 1, 2 or 3

• normalized (bool, optional) – controls whether to return normalized results. Default: False

Returns

A tensor containing the complex-to-complex inverse Fourier transform result

Return type

Tensor

Example:

>>> x = torch.randn(3, 3, 2)
>>> x
tensor([[[ 1.2766,  1.3680],
         [-0.8337,  2.0251],
         [ 0.9465, -1.4390]],

        [[-0.1890,  1.6010],
         [ 1.1034, -1.9230],
         [-0.9482,  1.0775]],

        [[-0.7708, -0.8176],
         [-0.1843, -0.2287],
         [-1.9034, -0.2196]]])
>>> y = torch.fft(x, 2)
>>> torch.ifft(y, 2)  # recover x
tensor([[[ 1.2766,  1.3680],
         [-0.8337,  2.0251],
         [ 0.9465, -1.4390]],

        [[-0.1890,  1.6010],
         [ 1.1034, -1.9230],
         [-0.9482,  1.0775]],

        [[-0.7708, -0.8176],
         [-0.1843, -0.2287],
         [-1.9034, -0.2196]]])

torch.rfft(input, signal_ndim, normalized=False, onesided=True) → Tensor

Real-to-complex Discrete Fourier Transform

This method computes the real-to-complex discrete Fourier transform. It is mathematically equivalent to fft() with differences only in the formats of the input and output.

This method supports 1D, 2D and 3D real-to-complex transforms, indicated by signal_ndim. input must be a tensor with at least signal_ndim dimensions, optionally preceded by an arbitrary number of leading batch dimensions. If normalized is set to True, this normalizes the result by dividing it by \(\sqrt{\prod_{i=1}^{d} N_i}\) so that the operator is unitary, where \(N_i\) is the size of signal dimension \(i\).

The real-to-complex Fourier transform results follow conjugate symmetry:

\[X[\omega_1, \dots, \omega_d] = X^*[N_1 - \omega_1, \dots, N_d - \omega_d],\]

where the index arithmetic is computed modulo the size of the corresponding dimension, \(\ ^*\) is the conjugate operator, and \(d\) = signal_ndim. The onesided flag controls whether to avoid redundancy in the output results. If set to True (default), the output will not be the full complex result of shape \((*, 2)\), where \(*\) is the shape of input; instead, the last dimension will be halved to size \(\lfloor \frac{N_d}{2} \rfloor + 1\).

The inverse of this function is irfft().

Note

For CUDA tensors, an LRU cache is used for cuFFT plans to speed up repeatedly running FFT methods on tensors of the same geometry with the same configuration. See cuFFT plan cache for more details on how to monitor and control the cache.

Warning

For CPU tensors, this method is currently only available with MKL. Use torch.backends.mkl.is_available() to check if MKL is installed.

Parameters

• input (Tensor) – the input tensor of at least signal_ndim dimensions

• signal_ndim (int) – the number of dimensions in each signal. signal_ndim can only be 1, 2 or 3

• normalized (bool, optional) – controls whether to return normalized results. Default: False

• onesided (bool, optional) – controls whether to return half of results to avoid redundancy. Default: True

Returns

A tensor containing the real-to-complex Fourier transform result

Return type

Tensor

Example:

>>> x = torch.randn(5, 5)
>>> torch.rfft(x, 2).shape
torch.Size([5, 3, 2])
>>> torch.rfft(x, 2, onesided=False).shape
torch.Size([5, 5, 2])

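The conjugate symmetry above can be checked directly on a full (onesided=False) 1-D transform: entry \(\omega\) equals the conjugate of entry \(N - \omega\), and a (real, imaginary) pair is conjugated by negating its second component (a minimal sketch, under the same MKL/CUDA availability caveat):

>>> x = torch.randn(4)
>>> f = torch.rfft(x, 1, onesided=False)
>>> torch.allclose(f[1], f[3] * torch.tensor([1., -1.]))  # X[1] == X*[4 - 1]
True
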
torch.irfft(input, signal_ndim, normalized=False, onesided=True, signal_sizes=None) → Tensor

Complex-to-real Inverse Discrete Fourier Transform

This method computes the complex-to-real inverse discrete Fourier transform. It is mathematically equivalent to ifft() with differences only in the formats of the input and output.

The argument specifications are almost identical with ifft(). Similar to ifft(), if normalized is set to True, this normalizes the result by multiplying it with \(\sqrt{\prod_{i=1}^{d} N_i}\) so that the operator is unitary, where \(N_i\) is the size of signal dimension \(i\).

Note

Due to the conjugate symmetry, input does not need to contain the full complex frequency values. Roughly half of the values will be sufficient, as is the case when input is given by rfft() with rfft(signal, onesided=True). In such a case, set the onesided argument of this method to True. Moreover, the original signal shape information can sometimes be lost; optionally set signal_sizes to be the size of the original signal (without the batch dimensions if in batched mode) to recover it with the correct shape.

Therefore, to invert an rfft(), the normalized and onesided arguments should be set identically for irfft(), and preferably a signal_sizes is given to avoid size mismatch. See the example below for a case of size mismatch.

See rfft() for details on conjugate symmetry.

The inverse of this function is rfft().

Warning

Generally speaking, input to this function should contain values following conjugate symmetry. Note that even if onesided is True, often symmetry on some part is still needed. When this requirement is not satisfied, the behavior of irfft() is undefined. Since torch.autograd.gradcheck() estimates numerical Jacobian with point perturbations, irfft() will almost certainly fail the check.

Note

For CUDA tensors, an LRU cache is used for cuFFT plans to speed up repeatedly running FFT methods on tensors of the same geometry with the same configuration. See cuFFT plan cache for more details on how to monitor and control the cache.

Warning

For CPU tensors, this method is currently only available with MKL. Use torch.backends.mkl.is_available() to check if MKL is installed.

Parameters

• input (Tensor) – the input tensor of at least signal_ndim + 1 dimensions

• signal_ndim (int) – the number of dimensions in each signal. signal_ndim can only be 1, 2 or 3

• normalized (bool, optional) – controls whether to return normalized results. Default: False

• onesided (bool, optional) – controls whether input was halved to avoid redundancy, e.g., by rfft(). Default: True

• signal_sizes (list or torch.Size, optional) – the size of the original signal (without batch dimension). Default: None

Returns

A tensor containing the complex-to-real inverse Fourier transform result

Return type

Tensor

Example:

>>> x = torch.randn(4, 4)
>>> torch.rfft(x, 2, onesided=True).shape
torch.Size([4, 3, 2])
>>>
>>> # notice that with onesided=True, output size does not determine the original signal size
>>> x = torch.randn(4, 5)
>>> torch.rfft(x, 2, onesided=True).shape
torch.Size([4, 3, 2])
>>>
>>> # now we use the original shape to recover x
>>> x
tensor([[-0.8992,  0.6117, -1.6091, -0.4155, -0.8346],
        [-2.1596, -0.0853,  0.7232,  0.1941, -0.0789],
        [-2.0329,  1.1031,  0.6869, -0.5042,  0.9895],
        [-0.1884,  0.2858, -1.5831,  0.9917, -0.8356]])
>>> y = torch.rfft(x, 2, onesided=True)
>>> torch.irfft(y, 2, onesided=True, signal_sizes=x.shape)  # recover x
tensor([[-0.8992,  0.6117, -1.6091, -0.4155, -0.8346],
        [-2.1596, -0.0853,  0.7232,  0.1941, -0.0789],
        [-2.0329,  1.1031,  0.6869, -0.5042,  0.9895],
        [-0.1884,  0.2858, -1.5831,  0.9917, -0.8356]])

torch.stft(input, n_fft, hop_length=None, win_length=None, window=None, center=True, pad_mode='reflect', normalized=False, onesided=True)[source]

Short-time Fourier transform (STFT).

Ignoring the optional batch dimension, this method computes the following expression:

\[X[m, \omega] = \sum_{k = 0}^{\text{win\_length}-1}
    \text{window}[k]\ \text{input}[m \times \text{hop\_length} + k]\
    \exp\left(- j \frac{2 \pi \cdot \omega k}{\text{win\_length}}\right),\]

where \(m\) is the index of the sliding window, and \(\omega\) is the frequency, with \(0 \leq \omega < \text{n\_fft}\). The following conventions apply:

• input must be either a 1-D time sequence or a 2-D batch of time sequences.

• If hop_length is None (default), it is treated as equal to floor(n_fft / 4).

• If win_length is None (default), it is treated as equal to n_fft.

• window can be a 1-D tensor of size win_length, e.g., from torch.hann_window(). If window is None (default), it is treated as if having \(1\) everywhere in the window. If \(\text{win\_length} < \text{n\_fft}\), window will be padded on both sides to length n_fft before being applied.

• If center is True (default), input will be padded on both sides so that the \(t\)-th frame is centered at time \(t \times \text{hop\_length}\). Otherwise, the \(t\)-th frame begins at time \(t \times \text{hop\_length}\).

• pad_mode determines the padding method used on input when center is True. See torch.nn.functional.pad() for all available options. Default is "reflect".

• If onesided is True (default), only the \(\left\lfloor \frac{\text{n\_fft}}{2} \right\rfloor + 1\) values for \(\omega\) in \(\left[0, \left\lfloor \frac{\text{n\_fft}}{2} \right\rfloor\right]\) are returned, because the real-to-complex Fourier transform satisfies the conjugate symmetry, i.e., \(X[m, \omega] = X[m, \text{n\_fft} - \omega]^*\).

• If normalized is True (default is False), the function returns the normalized STFT results, i.e., multiplied by \((\text{frame\_length})^{-0.5}\).

Returns the real and the imaginary parts together as one tensor of size \((* \times N \times T \times 2)\), where \(*\) is the optional batch size of input, \(N\) is the number of frequencies where STFT is applied, \(T\) is the total number of frames used, and each pair in the last dimension represents a complex number as the real part and the imaginary part.

Warning

This function changed signature at version 0.4.1. Calling with the previous signature may cause an error or return an incorrect result.

Parameters

• input (Tensor) – the input tensor

• n_fft (int) – size of Fourier transform

• hop_length (int, optional) – the distance between neighboring sliding window frames. Default: None (treated as equal to floor(n_fft / 4))

• win_length (int, optional) – the size of window frame and STFT filter. Default: None (treated as equal to n_fft)

• window (Tensor, optional) – the optional window function. Default: None (treated as window of all \(1\) s)

• center (bool, optional) – whether to pad input on both sides so that the \(t\)-th frame is centered at time \(t \times \text{hop\_length}\). Default: True

• pad_mode (string, optional) – controls the padding method used when center is True. Default: "reflect"

• normalized (bool, optional) – controls whether to return the normalized STFT results. Default: False

• onesided (bool, optional) – controls whether to return half of results to avoid redundancy. Default: True

Returns

A tensor containing the STFT result with shape described above

Return type

Tensor

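For instance, the spectrogram of a batch of two 16000-sample signals with a 400-point Hann window (the sizes here are illustrative, not prescribed by the API):

>>> signal = torch.randn(2, 16000)
>>> spec = torch.stft(signal, n_fft=400, hop_length=160,
...                   window=torch.hann_window(400))
>>> spec.shape  # (batch, n_fft // 2 + 1 frequencies, frames, real/imag)
torch.Size([2, 201, 101, 2])
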
torch.bartlett_window(window_length, periodic=True, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor

Bartlett window function.

\[w[n] = 1 - \left| \frac{2n}{N-1} - 1 \right| = \begin{cases}
    \frac{2n}{N - 1} & \text{if } 0 \leq n \leq \frac{N - 1}{2} \\
    2 - \frac{2n}{N - 1} & \text{if } \frac{N - 1}{2} < n < N \\
\end{cases},\]

where \(N\) is the full window size.

The input window_length is a positive integer controlling the returned window size. The periodic flag determines whether the returned window trims off the last duplicate value from the symmetric window and is ready to be used as a periodic window with functions like torch.stft(). Therefore, if periodic is true, the \(N\) in the above formula is in fact \(\text{window\_length} + 1\). Also, we always have torch.bartlett_window(L, periodic=True) equal to torch.bartlett_window(L + 1, periodic=False)[:-1].

Note

If window_length \(=1\), the returned window contains a single value 1.

Parameters

• window_length (int) – the size of returned window

• periodic (bool, optional) – If True, returns a window to be used as a periodic function. If False, returns a symmetric window.

• dtype (torch.dtype, optional) – the desired data type of returned tensor. Default: if None, uses a global default (see torch.set_default_tensor_type()). Only floating point types are supported.

• layout (torch.layout, optional) – the desired layout of returned window tensor. Only torch.strided (dense layout) is supported.

• device (torch.device, optional) – the desired device of returned tensor. Default: if None, uses the current device for the default tensor type (see torch.set_default_tensor_type()). device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types.

• requires_grad (bool, optional) – If autograd should record operations on the returned tensor. Default: False.

Returns

A 1-D tensor of size \((\text{window\_length},)\) containing the window

Return type

Tensor

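A small illustration of the periodic/symmetric relationship described above:

>>> torch.bartlett_window(5, periodic=False)
tensor([0.0000, 0.5000, 1.0000, 0.5000, 0.0000])
>>> torch.bartlett_window(4)  # == torch.bartlett_window(5, periodic=False)[:-1]
tensor([0.0000, 0.5000, 1.0000, 0.5000])
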
torch.blackman_window(window_length, periodic=True, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor

Blackman window function.

\[w[n] = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{N - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{N - 1} \right),\]

where \(N\) is the full window size.

The input window_length is a positive integer controlling the returned window size. The periodic flag determines whether the returned window trims off the last duplicate value from the symmetric window and is ready to be used as a periodic window with functions like torch.stft(). Therefore, if periodic is true, the \(N\) in the above formula is in fact \(\text{window\_length} + 1\). Also, we always have torch.blackman_window(L, periodic=True) equal to torch.blackman_window(L + 1, periodic=False)[:-1].

Note

If window_length \(=1\), the returned window contains a single value 1.

Parameters

• window_length (int) – the size of returned window

• periodic (bool, optional) – If True, returns a window to be used as a periodic function. If False, returns a symmetric window.

• dtype (torch.dtype, optional) – the desired data type of returned tensor. Default: if None, uses a global default (see torch.set_default_tensor_type()). Only floating point types are supported.

• layout (torch.layout, optional) – the desired layout of returned window tensor. Only torch.strided (dense layout) is supported.

• device (torch.device, optional) – the desired device of returned tensor. Default: if None, uses the current device for the default tensor type (see torch.set_default_tensor_type()). device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types.

• requires_grad (bool, optional) – If autograd should record operations on the returned tensor. Default: False.

Returns

A 1-D tensor of size \((\text{window\_length},)\) containing the window

Return type

Tensor

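For instance, the window peaks at the center sample and tapers toward zero at the edges:

>>> w = torch.blackman_window(400)
>>> w.shape
torch.Size([400])
>>> w.argmax()  # the peak sits at the center sample
tensor(200)
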
torch.hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor

Hamming window function.

\[w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right),\]

where \(N\) is the full window size.

The input window_length is a positive integer controlling the returned window size. The periodic flag determines whether the returned window trims off the last duplicate value from the symmetric window and is ready to be used as a periodic window with functions like torch.stft(). Therefore, if periodic is true, the \(N\) in the above formula is in fact \(\text{window\_length} + 1\). Also, we always have torch.hamming_window(L, periodic=True) equal to torch.hamming_window(L + 1, periodic=False)[:-1].

Note

If window_length \(=1\), the returned window contains a single value 1.

Note

This is a generalized version of torch.hann_window().

Parameters

• window_length (int) – the size of returned window

• periodic (bool, optional) – If True, returns a window to be used as a periodic function. If False, returns a symmetric window.

• alpha (float, optional) – The coefficient \(\alpha\) in the equation above

• beta (float, optional) – The coefficient \(\beta\) in the equation above

• dtype (torch.dtype, optional) – the desired data type of returned tensor. Default: if None, uses a global default (see torch.set_default_tensor_type()). Only floating point types are supported.

• layout (torch.layout, optional) – the desired layout of returned window tensor. Only torch.strided (dense layout) is supported.

• device (torch.device, optional) – the desired device of returned tensor. Default: if None, uses the current device for the default tensor type (see torch.set_default_tensor_type()). device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types.

• requires_grad (bool, optional) – If autograd should record operations on the returned tensor. Default: False.

Returns

A 1-D tensor of size \((\text{window\_length},)\) containing the window

Return type

Tensor

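The generalization noted above is easy to verify: with \(\alpha = \beta = 0.5\), the Hamming formula reduces exactly to the Hann window:

>>> torch.allclose(torch.hamming_window(8, alpha=0.5, beta=0.5),
...                torch.hann_window(8))
True
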
torch.hann_window(window_length, periodic=True, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor

Hann window function.

\[w[n] = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{N - 1} \right)\right] =
    \sin^2 \left( \frac{\pi n}{N - 1} \right),\]

where \(N\) is the full window size.

The input window_length is a positive integer controlling the returned window size. The periodic flag determines whether the returned window trims off the last duplicate value from the symmetric window and is ready to be used as a periodic window with functions like torch.stft(). Therefore, if periodic is true, the \(N\) in the above formula is in fact \(\text{window\_length} + 1\). Also, we always have torch.hann_window(L, periodic=True) equal to torch.hann_window(L + 1, periodic=False)[:-1].

Note

If window_length \(=1\), the returned window contains a single value 1.

Parameters

• window_length (int) – the size of returned window

• periodic (bool, optional) – If True, returns a window to be used as a periodic function. If False, returns a symmetric window.

• dtype (torch.dtype, optional) – the desired data type of returned tensor. Default: if None, uses a global default (see torch.set_default_tensor_type()). Only floating point types are supported.

• layout (torch.layout, optional) – the desired layout of returned window tensor. Only torch.strided (dense layout) is supported.

• device (torch.device, optional) – the desired device of returned tensor. Default: if None, uses the current device for the default tensor type (see torch.set_default_tensor_type()). device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types.

• requires_grad (bool, optional) – If autograd should record operations on the returned tensor. Default: False.

Returns

A 1-D tensor of size \((\text{window\_length},)\) containing the window

Return type

Tensor

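As with the other window functions, the periodic variant simply drops the last sample of the next-larger symmetric window:

>>> torch.hann_window(4, periodic=False)
tensor([0.0000, 0.7500, 0.7500, 0.0000])
>>> torch.hann_window(3)  # == torch.hann_window(4, periodic=False)[:-1]
tensor([0.0000, 0.7500, 0.7500])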

    Other Operations

torch.bincount(input, weights=None, minlength=0) → Tensor

Count the frequency of each value in an array of non-negative ints.

The number of bins (the size of the returned 1-D tensor) is one larger than the largest value in input, unless input is empty, in which case the result is a tensor of size 0. If minlength is specified, the number of bins is at least minlength, and if input is empty, then the result is a tensor of size minlength filled with zeros. If n is the value at position i, out[n] += weights[i] if weights is specified, else out[n] += 1.

Note

When using the CUDA backend, this operation may induce nondeterministic behaviour that is not easily switched off. Please see the notes on Reproducibility for background.

Parameters

• input (Tensor) – 1-d int tensor

• weights (Tensor) – optional, weight for each value in the input tensor. Should be of same size as input tensor.

• minlength (int) – optional, minimum number of bins. Should be non-negative.

Returns

a tensor of shape Size([max(input) + 1]) if input is non-empty, else Size(0)

Return type

output (Tensor)

Example:

>>> input = torch.randint(0, 8, (5,), dtype=torch.int64)
>>> weights = torch.linspace(0, 1, steps=5)
>>> input, weights
(tensor([4, 3, 6, 3, 4]),
 tensor([ 0.0000,  0.2500,  0.5000,  0.7500,  1.0000]))

>>> torch.bincount(input)
tensor([0, 0, 0, 2, 2, 0, 1])

>>> input.bincount(weights)
tensor([0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 0.0000, 0.5000])

torch.broadcast_tensors(*tensors) → List of Tensors[source]

Broadcasts the given tensors according to Broadcasting semantics.

Parameters

*tensors – any number of tensors of the same type

Warning

More than one element of a broadcasted tensor may refer to a single memory location. As a result, in-place operations (especially ones that are vectorized) may result in incorrect behavior. If you need to write to the tensors, please clone them first.

Example:

>>> x = torch.arange(3).view(1, 3)
>>> y = torch.arange(2).view(2, 1)
>>> a, b = torch.broadcast_tensors(x, y)
>>> a.size()
torch.Size([2, 3])
>>> a
tensor([[0, 1, 2],
        [0, 1, 2]])

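Following the warning above, clone a broadcast result before writing to it (continuing the example):

>>> a, b = torch.broadcast_tensors(x, y)
>>> a = a.clone()  # rows of `a` aliased the same storage; clone makes them independent
>>> a[0, 0] = 42   # now safe: other rows are unaffected
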
torch.cartesian_prod(*tensors)[source]

Computes the Cartesian product of the given sequence of tensors. The behavior is similar to Python’s itertools.product.

Parameters

*tensors – any number of 1-dimensional tensors.

Returns

A tensor equivalent to converting all the input tensors into lists, doing itertools.product on these lists, and finally converting the resulting list into a tensor.

Return type

Tensor

Example:

>>> a = [1, 2, 3]
>>> b = [4, 5]
>>> list(itertools.product(a, b))
[(1, 4), (1, 5), (2, 4), (2, 5), (3, 4), (3, 5)]
>>> tensor_a = torch.tensor(a)
>>> tensor_b = torch.tensor(b)
>>> torch.cartesian_prod(tensor_a, tensor_b)
tensor([[1, 4],
        [1, 5],
        [2, 4],
        [2, 5],
        [3, 4],
        [3, 5]])

torch.combinations(input, r=2, with_replacement=False) → seq

Compute combinations of length \(r\) of the given tensor. The behavior is similar to Python’s itertools.combinations when with_replacement is set to False, and itertools.combinations_with_replacement when with_replacement is set to True.

Parameters

• input (Tensor) – 1D vector.

• r (int, optional) – number of elements to combine

• with_replacement (boolean, optional) – whether to allow duplication in combination

Returns

A tensor equivalent to converting the input tensor into a list, doing itertools.combinations or itertools.combinations_with_replacement on this list, and finally converting the resulting list into a tensor.

Return type

Tensor

Example:

>>> a = [1, 2, 3]
>>> list(itertools.combinations(a, r=2))
[(1, 2), (1, 3), (2, 3)]
>>> list(itertools.combinations(a, r=3))
[(1, 2, 3)]
>>> list(itertools.combinations_with_replacement(a, r=2))
[(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)]
>>> tensor_a = torch.tensor(a)
>>> torch.combinations(tensor_a)
tensor([[1, 2],
        [1, 3],
        [2, 3]])
>>> torch.combinations(tensor_a, r=3)
tensor([[1, 2, 3]])
>>> torch.combinations(tensor_a, with_replacement=True)
tensor([[1, 1],
        [1, 2],
        [1, 3],
        [2, 2],
        [2, 3],
        [3, 3]])

torch.cross(input, other, dim=-1, out=None) → Tensor

Returns the cross product of vectors in dimension dim of input and other.

input and other must have the same size, and the size of their dim dimension should be 3.

If dim is not given, it defaults to the first dimension found with the size 3.

Parameters

• input (Tensor) – the input tensor

• other (Tensor) – the second input tensor

• dim (int, optional) – the dimension to take the cross-product in.

• out (Tensor, optional) – the output tensor

Example:

>>> a = torch.randn(4, 3)
>>> a
tensor([[-0.3956,  1.1455,  1.6895],
        [-0.5849,  1.3672,  0.3599],
        [-1.1626,  0.7180, -0.0521],
        [-0.1339,  0.9902, -2.0225]])
>>> b = torch.randn(4, 3)
>>> b
tensor([[-0.0257, -1.4725, -1.2251],
        [-1.1479, -0.7005, -1.9757],
        [-1.3904,  0.3726, -1.1836],
        [-0.9688, -0.7153,  0.2159]])
>>> torch.cross(a, b, dim=1)
tensor([[ 1.0844, -0.5281,  0.6120],
        [-2.4490, -1.5687,  1.9792],
        [-0.8304, -1.3037,  0.5650],
        [-1.2329,  1.9883,  1.0551]])
>>> torch.cross(a, b)
tensor([[ 1.0844, -0.5281,  0.6120],
        [-2.4490, -1.5687,  1.9792],
        [-0.8304, -1.3037,  0.5650],
        [-1.2329,  1.9883,  1.0551]])

torch.diag(input, diagonal=0, out=None) → Tensor

• If input is a vector (1-D tensor), then returns a 2-D square tensor with the elements of input as the diagonal.

• If input is a matrix (2-D tensor), then returns a 1-D tensor with the diagonal elements of input.

The argument diagonal controls which diagonal to consider:

• If diagonal = 0, it is the main diagonal.

• If diagonal > 0, it is above the main diagonal.

• If diagonal < 0, it is below the main diagonal.

Parameters

• input (Tensor) – the input tensor

• diagonal (int, optional) – the diagonal to consider

• out (Tensor, optional) – the output tensor

See also

torch.diagonal() always returns the diagonal of its input.

torch.diagflat() always constructs a tensor with diagonal elements specified by the input.

Examples:

Get the square matrix where the input vector is the diagonal:

>>> a = torch.randn(3)
>>> a
tensor([ 0.5950, -0.0872,  2.3298])
>>> torch.diag(a)
tensor([[ 0.5950,  0.0000,  0.0000],
        [ 0.0000, -0.0872,  0.0000],
        [ 0.0000,  0.0000,  2.3298]])
>>> torch.diag(a, 1)
tensor([[ 0.0000,  0.5950,  0.0000,  0.0000],
        [ 0.0000,  0.0000, -0.0872,  0.0000],
        [ 0.0000,  0.0000,  0.0000,  2.3298],
        [ 0.0000,  0.0000,  0.0000,  0.0000]])

Get the k-th diagonal of a given matrix:

>>> a = torch.randn(3, 3)
>>> a
tensor([[-0.4264,  0.0255, -0.1064],
        [ 0.8795, -0.2429,  0.1374],
        [ 0.1029, -0.6482, -1.6300]])
>>> torch.diag(a, 0)
tensor([-0.4264, -0.2429, -1.6300])
>>> torch.diag(a, 1)
tensor([ 0.0255,  0.1374])

torch.diag_embed(input, offset=0, dim1=-2, dim2=-1) → Tensor

Creates a tensor whose diagonals of certain 2D planes (specified by dim1 and dim2) are filled by input. To facilitate creating batched diagonal matrices, the 2D planes formed by the last two dimensions of the returned tensor are chosen by default.

The argument offset controls which diagonal to consider:

• If offset = 0, it is the main diagonal.

• If offset > 0, it is above the main diagonal.

• If offset < 0, it is below the main diagonal.

The size of the new matrix will be calculated to make the specified diagonal of the size of the last input dimension. Note that for offset other than \(0\), the order of dim1 and dim2 matters. Exchanging them is equivalent to changing the sign of offset.

Applying torch.diagonal() to the output of this function with the same arguments yields a matrix identical to input. However, torch.diagonal() has different default dimensions, so those need to be explicitly specified.

Parameters

• input (Tensor) – the input tensor. Must be at least 1-dimensional.

• offset (int, optional) – which diagonal to consider. Default: 0 (main diagonal).

• dim1 (int, optional) – first dimension with respect to which to take diagonal. Default: -2.

• dim2 (int, optional) – second dimension with respect to which to take diagonal. Default: -1.

Example:

>>> a = torch.randn(2, 3)
>>> torch.diag_embed(a)
tensor([[[ 1.5410,  0.0000,  0.0000],
         [ 0.0000, -0.2934,  0.0000],
         [ 0.0000,  0.0000, -2.1788]],

        [[ 0.5684,  0.0000,  0.0000],
         [ 0.0000, -1.0845,  0.0000],
         [ 0.0000,  0.0000, -1.3986]]])

>>> torch.diag_embed(a, offset=1, dim1=0, dim2=2)
tensor([[[ 0.0000,  1.5410,  0.0000,  0.0000],
         [ 0.0000,  0.5684,  0.0000,  0.0000]],

        [[ 0.0000,  0.0000, -0.2934,  0.0000],
         [ 0.0000,  0.0000, -1.0845,  0.0000]],

        [[ 0.0000,  0.0000,  0.0000, -2.1788],
         [ 0.0000,  0.0000,  0.0000, -1.3986]],

        [[ 0.0000,  0.0000,  0.0000,  0.0000],
         [ 0.0000,  0.0000,  0.0000,  0.0000]]])

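The round trip with torch.diagonal() mentioned above, written out (the dims must be given explicitly because torch.diagonal() defaults to dim1=0, dim2=1):

>>> a = torch.randn(2, 3)
>>> torch.allclose(torch.diagonal(torch.diag_embed(a), dim1=-2, dim2=-1), a)
True
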
torch.diagflat(input, offset=0) → Tensor

• If input is a vector (1-D tensor), then returns a 2-D square tensor with the elements of input as the diagonal.

• If input is a tensor with more than one dimension, then returns a 2-D tensor with diagonal elements equal to a flattened input.

The argument offset controls which diagonal to consider:

• If offset = 0, it is the main diagonal.

• If offset > 0, it is above the main diagonal.

• If offset < 0, it is below the main diagonal.

Parameters

• input (Tensor) – the input tensor

• offset (int, optional) – the diagonal to consider. Default: 0 (main diagonal).

Examples:

>>> a = torch.randn(3)
>>> a
tensor([-0.2956, -0.9068,  0.1695])
>>> torch.diagflat(a)
tensor([[-0.2956,  0.0000,  0.0000],
        [ 0.0000, -0.9068,  0.0000],
        [ 0.0000,  0.0000,  0.1695]])
>>> torch.diagflat(a, 1)
tensor([[ 0.0000, -0.2956,  0.0000,  0.0000],
        [ 0.0000,  0.0000, -0.9068,  0.0000],
        [ 0.0000,  0.0000,  0.0000,  0.1695],
        [ 0.0000,  0.0000,  0.0000,  0.0000]])

>>> a = torch.randn(2, 2)
>>> a
tensor([[ 0.2094, -0.3018],
        [-0.1516,  1.9342]])
>>> torch.diagflat(a)
tensor([[ 0.2094,  0.0000,  0.0000,  0.0000],
        [ 0.0000, -0.3018,  0.0000,  0.0000],
        [ 0.0000,  0.0000, -0.1516,  0.0000],
        [ 0.0000,  0.0000,  0.0000,  1.9342]])

torch.diagonal(input, offset=0, dim1=0, dim2=1) → Tensor

Returns a partial view of input with its diagonal elements with respect to dim1 and dim2 appended as a dimension at the end of the shape.

The argument offset controls which diagonal to consider:

• If offset = 0, it is the main diagonal.

• If offset > 0, it is above the main diagonal.

• If offset < 0, it is below the main diagonal.

Applying torch.diag_embed() to the output of this function with the same arguments yields a diagonal matrix with the diagonal entries of the input. However, torch.diag_embed() has different default dimensions, so those need to be explicitly specified.

Parameters

• input (Tensor) – the input tensor. Must be at least 2-dimensional.

• offset (int, optional) – which diagonal to consider. Default: 0 (main diagonal).

• dim1 (int, optional) – first dimension with respect to which to take diagonal. Default: 0.

• dim2 (int, optional) – second dimension with respect to which to take diagonal. Default: 1.

Note

To take a batch diagonal, pass in dim1=-2, dim2=-1.

Examples:

>>> a = torch.randn(3, 3)
>>> a
tensor([[-1.0854,  1.1431, -0.1752],
        [ 0.8536, -0.0905,  0.0360],
        [ 0.6927, -0.3735, -0.4945]])
>>> torch.diagonal(a, 0)
tensor([-1.0854, -0.0905, -0.4945])
>>> torch.diagonal(a, 1)
tensor([ 1.1431,  0.0360])
>>> x = torch.randn(2, 5, 4, 2)
>>> torch.diagonal(x, offset=-1, dim1=1, dim2=2)
tensor([[[-1.2631,  0.3755, -1.5977, -1.8172],
         [-1.1065,  1.0401, -0.2235, -0.7938]],

        [[-1.7325, -0.3081,  0.6166,  0.2335],
         [ 1.0500,  0.7336, -0.3836, -1.1015]]])

torch.einsum(equation, *operands) → Tensor[source]

This function provides a way of computing multilinear expressions (i.e. sums of products) using the Einstein summation convention.

Parameters

• equation (string) – The equation is given in terms of lower case letters (indices) to be associated with each dimension of the operands and result. The left hand side lists the operands’ dimensions, separated by commas. There should be one index letter per tensor dimension. The right hand side follows after -> and gives the indices for the output. If the -> and right hand side are omitted, it is implicitly defined as the alphabetically sorted list of all indices appearing exactly once in the left hand side. The indices not appearing in the output are summed over after multiplying the operands’ entries. If an index appears several times for the same operand, a diagonal is taken. Ellipses represent a fixed number of dimensions. If the right hand side is inferred, the ellipsis dimensions are at the beginning of the output.

• operands (list of Tensors) – The operands to compute the Einstein sum of.

Examples:

>>> x = torch.randn(5)
>>> y = torch.randn(4)
>>> torch.einsum('i,j->ij', x, y)  # outer product
tensor([[-0.0570, -0.0286, -0.0231,  0.0197],
        [ 1.2616,  0.6335,  0.5113, -0.4351],
        [ 1.4452,  0.7257,  0.5857, -0.4984],
        [-0.4647, -0.2333, -0.1883,  0.1603],
        [-1.1130, -0.5588, -0.4510,  0.3838]])

>>> A = torch.randn(3, 5, 4)
>>> l = torch.randn(2, 5)
>>> r = torch.randn(2, 4)
>>> torch.einsum('bn,anm,bm->ba', l, A, r)  # compare torch.nn.functional.bilinear
tensor([[-0.3430, -5.2405,  0.4494],
        [ 0.3311,  5.5201, -3.0356]])

>>> As = torch.randn(3, 2, 5)
>>> Bs = torch.randn(3, 5, 4)
>>> torch.einsum('bij,bjk->bik', As, Bs)  # batch matrix multiplication
tensor([[[-1.0564, -1.5904,  3.2023,  3.1271],
         [-1.6706, -0.8097, -0.8025, -2.1183]],

        [[ 4.2239,  0.3107, -0.5756, -0.2354],
         [-1.4558, -0.3460,  1.5087, -0.8530]],

        [[ 2.8153,  1.8787, -4.3839, -1.2112],
         [ 0.3728, -2.1131,  0.0921,  0.8305]]])

>>> A = torch.randn(3, 3)
>>> torch.einsum('ii->i', A)  # diagonal
tensor([-0.7825,  0.8291, -0.1936])

>>> A = torch.randn(4, 3, 3)
>>> torch.einsum('...ii->...i', A)  # batch diagonal
tensor([[-1.0864,  0.7292,  0.0569],
        [-0.9725, -1.0270,  0.6493],
        [ 0.5832, -1.1716, -1.5084],
        [ 0.4041, -1.1690,  0.8570]])

>>> A = torch.randn(2, 3, 4, 5)
>>> torch.einsum('...ij->...ji', A).shape  # batch permute
torch.Size([2, 3, 5, 4])

torch.flatten(input, start_dim=0, end_dim=-1) → Tensor

Flattens a contiguous range of dims in a tensor.

Parameters

• input (Tensor) – the input tensor

• start_dim (int) – the first dim to flatten

• end_dim (int) – the last dim to flatten

Example:

>>> t = torch.tensor([[[1, 2],
                       [3, 4]],
                      [[5, 6],
                       [7, 8]]])
>>> torch.flatten(t)
tensor([1, 2, 3, 4, 5, 6, 7, 8])
>>> torch.flatten(t, start_dim=1)
tensor([[1, 2, 3, 4],
        [5, 6, 7, 8]])

torch.flip(input, dims) → Tensor

Reverses the order of an n-D tensor along the given axes in dims.

Parameters

• input (Tensor) – the input tensor

• dims (a list or tuple) – axes to flip on

Example:

>>> x = torch.arange(8).view(2, 2, 2)
>>> x
tensor([[[ 0,  1],
         [ 2,  3]],

        [[ 4,  5],
         [ 6,  7]]])
>>> torch.flip(x, [0, 1])
tensor([[[ 6,  7],
         [ 4,  5]],

        [[ 2,  3],
         [ 0,  1]]])

torch.rot90(input, k, dims) → Tensor

Rotates an n-D tensor by 90 degrees in the plane specified by the dims axes. The rotation direction is from the first towards the second axis if k > 0, and from the second towards the first for k < 0.

Parameters

• input (Tensor) – the input tensor

• k (int) – number of times to rotate

• dims (a list or tuple) – axes to rotate

Example:

>>> x = torch.arange(4).view(2, 2)
>>> x
tensor([[0, 1],
        [2, 3]])
>>> torch.rot90(x, 1, [0, 1])
tensor([[1, 3],
        [0, 2]])

>>> x = torch.arange(8).view(2, 2, 2)
>>> x
tensor([[[0, 1],
         [2, 3]],

        [[4, 5],
         [6, 7]]])
>>> torch.rot90(x, 1, [1, 2])
tensor([[[1, 3],
         [0, 2]],

        [[5, 7],
         [4, 6]]])

torch.histc(input, bins=100, min=0, max=0, out=None) → Tensor

Computes the histogram of a tensor.

The elements are sorted into equal width bins between min and max. If min and max are both zero, the minimum and maximum values of the data are used.

Parameters

• input (Tensor) – the input tensor

• bins (int) – number of histogram bins

• min (int) – lower end of the range (inclusive)

• max (int) – upper end of the range (inclusive)

• out (Tensor, optional) – the output tensor

Returns

Histogram represented as a tensor

Return type

Tensor

Example:

>>> torch.histc(torch.tensor([1., 2, 1]), bins=4, min=0, max=3)
tensor([ 0.,  2.,  1.,  0.])

torch.meshgrid(*tensors, **kwargs)[source]

Take \(N\) tensors, each of which can be either scalar or 1-dimensional vector, and create \(N\) N-dimensional grids, where the \(i\)-th grid is defined by expanding the \(i\)-th input over dimensions defined by the other inputs.

Parameters

tensors (list of Tensor) – list of scalars or 1 dimensional tensors. Scalars will be treated as tensors of size \((1,)\) automatically

Returns

seq (sequence of Tensors): If the input has \(k\) tensors of size \((N_1,), (N_2,), \ldots, (N_k,)\), then the output would also have \(k\) tensors, where all tensors are of size \((N_1, N_2, \ldots, N_k)\).

Example:

>>> x = torch.tensor([1, 2, 3])
>>> y = torch.tensor([4, 5, 6])
>>> grid_x, grid_y = torch.meshgrid(x, y)
>>> grid_x
tensor([[1, 1, 1],
        [2, 2, 2],
        [3, 3, 3]])
>>> grid_y
tensor([[4, 5, 6],
        [4, 5, 6],
        [4, 5, 6]])

torch.renorm(input, p, dim, maxnorm, out=None) → Tensor

Returns a tensor where each sub-tensor of input along dimension dim is normalized such that the p-norm of the sub-tensor is lower than the value maxnorm.

Note

If the norm of a row is lower than maxnorm, the row is unchanged.

Parameters

• input (Tensor) – the input tensor

• p (float) – the power for the norm computation

• dim (int) – the dimension to slice over to get the sub-tensors

• maxnorm (float) – the maximum norm to keep each sub-tensor under

• out (Tensor, optional) – the output tensor

Example:

>>> x = torch.ones(3, 3)
>>> x[1].fill_(2)
tensor([ 2.,  2.,  2.])
>>> x[2].fill_(3)
tensor([ 3.,  3.,  3.])
>>> x
tensor([[ 1.,  1.,  1.],
        [ 2.,  2.,  2.],
        [ 3.,  3.,  3.]])
>>> torch.renorm(x, 1, 0, 5)
tensor([[ 1.0000,  1.0000,  1.0000],
        [ 1.6667,  1.6667,  1.6667],
        [ 1.6667,  1.6667,  1.6667]])

torch.repeat_interleave()

torch.repeat_interleave(input, repeats, dim=None) → Tensor

Repeat elements of a tensor.

Warning

This is different from torch.repeat() but similar to numpy.repeat.

Parameters

• input (Tensor) – The input tensor

• repeats (Tensor or int) – The number of repetitions for each element. repeats is broadcasted to fit the shape of the given axis.

• dim (int, optional) – The dimension along which to repeat values. By default, use the flattened input array, and return a flat output array.

Returns

Repeated tensor which has the same shape as input, except along the given axis.

Return type

Tensor

Example:

>>> x = torch.tensor([1, 2, 3])
>>> x.repeat_interleave(2)
tensor([1, 1, 2, 2, 3, 3])
>>> y = torch.tensor([[1, 2], [3, 4]])
>>> torch.repeat_interleave(y, 2)
tensor([1, 1, 2, 2, 3, 3, 4, 4])
>>> torch.repeat_interleave(y, 3, dim=1)
tensor([[1, 1, 1, 2, 2, 2],
        [3, 3, 3, 4, 4, 4]])
>>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0)
tensor([[1, 2],
        [3, 4],
        [3, 4]])

torch.repeat_interleave(repeats) → Tensor

If the repeats is tensor([n1, n2, n3, …]), then the output will be tensor([0, 0, …, 1, 1, …, 2, 2, …, …]) where 0 appears n1 times, 1 appears n2 times, 2 appears n3 times, etc.

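For example:

>>> torch.repeat_interleave(torch.tensor([2, 3]))
tensor([0, 0, 1, 1, 1])
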
torch.roll(input, shifts, dims=None) → Tensor

Roll the tensor along the given dimension(s). Elements that are shifted beyond the last position are re-introduced at the first position. If a dimension is not specified, the tensor will be flattened before rolling and then restored to the original shape.

Parameters

• input (Tensor) – the input tensor

• shifts (int or tuple of python:ints) – The number of places by which the elements of the tensor are shifted. If shifts is a tuple, dims must be a tuple of the same size, and each dimension will be rolled by the corresponding value

• dims (int or tuple of python:ints) – Axis along which to roll

Example:

>>> x = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]).view(4, 2)
>>> x
tensor([[1, 2],
        [3, 4],
        [5, 6],
        [7, 8]])
>>> torch.roll(x, 1, 0)
tensor([[7, 8],
        [1, 2],
        [3, 4],
        [5, 6]])
>>> torch.roll(x, -1, 0)
tensor([[3, 4],
        [5, 6],
        [7, 8],
        [1, 2]])
>>> torch.roll(x, shifts=(2, 1), dims=(0, 1))
tensor([[6, 5],
        [8, 7],
        [2, 1],
        [4, 3]])

torch.tensordot(a, b, dims=2)[source]

Returns a contraction of a and b over multiple dimensions.

tensordot implements a generalized matrix product.

Parameters

• a (Tensor) – Left tensor to contract

• b (Tensor) – Right tensor to contract

• dims (int or tuple of two lists of python:integers) – number of dimensions to contract or explicit lists of dimensions for a and b respectively

When called with an integer argument dims = \(d\), and the number of dimensions of a and b is \(m\) and \(n\), respectively, it computes

\[r_{i_0,\dots,i_{m-d}, i_d,\dots,i_n}
    = \sum_{k_0,\dots,k_{d-1}} a_{i_0,\dots,i_{m-d},k_0,\dots,k_{d-1}} \times b_{k_0,\dots,k_{d-1}, i_d,\dots,i_n}.\]

When called with dims of the list form, the given dimensions will be contracted in place of the last \(d\) of a and the first \(d\) of b. The sizes in these dimensions must match, but tensordot will deal with broadcasted dimensions.

Examples:

>>> a = torch.arange(60.).reshape(3, 4, 5)
>>> b = torch.arange(24.).reshape(4, 3, 2)
>>> torch.tensordot(a, b, dims=([1, 0], [0, 1]))
tensor([[4400., 4730.],
        [4532., 4874.],
        [4664., 5018.],
        [4796., 5162.],
        [4928., 5306.]])

>>> a = torch.randn(3, 4, 5, device='cuda')
>>> b = torch.randn(4, 5, 6, device='cuda')
>>> torch.tensordot(a, b, dims=2).cpu()
tensor([[ 8.3504, -2.5436,  6.2922,  2.7556, -1.0732,  3.2741],
        [ 3.3161,  0.0704,  5.0187, -0.4079, -4.3126,  4.8744],
        [ 0.8223,  3.9445,  3.2168, -0.2400,  3.4117,  1.7780]])

torch.trace(input) → Tensor

Returns the sum of the elements of the diagonal of the input 2-D matrix.

Example:

>>> x = torch.arange(1., 10.).view(3, 3)
>>> x
tensor([[ 1.,  2.,  3.],
        [ 4.,  5.,  6.],
        [ 7.,  8.,  9.]])
>>> torch.trace(x)
tensor(15.)

torch.tril(input, diagonal=0, out=None) → Tensor

Returns the lower triangular part of the matrix (2-D tensor) or batch of matrices input; the other elements of the result tensor out are set to 0.

The lower triangular part of the matrix is defined as the elements on and below the diagonal.

The argument diagonal controls which diagonal to consider. If diagonal = 0, all elements on and below the main diagonal are retained. A positive value includes just as many diagonals above the main diagonal, and similarly a negative value excludes just as many diagonals below the main diagonal. The main diagonal is the set of indices \(\lbrace (i, i) \rbrace\) for \(i \in [0, \min\{d_{1}, d_{2}\} - 1]\) where \(d_{1}, d_{2}\) are the dimensions of the matrix.

Parameters

• input (Tensor) – the input tensor

• diagonal (int, optional) – the diagonal to consider

• out (Tensor, optional) – the output tensor

Example:

>>> a = torch.randn(3, 3)
>>> a
tensor([[-1.0813, -0.8619,  0.7105],
        [ 0.0935,  0.1380,  2.2112],
        [-0.3409, -0.9828,  0.0289]])
>>> torch.tril(a)
tensor([[-1.0813,  0.0000,  0.0000],
        [ 0.0935,  0.1380,  0.0000],
        [-0.3409, -0.9828,  0.0289]])

>>> b = torch.randn(4, 6)
>>> b
tensor([[ 1.2219,  0.5653, -0.2521, -0.2345,  1.2544,  0.3461],
        [ 0.4785, -0.4477,  0.6049,  0.6368,  0.8775,  0.7145],
        [ 1.1502,  3.2716, -1.1243, -0.5413,  0.3615,  0.6864],
        [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024,  0.0978]])
>>> torch.tril(b, diagonal=1)
tensor([[ 1.2219,  0.5653,  0.0000,  0.0000,  0.0000,  0.0000],
        [ 0.4785, -0.4477,  0.6049,  0.0000,  0.0000,  0.0000],
        [ 1.1502,  3.2716, -1.1243, -0.5413,  0.0000,  0.0000],
        [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024,  0.0000]])
>>> torch.tril(b, diagonal=-1)
tensor([[ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
        [ 0.4785,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
        [ 1.1502,  3.2716,  0.0000,  0.0000,  0.0000,  0.0000],
        [-0.0614, -0.7344, -1.3164,  0.0000,  0.0000,  0.0000]])

    + +
    +
    +torch.tril_indices(row, col, offset=0, dtype=torch.long, device='cpu', layout=torch.strided) → Tensor
    +

    Returns the indices of the lower triangular part of a row-by- +col matrix in a 2-by-N Tensor, where the first row contains row +coordinates of all indices and the second row contains column coordinates. +Indices are ordered based on rows and then columns.

    +

    The lower triangular part of the matrix is defined as the elements on and +below the diagonal.

    +

    The argument offset controls which diagonal to consider. If +offset = 0, all elements on and below the main diagonal are +retained. A positive value includes just as many diagonals above the main +diagonal, and similarly a negative value excludes just as many diagonals below +the main diagonal. The main diagonal are the set of indices +\(\lbrace (i, i) \rbrace\) for \(i \in [0, \min\{d_{1}, d_{2}\} - 1]\) +where \(d_{1}, d_{2}\) are the dimensions of the matrix.

    +

    NOTE: when running on ‘cuda’, row * col must be less than \(2^{59}\) to +prevent overflow during calculation.

    +
    +
    Parameters
    +
      +
    • row (int) – number of rows in the 2-D matrix.

    • +
    • col (int) – number of columns in the 2-D matrix.

    • +
• offset (int) – diagonal offset from the main diagonal. Default: if not provided, 0.

    • +
• dtype (torch.dtype, optional) – the desired data type of returned tensor. Default: if None, torch.long.

    • +
• device (torch.device, optional) – the desired device of returned tensor. Default: if None, uses the current device for the default tensor type (see torch.set_default_tensor_type()). device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types.

    • +
    • layout (torch.layout, optional) – currently only support torch.strided.

    • +
    +
    +
    +
    +
Example:
    >>> a = torch.tril_indices(3, 3)
    +>>> a
    +tensor([[0, 1, 1, 2, 2, 2],
    +        [0, 0, 1, 0, 1, 2]])
    +
    +
    +
    >>> a = torch.tril_indices(4, 3, -1)
    +>>> a
    +tensor([[1, 2, 2, 3, 3, 3],
    +        [0, 0, 1, 0, 1, 2]])
    +
    +
    +
    >>> a = torch.tril_indices(4, 3, 1)
    +>>> a
    +tensor([[0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3],
    +        [0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2]])
    +
    +
    +
    +
    +
    + +
    +
    +torch.triu(input, diagonal=0, out=None) → Tensor
    +

Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices input; the other elements of the result tensor out are set to 0.

    +

The upper triangular part of the matrix is defined as the elements on and above the diagonal.

    +

The argument diagonal controls which diagonal to consider. If diagonal = 0, all elements on and above the main diagonal are retained. A positive value excludes just as many diagonals above the main diagonal, and similarly a negative value includes just as many diagonals below the main diagonal. The main diagonal is the set of indices \(\lbrace (i, i) \rbrace\) for \(i \in [0, \min\{d_{1}, d_{2}\} - 1]\) where \(d_{1}, d_{2}\) are the dimensions of the matrix.

    +
    +
    Parameters
    +
      +
    • input (Tensor) – the input tensor

    • +
    • diagonal (int, optional) – the diagonal to consider

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> a = torch.randn(3, 3)
    +>>> a
    +tensor([[ 0.2309,  0.5207,  2.0049],
    +        [ 0.2072, -1.0680,  0.6602],
    +        [ 0.3480, -0.5211, -0.4573]])
    +>>> torch.triu(a)
    +tensor([[ 0.2309,  0.5207,  2.0049],
    +        [ 0.0000, -1.0680,  0.6602],
    +        [ 0.0000,  0.0000, -0.4573]])
    +>>> torch.triu(a, diagonal=1)
    +tensor([[ 0.0000,  0.5207,  2.0049],
    +        [ 0.0000,  0.0000,  0.6602],
    +        [ 0.0000,  0.0000,  0.0000]])
    +>>> torch.triu(a, diagonal=-1)
    +tensor([[ 0.2309,  0.5207,  2.0049],
    +        [ 0.2072, -1.0680,  0.6602],
    +        [ 0.0000, -0.5211, -0.4573]])
    +
    +>>> b = torch.randn(4, 6)
    +>>> b
    +tensor([[ 0.5876, -0.0794, -1.8373,  0.6654,  0.2604,  1.5235],
    +        [-0.2447,  0.9556, -1.2919,  1.3378, -0.1768, -1.0857],
    +        [ 0.4333,  0.3146,  0.6576, -1.0432,  0.9348, -0.4410],
    +        [-0.9888,  1.0679, -1.3337, -1.6556,  0.4798,  0.2830]])
    +>>> torch.triu(b, diagonal=1)
    +tensor([[ 0.0000, -0.0794, -1.8373,  0.6654,  0.2604,  1.5235],
    +        [ 0.0000,  0.0000, -1.2919,  1.3378, -0.1768, -1.0857],
    +        [ 0.0000,  0.0000,  0.0000, -1.0432,  0.9348, -0.4410],
    +        [ 0.0000,  0.0000,  0.0000,  0.0000,  0.4798,  0.2830]])
    +>>> torch.triu(b, diagonal=-1)
    +tensor([[ 0.5876, -0.0794, -1.8373,  0.6654,  0.2604,  1.5235],
    +        [-0.2447,  0.9556, -1.2919,  1.3378, -0.1768, -1.0857],
    +        [ 0.0000,  0.3146,  0.6576, -1.0432,  0.9348, -0.4410],
    +        [ 0.0000,  0.0000, -1.3337, -1.6556,  0.4798,  0.2830]])
    +
    +
    +
    + +
    +
    +torch.triu_indices(row, col, offset=0, dtype=torch.long, device='cpu', layout=torch.strided) → Tensor
    +

Returns the indices of the upper triangular part of a row-by-col matrix in a 2-by-N Tensor, where the first row contains row coordinates of all indices and the second row contains column coordinates. Indices are ordered based on rows and then columns.

    +

The upper triangular part of the matrix is defined as the elements on and above the diagonal.

    +

The argument offset controls which diagonal to consider. If offset = 0, all elements on and above the main diagonal are retained. A positive value excludes just as many diagonals above the main diagonal, and similarly a negative value includes just as many diagonals below the main diagonal. The main diagonal is the set of indices \(\lbrace (i, i) \rbrace\) for \(i \in [0, \min\{d_{1}, d_{2}\} - 1]\) where \(d_{1}, d_{2}\) are the dimensions of the matrix.

    +

NOTE: when running on ‘cuda’, row * col must be less than \(2^{59}\) to prevent overflow during calculation.

    +
    +
    Parameters
    +
      +
    • row (int) – number of rows in the 2-D matrix.

    • +
    • col (int) – number of columns in the 2-D matrix.

    • +
• offset (int) – diagonal offset from the main diagonal. Default: if not provided, 0.

    • +
• dtype (torch.dtype, optional) – the desired data type of returned tensor. Default: if None, torch.long.

    • +
• device (torch.device, optional) – the desired device of returned tensor. Default: if None, uses the current device for the default tensor type (see torch.set_default_tensor_type()). device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types.

    • +
    • layout (torch.layout, optional) – currently only support torch.strided.

    • +
    +
    +
    +
    +
Example:
    >>> a = torch.triu_indices(3, 3)
    +>>> a
    +tensor([[0, 0, 0, 1, 1, 2],
    +        [0, 1, 2, 1, 2, 2]])
    +
    +
    +
    >>> a = torch.triu_indices(4, 3, -1)
    +>>> a
    +tensor([[0, 0, 0, 1, 1, 1, 2, 2, 3],
    +        [0, 1, 2, 0, 1, 2, 1, 2, 2]])
    +
    +
    +
    >>> a = torch.triu_indices(4, 3, 1)
    +>>> a
    +tensor([[0, 0, 1],
    +        [1, 2, 2]])
    +
    +
    +
    +
    +
    + +
    +
    +

    BLAS and LAPACK Operations

    +
    +
    +torch.addbmm(beta=1, input, alpha=1, batch1, batch2, out=None) → Tensor
    +

Performs a batch matrix-matrix product of matrices stored in batch1 and batch2, with a reduced add step (all matrix multiplications get accumulated along the first dimension). input is added to the final result.

    +

batch1 and batch2 must be 3-D tensors each containing the same number of matrices.

    +

If batch1 is a \((b \times n \times m)\) tensor, batch2 is a \((b \times m \times p)\) tensor, input must be broadcastable with a \((n \times p)\) tensor and out will be a \((n \times p)\) tensor.

    +
+\[\text{out} = \beta\ \text{input} + \alpha\ \left(\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i\right)\]
    +

For inputs of type FloatTensor or DoubleTensor, arguments beta and alpha must be real numbers, otherwise they should be integers.

    +
    +
    Parameters
    +
      +
    • beta (Number, optional) – multiplier for input (\(\beta\))

    • +
    • input (Tensor) – matrix to be added

    • +
    • alpha (Number, optional) – multiplier for batch1 @ batch2 (\(\alpha\))

    • +
    • batch1 (Tensor) – the first batch of matrices to be multiplied

    • +
    • batch2 (Tensor) – the second batch of matrices to be multiplied

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> M = torch.randn(3, 5)
    +>>> batch1 = torch.randn(10, 3, 4)
    +>>> batch2 = torch.randn(10, 4, 5)
    +>>> torch.addbmm(M, batch1, batch2)
    +tensor([[  6.6311,   0.0503,   6.9768, -12.0362,  -2.1653],
    +        [ -4.8185,  -1.4255,  -6.6760,   8.9453,   2.5743],
    +        [ -3.8202,   4.3691,   1.0943,  -1.1109,   5.4730]])
    +
    +
    +
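The added input is broadcast against the \((n \times p)\) result, so a 1-D bias works as well. A small illustrative sketch (shapes only, since the values are random):

>>> batch1 = torch.randn(10, 3, 4)
>>> batch2 = torch.randn(10, 4, 5)
>>> bias = torch.randn(5)    # broadcastable with (3, 5)
>>> torch.addbmm(bias, batch1, batch2).shape
torch.Size([3, 5])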
    + +
    +
    +torch.addmm(beta=1, input, alpha=1, mat1, mat2, out=None) → Tensor
    +

Performs a matrix multiplication of the matrices mat1 and mat2. The matrix input is added to the final result.

    +

If mat1 is a \((n \times m)\) tensor, mat2 is a \((m \times p)\) tensor, then input must be broadcastable with a \((n \times p)\) tensor and out will be a \((n \times p)\) tensor.

    +

alpha and beta are scaling factors on the matrix-matrix product between mat1 and mat2 and the added matrix input respectively.

    +
+\[\text{out} = \beta\ \text{input} + \alpha\ (\text{mat1} \mathbin{@} \text{mat2})\]
    +

For inputs of type FloatTensor or DoubleTensor, arguments beta and alpha must be real numbers, otherwise they should be integers.

    +
    +
    Parameters
    +
      +
    • beta (Number, optional) – multiplier for input (\(\beta\))

    • +
    • input (Tensor) – matrix to be added

    • +
    • alpha (Number, optional) – multiplier for \(mat1 @ mat2\) (\(\alpha\))

    • +
    • mat1 (Tensor) – the first matrix to be multiplied

    • +
    • mat2 (Tensor) – the second matrix to be multiplied

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> M = torch.randn(2, 3)
    +>>> mat1 = torch.randn(2, 3)
    +>>> mat2 = torch.randn(3, 3)
    +>>> torch.addmm(M, mat1, mat2)
    +tensor([[-4.8716,  1.4671, -1.3746],
    +        [ 0.7573, -3.9555, -2.8681]])
    +
    +
    +
    + +
    +
    +torch.addmv(beta=1, input, alpha=1, mat, vec, out=None) → Tensor
    +

Performs a matrix-vector product of the matrix mat and the vector vec. The vector input is added to the final result.

    +

If mat is a \((n \times m)\) tensor, vec is a 1-D tensor of size m, then input must be broadcastable with a 1-D tensor of size n and out will be a 1-D tensor of size n.

    +

alpha and beta are scaling factors on the matrix-vector product between mat and vec and the added tensor input respectively.

    +
+\[\text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec})\]
    +

For inputs of type FloatTensor or DoubleTensor, arguments beta and alpha must be real numbers, otherwise they should be integers.

    +
    +
    Parameters
    +
      +
    • beta (Number, optional) – multiplier for input (\(\beta\))

    • +
    • input (Tensor) – vector to be added

    • +
    • alpha (Number, optional) – multiplier for \(mat @ vec\) (\(\alpha\))

    • +
    • mat (Tensor) – matrix to be multiplied

    • +
    • vec (Tensor) – vector to be multiplied

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> M = torch.randn(2)
    +>>> mat = torch.randn(2, 3)
    +>>> vec = torch.randn(3)
    +>>> torch.addmv(M, mat, vec)
    +tensor([-0.3768, -5.5565])
    +
    +
    +
    + +
    +
    +torch.addr(beta=1, input, alpha=1, vec1, vec2, out=None) → Tensor
    +

Performs the outer-product of vectors vec1 and vec2 and adds it to the matrix input.

    +

Optional values beta and alpha are scaling factors on the outer product between vec1 and vec2 and the added matrix input respectively.

    +
+\[\text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2})\]
    +

If vec1 is a vector of size n and vec2 is a vector of size m, then input must be broadcastable with a matrix of size \((n \times m)\) and out will be a matrix of size \((n \times m)\).

    +

For inputs of type FloatTensor or DoubleTensor, arguments beta and alpha must be real numbers, otherwise they should be integers.

    +
    +
    Parameters
    +
      +
    • beta (Number, optional) – multiplier for input (\(\beta\))

    • +
    • input (Tensor) – matrix to be added

    • +
    • alpha (Number, optional) – multiplier for \(\text{vec1} \otimes \text{vec2}\) (\(\alpha\))

    • +
    • vec1 (Tensor) – the first vector of the outer product

    • +
    • vec2 (Tensor) – the second vector of the outer product

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> vec1 = torch.arange(1., 4.)
    +>>> vec2 = torch.arange(1., 3.)
    +>>> M = torch.zeros(3, 2)
    +>>> torch.addr(M, vec1, vec2)
    +tensor([[ 1.,  2.],
    +        [ 2.,  4.],
    +        [ 3.,  6.]])
    +
    +
    +
    + +
    +
    +torch.baddbmm(beta=1, input, alpha=1, batch1, batch2, out=None) → Tensor
    +

Performs a batch matrix-matrix product of matrices in batch1 and batch2. input is added to the final result.

    +

batch1 and batch2 must be 3-D tensors each containing the same number of matrices.

    +

If batch1 is a \((b \times n \times m)\) tensor, batch2 is a \((b \times m \times p)\) tensor, then input must be broadcastable with a \((b \times n \times p)\) tensor and out will be a \((b \times n \times p)\) tensor. Both alpha and beta mean the same as the scaling factors used in torch.addbmm().

    +
+\[\text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i)\]
    +

For inputs of type FloatTensor or DoubleTensor, arguments beta and alpha must be real numbers, otherwise they should be integers.

    +
    +
    Parameters
    +
      +
    • beta (Number, optional) – multiplier for input (\(\beta\))

    • +
    • input (Tensor) – the tensor to be added

    • +
    • alpha (Number, optional) – multiplier for \(\text{batch1} \mathbin{@} \text{batch2}\) (\(\alpha\))

    • +
    • batch1 (Tensor) – the first batch of matrices to be multiplied

    • +
    • batch2 (Tensor) – the second batch of matrices to be multiplied

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> M = torch.randn(10, 3, 5)
    +>>> batch1 = torch.randn(10, 3, 4)
    +>>> batch2 = torch.randn(10, 4, 5)
    +>>> torch.baddbmm(M, batch1, batch2).size()
    +torch.Size([10, 3, 5])
    +
    +
    +
    + +
    +
    +torch.bmm(input, mat2, out=None) → Tensor
    +

Performs a batch matrix-matrix product of matrices stored in input and mat2.

    +

input and mat2 must be 3-D tensors each containing the same number of matrices.

    +

If input is a \((b \times n \times m)\) tensor, mat2 is a \((b \times m \times p)\) tensor, out will be a \((b \times n \times p)\) tensor.

    +
+\[\text{out}_i = \text{input}_i \mathbin{@} \text{mat2}_i\]
    +
    +

    Note

    +

This function does not broadcast. For broadcasting matrix products, see torch.matmul().

    +
    +
    +
    Parameters
    +
      +
    • input (Tensor) – the first batch of matrices to be multiplied

    • +
    • mat2 (Tensor) – the second batch of matrices to be multiplied

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> input = torch.randn(10, 3, 4)
    +>>> mat2 = torch.randn(10, 4, 5)
    +>>> res = torch.bmm(input, mat2)
    +>>> res.size()
    +torch.Size([10, 3, 5])
    +
    +
    +
    + +
    +
    +torch.bitwise_not(input, out=None) → Tensor
    +

Computes the bitwise NOT of the given input tensor. The input must be of integral or Boolean types.

    +
    +
    Parameters
    +
      +
    • input (Tensor) – the input tensor

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +

Example:

    +
    >>> torch.bitwise_not(torch.tensor([-1, -2, 3], dtype=torch.int8))
    +tensor([ 0,  1, -4], dtype=torch.int8)
    +
    +
    +
    + +
    +
    +torch.chain_matmul(*matrices)[source]
    +

Returns the matrix product of the \(N\) 2-D tensors. This product is efficiently computed using the matrix chain order algorithm, which selects the order that incurs the lowest cost in terms of arithmetic operations ([CLRS]). Note that since this is a function to compute the product, \(N\) needs to be greater than or equal to 2; if equal to 2 then a trivial matrix-matrix product is returned. If \(N\) is 1, then this is a no-op - the original matrix is returned as is.

    +
    +
    Parameters
    +

    matrices (Tensors...) – a sequence of 2 or more 2-D tensors whose product is to be determined.

    +
    +
    Returns
    +

if the \(i^{th}\) tensor was of dimensions \(p_{i} \times p_{i + 1}\), then the product would be of dimensions \(p_{1} \times p_{N + 1}\).

    +
    +
    Return type
    +

    Tensor

    +
    +
    +

    Example:

    +
    >>> a = torch.randn(3, 4)
    +>>> b = torch.randn(4, 5)
    +>>> c = torch.randn(5, 6)
    +>>> d = torch.randn(6, 7)
    +>>> torch.chain_matmul(a, b, c, d)
    +tensor([[ -2.3375,  -3.9790,  -4.1119,  -6.6577,   9.5609, -11.5095,  -3.2614],
    +        [ 21.4038,   3.3378,  -8.4982,  -5.2457, -10.2561,  -2.4684,   2.7163],
    +        [ -0.9647,  -5.8917,  -2.3213,  -5.2284,  12.8615, -12.2816,  -2.5095]])
    +
    +
    +
    + +
    +
    +torch.cholesky(input, upper=False, out=None) → Tensor
    +

Computes the Cholesky decomposition of a symmetric positive-definite matrix \(A\) or for batches of symmetric positive-definite matrices.

    +

If upper is True, the returned matrix U is upper-triangular, and the decomposition has the form:

    +
    +\[A = U^TU\]
    +

If upper is False, the returned matrix L is lower-triangular, and the decomposition has the form:

    +
    +\[A = LL^T\]
    +

If upper is True, and \(A\) is a batch of symmetric positive-definite matrices, then the returned tensor will be composed of upper-triangular Cholesky factors of each of the individual matrices. Similarly, when upper is False, the returned tensor will be composed of lower-triangular Cholesky factors of each of the individual matrices.

    +
    +
    Parameters
    +
      +
• input (Tensor) – the input tensor \(A\) of size \((*, n, n)\) where * is zero or more batch dimensions consisting of symmetric positive-definite matrices.

    • +
• upper (bool, optional) – flag that indicates whether to return an upper or lower triangular matrix. Default: False

    • +
    • out (Tensor, optional) – the output matrix

    • +
    +
    +
    +

    Example:

    +
    >>> a = torch.randn(3, 3)
    +>>> a = torch.mm(a, a.t()) # make symmetric positive-definite
    +>>> l = torch.cholesky(a)
    +>>> a
    +tensor([[ 2.4112, -0.7486,  1.4551],
    +        [-0.7486,  1.3544,  0.1294],
    +        [ 1.4551,  0.1294,  1.6724]])
    +>>> l
    +tensor([[ 1.5528,  0.0000,  0.0000],
    +        [-0.4821,  1.0592,  0.0000],
    +        [ 0.9371,  0.5487,  0.7023]])
    +>>> torch.mm(l, l.t())
    +tensor([[ 2.4112, -0.7486,  1.4551],
    +        [-0.7486,  1.3544,  0.1294],
    +        [ 1.4551,  0.1294,  1.6724]])
    +>>> a = torch.randn(3, 2, 2)
    +>>> a = torch.matmul(a, a.transpose(-1, -2)) + 1e-03 # make symmetric positive-definite
    +>>> l = torch.cholesky(a)
    +>>> z = torch.matmul(l, l.transpose(-1, -2))
    +>>> torch.max(torch.abs(z - a)) # Max non-zero
    +tensor(2.3842e-07)
    +
    +
    +
    + +
    +
    +torch.cholesky_inverse(input, upper=False, out=None) → Tensor
    +

Computes the inverse of a symmetric positive-definite matrix \(A\) using its Cholesky factor \(u\): returns matrix inv. The inverse is computed using LAPACK routines dpotri and spotri (and the corresponding MAGMA routines).

    +

If upper is False, \(u\) is lower triangular such that the returned tensor is

    +
+\[inv = (uu^{T})^{-1}\]
    +

If upper is True, \(u\) is upper triangular such that the returned tensor is

    +
+\[inv = (u^T u)^{-1}\]
    +
    +
    Parameters
    +
      +
• input (Tensor) – the input 2-D tensor \(u\), an upper or lower triangular Cholesky factor

    • +
    • upper (bool, optional) – whether to return a lower (default) or upper triangular matrix

    • +
    • out (Tensor, optional) – the output tensor for inv

    • +
    +
    +
    +

    Example:

    +
    >>> a = torch.randn(3, 3)
    +>>> a = torch.mm(a, a.t()) + 1e-05 * torch.eye(3) # make symmetric positive definite
    +>>> u = torch.cholesky(a)
    +>>> a
    +tensor([[  0.9935,  -0.6353,   1.5806],
    +        [ -0.6353,   0.8769,  -1.7183],
    +        [  1.5806,  -1.7183,  10.6618]])
    +>>> torch.cholesky_inverse(u)
    +tensor([[ 1.9314,  1.2251, -0.0889],
    +        [ 1.2251,  2.4439,  0.2122],
    +        [-0.0889,  0.2122,  0.1412]])
    +>>> a.inverse()
    +tensor([[ 1.9314,  1.2251, -0.0889],
    +        [ 1.2251,  2.4439,  0.2122],
    +        [-0.0889,  0.2122,  0.1412]])
    +
    +
    +
    + +
    +
    +torch.cholesky_solve(input, input2, upper=False, out=None) → Tensor
    +

Solves a linear system of equations with a positive semidefinite matrix to be inverted given its Cholesky factor matrix \(u\).

    +

If upper is False, \(u\) is lower triangular and c is returned such that:

    +
+\[c = (u u^T)^{-1} b\]
    +

If upper is True, \(u\) is upper triangular and c is returned such that:

    +
+\[c = (u^T u)^{-1} b\]
    +

torch.cholesky_solve(b, u) can take in 2D inputs b, u or inputs that are batches of 2D matrices. If the inputs are batches, then batched outputs c are returned.

    +
    +

    Note

    +

The out keyword only supports 2D matrix inputs, that is, b, u must be 2D matrices.

    +
    +
    +
    Parameters
    +
      +
• input (Tensor) – input matrix \(b\) of size \((*, m, k)\), where \(*\) is zero or more batch dimensions

    • +
• input2 (Tensor) – input matrix \(u\) of size \((*, m, m)\), where \(*\) is zero or more batch dimensions composed of upper or lower triangular Cholesky factor

    • +
• upper (bool, optional) – whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False.

    • +
    • out (Tensor, optional) – the output tensor for c

    • +
    +
    +
    +

    Example:

    +
    >>> a = torch.randn(3, 3)
    +>>> a = torch.mm(a, a.t()) # make symmetric positive definite
    +>>> u = torch.cholesky(a)
    +>>> a
    +tensor([[ 0.7747, -1.9549,  1.3086],
    +        [-1.9549,  6.7546, -5.4114],
    +        [ 1.3086, -5.4114,  4.8733]])
    +>>> b = torch.randn(3, 2)
    +>>> b
    +tensor([[-0.6355,  0.9891],
    +        [ 0.1974,  1.4706],
    +        [-0.4115, -0.6225]])
    +>>> torch.cholesky_solve(b, u)
    +tensor([[ -8.1625,  19.6097],
    +        [ -5.8398,  14.2387],
    +        [ -4.3771,  10.4173]])
    +>>> torch.mm(a.inverse(), b)
    +tensor([[ -8.1626,  19.6097],
    +        [ -5.8398,  14.2387],
    +        [ -4.3771,  10.4173]])
    +
    +
    +
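A batched sketch (assuming well-conditioned random SPD matrices, so the loose tolerance below should hold):

>>> A = torch.randn(3, 2, 2)
>>> A = torch.matmul(A, A.transpose(-1, -2)) + torch.eye(2)  # batch of SPD matrices
>>> u = torch.cholesky(A)
>>> b = torch.randn(3, 2, 4)
>>> x = torch.cholesky_solve(b, u)
>>> torch.allclose(torch.matmul(A, x), b, atol=1e-4)
True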
    + +
    +
    +torch.dot(input, tensor) → Tensor
    +

    Computes the dot product (inner product) of two tensors.

    +
    +

    Note

    +

    This function does not broadcast.

    +
    +

    Example:

    +
    >>> torch.dot(torch.tensor([2, 3]), torch.tensor([2, 1]))
    +tensor(7)
    +
    +
    +
    + +
    +
    +torch.eig(input, eigenvectors=False, out=None) -> (Tensor, Tensor)
    +

    Computes the eigenvalues and eigenvectors of a real square matrix.

    +
    +

    Note

    +

Since eigenvalues and eigenvectors might be complex, the backward pass is supported only for torch.symeig().

    +
    +
    +
    Parameters
    +
      +
• input (Tensor) – the square matrix of shape \((n \times n)\) for which the eigenvalues and eigenvectors will be computed

    • +
• eigenvectors (bool) – True to compute both eigenvalues and eigenvectors; otherwise, only eigenvalues will be computed

    • +
    • out (tuple, optional) – the output tensors

    • +
    +
    +
    Returns
    +

    A namedtuple (eigenvalues, eigenvectors) containing

    +
    +
      +
• eigenvalues (Tensor): Shape \((n \times 2)\). Each row is an eigenvalue of input, where the first element is the real part and the second element is the imaginary part. The eigenvalues are not necessarily ordered.

    • +
• eigenvectors (Tensor): If eigenvectors=False, it’s an empty tensor. Otherwise, this tensor of shape \((n \times n)\) can be used to compute normalized (unit length) eigenvectors of corresponding eigenvalues as follows. If the corresponding eigenvalues[j] is a real number, column eigenvectors[:, j] is the eigenvector corresponding to eigenvalues[j]. If the corresponding eigenvalues[j] and eigenvalues[j + 1] form a complex conjugate pair, then the true eigenvectors can be computed as \(\text{true eigenvector}[j] = eigenvectors[:, j] + i \times eigenvectors[:, j + 1]\), \(\text{true eigenvector}[j + 1] = eigenvectors[:, j] - i \times eigenvectors[:, j + 1]\).

    • +
    +
    +

    +
    +
    Return type
    +

    (Tensor, Tensor)

    +
    +
    +
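Example (a minimal sketch; only shapes are shown, since the values depend on the random input):

>>> a = torch.randn(4, 4)
>>> e, v = torch.eig(a, eigenvectors=True)
>>> e.shape   # one (real, imaginary) pair per eigenvalue
torch.Size([4, 2])
>>> v.shape
torch.Size([4, 4])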
    + +
    +
    +torch.gels(input, A, out=None)[source]
    +

Computes the solution to the least squares and least norm problems for a full rank matrix \(A\) of size \((m \times n)\) and a matrix \(B\) of size \((m \times k)\).

    +

    For more information regarding torch.gels(), please check torch.lstsq().

    +
    +

    Warning

    +

torch.gels() is deprecated in favour of torch.lstsq() and will be removed in the next release. Please use torch.lstsq() instead.

    +
    +
    + +
    +
    +torch.geqrf(input, out=None) -> (Tensor, Tensor)
    +

This is a low-level function for calling LAPACK directly. This function returns a namedtuple (a, tau) as defined in LAPACK documentation for geqrf.

    +

    You’ll generally want to use torch.qr() instead.

    +

Computes a QR decomposition of input, but without constructing \(Q\) and \(R\) as explicit separate matrices.

    +

Rather, this directly calls the underlying LAPACK function ?geqrf which produces a sequence of ‘elementary reflectors’.

    +

    See LAPACK documentation for geqrf for further details.

    +
    +
    Parameters
    +
      +
    • input (Tensor) – the input matrix

    • +
    • out (tuple, optional) – the output tuple of (Tensor, Tensor)

    • +
    +
    +
    +
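Example (a minimal sketch of the raw outputs; R can be read off the upper triangle of a, while Q stays encoded in the reflectors and tau):

>>> A = torch.randn(4, 3)
>>> a, tau = torch.geqrf(A)
>>> a.shape, tau.shape
(torch.Size([4, 3]), torch.Size([3]))
>>> r = a.triu()   # the upper triangle of a holds R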
    + +
    +
    +torch.ger(input, vec2, out=None) → Tensor
    +

Outer product of input and vec2. If input is a vector of size \(n\) and vec2 is a vector of size \(m\), then out must be a matrix of size \((n \times m)\).

    +
    +

    Note

    +

    This function does not broadcast.

    +
    +
    +
    Parameters
    +
      +
    • input (Tensor) – 1-D input vector

    • +
    • vec2 (Tensor) – 1-D input vector

    • +
    • out (Tensor, optional) – optional output matrix

    • +
    +
    +
    +

    Example:

    +
    >>> v1 = torch.arange(1., 5.)
    +>>> v2 = torch.arange(1., 4.)
    +>>> torch.ger(v1, v2)
    +tensor([[  1.,   2.,   3.],
    +        [  2.,   4.,   6.],
    +        [  3.,   6.,   9.],
    +        [  4.,   8.,  12.]])
    +
    +
    +
    + +
    +
    +torch.inverse(input, out=None) → Tensor
    +

Takes the inverse of the square matrix input. input can be batches of 2D square tensors, in which case this function would return a tensor composed of individual inverses.

    +
    +

    Note

    +

Irrespective of the original strides, the returned tensors will be transposed, i.e. with strides like input.contiguous().transpose(-2, -1).stride().

    +
    +
    +
    Parameters
    +
      +
• input (Tensor) – the input tensor of size \((*, n, n)\) where * is zero or more batch dimensions

    • +
    • out (Tensor, optional) – the optional output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> x = torch.rand(4, 4)
    +>>> y = torch.inverse(x)
    +>>> z = torch.mm(x, y)
    +>>> z
    +tensor([[ 1.0000, -0.0000, -0.0000,  0.0000],
    +        [ 0.0000,  1.0000,  0.0000,  0.0000],
    +        [ 0.0000,  0.0000,  1.0000,  0.0000],
    +        [ 0.0000, -0.0000, -0.0000,  1.0000]])
    +>>> torch.max(torch.abs(z - torch.eye(4))) # Max non-zero
    +tensor(1.1921e-07)
    +>>> # Batched inverse example
    +>>> x = torch.randn(2, 3, 4, 4)
    +>>> y = torch.inverse(x)
    +>>> z = torch.matmul(x, y)
    +>>> torch.max(torch.abs(z - torch.eye(4).expand_as(x))) # Max non-zero
    +tensor(1.9073e-06)
    +
    +
    +
    + +
    +
    +torch.det(input) → Tensor
    +

Calculates the determinant of a 2D square tensor.

    +
    +

    Note

    +

Backward through det() internally uses SVD results when input is not invertible. In this case, double backward through det() will be unstable when input doesn’t have distinct singular values. See svd() for details.

    +
    +
    +
    Parameters
    +

    input (Tensor) – The input 2D square tensor

    +
    +
    +

    Example:

    +
    >>> A = torch.randn(3, 3)
    +>>> torch.det(A)
    +tensor(3.7641)
    +
    +
    +
    + +
    +
    +torch.logdet(input) → Tensor
    +

Calculates the log determinant of a 2D square tensor.

    +
    +

    Note

    +

Result is -inf if input has zero determinant, and is nan if input has negative determinant.

    +
    +
    +

    Note

    +

Backward through logdet() internally uses SVD results when input is not invertible. In this case, double backward through logdet() will be unstable when input doesn’t have distinct singular values. See svd() for details.

    +
    +
    +
    Parameters
    +

    input (Tensor) – The input 2D square tensor

    +
    +
    +

    Example:

    +
    >>> A = torch.randn(3, 3)
    +>>> torch.det(A)
    +tensor(0.2611)
    +>>> torch.logdet(A)
    +tensor(-1.3430)
    +
    +
    +
    + +
    +
    +torch.slogdet(input) -> (Tensor, Tensor)
    +

Calculates the sign and log absolute value of a 2D square tensor’s determinant.

    +
    +

    Note

    +

    If input has zero determinant, this returns (0, -inf).

    +
    +
    +

    Note

    +

Backward through slogdet() internally uses SVD results when input is not invertible. In this case, double backward through slogdet() will be unstable when input doesn’t have distinct singular values. See svd() for details.

    +
    +
    +
    Parameters
    +

    input (Tensor) – The input 2D square tensor

    +
    +
    Returns
    +

A namedtuple (sign, logabsdet) containing the sign of the determinant, and the log value of the absolute determinant.

    +
    +
    +

    Example:

    +
    >>> A = torch.randn(3, 3)
    +>>> A
    +tensor([[ 0.0032, -0.2239, -1.1219],
    +        [-0.6690,  0.1161,  0.4053],
    +        [-1.6218, -0.9273, -0.0082]])
    +>>> torch.det(A)
    +tensor(-0.7576)
    +>>> torch.logdet(A)
    +tensor(nan)
    +>>> torch.slogdet(A)
    +torch.return_types.slogdet(sign=tensor(-1.), logabsdet=tensor(-0.2776))
    +
    +
    +
    + +
    +
+torch.lstsq(input, A, out=None) -> (Tensor, Tensor)
    +

Computes the solution to the least squares and least norm problems for a full rank matrix \(A\) of size \((m \times n)\) and a matrix \(B\) of size \((m \times k)\).

    +

    If \(m \geq n\), lstsq() solves the least-squares problem:

    +
+\[\begin{array}{ll} \min_X & \|AX-B\|_2. \end{array}\]
    +

    If \(m < n\), lstsq() solves the least-norm problem:

    +
+\[\begin{array}{ll} \min_X & \|X\|_2 & \text{subject to} & AX = B. \end{array}\]
    +

Returned tensor \(X\) has shape \((\max(m, n) \times k)\). The first \(n\) rows of \(X\) contain the solution. If \(m \geq n\), the residual sum of squares for the solution in each column is given by the sum of squares of elements in the remaining \(m - n\) rows of that column.

    +
    +

    Note

    +

    The case when \(m < n\) is not supported on the GPU.

    +
    +
    +
    Parameters
    +
      +
    • input (Tensor) – the matrix \(B\)

    • +
    • A (Tensor) – the \(m\) by \(n\) matrix \(A\)

    • +
    • out (tuple, optional) – the optional destination tensor

    • +
    +
    +
    Returns
    +

    A namedtuple (solution, QR) containing:

    +
    +
      +
    • solution (Tensor): the least squares solution

    • +
    • QR (Tensor): the details of the QR factorization

    • +
    +
    +

    +
    +
    Return type
    +

    (Tensor, Tensor)

    +
    +
    +
    +

    Note

    +

The returned matrices will always be transposed, irrespective of the strides of the input matrices. That is, they will have stride (1, m) instead of (m, 1).

    +
    +

    Example:

    +
    >>> A = torch.tensor([[1., 1, 1],
    +                      [2, 3, 4],
    +                      [3, 5, 2],
    +                      [4, 2, 5],
    +                      [5, 4, 3]])
    +>>> B = torch.tensor([[-10., -3],
    +                      [ 12, 14],
    +                      [ 14, 12],
    +                      [ 16, 16],
    +                      [ 18, 16]])
    +>>> X, _ = torch.lstsq(B, A)
    +>>> X
    +tensor([[  2.0000,   1.0000],
    +        [  1.0000,   1.0000],
    +        [  1.0000,   2.0000],
    +        [ 10.9635,   4.8501],
    +        [  8.9332,   5.2418]])
    +
    +
    +
    + +
    +
    +torch.lu(A, pivot=True, get_infos=False, out=None)[source]
    +

Computes the LU factorization of a square matrix or batches of square matrices A. Returns a tuple containing the LU factorization and pivots of A. Pivoting is done if pivot is set to True.

    +
    +

    Note

    +

The pivots returned by the function are 1-indexed. If pivot is False, then the returned pivots is a tensor filled with zeros of the appropriate size.

    +
    +
    +

    Note

    +

LU factorization with pivot = False is not available for CPU, and attempting to do so will throw an error. However, LU factorization with pivot = False is available for CUDA.

    +
    +
    +

    Note

    +

This function does not check if the factorization was successful or not if get_infos is True since the status of the factorization is present in the third element of the return tuple.

    +
    +
    +
    Parameters
    +
      +
    • A (Tensor) – the tensor to factor of size \((*, m, m)\)

    • +
    • pivot (bool, optional) – controls whether pivoting is done. Default: True

    • +
    • get_infos (bool, optional) – if set to True, returns an info IntTensor. +Default: False

    • +
• out (tuple, optional) – optional output tuple. If get_infos is True, then the elements in the tuple are Tensor, IntTensor, and IntTensor. If get_infos is False, then the elements in the tuple are Tensor, IntTensor. Default: None

    • +
    +
    +
    Returns
    +

    A tuple of tensors containing

    +
    +
      +
    • factorization (Tensor): the factorization of size \((*, m, m)\)

    • +
    • pivots (IntTensor): the pivots of size \((*, m)\)

    • +
• infos (IntTensor, optional): if get_infos is True, this is a tensor of size \((*)\) where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed

    • +
    +
    +

    +
    +
    Return type
    +

    (Tensor, IntTensor, IntTensor (optional))

    +
    +
    +

    Example:

    +
    >>> A = torch.randn(2, 3, 3)
    +>>> A_LU, pivots = torch.lu(A)
    +>>> A_LU
    +tensor([[[ 1.3506,  2.5558, -0.0816],
    +         [ 0.1684,  1.1551,  0.1940],
    +         [ 0.1193,  0.6189, -0.5497]],
    +
    +        [[ 0.4526,  1.2526, -0.3285],
    +         [-0.7988,  0.7175, -0.9701],
    +         [ 0.2634, -0.9255, -0.3459]]])
    +>>> pivots
    +tensor([[ 3,  3,  3],
    +        [ 3,  3,  3]], dtype=torch.int32)
    +>>> A_LU, pivots, info = torch.lu(A, get_infos=True)
    +>>> if info.nonzero().size(0) == 0:
    +...   print('LU factorization succeeded for all samples!')
    +LU factorization succeeded for all samples!
    +
    +
    +
    + +
    +
    +torch.lu_solve(input, LU_data, LU_pivots, out=None) → Tensor
    +

Returns the LU solve of the linear system \(Ax = b\) using the partially pivoted LU factorization of A from torch.lu().

    +
    +
    Parameters
    +
      +
    • input (Tensor) – the RHS tensor of size \((b, m, k)\)

    • +
    • LU_data (Tensor) – the pivoted LU factorization of A from torch.lu() of size \((b, m, m)\)

    • +
    • LU_pivots (IntTensor) – the pivots of the LU factorization from torch.lu() of size \((b, m)\)

    • +
    • out (Tensor, optional) – the optional output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> A = torch.randn(2, 3, 3)
    +>>> b = torch.randn(2, 3, 1)
    +>>> A_LU = torch.lu(A)
    +>>> x = torch.lu_solve(b, *A_LU)
    +>>> torch.norm(torch.bmm(A, x) - b)
    +tensor(1.00000e-07 *
    +       2.8312)
    +
    +
    +
    + +
    +
    +torch.lu_unpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True)[source]
    +

    Unpacks the data and pivots from a LU factorization of a tensor.

    +

    Returns a tuple of tensors as (the pivots, the L tensor, the U tensor).

    +
    +
    Parameters
    +
      +
    • LU_data (Tensor) – the packed LU factorization data

    • +
    • LU_pivots (Tensor) – the packed LU factorization pivots

    • +
    • unpack_data (bool) – flag indicating if the data should be unpacked

    • +
    • unpack_pivots (bool) – flag indicating if the pivots should be unpacked

    • +
    +
    +
    +

    Example:

    +
    >>> A = torch.randn(2, 3, 3)
    +>>> A_LU, pivots = A.lu()
    +>>> P, A_L, A_U = torch.lu_unpack(A_LU, pivots)
    +>>>
    +>>> # can recover A from factorization
    +>>> A_ = torch.bmm(P, torch.bmm(A_L, A_U))
    +
    +
    +
    + +
    +
    +torch.matmul(input, other, out=None) → Tensor
    +

    Matrix product of two tensors.

    +

    The behavior depends on the dimensionality of the tensors as follows:

    +
      +
    • If both tensors are 1-dimensional, the dot product (scalar) is returned.

    • +
    • If both arguments are 2-dimensional, the matrix-matrix product is returned.

    • +
• If the first argument is 1-dimensional and the second argument is 2-dimensional, a 1 is prepended to its dimension for the purpose of the matrix multiply. After the matrix multiply, the prepended dimension is removed.

    • +
• If the first argument is 2-dimensional and the second argument is 1-dimensional, the matrix-vector product is returned.

    • +
• If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a batched matrix multiply is returned. If the first argument is 1-dimensional, a 1 is prepended to its dimension for the purpose of the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix multiply and removed after. The non-matrix (i.e. batch) dimensions are broadcasted (and thus must be broadcastable). For example, if input is a \((j \times 1 \times n \times m)\) tensor and other is a \((k \times m \times p)\) tensor, out will be a \((j \times k \times n \times p)\) tensor.

    • +
    +
    +

    Note

    +

    The 1-dimensional dot product version of this function does not support an out parameter.

    +
    +
    +
    Parameters
    +
      +
    • input (Tensor) – the first tensor to be multiplied

    • +
    • other (Tensor) – the second tensor to be multiplied

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> # vector x vector
    +>>> tensor1 = torch.randn(3)
    +>>> tensor2 = torch.randn(3)
    +>>> torch.matmul(tensor1, tensor2).size()
    +torch.Size([])
    +>>> # matrix x vector
    +>>> tensor1 = torch.randn(3, 4)
    +>>> tensor2 = torch.randn(4)
    +>>> torch.matmul(tensor1, tensor2).size()
    +torch.Size([3])
    +>>> # batched matrix x broadcasted vector
    +>>> tensor1 = torch.randn(10, 3, 4)
    +>>> tensor2 = torch.randn(4)
    +>>> torch.matmul(tensor1, tensor2).size()
    +torch.Size([10, 3])
    +>>> # batched matrix x batched matrix
    +>>> tensor1 = torch.randn(10, 3, 4)
    +>>> tensor2 = torch.randn(10, 4, 5)
    +>>> torch.matmul(tensor1, tensor2).size()
    +torch.Size([10, 3, 5])
    +>>> # batched matrix x broadcasted matrix
    +>>> tensor1 = torch.randn(10, 3, 4)
    +>>> tensor2 = torch.randn(4, 5)
    +>>> torch.matmul(tensor1, tensor2).size()
    +torch.Size([10, 3, 5])
    +
    +
    +
    + +
    +
    +torch.matrix_power(input, n) → Tensor
    +

Returns the matrix raised to the power n for square matrices. For a batch of matrices, each individual matrix is raised to the power n.

    +

If n is negative, then the inverse of the matrix (if invertible) is raised to the power n. For a batch of matrices, the batched inverse (if invertible) is raised to the power n. If n is 0, then an identity matrix is returned.

    +
    +
    Parameters
    +
      +
    • input (Tensor) – the input tensor

    • +
    • n (int) – the power to raise the matrix to

    • +
    +
    +
    +

    Example:

    +
    >>> a = torch.randn(2, 2, 2)
    +>>> a
    +tensor([[[-1.9975, -1.9610],
    +         [ 0.9592, -2.3364]],
    +
    +        [[-1.2534, -1.3429],
    +         [ 0.4153, -1.4664]]])
    +>>> torch.matrix_power(a, 3)
    +tensor([[[  3.9392, -23.9916],
    +         [ 11.7357,  -0.2070]],
    +
    +        [[  0.2468,  -6.7168],
    +         [  2.0774,  -0.8187]]])
    +
    +
    +
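With n = 0 an identity matrix is returned regardless of the input; a small sketch:

>>> a = torch.randn(2, 2)
>>> torch.matrix_power(a, 0)
tensor([[1., 0.],
        [0., 1.]])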
    + +
    +
+torch.matrix_rank(input, tol=None, symmetric=False) → Tensor
    +

Returns the numerical rank of a 2-D tensor. The matrix rank is computed using SVD by default. If symmetric is True, then input is assumed to be symmetric, and the rank is computed from its eigenvalues instead.

    +

tol is the threshold below which the singular values (or the eigenvalues when symmetric is True) are considered to be 0. If tol is not specified, tol is set to S.max() * max(S.size()) * eps, where S is the tensor of singular values (or eigenvalues when symmetric is True), and eps is the epsilon value for the datatype of input.

    +
    +
    Parameters
    +
      +
    • input (Tensor) – the input 2-D tensor

    • +
    • tol (float, optional) – the tolerance value. Default: None

    • +
    • symmetric (bool, optional) – indicates whether input is symmetric. +Default: False

    • +
    +
    +
    +

    Example:

    +
    >>> a = torch.eye(10)
    +>>> torch.matrix_rank(a)
    +tensor(10)
    +>>> b = torch.eye(10)
    +>>> b[0, 0] = 0
    +>>> torch.matrix_rank(b)
    +tensor(9)
    +
    +
    +
    + +
    +
    +torch.mm(input, mat2, out=None) → Tensor
    +

    Performs a matrix multiplication of the matrices input and mat2.

    +

If input is a \((n \times m)\) tensor, mat2 is a \((m \times p)\) tensor, out will be a \((n \times p)\) tensor.

    +
    +

    Note

    +

This function does not broadcast. For broadcasting matrix products, see torch.matmul().

    +
    +
    +
    Parameters
    +
      +
    • input (Tensor) – the first matrix to be multiplied

    • +
    • mat2 (Tensor) – the second matrix to be multiplied

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> mat1 = torch.randn(2, 3)
    +>>> mat2 = torch.randn(3, 3)
    +>>> torch.mm(mat1, mat2)
    +tensor([[ 0.4851,  0.5037, -0.3633],
    +        [-0.0760, -3.6705,  2.4784]])
    +
    +
    +
    + +
    +
    +torch.mv(input, vec, out=None) → Tensor
    +

Performs a matrix-vector product of the matrix input and the vector vec.

    +

If input is a \((n \times m)\) tensor, vec is a 1-D tensor of size \(m\), out will be 1-D of size \(n\).

    +
    +

    Note

    +

    This function does not broadcast.

    +
    +
    +
    Parameters
    +
      +
    • input (Tensor) – matrix to be multiplied

    • +
    • vec (Tensor) – vector to be multiplied

    • +
    • out (Tensor, optional) – the output tensor

    • +
    +
    +
    +

    Example:

    +
    >>> mat = torch.randn(2, 3)
    +>>> vec = torch.randn(3)
    +>>> torch.mv(mat, vec)
    +tensor([ 1.0404, -0.6361])
    +
    +
    +
    + +
    +
    +torch.orgqr(input, input2) → Tensor
    +

Computes the orthogonal matrix Q of a QR factorization, from the (input, input2) tuple returned by torch.geqrf().

    +

This directly calls the underlying LAPACK function ?orgqr. See LAPACK documentation for orgqr for further details.

    +
    +
    Parameters
• input (Tensor) – the a from torch.geqrf().

• input2 (Tensor) – the tau from torch.geqrf().
    +
    +
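Example (a minimal sketch, feeding the (a, tau) pair from torch.geqrf() back in; the reconstructed Q should have orthonormal columns):

>>> A = torch.randn(4, 3)
>>> a, tau = torch.geqrf(A)
>>> q = torch.orgqr(a, tau)
>>> torch.allclose(torch.mm(q.t(), q), torch.eye(3), atol=1e-5)
True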
    + +
    +
    +torch.ormqr(input, input2, input3, left=True, transpose=False) → Tensor
    +

Multiplies mat (given by input3) by the orthogonal Q matrix of the QR factorization formed by torch.geqrf() that is represented by (a, tau) (given by (input, input2)).

    +

This directly calls the underlying LAPACK function ?ormqr. See LAPACK documentation for ormqr for further details.

    +
    +
    Parameters
• input (Tensor) – the a from torch.geqrf().

• input2 (Tensor) – the tau from torch.geqrf().

• input3 (Tensor) – the matrix mat to be multiplied.
    +
    +
    +
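Example (a minimal sketch; with the defaults left=True, transpose=False this computes Q @ mat, which should match multiplying by the explicit Q from torch.orgqr()):

>>> A = torch.randn(4, 4)
>>> a, tau = torch.geqrf(A)
>>> mat = torch.randn(4, 2)
>>> torch.allclose(torch.ormqr(a, tau, mat), torch.mm(torch.orgqr(a, tau), mat), atol=1e-5)
True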
    + +
    +
    +torch.pinverse(input, rcond=1e-15) → Tensor
    +

Calculates the pseudo-inverse (also known as the Moore-Penrose inverse) of a 2D tensor. Please look at Moore-Penrose inverse for more details.

    +
    +

    Note

    +

    This method is implemented using the Singular Value Decomposition.

    +
    +
    +

    Note

    +

The pseudo-inverse is not necessarily a continuous function in the elements of the matrix [1]. Therefore, derivatives are not always existent, and exist for a constant rank only [2]. However, this method is backprop-able due to the implementation using SVD results, and could be unstable. Double-backward will also be unstable due to the usage of SVD internally. See svd() for more details.

    +
    +
    +
    Parameters
    +
      +
    • input (Tensor) – The input 2D tensor of dimensions \(m \times n\)

    • +
• rcond (float) – A floating point value to determine the cutoff for small singular values. Default: 1e-15

    • +
    +
    +
    Returns
    +

    The pseudo-inverse of input of dimensions \(n \times m\)

    +
    +
    +

    Example:

    +
    >>> input = torch.randn(3, 5)
    +>>> input
    +tensor([[ 0.5495,  0.0979, -1.4092, -0.1128,  0.4132],
    +        [-1.1143, -0.3662,  0.3042,  1.6374, -0.9294],
    +        [-0.3269, -0.5745, -0.0382, -0.5922, -0.6759]])
    +>>> torch.pinverse(input)
    +tensor([[ 0.0600, -0.1933, -0.2090],
    +        [-0.0903, -0.0817, -0.4752],
    +        [-0.7124, -0.1631, -0.2272],
    +        [ 0.1356,  0.3933, -0.5023],
    +        [-0.0308, -0.1725, -0.5216]])
    +
    +
    +
    + +
    +
    +torch.qr(input, some=True, out=None) -> (Tensor, Tensor)
    +

Computes the QR decomposition of a matrix or a batch of matrices input, and returns a namedtuple (Q, R) of tensors such that \(\text{input} = Q R\) with \(Q\) being an orthogonal matrix or batch of orthogonal matrices and \(R\) being an upper triangular matrix or batch of upper triangular matrices.

    +

If some is True, then this function returns the thin (reduced) QR factorization. Otherwise, if some is False, this function returns the complete QR factorization.

    +
    +

    Note

    +

Precision may be lost if the magnitudes of the elements of input are large.

    +
    +
    +

    Note

    +

While it should always give you a valid decomposition, it may not give you the same one across platforms - it will depend on your LAPACK implementation.

    +
    +
    +
    Parameters
    +
      +
• input (Tensor) – the input tensor of size \((*, m, n)\) where * is zero or more batch dimensions consisting of matrices of dimension \(m \times n\).

    • +
    • some (bool, optional) – Set to True for reduced QR decomposition and False for +complete QR decomposition.

    • +
• out (tuple, optional) – tuple of Q and R tensors satisfying input = torch.matmul(Q, R). The dimensions of Q and R are \((*, m, k)\) and \((*, k, n)\) respectively, where \(k = \min(m, n)\) if some is True and \(k = m\) otherwise.

    • +
    +
    +
    +

    Example:

    +
    >>> a = torch.tensor([[12., -51, 4], [6, 167, -68], [-4, 24, -41]])
    +>>> q, r = torch.qr(a)
    +>>> q
    +tensor([[-0.8571,  0.3943,  0.3314],
    +        [-0.4286, -0.9029, -0.0343],
    +        [ 0.2857, -0.1714,  0.9429]])
    +>>> r
    +tensor([[ -14.0000,  -21.0000,   14.0000],
    +        [   0.0000, -175.0000,   70.0000],
    +        [   0.0000,    0.0000,  -35.0000]])
    +>>> torch.mm(q, r).round()
    +tensor([[  12.,  -51.,    4.],
    +        [   6.,  167.,  -68.],
    +        [  -4.,   24.,  -41.]])
    +>>> torch.mm(q.t(), q).round()
    +tensor([[ 1.,  0.,  0.],
    +        [ 0.,  1., -0.],
    +        [ 0., -0.,  1.]])
    +>>> a = torch.randn(3, 4, 5)
    +>>> q, r = torch.qr(a, some=False)
    +>>> torch.allclose(torch.matmul(q, r), a)
    +True
    +>>> torch.allclose(torch.matmul(q.transpose(-2, -1), q), torch.eye(5))
    +True
    +
    +
    +
    + +
    +
    +torch.solve(input, A, out=None) -> (Tensor, Tensor)
    +

This function returns the solution to the system of linear equations represented by \(AX = B\) and the LU factorization of A, in order as a namedtuple (solution, LU).

    +

    LU contains L and U factors for LU factorization of A.

    +

torch.solve(B, A) can take in 2D inputs B, A or inputs that are batches of 2D matrices. If the inputs are batches, then batched outputs solution, LU are returned.

    +
    +

    Note

    +

Irrespective of the original strides, the returned matrices solution and LU will be transposed, i.e. with strides like B.contiguous().transpose(-1, -2).stride() and A.contiguous().transpose(-1, -2).stride() respectively.

    +
    +
    +
    Parameters
    +
      +
• input (Tensor) – input matrix \(B\) of size \((*, m, k)\), where \(*\) is zero or more batch dimensions.

    • +
• A (Tensor) – input square matrix of size \((*, m, m)\), where \(*\) is zero or more batch dimensions.

    • +
    • out ((Tensor, Tensor), optional) – optional output tuple.

    • +
    +
    +
    +

    Example:

    +
    >>> A = torch.tensor([[6.80, -2.11,  5.66,  5.97,  8.23],
    +                      [-6.05, -3.30,  5.36, -4.44,  1.08],
    +                      [-0.45,  2.58, -2.70,  0.27,  9.04],
    +                      [8.32,  2.71,  4.35,  -7.17,  2.14],
    +                      [-9.67, -5.14, -7.26,  6.08, -6.87]]).t()
    +>>> B = torch.tensor([[4.02,  6.19, -8.22, -7.57, -3.03],
    +                      [-1.56,  4.00, -8.67,  1.75,  2.86],
    +                      [9.81, -4.09, -4.57, -8.61,  8.99]]).t()
    +>>> X, LU = torch.solve(B, A)
    +>>> torch.dist(B, torch.mm(A, X))
    +tensor(1.00000e-06 *
    +       7.0977)
    +
    +>>> # Batched solver example
    +>>> A = torch.randn(2, 3, 1, 4, 4)
    +>>> B = torch.randn(2, 3, 1, 4, 6)
    +>>> X, LU = torch.solve(B, A)
    +>>> torch.dist(B, A.matmul(X))
    +tensor(1.00000e-06 *
    +   3.6386)
    +
    +
    +
    + +
    +
    +torch.svd(input, some=True, compute_uv=True, out=None) -> (Tensor, Tensor, Tensor)
    +

This function returns a namedtuple (U, S, V) which is the singular value decomposition of an input real matrix or batches of real matrices input such that \(input = U \times diag(S) \times V^T\).

    +

If some is True (default), the method returns the reduced singular value decomposition, i.e., if the last two dimensions of input are m and n, then the returned U and V matrices will contain only \(min(n, m)\) orthonormal columns.

    +

If compute_uv is False, the returned U and V matrices will be zero matrices of shape \((m \times m)\) and \((n \times n)\) respectively. some will be ignored here.

    +
    +

    Note

    +

The implementation of SVD on CPU uses the LAPACK routine ?gesdd (a divide-and-conquer algorithm) instead of ?gesvd for speed. Analogously, the SVD on GPU uses the MAGMA routine gesdd as well.

    +
    +
    +

    Note

    +

Irrespective of the original strides, the returned matrix U will be transposed, i.e. with strides U.contiguous().transpose(-2, -1).stride().

    +
    +
    +

    Note

    +

Extra care needs to be taken when backpropagating through U and V. Such an operation is really only stable when input is full rank with all distinct singular values. Otherwise, NaN can appear as the gradients are not properly defined. Also, notice that double backward will usually do an additional backward through U and V even if the original backward is only on S.

    +
    +
    +

    Note

    +

When some = False, the gradients on U[..., :, min(m, n):] and V[..., :, min(m, n):] will be ignored in backward as those vectors can be arbitrary bases of the subspaces.

    +
    +
    +

    Note

    +

When compute_uv = False, backward cannot be performed since U and V from the forward pass are required for the backward operation.

    +
    +
    +
    Parameters
    +
      +
    • input (Tensor) – the input tensor of size \((*, m, n)\) where * is zero or more +batch dimensions consisting of \(m \times n\) matrices.

    • +
    • some (bool, optional) – controls the shape of returned U and V

    • +
    • compute_uv (bool, optional) – option whether to compute U and V or not

    • +
    • out (tuple, optional) – the output tuple of tensors

    • +
    +
    +
    +

    Example:

    +
    >>> a = torch.randn(5, 3)
    +>>> a
    +tensor([[ 0.2364, -0.7752,  0.6372],
    +        [ 1.7201,  0.7394, -0.0504],
    +        [-0.3371, -1.0584,  0.5296],
    +        [ 0.3550, -0.4022,  1.5569],
    +        [ 0.2445, -0.0158,  1.1414]])
    +>>> u, s, v = torch.svd(a)
    +>>> u
    +tensor([[ 0.4027,  0.0287,  0.5434],
    +        [-0.1946,  0.8833,  0.3679],
    +        [ 0.4296, -0.2890,  0.5261],
    +        [ 0.6604,  0.2717, -0.2618],
    +        [ 0.4234,  0.2481, -0.4733]])
    +>>> s
    +tensor([2.3289, 2.0315, 0.7806])
    +>>> v
    +tensor([[-0.0199,  0.8766,  0.4809],
    +        [-0.5080,  0.4054, -0.7600],
    +        [ 0.8611,  0.2594, -0.4373]])
    +>>> torch.dist(a, torch.mm(torch.mm(u, torch.diag(s)), v.t()))
    +tensor(8.6531e-07)
    +>>> a_big = torch.randn(7, 5, 3)
    +>>> u, s, v = torch.svd(a_big)
    +>>> torch.dist(a_big, torch.matmul(torch.matmul(u, torch.diag_embed(s)), v.transpose(-2, -1)))
    +tensor(2.6503e-06)
    +
    +
    +
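A small sketch of compute_uv=False, reusing a from above (S is still returned, while U and V come back as zero matrices):

>>> u0, s0, v0 = torch.svd(a, compute_uv=False)
>>> s0.shape
torch.Size([3])
>>> bool((u0 == 0).all())
True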
    + +
    +
    +torch.symeig(input, eigenvectors=False, upper=True, out=None) -> (Tensor, Tensor)
    +

This function returns eigenvalues and eigenvectors of a real symmetric matrix input or a batch of real symmetric matrices, represented by a namedtuple (eigenvalues, eigenvectors).

    +

This function calculates all eigenvalues (and vectors) of input such that \(\text{input} = V \text{diag}(e) V^T\).

    +

The boolean argument eigenvectors defines computation of both eigenvectors and eigenvalues or eigenvalues only.

    +

If it is False, only eigenvalues are computed. If it is True, both eigenvalues and eigenvectors are computed.

    +

Since the input matrix input is supposed to be symmetric, only the upper triangular portion is used by default.

    +

If upper is False, then the lower triangular portion is used.

    +
    +

    Note

    +

Irrespective of the original strides, the returned matrix V will be transposed, i.e. with strides V.contiguous().transpose(-1, -2).stride().

    +
    +
    +

    Note

    +

Extra care needs to be taken when backpropagating through the outputs. Such an operation is really only stable when all eigenvalues are distinct. Otherwise, NaN can appear as the gradients are not properly defined.

    +
    +
    +
    Parameters
    +
      +
• input (Tensor) – the input tensor of size \((*, n, n)\) where * is zero or more batch dimensions consisting of symmetric matrices.

    • +
    • eigenvectors (boolean, optional) – controls whether eigenvectors have to be computed

    • +
    • upper (boolean, optional) – controls whether to consider upper-triangular or lower-triangular region

    • +
    • out (tuple, optional) – the output tuple of (Tensor, Tensor)

    • +
    +
    +
    Returns
    +

    A namedtuple (eigenvalues, eigenvectors) containing

    +
    +
      +
    • eigenvalues (Tensor): Shape \((*, m)\). The eigenvalues in ascending order.

    • +
• eigenvectors (Tensor): Shape \((*, m, m)\). If eigenvectors=False, it’s a tensor filled with zeros. Otherwise, this tensor contains the orthonormal eigenvectors of the input.

    • +
    +
    +

    +
    +
    Return type
    +

    (Tensor, Tensor)

    +
    +
    +

    Examples:

    >>> a = torch.randn(5, 5)
    >>> a = a + a.t()  # To make a symmetric
    >>> a
    tensor([[-5.7827,  4.4559, -0.2344, -1.7123, -1.8330],
            [ 4.4559,  1.4250, -2.8636, -3.2100, -0.1798],
            [-0.2344, -2.8636,  1.7112, -5.5785,  7.1988],
            [-1.7123, -3.2100, -5.5785, -2.6227,  3.1036],
            [-1.8330, -0.1798,  7.1988,  3.1036, -5.1453]])
    >>> e, v = torch.symeig(a, eigenvectors=True)
    >>> e
    tensor([-13.7012,  -7.7497,  -2.3163,   5.2477,   8.1050])
    >>> v
    tensor([[ 0.1643,  0.9034, -0.0291,  0.3508,  0.1817],
            [-0.2417, -0.3071, -0.5081,  0.6534,  0.4026],
            [-0.5176,  0.1223, -0.0220,  0.3295, -0.7798],
            [-0.4850,  0.2695, -0.5773, -0.5840,  0.1337],
            [ 0.6415, -0.0447, -0.6381, -0.0193, -0.4230]])
    >>> a_big = torch.randn(5, 2, 2)
    >>> a_big = a_big + a_big.transpose(-2, -1)  # To make a_big symmetric
    >>> e, v = a_big.symeig(eigenvectors=True)
    >>> torch.allclose(torch.matmul(v, torch.matmul(e.diag_embed(), v.transpose(-2, -1))), a_big)
    True
    torch.trapz(y, x, *, dim=-1) → Tensor

    Estimate \(\int y\,dx\) along dim, using the trapezoid rule.

    Parameters

    • y (Tensor) – The values of the function to integrate

    • x (Tensor) – The points at which the function y is sampled. If x is not in ascending order, intervals on which it is decreasing contribute negatively to the estimated integral (i.e., the convention \(\int_a^b f = -\int_b^a f\) is followed).

    • dim (int) – The dimension along which to integrate. By default, use the last dimension.
    Returns

    A Tensor with the same shape as the input, except with dim removed. Each element of the returned tensor represents the estimated integral \(\int y\,dx\) along dim.

    Example:

    >>> y = torch.randn((2, 3))
    >>> y
    tensor([[-2.1156,  0.6857, -0.2700],
            [-1.2145,  0.5540,  2.0431]])
    >>> x = torch.tensor([[1, 3, 4], [1, 2, 3]])
    >>> torch.trapz(y, x)
    tensor([-1.2220,  0.9683])
    torch.trapz(y, *, dx=1, dim=-1) → Tensor

    As above, but the sample points are spaced uniformly at a distance of dx.

    Parameters

    • y (Tensor) – The values of the function to integrate

    • dx (float) – The distance between points at which y is sampled.

    • dim (int) – The dimension along which to integrate. By default, use the last dimension.

    Returns

    A Tensor with the same shape as the input, except with dim removed. Each element of the returned tensor represents the estimated integral \(\int y\,dx\) along dim.
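
    For instance, a minimal sketch of this variant (values chosen so the result can be checked by hand):

    >>> y = torch.tensor([1., 2., 3.])
    >>> torch.trapz(y, dx=2)  # 2*(1+2)/2 + 2*(2+3)/2 = 8
    tensor(8.)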
    torch.triangular_solve(input, A, upper=True, transpose=False, unitriangular=False) -> (Tensor, Tensor)

    Solves a system of equations with a triangular coefficient matrix \(A\) and multiple right-hand sides \(b\).

    In particular, solves \(AX = b\) and assumes \(A\) is upper-triangular with the default keyword arguments.

    torch.triangular_solve(b, A) can take in 2D inputs b, A or inputs that are batches of 2D matrices. If the inputs are batches, then batched outputs X are returned.

    Note

    The out keyword only supports 2D matrix inputs, that is, b, A must be 2D matrices.
    Parameters

    • input (Tensor) – multiple right-hand sides of size \((*, m, k)\) where \(*\) is zero or more batch dimensions (\(b\))

    • A (Tensor) – the input triangular coefficient matrix of size \((*, m, m)\) where \(*\) is zero or more batch dimensions

    • upper (bool, optional) – whether to solve the upper-triangular system of equations (default) or the lower-triangular system of equations. Default: True.

    • transpose (bool, optional) – whether \(A\) should be transposed before being sent into the solver. Default: False.

    • unitriangular (bool, optional) – whether \(A\) is unit triangular. If True, the diagonal elements of \(A\) are assumed to be 1 and not referenced from \(A\). Default: False.
    Returns

    A namedtuple (solution, cloned_coefficient) where cloned_coefficient is a clone of \(A\) and solution is the solution \(X\) to \(AX = b\) (or whatever variant of the system of equations it is, depending on the keyword arguments).

    Examples:

    >>> A = torch.randn(2, 2).triu()
    >>> A
    tensor([[ 1.1527, -1.0753],
            [ 0.0000,  0.7986]])
    >>> b = torch.randn(2, 3)
    >>> b
    tensor([[-0.0210,  2.3513, -1.5492],
            [ 1.5429,  0.7403, -1.0243]])
    >>> torch.triangular_solve(b, A)
    torch.return_types.triangular_solve(
    solution=tensor([[ 1.7841,  2.9046, -2.5405],
            [ 1.9320,  0.9270, -1.2826]]),
    cloned_coefficient=tensor([[ 1.1527, -1.0753],
            [ 0.0000,  0.7986]]))
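
    The solution can be checked directly; a short sketch continuing the example above:

    >>> X = torch.triangular_solve(b, A).solution
    >>> torch.allclose(torch.mm(A, X), b)
    True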

    Utilities

    torch.compiled_with_cxx11_abi()[source]

    Returns whether PyTorch was built with _GLIBCXX_USE_CXX11_ABI=1

\ No newline at end of file
diff --git a/docs/stable/torchvision/datasets.html b/docs/stable/torchvision/datasets.html
new file mode 100644
index 000000000000..dbbb498748ae
--- /dev/null
+++ b/docs/stable/torchvision/datasets.html

    torchvision.datasets

    All datasets are subclasses of torch.utils.data.Dataset, i.e., they have __getitem__ and __len__ methods implemented. Hence, they can all be passed to a torch.utils.data.DataLoader, which can load multiple samples in parallel using torch.multiprocessing workers. For example:

    imagenet_data = torchvision.datasets.ImageNet('path/to/imagenet_root/')
    data_loader = torch.utils.data.DataLoader(imagenet_data,
                                              batch_size=4,
                                              shuffle=True,
                                              num_workers=args.nThreads)

    The following datasets are available:

    + +

    All the datasets have almost the same API. They all have two common arguments, transform and target_transform, to transform the input and the target respectively, as the sketch below shows.
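
    A minimal sketch (the root path is a placeholder; the target remap is hypothetical) applying both arguments to MNIST:

    import torchvision.datasets as dset
    import torchvision.transforms as transforms

    mnist = dset.MNIST('path/to/mnist_root/', download=True,
                       transform=transforms.RandomCrop(24),   # transform the input PIL image
                       target_transform=lambda y: y % 2)      # remap each label to even/odd
    img, target = mnist[0]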


    MNIST

    class torchvision.datasets.MNIST(root, train=True, transform=None, target_transform=None, download=False)[source]

    MNIST Dataset.

    Parameters

    • root (string) – Root directory of dataset where MNIST/processed/training.pt and MNIST/processed/test.pt exist.

    • train (bool, optional) – If True, creates dataset from training.pt, otherwise from test.pt.

    • download (bool, optional) – If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again.

    • transform (callable, optional) – A function/transform that takes in a PIL image and returns a transformed version. E.g., transforms.RandomCrop

    • target_transform (callable, optional) – A function/transform that takes in the target and transforms it.

    Fashion-MNIST

    class torchvision.datasets.FashionMNIST(root, train=True, transform=None, target_transform=None, download=False)[source]

    Fashion-MNIST Dataset.

    Parameters

    • root (string) – Root directory of dataset where Fashion-MNIST/processed/training.pt and Fashion-MNIST/processed/test.pt exist.

    • train (bool, optional) – If True, creates dataset from training.pt, otherwise from test.pt.

    • download (bool, optional) – If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again.

    • transform (callable, optional) – A function/transform that takes in a PIL image and returns a transformed version. E.g., transforms.RandomCrop

    • target_transform (callable, optional) – A function/transform that takes in the target and transforms it.

    KMNIST

    class torchvision.datasets.KMNIST(root, train=True, transform=None, target_transform=None, download=False)[source]

    Kuzushiji-MNIST Dataset.

    Parameters

    • root (string) – Root directory of dataset where KMNIST/processed/training.pt and KMNIST/processed/test.pt exist.

    • train (bool, optional) – If True, creates dataset from training.pt, otherwise from test.pt.

    • download (bool, optional) – If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again.

    • transform (callable, optional) – A function/transform that takes in a PIL image and returns a transformed version. E.g., transforms.RandomCrop

    • target_transform (callable, optional) – A function/transform that takes in the target and transforms it.

    EMNIST

    class torchvision.datasets.EMNIST(root, split, **kwargs)[source]

    EMNIST Dataset.

    Parameters

    • root (string) – Root directory of dataset where EMNIST/processed/training.pt and EMNIST/processed/test.pt exist.

    • split (string) – The dataset has 6 different splits: byclass, bymerge, balanced, letters, digits and mnist. This argument specifies which one to use (see the sketch after this list).

    • train (bool, optional) – If True, creates dataset from training.pt, otherwise from test.pt.

    • download (bool, optional) – If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again.

    • transform (callable, optional) – A function/transform that takes in a PIL image and returns a transformed version. E.g., transforms.RandomCrop

    • target_transform (callable, optional) – A function/transform that takes in the target and transforms it.
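
    For example, a minimal sketch (the root path is a placeholder) selecting the letters split:

    emnist_letters = torchvision.datasets.EMNIST('path/to/emnist_root/',
                                                 split='letters', download=True)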

    QMNIST

    class torchvision.datasets.QMNIST(root, what=None, compat=True, train=True, **kwargs)[source]

    QMNIST Dataset.

    Parameters

    • root (string) – Root directory of dataset whose processed subdir contains torch binary files with the datasets.

    • what (string, optional) – Can be ‘train’, ‘test’, ‘test10k’, ‘test50k’, or ‘nist’ for, respectively, the mnist-compatible training set, the 60k qmnist testing set, the 10k qmnist examples that match the mnist testing set, the 50k remaining qmnist testing examples, or all the nist digits. The default is to select ‘train’ or ‘test’ according to the compatibility argument ‘train’.

    • compat (bool, optional) – A boolean that says whether the target for each example is the class number (for compatibility with the MNIST dataloader) or a torch vector containing the full qmnist information. Default: True.

    • download (bool, optional) – If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again.

    • transform (callable, optional) – A function/transform that takes in a PIL image and returns a transformed version. E.g., transforms.RandomCrop

    • target_transform (callable, optional) – A function/transform that takes in the target and transforms it.

    • train (bool, optional, compatibility) – When argument ‘what’ is not specified, this boolean decides whether to load the training set or the testing set. Default: True.

    FakeData

    class torchvision.datasets.FakeData(size=1000, image_size=(3, 224, 224), num_classes=10, transform=None, target_transform=None, random_offset=0)[source]

    A fake dataset that returns randomly generated images as PIL images.

    Parameters

    • size (int, optional) – Size of the dataset. Default: 1000 images

    • image_size (tuple, optional) – Size of the returned images. Default: (3, 224, 224)

    • num_classes (int, optional) – Number of classes in the dataset. Default: 10

    • transform (callable, optional) – A function/transform that takes in a PIL image and returns a transformed version. E.g., transforms.RandomCrop

    • target_transform (callable, optional) – A function/transform that takes in the target and transforms it.

    • random_offset (int) – Offsets the index-based random seed used to generate each image. Default: 0
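
    For example, a minimal sketch generating a small fake dataset to smoke-test a pipeline:

    fake = torchvision.datasets.FakeData(size=100, image_size=(3, 32, 32), num_classes=5)
    img, target = fake[0]  # img is a PIL image; target is the class index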

    COCO

    Note

    These require the COCO API to be installed.

    Captions

    class torchvision.datasets.CocoCaptions(root, annFile, transform=None, target_transform=None, transforms=None)[source]

    MS Coco Captions Dataset.

    Parameters

    • root (string) – Root directory where images are downloaded to.

    • annFile (string) – Path to json annotation file.

    • transform (callable, optional) – A function/transform that takes in a PIL image and returns a transformed version. E.g., transforms.ToTensor

    • target_transform (callable, optional) – A function/transform that takes in the target and transforms it.

    • transforms (callable, optional) – A function/transform that takes an input sample and its target as entry and returns a transformed version.

    Example

    import torchvision.datasets as dset
    import torchvision.transforms as transforms
    cap = dset.CocoCaptions(root='dir where images are',
                            annFile='json annotation file',
                            transform=transforms.ToTensor())

    print('Number of samples: ', len(cap))
    img, target = cap[3]  # load 4th sample

    print("Image Size: ", img.size())
    print(target)

    Output:

    Number of samples: 82783
    Image Size: (3L, 427L, 640L)
    [u'A plane emitting smoke stream flying over a mountain.',
    u'A plane darts across a bright blue sky behind a mountain covered in snow',
    u'A plane leaves a contrail above the snowy mountain top.',
    u'A mountain that has a plane flying overheard in the distance.',
    u'A mountain view with a plume of smoke in the background']
    __getitem__(index)[source]

    Parameters

    index (int) – Index

    Returns

    Tuple (image, target). target is a list of captions for the image.

    Return type

    tuple

    Detection

    class torchvision.datasets.CocoDetection(root, annFile, transform=None, target_transform=None, transforms=None)[source]

    MS Coco Detection Dataset.

    Parameters

    • root (string) – Root directory where images are downloaded to.

    • annFile (string) – Path to json annotation file.

    • transform (callable, optional) – A function/transform that takes in a PIL image and returns a transformed version. E.g., transforms.ToTensor

    • target_transform (callable, optional) – A function/transform that takes in the target and transforms it.

    • transforms (callable, optional) – A function/transform that takes an input sample and its target as entry and returns a transformed version.
    __getitem__(index)[source]

    Parameters

    index (int) – Index

    Returns

    Tuple (image, target). target is the object returned by coco.loadAnns.

    Return type

    tuple

    LSUN

    class torchvision.datasets.LSUN(root, classes='train', transform=None, target_transform=None)[source]

    LSUN dataset.

    Parameters

    • root (string) – Root directory for the database files.

    • classes (string or list) – One of {‘train’, ‘val’, ‘test’} or a list of categories to load, e.g. [‘bedroom_train’, ‘church_train’].

    • transform (callable, optional) – A function/transform that takes in a PIL image and returns a transformed version. E.g., transforms.RandomCrop

    • target_transform (callable, optional) – A function/transform that takes in the target and transforms it.
    __getitem__(index)[source]

    Parameters

    index (int) – Index

    Returns

    Tuple (image, target) where target is the index of the target category.

    Return type

    tuple

    ImageFolder

    class torchvision.datasets.ImageFolder(root, transform=None, target_transform=None, loader=<function default_loader>, is_valid_file=None)[source]

    A generic data loader where the images are arranged in this way:

    root/dog/xxx.png
    root/dog/xxy.png
    root/dog/xxz.png

    root/cat/123.png
    root/cat/nsdf3.png
    root/cat/asd932_.png
    Parameters

    • root (string) – Root directory path.

    • transform (callable, optional) – A function/transform that takes in a PIL image and returns a transformed version. E.g., transforms.RandomCrop

    • target_transform (callable, optional) – A function/transform that takes in the target and transforms it.

    • loader (callable, optional) – A function to load an image given its path.

    • is_valid_file – A function that takes the path of an image file and checks if the file is valid (used to filter out corrupt files).
    __getitem__(index)

    Parameters

    index (int) – Index

    Returns

    (sample, target) where target is class_index of the target class.

    Return type

    tuple
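
    For example, a minimal sketch (the root path is a placeholder) loading a tree laid out as above:

    import torchvision.datasets as dset
    import torchvision.transforms as transforms

    data = dset.ImageFolder('path/to/root/', transform=transforms.ToTensor())
    img, class_index = data[0]  # classes are indexed in sorted folder order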

    DatasetFolder

    class torchvision.datasets.DatasetFolder(root, loader, extensions=None, transform=None, target_transform=None, is_valid_file=None)[source]

    A generic data loader where the samples are arranged in this way:

    root/class_x/xxx.ext
    root/class_x/xxy.ext
    root/class_x/xxz.ext

    root/class_y/123.ext
    root/class_y/nsdf3.ext
    root/class_y/asd932_.ext
    Parameters

    • root (string) – Root directory path.

    • loader (callable) – A function to load a sample given its path (see the sketch after this section).

    • extensions (tuple[string]) – A tuple of allowed extensions. extensions and is_valid_file should not both be passed.

    • transform (callable, optional) – A function/transform that takes in a sample and returns a transformed version. E.g., transforms.RandomCrop for images.

    • target_transform (callable, optional) – A function/transform that takes in the target and transforms it.

    • is_valid_file – A function that takes the path of a file and checks if the file is valid (used to filter out corrupt files). extensions and is_valid_file should not both be passed.
    __getitem__(index)[source]

    Parameters

    index (int) – Index

    Returns

    (sample, target) where target is class_index of the target class.

    Return type

    tuple
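
    For example, a hypothetical sketch of a non-image use case: loading one JSON sample per file with a custom loader (json_loader and the root path are illustrative):

    import json
    import torchvision.datasets as dset

    def json_loader(path):
        # Read a single sample stored as a JSON file.
        with open(path) as f:
            return json.load(f)

    data = dset.DatasetFolder('path/to/root/', loader=json_loader, extensions=('.json',))
    sample, class_index = data[0]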

    ImageNet

    class torchvision.datasets.ImageNet(root, split='train', download=False, **kwargs)[source]

    ImageNet 2012 Classification Dataset.

    Parameters

    • root (string) – Root directory of the ImageNet Dataset.

    • split (string, optional) – The dataset split, supports train or val.

    • download (bool, optional) – If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again.

    • transform (callable, optional) – A function/transform that takes in a PIL image and returns a transformed version. E.g., transforms.RandomCrop

    • target_transform (callable, optional) – A function/transform that takes in the target and transforms it.

    • loader – A function to load an image given its path.

    Note

    This requires scipy to be installed.

    CIFAR

    class torchvision.datasets.CIFAR10(root, train=True, transform=None, target_transform=None, download=False)[source]

    CIFAR10 Dataset.

    Parameters

    • root (string) – Root directory of dataset where directory cifar-10-batches-py exists or will be saved to if download is set to True.

    • train (bool, optional) – If True, creates dataset from the training set, otherwise from the test set.

    • transform (callable, optional) – A function/transform that takes in a PIL image and returns a transformed version. E.g., transforms.RandomCrop

    • target_transform (callable, optional) – A function/transform that takes in the target and transforms it.

    • download (bool, optional) – If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again.
    __getitem__(index)[source]

    Parameters

    index (int) – Index

    Returns

    (image, target) where target is the index of the target class.

    Return type

    tuple
    class torchvision.datasets.CIFAR100(root, train=True, transform=None, target_transform=None, download=False)[source]

    CIFAR100 Dataset.

    This is a subclass of the CIFAR10 Dataset.

    STL10

    class torchvision.datasets.STL10(root, split='train', folds=None, transform=None, target_transform=None, download=False)[source]

    STL10 Dataset.

    Parameters

    • root (string) – Root directory of dataset where directory stl10_binary exists.

    • split (string) – One of {‘train’, ‘test’, ‘unlabeled’, ‘train+unlabeled’}. Accordingly, the dataset is selected.

    • folds (int, optional) – One of {0-9} or None. For training, loads one of the 10 pre-defined folds of 1k samples for the standard evaluation procedure. If no value is passed, loads the 5k samples.

    • transform (callable, optional) – A function/transform that takes in a PIL image and returns a transformed version. E.g., transforms.RandomCrop

    • target_transform (callable, optional) – A function/transform that takes in the target and transforms it.

    • download (bool, optional) – If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again.
    __getitem__(index)[source]

    Parameters

    index (int) – Index

    Returns

    (image, target) where target is the index of the target class.

    Return type

    tuple

    SVHN

    class torchvision.datasets.SVHN(root, split='train', transform=None, target_transform=None, download=False)[source]

    SVHN Dataset.

    Note: The SVHN dataset assigns the label 10 to the digit 0. However, in this Dataset, we assign the label 0 to the digit 0 to be compatible with PyTorch loss functions, which expect class labels to be in the range [0, C-1].

    Parameters

    • root (string) – Root directory of dataset where directory SVHN exists.

    • split (string) – One of {‘train’, ‘test’, ‘extra’}. Accordingly, the dataset is selected. ‘extra’ is the extra training set.

    • transform (callable, optional) – A function/transform that takes in a PIL image and returns a transformed version. E.g., transforms.RandomCrop

    • target_transform (callable, optional) – A function/transform that takes in the target and transforms it.

    • download (bool, optional) – If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again.
    __getitem__(index)[source]

    Parameters

    index (int) – Index

    Returns

    (image, target) where target is the index of the target class.

    Return type

    tuple

    PhotoTour

    class torchvision.datasets.PhotoTour(root, name, train=True, transform=None, download=False)[source]

    Learning Local Image Descriptors Data Dataset.

    Parameters

    • root (string) – Root directory where images are.

    • name (string) – Name of the dataset to load.

    • transform (callable, optional) – A function/transform that takes in a PIL image and returns a transformed version.

    • download (bool, optional) – If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again.
    __getitem__(index)[source]

    Parameters

    index (int) – Index

    Returns

    (data1, data2, matches)

    Return type

    tuple

    SBU

    class torchvision.datasets.SBU(root, transform=None, target_transform=None, download=True)[source]

    SBU Captioned Photo Dataset.

    Parameters

    • root (string) – Root directory of dataset where tarball SBUCaptionedPhotoDataset.tar.gz exists.

    • transform (callable, optional) – A function/transform that takes in a PIL image and returns a transformed version. E.g., transforms.RandomCrop

    • target_transform (callable, optional) – A function/transform that takes in the target and transforms it.

    • download (bool, optional) – If True, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again.
    __getitem__(index)[source]

    Parameters

    index (int) – Index

    Returns

    (image, target) where target is a caption for the photo.

    Return type

    tuple

    Flickr

    class torchvision.datasets.Flickr8k(root, ann_file, transform=None, target_transform=None)[source]

    Flickr8k Entities Dataset.

    Parameters

    • root (string) – Root directory where images are downloaded to.

    • ann_file (string) – Path to annotation file.

    • transform (callable, optional) – A function/transform that takes in a PIL image and returns a transformed version. E.g., transforms.ToTensor

    • target_transform (callable, optional) – A function/transform that takes in the target and transforms it.
    __getitem__(index)[source]

    Parameters

    index (int) – Index

    Returns

    Tuple (image, target). target is a list of captions for the image.

    Return type

    tuple
    class torchvision.datasets.Flickr30k(root, ann_file, transform=None, target_transform=None)[source]

    Flickr30k Entities Dataset.

    Parameters

    • root (string) – Root directory where images are downloaded to.

    • ann_file (string) – Path to annotation file.

    • transform (callable, optional) – A function/transform that takes in a PIL image and returns a transformed version. E.g., transforms.ToTensor

    • target_transform (callable, optional) – A function/transform that takes in the target and transforms it.
    __getitem__(index)[source]

    Parameters

    index (int) – Index

    Returns

    Tuple (image, target). target is a list of captions for the image.

    Return type

    tuple

    VOC

    class torchvision.datasets.VOCSegmentation(root, year='2012', image_set='train', download=False, transform=None, target_transform=None, transforms=None)[source]

    Pascal VOC Segmentation Dataset.

    Parameters

    • root (string) – Root directory of the VOC Dataset.

    • year (string, optional) – The dataset year, supports years 2007 to 2012.

    • image_set (string, optional) – Select the image_set to use: train, trainval or val

    • download (bool, optional) – If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again.

    • transform (callable, optional) – A function/transform that takes in a PIL image and returns a transformed version. E.g., transforms.RandomCrop

    • target_transform (callable, optional) – A function/transform that takes in the target and transforms it.

    • transforms (callable, optional) – A function/transform that takes an input sample and its target as entry and returns a transformed version.
    __getitem__(index)[source]

    Parameters

    index (int) – Index

    Returns

    (image, target) where target is the image segmentation.

    Return type

    tuple
    class torchvision.datasets.VOCDetection(root, year='2012', image_set='train', download=False, transform=None, target_transform=None, transforms=None)[source]

    Pascal VOC Detection Dataset.

    Parameters

    • root (string) – Root directory of the VOC Dataset.

    • year (string, optional) – The dataset year, supports years 2007 to 2012.

    • image_set (string, optional) – Select the image_set to use: train, trainval or val

    • download (bool, optional) – If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again.

    • transform (callable, optional) – A function/transform that takes in a PIL image and returns a transformed version. E.g., transforms.RandomCrop

    • target_transform (callable, required) – A function/transform that takes in the target and transforms it.

    • transforms (callable, optional) – A function/transform that takes an input sample and its target as entry and returns a transformed version.
    __getitem__(index)[source]

    Parameters

    index (int) – Index

    Returns

    (image, target) where target is a dictionary of the XML tree.

    Return type

    tuple

    Cityscapes

    Note

    Requires the Cityscapes dataset to be downloaded.

    class torchvision.datasets.Cityscapes(root, split='train', mode='fine', target_type='instance', transform=None, target_transform=None, transforms=None)[source]

    Cityscapes Dataset.

    Parameters

    • root (string) – Root directory of dataset where directory leftImg8bit and gtFine or gtCoarse are located.

    • split (string, optional) – The image split to use: train, test or val if mode=”gtFine”, otherwise train, train_extra or val

    • mode (string, optional) – The quality mode to use, gtFine or gtCoarse

    • target_type (string or list, optional) – Type of target to use: instance, semantic, polygon or color. Can also be a list to output a tuple with all specified target types.

    • transform (callable, optional) – A function/transform that takes in a PIL image and returns a transformed version. E.g., transforms.RandomCrop

    • target_transform (callable, optional) – A function/transform that takes in the target and transforms it.

    • transforms (callable, optional) – A function/transform that takes an input sample and its target as entry and returns a transformed version.

    Examples


    Get semantic segmentation target

    dataset = Cityscapes('./data/cityscapes', split='train', mode='fine',
                         target_type='semantic')

    img, smnt = dataset[0]

    Get multiple targets

    dataset = Cityscapes('./data/cityscapes', split='train', mode='fine',
                         target_type=['instance', 'color', 'polygon'])

    img, (inst, col, poly) = dataset[0]

    Validate on the “coarse” set

    dataset = Cityscapes('./data/cityscapes', split='val', mode='coarse',
                         target_type='semantic')

    img, smnt = dataset[0]
    __getitem__(index)[source]

    Parameters

    index (int) – Index

    Returns

    (image, target) where target is a tuple of all target types if target_type is a list with more than one item. Otherwise target is a json object if target_type=”polygon”, else the image segmentation.

    Return type

    tuple

    SBD

    class torchvision.datasets.SBDataset(root, image_set='train', mode='boundaries', download=False, transforms=None)[source]

    Semantic Boundaries Dataset

    The SBD currently contains annotations from 11355 images taken from the PASCAL VOC 2011 dataset.

    Note

    Please note that the train and val splits included with this dataset are different from the splits in the PASCAL VOC dataset. In particular some “train” images might be part of VOC2012 val. If you are interested in testing on VOC 2012 val, then use image_set=’train_noval’, which excludes all val images.

    Warning

    This class needs scipy to load target files from .mat format.
    Parameters

    • root (string) – Root directory of the Semantic Boundaries Dataset

    • image_set (string, optional) – Select the image_set to use: train, val or train_noval. Image set train_noval excludes VOC 2012 val images.

    • mode (string, optional) – Select target type. Possible values ‘boundaries’ or ‘segmentation’. In case of ‘boundaries’, the target is an array of shape [num_classes, H, W], where num_classes=20.

    • download (bool, optional) – If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again.

    • transforms (callable, optional) – A function/transform that takes an input sample and its target as entry and returns a transformed version. The input sample is a PIL image; the target is a numpy array if mode=’boundaries’ or a PIL image if mode=’segmentation’.

    USPS

    class torchvision.datasets.USPS(root, train=True, transform=None, target_transform=None, download=False)[source]

    USPS Dataset.

    The data format is [label [index:value ]*256 n] * num_lines, where label lies in [1, 10]. The value for each pixel lies in [-1, 1]. Here we transform the label into [0, 9] and make the pixel values lie in [0, 255].

    Parameters

    • root (string) – Root directory of dataset to store USPS data files.

    • train (bool, optional) – If True, creates dataset from usps.bz2, otherwise from usps.t.bz2.

    • transform (callable, optional) – A function/transform that takes in a PIL image and returns a transformed version. E.g., transforms.RandomCrop

    • target_transform (callable, optional) – A function/transform that takes in the target and transforms it.

    • download (bool, optional) – If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again.
    __getitem__(index)[source]

    Parameters

    index (int) – Index

    Returns

    (image, target) where target is the index of the target class.

    Return type

    tuple

    Kinetics-400

    class torchvision.datasets.Kinetics400(root, frames_per_clip, step_between_clips=1, transform=None)[source]

    Kinetics-400 dataset.

    Kinetics-400 is an action recognition video dataset. This dataset considers every video as a collection of video clips of fixed size, specified by frames_per_clip, where the step in frames between each clip is given by step_between_clips.

    To give an example, for 2 videos with 10 and 15 frames respectively, if frames_per_clip=5 and step_between_clips=5, the dataset size will be (2 + 3) = 5, where the first two elements will come from video 1, and the next three elements from video 2. Note that we drop clips which do not have exactly frames_per_clip elements, so not all frames in a video might be present.

    Internally, it uses a VideoClips object to handle clip creation.

    Parameters

    • root (string) – Root directory of the Kinetics-400 Dataset.

    • frames_per_clip (int) – number of frames in a clip

    • step_between_clips (int) – number of frames between each clip

    • transform (callable, optional) – A function/transform that takes in a TxHxWxC video and returns a transformed version.

    Returns

    • video (Tensor[T, H, W, C]): the T video frames

    • audio (Tensor[K, L]): the audio frames, where K is the number of channels and L is the number of points

    • label (int): class of the video clip
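
    For example, a minimal usage sketch (the root path and clip parameters are placeholders):

    dataset = torchvision.datasets.Kinetics400('path/to/kinetics_root/',
                                               frames_per_clip=16,
                                               step_between_clips=8)
    video, audio, label = dataset[0]  # video: Tensor[T, H, W, C]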

    HMDB51

    class torchvision.datasets.HMDB51(root, annotation_path, frames_per_clip, step_between_clips=1, fold=1, train=True, transform=None)[source]

    HMDB51 dataset (http://serre-lab.clps.brown.edu/resource/hmdb-a-large-human-motion-database/).

    HMDB51 is an action recognition video dataset. This dataset considers every video as a collection of video clips of fixed size, specified by frames_per_clip, where the step in frames between each clip is given by step_between_clips.

    To give an example, for 2 videos with 10 and 15 frames respectively, if frames_per_clip=5 and step_between_clips=5, the dataset size will be (2 + 3) = 5, where the first two elements will come from video 1, and the next three elements from video 2. Note that we drop clips which do not have exactly frames_per_clip elements, so not all frames in a video might be present.

    Internally, it uses a VideoClips object to handle clip creation.

    Parameters

    • root (string) – Root directory of the HMDB51 Dataset.

    • annotation_path (str) – path to the folder containing the split files

    • frames_per_clip (int) – number of frames in a clip.

    • step_between_clips (int) – number of frames between each clip.

    • fold (int, optional) – which fold to use. Should be between 1 and 3.

    • train (bool, optional) – if True, creates a dataset from the train split, otherwise from the test split.

    • transform (callable, optional) – A function/transform that takes in a TxHxWxC video and returns a transformed version.

    Returns

    • video (Tensor[T, H, W, C]): the T video frames

    • audio (Tensor[K, L]): the audio frames, where K is the number of channels and L is the number of points

    • label (int): class of the video clip

    UCF101

    class torchvision.datasets.UCF101(root, annotation_path, frames_per_clip, step_between_clips=1, fold=1, train=True, transform=None)[source]

    UCF101 dataset (https://www.crcv.ucf.edu/data/UCF101.php).

    UCF101 is an action recognition video dataset. This dataset considers every video as a collection of video clips of fixed size, specified by frames_per_clip, where the step in frames between each clip is given by step_between_clips.

    To give an example, for 2 videos with 10 and 15 frames respectively, if frames_per_clip=5 and step_between_clips=5, the dataset size will be (2 + 3) = 5, where the first two elements will come from video 1, and the next three elements from video 2. Note that we drop clips which do not have exactly frames_per_clip elements, so not all frames in a video might be present.

    Internally, it uses a VideoClips object to handle clip creation.

    Parameters

    • root (string) – Root directory of the UCF101 Dataset.

    • annotation_path (str) – path to the folder containing the split files

    • frames_per_clip (int) – number of frames in a clip.

    • step_between_clips (int, optional) – number of frames between each clip.

    • fold (int, optional) – which fold to use. Should be between 1 and 3.

    • train (bool, optional) – if True, creates a dataset from the train split, otherwise from the test split.

    • transform (callable, optional) – A function/transform that takes in a TxHxWxC video and returns a transformed version.

    Returns

    • video (Tensor[T, H, W, C]): the T video frames

    • audio (Tensor[K, L]): the audio frames, where K is the number of channels and L is the number of points

    • label (int): class of the video clip
\ No newline at end of file
diff --git a/docs/stable/torchvision/index.html b/docs/stable/torchvision/index.html
new file mode 100644
index 000000000000..679e0887f1cb
--- /dev/null
+++ b/docs/stable/torchvision/index.html

    torchvision

    The torchvision package consists of popular datasets, model architectures, and common image transformations for computer vision.
    torchvision.get_image_backend()[source]

    Gets the name of the package used to load images

    torchvision.set_image_backend(backend)[source]

    Specifies the package used to load images.

    Parameters

    backend (string) – Name of the image backend, one of {‘PIL’, ‘accimage’}. The accimage package uses the Intel IPP library. It is generally faster than PIL, but does not support as many operations.

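
    For example, a minimal sketch (assumes the optional accimage package is installed):

    import torchvision
    torchvision.set_image_backend('accimage')
    print(torchvision.get_image_backend())  # 'accimage'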
\ No newline at end of file
diff --git a/docs/stable/torchvision/models.html b/docs/stable/torchvision/models.html
new file mode 100644
index 000000000000..ce6a4660fcf5
--- /dev/null
+++ b/docs/stable/torchvision/models.html

    torchvision.models

    The models subpackage contains definitions of models for addressing different tasks, including: image classification, pixelwise semantic segmentation, object detection, instance segmentation and person keypoint detection.

    Classification

    The models subpackage contains definitions for the following model architectures for image classification:

    You can construct a model with random weights by calling its constructor:

    import torchvision.models as models
    resnet18 = models.resnet18()
    alexnet = models.alexnet()
    vgg16 = models.vgg16()
    squeezenet = models.squeezenet1_0()
    densenet = models.densenet161()
    inception = models.inception_v3()
    googlenet = models.googlenet()
    shufflenet = models.shufflenet_v2_x1_0()
    mobilenet = models.mobilenet_v2()
    resnext50_32x4d = models.resnext50_32x4d()
    wide_resnet50_2 = models.wide_resnet50_2()
    mnasnet = models.mnasnet1_0()

    We provide pre-trained models, using the PyTorch torch.utils.model_zoo. These can be constructed by passing pretrained=True:

    import torchvision.models as models
    resnet18 = models.resnet18(pretrained=True)
    alexnet = models.alexnet(pretrained=True)
    squeezenet = models.squeezenet1_0(pretrained=True)
    vgg16 = models.vgg16(pretrained=True)
    densenet = models.densenet161(pretrained=True)
    inception = models.inception_v3(pretrained=True)
    googlenet = models.googlenet(pretrained=True)
    shufflenet = models.shufflenet_v2_x1_0(pretrained=True)
    mobilenet = models.mobilenet_v2(pretrained=True)
    resnext50_32x4d = models.resnext50_32x4d(pretrained=True)
    wide_resnet50_2 = models.wide_resnet50_2(pretrained=True)
    mnasnet = models.mnasnet1_0(pretrained=True)

    Instancing a pre-trained model will download its weights to a cache directory. This directory can be set using the TORCH_MODEL_ZOO environment variable. See torch.utils.model_zoo.load_url() for details.

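    For example, a short sketch (the cache path is a placeholder) setting the cache directory before any weights are downloaded:

    import os
    os.environ['TORCH_MODEL_ZOO'] = '/path/to/model_cache'

    import torchvision.models as models
    resnet18 = models.resnet18(pretrained=True)  # weights are cached under /path/to/model_cache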

    Some models use modules which have different training and evaluation behavior, such as batch normalization. To switch between these modes, use model.train() or model.eval() as appropriate. See train() or eval() for details.

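    For example, a short sketch switching a pre-trained network to evaluation mode before inference:

    import torchvision.models as models

    model = models.resnet18(pretrained=True)
    model.eval()  # batch norm uses running statistics; dropout is disabled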

    All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225]. You can use the following transform to normalize:

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    An example of such normalization can be found in the imagenet example here.

    ImageNet 1-crop error rates (224x224)

    Network                            Top-1 error    Top-5 error
    ---------------------------------  -----------    -----------
    AlexNet                            43.45          20.91
    VGG-11                             30.98          11.37
    VGG-13                             30.07          10.75
    VGG-16                             28.41          9.62
    VGG-19                             27.62          9.12
    VGG-11 with batch normalization    29.62          10.19
    VGG-13 with batch normalization    28.45          9.63
    VGG-16 with batch normalization    26.63          8.50
    VGG-19 with batch normalization    25.76          8.15
    ResNet-18                          30.24          10.92
    ResNet-34                          26.70          8.58
    ResNet-50                          23.85          7.13
    ResNet-101                         22.63          6.44
    ResNet-152                         21.69          5.94
    SqueezeNet 1.0                     41.90          19.58
    SqueezeNet 1.1                     41.81          19.38
    Densenet-121                       25.35          7.83
    Densenet-169                       24.00          7.00
    Densenet-201                       22.80          6.43
    Densenet-161                       22.35          6.20
    Inception v3                       22.55          6.44
    GoogleNet                          30.22          10.47
    ShuffleNet V2                      30.64          11.68
    MobileNet V2                       28.12          9.71
    ResNeXt-50-32x4d                   22.38          6.30
    ResNeXt-101-32x8d                  20.69          5.47
    Wide ResNet-50-2                   21.49          5.91
    Wide ResNet-101-2                  21.16          5.72
    MNASNet 1.0                        26.49          8.456

    Alexnet

    torchvision.models.alexnet(pretrained=False, progress=True, **kwargs)[source]

    AlexNet model architecture from the “One weird trick…” paper.

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet

    • progress (bool) – If True, displays a progress bar of the download to stderr

    VGG

    torchvision.models.vgg11(pretrained=False, progress=True, **kwargs)[source]

    VGG 11-layer model (configuration “A”) from “Very Deep Convolutional Networks For Large-Scale Image Recognition” (https://arxiv.org/pdf/1409.1556.pdf)

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet

    • progress (bool) – If True, displays a progress bar of the download to stderr

    torchvision.models.vgg11_bn(pretrained=False, progress=True, **kwargs)[source]

    VGG 11-layer model (configuration “A”) with batch normalization, from “Very Deep Convolutional Networks For Large-Scale Image Recognition” (https://arxiv.org/pdf/1409.1556.pdf)

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet

    • progress (bool) – If True, displays a progress bar of the download to stderr

    torchvision.models.vgg13(pretrained=False, progress=True, **kwargs)[source]

    VGG 13-layer model (configuration “B”) from “Very Deep Convolutional Networks For Large-Scale Image Recognition” (https://arxiv.org/pdf/1409.1556.pdf)

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet

    • progress (bool) – If True, displays a progress bar of the download to stderr

    torchvision.models.vgg13_bn(pretrained=False, progress=True, **kwargs)[source]

    VGG 13-layer model (configuration “B”) with batch normalization, from “Very Deep Convolutional Networks For Large-Scale Image Recognition” (https://arxiv.org/pdf/1409.1556.pdf)

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet

    • progress (bool) – If True, displays a progress bar of the download to stderr

    torchvision.models.vgg16(pretrained=False, progress=True, **kwargs)[source]

    VGG 16-layer model (configuration “D”) from “Very Deep Convolutional Networks For Large-Scale Image Recognition” (https://arxiv.org/pdf/1409.1556.pdf)

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet

    • progress (bool) – If True, displays a progress bar of the download to stderr

    torchvision.models.vgg16_bn(pretrained=False, progress=True, **kwargs)[source]

    VGG 16-layer model (configuration “D”) with batch normalization, from “Very Deep Convolutional Networks For Large-Scale Image Recognition” (https://arxiv.org/pdf/1409.1556.pdf)

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet

    • progress (bool) – If True, displays a progress bar of the download to stderr

    torchvision.models.vgg19(pretrained=False, progress=True, **kwargs)[source]

    VGG 19-layer model (configuration “E”) from “Very Deep Convolutional Networks For Large-Scale Image Recognition” (https://arxiv.org/pdf/1409.1556.pdf)

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet

    • progress (bool) – If True, displays a progress bar of the download to stderr

    torchvision.models.vgg19_bn(pretrained=False, progress=True, **kwargs)[source]

    VGG 19-layer model (configuration “E”) with batch normalization, from “Very Deep Convolutional Networks For Large-Scale Image Recognition” (https://arxiv.org/pdf/1409.1556.pdf)

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet

    • progress (bool) – If True, displays a progress bar of the download to stderr

    ResNet

    torchvision.models.resnet18(pretrained=False, progress=True, **kwargs)[source]

    ResNet-18 model from “Deep Residual Learning for Image Recognition” (https://arxiv.org/pdf/1512.03385.pdf)

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet

    • progress (bool) – If True, displays a progress bar of the download to stderr

    torchvision.models.resnet34(pretrained=False, progress=True, **kwargs)[source]

    ResNet-34 model from “Deep Residual Learning for Image Recognition” (https://arxiv.org/pdf/1512.03385.pdf)

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet

    • progress (bool) – If True, displays a progress bar of the download to stderr

    torchvision.models.resnet50(pretrained=False, progress=True, **kwargs)[source]

    ResNet-50 model from “Deep Residual Learning for Image Recognition” (https://arxiv.org/pdf/1512.03385.pdf)

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet

    • progress (bool) – If True, displays a progress bar of the download to stderr

    torchvision.models.resnet101(pretrained=False, progress=True, **kwargs)[source]

    ResNet-101 model from “Deep Residual Learning for Image Recognition” (https://arxiv.org/pdf/1512.03385.pdf)

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet

    • progress (bool) – If True, displays a progress bar of the download to stderr

    torchvision.models.resnet152(pretrained=False, progress=True, **kwargs)[source]

    ResNet-152 model from “Deep Residual Learning for Image Recognition” (https://arxiv.org/pdf/1512.03385.pdf)

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet

    • progress (bool) – If True, displays a progress bar of the download to stderr

    SqueezeNet

    torchvision.models.squeezenet1_0(pretrained=False, progress=True, **kwargs)[source]

    SqueezeNet model architecture from the “SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size” paper.

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet
    • progress (bool) – If True, displays a progress bar of the download to stderr

    torchvision.models.squeezenet1_1(pretrained=False, progress=True, **kwargs)[source]

    SqueezeNet 1.1 model from the official SqueezeNet repo. SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters than SqueezeNet 1.0, without sacrificing accuracy.

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet
    • progress (bool) – If True, displays a progress bar of the download to stderr

    DenseNet

    torchvision.models.densenet121(pretrained=False, progress=True, **kwargs)[source]

    Densenet-121 model from “Densely Connected Convolutional Networks”

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet
    • progress (bool) – If True, displays a progress bar of the download to stderr
    • memory_efficient (bool) – If True, uses checkpointing, which is much more memory efficient but slower. Default: False. See “paper”

    torchvision.models.densenet169(pretrained=False, progress=True, **kwargs)[source]

    Densenet-169 model from “Densely Connected Convolutional Networks”

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet
    • progress (bool) – If True, displays a progress bar of the download to stderr
    • memory_efficient (bool) – If True, uses checkpointing, which is much more memory efficient but slower. Default: False. See “paper”

    torchvision.models.densenet161(pretrained=False, progress=True, **kwargs)[source]

    Densenet-161 model from “Densely Connected Convolutional Networks”

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet
    • progress (bool) – If True, displays a progress bar of the download to stderr
    • memory_efficient (bool) – If True, uses checkpointing, which is much more memory efficient but slower. Default: False. See “paper”

    torchvision.models.densenet201(pretrained=False, progress=True, **kwargs)[source]

    Densenet-201 model from “Densely Connected Convolutional Networks”

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet
    • progress (bool) – If True, displays a progress bar of the download to stderr
    • memory_efficient (bool) – If True, uses checkpointing, which is much more memory efficient but slower. Default: False. See “paper”

    Inception v3

    torchvision.models.inception_v3(pretrained=False, progress=True, **kwargs)[source]

    Inception v3 model architecture from “Rethinking the Inception Architecture for Computer Vision”.

    Note

    Important: In contrast to the other models, inception_v3 expects tensors with a size of N x 3 x 299 x 299, so ensure your images are sized accordingly.

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet
    • progress (bool) – If True, displays a progress bar of the download to stderr
    • aux_logits (bool) – If True, adds an auxiliary branch that can improve training. Default: True
    • transform_input (bool) – If True, preprocesses the input according to the method with which it was trained on ImageNet. Default: False
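    For instance, a minimal sketch of preparing a correctly sized input (img here is an illustrative PIL image):

    >>> from torchvision import transforms
    >>> preprocess = transforms.Compose([
    >>>     transforms.Resize(299),        # 299 x 299 rather than the usual 224 x 224
    >>>     transforms.CenterCrop(299),
    >>>     transforms.ToTensor(),
    >>>     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    >>> ])
    >>> batch = preprocess(img).unsqueeze(0)   # N x 3 x 299 x 299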

    GoogLeNet

    torchvision.models.googlenet(pretrained=False, progress=True, **kwargs)[source]

    GoogLeNet (Inception v1) model architecture from “Going Deeper with Convolutions”.

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet
    • progress (bool) – If True, displays a progress bar of the download to stderr
    • aux_logits (bool) – If True, adds two auxiliary branches that can improve training. Default: False when pretrained is True, otherwise True
    • transform_input (bool) – If True, preprocesses the input according to the method with which it was trained on ImageNet. Default: False

    ShuffleNet v2

    torchvision.models.shufflenet_v2_x0_5(pretrained=False, progress=True, **kwargs)[source]

    Constructs a ShuffleNetV2 with 0.5x output channels, as described in “ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design”.

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet
    • progress (bool) – If True, displays a progress bar of the download to stderr

    torchvision.models.shufflenet_v2_x1_0(pretrained=False, progress=True, **kwargs)[source]

    Constructs a ShuffleNetV2 with 1.0x output channels, as described in “ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design”.

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet
    • progress (bool) – If True, displays a progress bar of the download to stderr

    torchvision.models.shufflenet_v2_x1_5(pretrained=False, progress=True, **kwargs)[source]

    Constructs a ShuffleNetV2 with 1.5x output channels, as described in “ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design”.

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet
    • progress (bool) – If True, displays a progress bar of the download to stderr

    torchvision.models.shufflenet_v2_x2_0(pretrained=False, progress=True, **kwargs)[source]

    Constructs a ShuffleNetV2 with 2.0x output channels, as described in “ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design”.

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet
    • progress (bool) – If True, displays a progress bar of the download to stderr

    MobileNet v2

    torchvision.models.mobilenet_v2(pretrained=False, progress=True, **kwargs)[source]

    Constructs a MobileNetV2 architecture from “MobileNetV2: Inverted Residuals and Linear Bottlenecks”.

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet
    • progress (bool) – If True, displays a progress bar of the download to stderr

    ResNext

    torchvision.models.resnext50_32x4d(pretrained=False, progress=True, **kwargs)[source]

    ResNeXt-50 32x4d model from “Aggregated Residual Transformations for Deep Neural Networks”

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet
    • progress (bool) – If True, displays a progress bar of the download to stderr

    torchvision.models.resnext101_32x8d(pretrained=False, progress=True, **kwargs)[source]

    ResNeXt-101 32x8d model from “Aggregated Residual Transformations for Deep Neural Networks”

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet
    • progress (bool) – If True, displays a progress bar of the download to stderr

    Wide ResNet

    torchvision.models.wide_resnet50_2(pretrained=False, progress=True, **kwargs)[source]

    Wide ResNet-50-2 model from “Wide Residual Networks”

    The model is the same as ResNet except that the number of channels in the bottleneck is twice as large in every block. The number of channels in the outer 1x1 convolutions is the same, e.g. the last block in ResNet-50 has 2048-512-2048 channels, while in Wide ResNet-50-2 it has 2048-1024-2048.

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet
    • progress (bool) – If True, displays a progress bar of the download to stderr

    torchvision.models.wide_resnet101_2(pretrained=False, progress=True, **kwargs)[source]

    Wide ResNet-101-2 model from “Wide Residual Networks”

    The model is the same as ResNet except that the number of channels in the bottleneck is twice as large in every block. The number of channels in the outer 1x1 convolutions is the same, e.g. the last block in ResNet-50 has 2048-512-2048 channels, while in Wide ResNet-50-2 it has 2048-1024-2048.

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet
    • progress (bool) – If True, displays a progress bar of the download to stderr

    MNASNet

    torchvision.models.mnasnet0_5(pretrained=False, progress=True, **kwargs)[source]

    MNASNet with depth multiplier of 0.5 from “MnasNet: Platform-Aware Neural Architecture Search for Mobile”.

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet
    • progress (bool) – If True, displays a progress bar of the download to stderr

    torchvision.models.mnasnet0_75(pretrained=False, progress=True, **kwargs)[source]

    MNASNet with depth multiplier of 0.75 from “MnasNet: Platform-Aware Neural Architecture Search for Mobile”.

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet
    • progress (bool) – If True, displays a progress bar of the download to stderr

    torchvision.models.mnasnet1_0(pretrained=False, progress=True, **kwargs)[source]

    MNASNet with depth multiplier of 1.0 from “MnasNet: Platform-Aware Neural Architecture Search for Mobile”.

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet
    • progress (bool) – If True, displays a progress bar of the download to stderr

    torchvision.models.mnasnet1_3(pretrained=False, progress=True, **kwargs)[source]

    MNASNet with depth multiplier of 1.3 from “MnasNet: Platform-Aware Neural Architecture Search for Mobile”.

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on ImageNet
    • progress (bool) – If True, displays a progress bar of the download to stderr

    Semantic Segmentation

    The models subpackage contains definitions for the following model architectures for semantic segmentation:

    • FCN ResNet101
    • DeepLabV3 ResNet101

    As with image classification models, all pre-trained models expect input images normalized in the same way. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225]. They have been trained on images resized such that their minimum size is 520.
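    A minimal sketch of that preprocessing (img here is an illustrative PIL image whose smaller side is at least 520 pixels):

    >>> from torchvision import transforms
    >>> preprocess = transforms.Compose([
    >>>     transforms.ToTensor(),   # loads the image into the [0, 1] range
    >>>     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    >>> ])
    >>> input_batch = preprocess(img).unsqueeze(0)   # add a batch dimension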

    The pre-trained models have been trained on a subset of COCO train2017, on the 20 categories that are present in the Pascal VOC dataset. You can see more information on how the subset has been selected in references/segmentation/coco_utils.py. The classes that the pre-trained model outputs are the following, in order:

    ['__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
     'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
     'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']

    The accuracies of the pre-trained models evaluated on COCO val2017 are as follows:

    Network             | mean IoU | global pixelwise acc
    --------------------|----------|---------------------
    FCN ResNet101       | 63.7     | 91.9
    DeepLabV3 ResNet101 | 67.4     | 92.4

    Fully Convolutional Networks

    torchvision.models.segmentation.fcn_resnet50(pretrained=False, progress=True, num_classes=21, aux_loss=None, **kwargs)[source]

    Constructs a Fully-Convolutional Network model with a ResNet-50 backbone.

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on COCO train2017 which contains the same classes as Pascal VOC
    • progress (bool) – If True, displays a progress bar of the download to stderr

    torchvision.models.segmentation.fcn_resnet101(pretrained=False, progress=True, num_classes=21, aux_loss=None, **kwargs)[source]

    Constructs a Fully-Convolutional Network model with a ResNet-101 backbone.

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on COCO train2017 which contains the same classes as Pascal VOC
    • progress (bool) – If True, displays a progress bar of the download to stderr

    DeepLabV3

    torchvision.models.segmentation.deeplabv3_resnet50(pretrained=False, progress=True, num_classes=21, aux_loss=None, **kwargs)[source]

    Constructs a DeepLabV3 model with a ResNet-50 backbone.

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on COCO train2017 which contains the same classes as Pascal VOC
    • progress (bool) – If True, displays a progress bar of the download to stderr

    torchvision.models.segmentation.deeplabv3_resnet101(pretrained=False, progress=True, num_classes=21, aux_loss=None, **kwargs)[source]

    Constructs a DeepLabV3 model with a ResNet-101 backbone.

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on COCO train2017 which contains the same classes as Pascal VOC
    • progress (bool) – If True, displays a progress bar of the download to stderr
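    A minimal end-to-end sketch for either family (assuming an input_batch preprocessed as described above; the segmentation models return a dict of tensors whose 'out' entry holds the per-class logits):

    >>> import torch, torchvision
    >>> model = torchvision.models.segmentation.fcn_resnet101(pretrained=True)
    >>> model.eval()
    >>> with torch.no_grad():
    >>>     logits = model(input_batch)['out']   # shape: [N, 21, H, W]
    >>> prediction = logits.argmax(dim=1)        # per-pixel class indices into the list above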

    Object Detection, Instance Segmentation and Person Keypoint Detection

    The models subpackage contains definitions for the following model architectures for detection:

    • Faster R-CNN ResNet-50 FPN
    • Mask R-CNN ResNet-50 FPN
    • Keypoint R-CNN ResNet-50 FPN

    The pre-trained models for detection, instance segmentation and keypoint detection are initialized with the classification models in torchvision.

    The models expect a list of Tensor[C, H, W], in the range 0-1. The models internally resize the images so that they have a minimum size of 800. This option can be changed by passing the option min_size to the constructor of the models.
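    For example, a short sketch (the keyword is simply forwarded to the model constructor through **kwargs):

    >>> import torchvision
    >>> # resize inputs so their smaller side is 600 pixels instead of the default 800
    >>> model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True, min_size=600)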

    For object detection and instance segmentation, the pre-trained +models return the predictions of the following classes:

    COCO_INSTANCE_CATEGORY_NAMES = [
        '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
        'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
        'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
        'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
        'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
        'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
        'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
        'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
        'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
        'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
        'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
        'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
    ]

    Here is a summary of the accuracies for the models trained on the instances set of COCO train2017 and evaluated on COCO val2017:

    Network                    | box AP | mask AP | keypoint AP
    ---------------------------|--------|---------|------------
    Faster R-CNN ResNet-50 FPN | 37.0   | -       | -
    Mask R-CNN ResNet-50 FPN   | 37.9   | 34.6    | -

    For person keypoint detection, the accuracies for the pre-trained models are as follows:

    Network                      | box AP | mask AP | keypoint AP
    -----------------------------|--------|---------|------------
    Keypoint R-CNN ResNet-50 FPN | 54.6   | -       | 65.0

    For person keypoint detection, the pre-trained model returns the keypoints in the following order:

    COCO_PERSON_KEYPOINT_NAMES = [
        'nose',
        'left_eye',
        'right_eye',
        'left_ear',
        'right_ear',
        'left_shoulder',
        'right_shoulder',
        'left_elbow',
        'right_elbow',
        'left_wrist',
        'right_wrist',
        'left_hip',
        'right_hip',
        'left_knee',
        'right_knee',
        'left_ankle',
        'right_ankle'
    ]

    Runtime characteristics

    The implementations of the models for object detection, instance segmentation and keypoint detection are efficient.

    In the following table, we use 8 V100 GPUs, with CUDA 10.0 and CUDNN 7.4 to report the results. During training, we use a batch size of 2 per GPU, and during testing a batch size of 1 is used.

    For test time, we report the time for the model evaluation and postprocessing (including mask pasting in image), but not the time for computing the precision-recall.

    Network                      | train time (s / it) | test time (s / it) | memory (GB)
    -----------------------------|---------------------|--------------------|------------
    Faster R-CNN ResNet-50 FPN   | 0.2288              | 0.0590             | 5.2
    Mask R-CNN ResNet-50 FPN     | 0.2728              | 0.0903             | 5.4
    Keypoint R-CNN ResNet-50 FPN | 0.3789              | 0.1242             | 6.8

    Faster R-CNN

    torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, **kwargs)[source]

    Constructs a Faster R-CNN model with a ResNet-50-FPN backbone.

    The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each image, and should be in 0-1 range. Different images can have different sizes.

    The behavior of the model changes depending on whether it is in training or evaluation mode.

    During training, the model expects both the input tensors and a targets argument (a list of dictionaries), each containing:

    • boxes (FloatTensor[N, 4]): the ground-truth boxes in [x1, y1, x2, y2] format, with values between 0 and H and 0 and W
    • labels (Int64Tensor[N]): the class label for each ground-truth box

    The model returns a Dict[Tensor] during training, containing the classification and regression losses for both the RPN and the R-CNN.
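    As a hedged sketch of how a training step uses these conventions (the images, boxes and labels below are illustrative placeholders):

    >>> model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
    >>> model.train()
    >>> images = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
    >>> targets = [{'boxes': torch.tensor([[10., 20., 100., 200.]]),
    >>>             'labels': torch.tensor([1])} for _ in images]
    >>> loss_dict = model(images, targets)   # RPN and R-CNN classification/regression losses
    >>> loss = sum(loss_dict.values())       # combine into a scalar before backward()
    >>> loss.backward()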

    During inference, the model requires only the input tensors, and returns the post-processed predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as follows:

    • boxes (FloatTensor[N, 4]): the predicted boxes in [x1, y1, x2, y2] format, with values between 0 and H and 0 and W
    • labels (Int64Tensor[N]): the predicted labels for each image
    • scores (Tensor[N]): the scores of each prediction

    Example:

    >>> model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
    >>> model.eval()
    >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
    >>> predictions = model(x)

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on COCO train2017
    • progress (bool) – If True, displays a progress bar of the download to stderr

    Mask R-CNN

    torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, **kwargs)[source]

    Constructs a Mask R-CNN model with a ResNet-50-FPN backbone.

    The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each image, and should be in 0-1 range. Different images can have different sizes.

    The behavior of the model changes depending on whether it is in training or evaluation mode.

    During training, the model expects both the input tensors and a targets argument (a list of dictionaries), each containing:

    • boxes (FloatTensor[N, 4]): the ground-truth boxes in [x1, y1, x2, y2] format, with values between 0 and H and 0 and W
    • labels (Int64Tensor[N]): the class label for each ground-truth box
    • masks (UInt8Tensor[N, 1, H, W]): the segmentation binary masks for each instance

    The model returns a Dict[Tensor] during training, containing the classification and regression losses for both the RPN and the R-CNN, and the mask loss.

    During inference, the model requires only the input tensors, and returns the post-processed predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as follows:

    • boxes (FloatTensor[N, 4]): the predicted boxes in [x1, y1, x2, y2] format, with values between 0 and H and 0 and W
    • labels (Int64Tensor[N]): the predicted labels for each image
    • scores (Tensor[N]): the scores of each prediction
    • masks (UInt8Tensor[N, 1, H, W]): the predicted masks for each instance, in 0-1 range. In order to obtain the final segmentation masks, the soft masks can be thresholded, generally with a value of 0.5 (mask >= 0.5)

    Example:

    >>> model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
    >>> model.eval()
    >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
    >>> predictions = model(x)

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on COCO train2017
    • progress (bool) – If True, displays a progress bar of the download to stderr
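    Continuing the example above, a short sketch of turning the soft masks into binary instance masks by thresholding at 0.5:

    >>> soft_masks = predictions[0]['masks']   # [N, 1, H, W], values in 0-1
    >>> binary_masks = soft_masks >= 0.5       # one boolean mask per detected instance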

    Keypoint R-CNN

    torchvision.models.detection.keypointrcnn_resnet50_fpn(pretrained=False, progress=True, num_classes=2, num_keypoints=17, pretrained_backbone=True, **kwargs)[source]

    Constructs a Keypoint R-CNN model with a ResNet-50-FPN backbone.

    The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each image, and should be in 0-1 range. Different images can have different sizes.

    The behavior of the model changes depending on whether it is in training or evaluation mode.

    During training, the model expects both the input tensors and a targets argument (a list of dictionaries), each containing:

    • boxes (FloatTensor[N, 4]): the ground-truth boxes in [x1, y1, x2, y2] format, with values between 0 and H and 0 and W
    • labels (Int64Tensor[N]): the class label for each ground-truth box
    • keypoints (FloatTensor[N, K, 3]): the K keypoint locations for each of the N instances, in the format [x, y, visibility], where visibility=0 means that the keypoint is not visible

    The model returns a Dict[Tensor] during training, containing the classification and regression losses for both the RPN and the R-CNN, and the keypoint loss.

    During inference, the model requires only the input tensors, and returns the post-processed predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as follows:

    • boxes (FloatTensor[N, 4]): the predicted boxes in [x1, y1, x2, y2] format, with values between 0 and H and 0 and W
    • labels (Int64Tensor[N]): the predicted labels for each image
    • scores (Tensor[N]): the scores of each prediction
    • keypoints (FloatTensor[N, K, 3]): the locations of the predicted keypoints, in [x, y, v] format

    Example:

    >>> model = torchvision.models.detection.keypointrcnn_resnet50_fpn(pretrained=True)
    >>> model.eval()
    >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
    >>> predictions = model(x)

    Parameters

    • pretrained (bool) – If True, returns a model pre-trained on COCO train2017
    • progress (bool) – If True, displays a progress bar of the download to stderr
\ No newline at end of file
diff --git a/docs/stable/torchvision/transforms.html b/docs/stable/torchvision/transforms.html
new file mode 100644
index 000000000000..819f8d2c614f
--- /dev/null
+++ b/docs/stable/torchvision/transforms.html
@@ -0,0 +1,1687 @@

    torchvision.transforms

    Transforms are common image transformations. They can be chained together using Compose. Additionally, there is the torchvision.transforms.functional module. Functional transforms give fine-grained control over the transformations. This is useful if you have to build a more complex transformation pipeline (e.g. in the case of segmentation tasks).

    class torchvision.transforms.Compose(transforms)[source]

    Composes several transforms together.

    Parameters

    transforms (list of Transform objects) – list of transforms to compose.

    Example

    >>> transforms.Compose([
    >>>     transforms.CenterCrop(10),
    >>>     transforms.ToTensor(),
    >>> ])

    Transforms on PIL Image

    class torchvision.transforms.CenterCrop(size)[source]

    Crops the given PIL Image at the center.

    Parameters

    size (sequence or int) – Desired output size of the crop. If size is an int instead of sequence like (h, w), a square crop (size, size) is made.

    class torchvision.transforms.ColorJitter(brightness=0, contrast=0, saturation=0, hue=0)[source]

    Randomly change the brightness, contrast and saturation of an image.

    Parameters

    • brightness (float or tuple of float (min, max)) – How much to jitter brightness. brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness] or the given [min, max]. Should be non negative numbers.
    • contrast (float or tuple of float (min, max)) – How much to jitter contrast. contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast] or the given [min, max]. Should be non negative numbers.
    • saturation (float or tuple of float (min, max)) – How much to jitter saturation. saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation] or the given [min, max]. Should be non negative numbers.
    • hue (float or tuple of float (min, max)) – How much to jitter hue. hue_factor is chosen uniformly from [-hue, hue] or the given [min, max]. Should have 0 <= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
    class torchvision.transforms.FiveCrop(size)[source]

    Crop the given PIL Image into four corners and the central crop.

    Note

    This transform returns a tuple of images and there may be a mismatch in the number of inputs and targets your Dataset returns. See below for an example of how to deal with this.

    Parameters

    size (sequence or int) – Desired output size of the crop. If size is an int instead of sequence like (h, w), a square crop of size (size, size) is made.

    Example

    >>> transform = Compose([
    >>>    FiveCrop(size), # this is a list of PIL Images
    >>>    Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor
    >>> ])
    >>> # In your test loop you can do the following:
    >>> input, target = batch # input is a 5d tensor, target is 2d
    >>> bs, ncrops, c, h, w = input.size()
    >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops
    >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops
    class torchvision.transforms.Grayscale(num_output_channels=1)[source]

    Convert image to grayscale.

    Parameters

    num_output_channels (int) – (1 or 3) number of channels desired for output image

    Returns

    Grayscale version of the input.
    - If num_output_channels == 1 : returned image is single channel
    - If num_output_channels == 3 : returned image is 3 channel with r == g == b

    Return type

    PIL Image
    class torchvision.transforms.Pad(padding, fill=0, padding_mode='constant')[source]

    Pad the given PIL Image on all sides with the given “pad” value.

    Parameters

    • padding (int or tuple) – Padding on each border. If a single int is provided this is used to pad all borders. If tuple of length 2 is provided this is the padding on left/right and top/bottom respectively. If a tuple of length 4 is provided this is the padding for the left, top, right and bottom borders respectively.
    • fill (int or tuple) – Pixel fill value for constant fill. Default is 0. If a tuple of length 3, it is used to fill R, G, B channels respectively. This value is only used when the padding_mode is constant.
    • padding_mode (str) – Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
      • constant: pads with a constant value, this value is specified with fill
      • edge: pads with the last value at the edge of the image
      • reflect: pads with reflection of image without repeating the last value on the edge. For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode will result in [3, 2, 1, 2, 3, 4, 3, 2]
      • symmetric: pads with reflection of image repeating the last value on the edge. For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode will result in [2, 1, 1, 2, 3, 4, 4, 3]
    class torchvision.transforms.RandomAffine(degrees, translate=None, scale=None, shear=None, resample=False, fillcolor=0)[source]

    Random affine transformation of the image keeping center invariant.

    Parameters

    • degrees (sequence or float or int) – Range of degrees to select from. If degrees is a number instead of sequence like (min, max), the range of degrees will be (-degrees, +degrees). Set to 0 to deactivate rotations.
    • translate (tuple, optional) – tuple of maximum absolute fraction for horizontal and vertical translations. For example translate=(a, b), then horizontal shift is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.
    • scale (tuple, optional) – scaling factor interval, e.g. (a, b), then scale is randomly sampled from the range a <= scale <= b. Will keep original scale by default.
    • shear (sequence or float or int, optional) – Range of degrees to select from. If shear is a number, a shear parallel to the x axis in the range (-shear, +shear) will be applied. Else if shear is a tuple or list of 2 values, a shear parallel to the x axis in the range (shear[0], shear[1]) will be applied. Else if shear is a tuple or list of 4 values, an x-axis shear in (shear[0], shear[1]) and a y-axis shear in (shear[2], shear[3]) will be applied. Will not apply shear by default.
    • resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional) – An optional resampling filter. See filters for more information. If omitted, or if the image has mode “1” or “P”, it is set to PIL.Image.NEAREST.
    • fillcolor (tuple or int) – Optional fill color (tuple for RGB images, int for grayscale) for the area outside the transform in the output image. (Pillow>=5.0.0)
    class torchvision.transforms.RandomApply(transforms, p=0.5)[source]

    Apply randomly a list of transformations with a given probability.

    Parameters

    • transforms (list or tuple) – list of transformations
    • p (float) – probability

    class torchvision.transforms.RandomChoice(transforms)[source]

    Apply a single transformation randomly picked from a list.
    class torchvision.transforms.RandomCrop(size, padding=None, pad_if_needed=False, fill=0, padding_mode='constant')[source]

    Crop the given PIL Image at a random location.

    Parameters

    • size (sequence or int) – Desired output size of the crop. If size is an int instead of sequence like (h, w), a square crop (size, size) is made.
    • padding (int or sequence, optional) – Optional padding on each border of the image. Default is None, i.e. no padding. If a sequence of length 4 is provided, it is used to pad left, top, right, bottom borders respectively. If a sequence of length 2 is provided, it is used to pad left/right, top/bottom borders, respectively.
    • pad_if_needed (boolean) – It will pad the image if smaller than the desired size to avoid raising an exception. Since cropping is done after padding, the padding effectively appears at a random offset.
    • fill – Pixel fill value for constant fill. Default is 0. If a tuple of length 3, it is used to fill R, G, B channels respectively. This value is only used when the padding_mode is constant.
    • padding_mode – Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
      • constant: pads with a constant value, this value is specified with fill
      • edge: pads with the last value on the edge of the image
      • reflect: pads with reflection of image (without repeating the last value on the edge). Padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode will result in [3, 2, 1, 2, 3, 4, 3, 2]
      • symmetric: pads with reflection of image (repeating the last value on the edge). Padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode will result in [2, 1, 1, 2, 3, 4, 4, 3]
    class torchvision.transforms.RandomGrayscale(p=0.1)[source]

    Randomly convert image to grayscale with a probability of p (default 0.1).

    Parameters

    p (float) – probability that image should be converted to grayscale.

    Returns

    Grayscale version of the input image with probability p and unchanged with probability (1-p).
    - If input image is 1 channel: grayscale version is 1 channel
    - If input image is 3 channel: grayscale version is 3 channel with r == g == b

    Return type

    PIL Image
    class torchvision.transforms.RandomHorizontalFlip(p=0.5)[source]

    Horizontally flip the given PIL Image randomly with a given probability.

    Parameters

    p (float) – probability of the image being flipped. Default value is 0.5.

    class torchvision.transforms.RandomOrder(transforms)[source]

    Apply a list of transformations in a random order.
    class torchvision.transforms.RandomPerspective(distortion_scale=0.5, p=0.5, interpolation=3)[source]

    Performs a perspective transformation of the given PIL Image randomly with a given probability.

    Parameters

    • interpolation – Default: Image.BICUBIC
    • p (float) – probability of the image being perspectively transformed. Default value is 0.5.
    • distortion_scale (float) – controls the degree of distortion and ranges from 0 to 1. Default value is 0.5.
    class torchvision.transforms.RandomResizedCrop(size, scale=(0.08, 1.0), ratio=(0.75, 1.3333333333333333), interpolation=2)[source]

    Crop the given PIL Image to random size and aspect ratio.

    A crop of random size (default: 0.08 to 1.0) of the original size and a random aspect ratio (default: 3/4 to 4/3) of the original aspect ratio is made. This crop is finally resized to the given size. This is popularly used to train the Inception networks.

    Parameters

    • size – expected output size of each edge
    • scale – range of size of the origin size cropped
    • ratio – range of aspect ratio of the origin aspect ratio cropped
    • interpolation – Default: PIL.Image.BILINEAR
    class torchvision.transforms.RandomRotation(degrees, resample=False, expand=False, center=None)[source]

    Rotate the image by angle.

    Parameters

    • degrees (sequence or float or int) – Range of degrees to select from. If degrees is a number instead of sequence like (min, max), the range of degrees will be (-degrees, +degrees).
    • resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional) – An optional resampling filter. See filters for more information. If omitted, or if the image has mode “1” or “P”, it is set to PIL.Image.NEAREST.
    • expand (bool, optional) – Optional expansion flag. If true, expands the output to make it large enough to hold the entire rotated image. If false or omitted, make the output image the same size as the input image. Note that the expand flag assumes rotation around the center and no translation.
    • center (2-tuple, optional) – Optional center of rotation. Origin is the upper left corner. Default is the center of the image.
    class torchvision.transforms.RandomSizedCrop(*args, **kwargs)[source]

    Note: This transform is deprecated in favor of RandomResizedCrop.

    class torchvision.transforms.RandomVerticalFlip(p=0.5)[source]

    Vertically flip the given PIL Image randomly with a given probability.

    Parameters

    p (float) – probability of the image being flipped. Default value is 0.5.
    class torchvision.transforms.Resize(size, interpolation=2)[source]

    Resize the input PIL Image to the given size.

    Parameters

    • size (sequence or int) – Desired output size. If size is a sequence like (h, w), output size will be matched to this. If size is an int, the smaller edge of the image will be matched to this number, i.e. if height > width, then the image will be rescaled to (size * height / width, size).
    • interpolation (int, optional) – Desired interpolation. Default is PIL.Image.BILINEAR.

    class torchvision.transforms.Scale(*args, **kwargs)[source]

    Note: This transform is deprecated in favor of Resize.
    class torchvision.transforms.TenCrop(size, vertical_flip=False)[source]

    Crop the given PIL Image into four corners and the central crop plus the flipped version of these (horizontal flipping is used by default).

    Note

    This transform returns a tuple of images and there may be a mismatch in the number of inputs and targets your Dataset returns. See below for an example of how to deal with this.

    Parameters

    • size (sequence or int) – Desired output size of the crop. If size is an int instead of sequence like (h, w), a square crop (size, size) is made.
    • vertical_flip (bool) – Use vertical flipping instead of horizontal

    Example

    >>> transform = Compose([
    >>>    TenCrop(size), # this is a list of PIL Images
    >>>    Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor
    >>> ])
    >>> # In your test loop you can do the following:
    >>> input, target = batch # input is a 5d tensor, target is 2d
    >>> bs, ncrops, c, h, w = input.size()
    >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops
    >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops

    Transforms on torch.*Tensor

    class torchvision.transforms.LinearTransformation(transformation_matrix, mean_vector)[source]

    Transform a tensor image with a square transformation matrix and a mean_vector computed offline. Given transformation_matrix and mean_vector, this transform will flatten the torch.*Tensor, subtract mean_vector from it, compute the dot product with the transformation matrix, and reshape the tensor back to its original shape.

    Applications:

    whitening transformation: Suppose X is a column vector of zero-centered data. Then compute the data covariance matrix [D x D] with torch.mm(X.t(), X), perform SVD on this matrix and pass it as transformation_matrix.

    Parameters

    • transformation_matrix (Tensor) – tensor [D x D], D = C x H x W
    • mean_vector (Tensor) – tensor [D], D = C x H x W
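    A hedged sketch of computing such a whitening matrix offline (the random X below is only a stand-in for a real matrix of flattened training images):

    import torch
    from torchvision import transforms

    # Stand-in for a real dataset: 1000 flattened 3x32x32 images
    X = torch.randn(1000, 3 * 32 * 32)
    mean_vector = X.mean(dim=0)
    X_centered = X - mean_vector

    # Data covariance [D x D], then SVD, as described above
    cov = torch.mm(X_centered.t(), X_centered) / X_centered.size(0)
    U, S, _ = torch.svd(cov)
    # ZCA whitening matrix; the epsilon guards against dividing by near-zero singular values
    W = torch.mm(U, torch.mm(torch.diag(1.0 / torch.sqrt(S + 1e-5)), U.t()))

    whiten = transforms.LinearTransformation(W, mean_vector)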
    class torchvision.transforms.Normalize(mean, std, inplace=False)[source]

    Normalize a tensor image with mean and standard deviation. Given mean: (M1,...,Mn) and std: (S1,..,Sn) for n channels, this transform will normalize each channel of the input torch.*Tensor, i.e. input[channel] = (input[channel] - mean[channel]) / std[channel]

    Note

    This transform acts out of place, i.e., it does not mutate the input tensor.

    Parameters

    • mean (sequence) – Sequence of means for each channel.
    • std (sequence) – Sequence of standard deviations for each channel.
    • inplace (bool, optional) – Bool to make this operation in-place.

    __call__(tensor)[source]

    Parameters

    tensor (Tensor) – Tensor image of size (C, H, W) to be normalized.

    Returns

    Normalized Tensor image.

    Return type

    Tensor
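    A quick sketch of the per-channel arithmetic (the values here are illustrative):

    >>> t = torch.ones(3, 2, 2)
    >>> norm = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.25, 0.25, 0.25])
    >>> norm(t)[0, 0, 0]   # (1.0 - 0.5) / 0.25
    tensor(2.)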
    class torchvision.transforms.RandomErasing(p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False)[source]

    Randomly selects a rectangle region in an image and erases its pixels. ‘Random Erasing Data Augmentation’ by Zhong et al. See https://arxiv.org/pdf/1708.04896.pdf

    Parameters

    • p – probability that the random erasing operation will be performed.
    • scale – range of proportion of erased area against input image.
    • ratio – range of aspect ratio of erased area.
    • value – erasing value. Default is 0. If a single int, it is used to erase all pixels. If a tuple of length 3, it is used to erase R, G, B channels respectively. If a str of ‘random’, erases each pixel with random values.
    • inplace – boolean to make this transform inplace. Default set to False.

    Returns

    Erased Image.

    Example:

    >>> transform = transforms.Compose([
    >>>     transforms.RandomHorizontalFlip(),
    >>>     transforms.ToTensor(),
    >>>     transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    >>>     transforms.RandomErasing(),
    >>> ])

    Conversion Transforms

    class torchvision.transforms.ToPILImage(mode=None)[source]

    Convert a tensor or an ndarray to PIL Image.

    Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape H x W x C to a PIL Image while preserving the value range.

    Parameters

    mode (PIL.Image mode) – color space and pixel depth of input data (optional). If mode is None (default) there are some assumptions made about the input data:

    • If the input has 4 channels, the mode is assumed to be RGBA.
    • If the input has 3 channels, the mode is assumed to be RGB.
    • If the input has 2 channels, the mode is assumed to be LA.
    • If the input has 1 channel, the mode is determined by the data type (i.e. int, float, short).

    __call__(pic)[source]

    Parameters

    pic (Tensor or numpy.ndarray) – Image to be converted to PIL Image.

    Returns

    Image converted to PIL Image.

    Return type

    PIL Image
    class torchvision.transforms.ToTensor[source]

    Convert a PIL Image or numpy.ndarray to tensor.

    Converts a PIL Image or numpy.ndarray (H x W x C) in the range [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1) or if the numpy.ndarray has dtype = np.uint8.

    In the other cases, tensors are returned without scaling.

    __call__(pic)[source]

    Parameters

    pic (PIL Image or numpy.ndarray) – Image to be converted to tensor.

    Returns

    Converted image.

    Return type

    Tensor
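    A quick illustration of the conversion (uint8 H x W x C array in, scaled float C x H x W tensor out):

    >>> import numpy as np
    >>> arr = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)  # H x W x C, [0, 255]
    >>> t = transforms.ToTensor()(arr)
    >>> t.shape    # C x H x W
    torch.Size([3, 32, 32])
    >>> t.dtype    # values scaled to [0.0, 1.0]
    torch.float32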

    Generic Transforms

    class torchvision.transforms.Lambda(lambd)[source]

    Apply a user-defined lambda as a transform.

    Parameters

    lambd (function) – Lambda/function to be used for transform.

    Functional Transforms

    Functional transforms give you fine-grained control of the transformation pipeline. As opposed to the transformations above, functional transforms don’t contain a random number generator for their parameters. That means you have to specify/generate all parameters, but you can reuse the functional transform. For example, you can apply a functional transform to multiple images like this:

    import torchvision.transforms.functional as TF
    import random

    def my_segmentation_transforms(image, segmentation):
        if random.random() > 0.5:
            angle = random.randint(-30, 30)
            image = TF.rotate(image, angle)
            segmentation = TF.rotate(segmentation, angle)
        # more transforms ...
        return image, segmentation
    torchvision.transforms.functional.adjust_brightness(img, brightness_factor)[source]

    Adjust brightness of an Image.

    Parameters

    • img (PIL Image) – PIL Image to be adjusted.
    • brightness_factor (float) – How much to adjust the brightness. Can be any non negative number. 0 gives a black image, 1 gives the original image while 2 increases the brightness by a factor of 2.

    Returns

    Brightness adjusted image.

    Return type

    PIL Image
    torchvision.transforms.functional.adjust_contrast(img, contrast_factor)[source]

    Adjust contrast of an Image.

    Parameters

    • img (PIL Image) – PIL Image to be adjusted.
    • contrast_factor (float) – How much to adjust the contrast. Can be any non negative number. 0 gives a solid gray image, 1 gives the original image while 2 increases the contrast by a factor of 2.

    Returns

    Contrast adjusted image.

    Return type

    PIL Image
    torchvision.transforms.functional.adjust_gamma(img, gamma, gain=1)[source]

    Perform gamma correction on an image.

    Also known as Power Law Transform. Intensities in RGB mode are adjusted based on the following equation:

    \[I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}\]

    See Gamma Correction for more details.

    Parameters

    • img (PIL Image) – PIL Image to be adjusted.
    • gamma (float) – Non negative real number, same as \(\gamma\) in the equation. gamma larger than 1 makes the shadows darker, while gamma smaller than 1 makes dark regions lighter.
    • gain (float) – The constant multiplier.
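    For example, with gain = 1 and gamma = 2.2, a mid-gray input of I_in = 128 maps to 255 × (128/255)^2.2 ≈ 56, i.e. the shadows get darker:

    >>> from torchvision.transforms import functional as TF
    >>> darker = TF.adjust_gamma(img, gamma=2.2)    # img is an illustrative PIL Image
    >>> brighter = TF.adjust_gamma(img, gamma=0.5)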
    torchvision.transforms.functional.adjust_hue(img, hue_factor)[source]

    Adjust hue of an image.

    The image hue is adjusted by converting the image to HSV and cyclically shifting the intensities in the hue channel (H). The image is then converted back to the original image mode.

    hue_factor is the amount of shift in the H channel and must be in the interval [-0.5, 0.5].

    See Hue for more details.

    Parameters

    • img (PIL Image) – PIL Image to be adjusted.
    • hue_factor (float) – How much to shift the hue channel. Should be in [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of the hue channel in HSV space in positive and negative direction respectively. 0 means no shift. Therefore, both -0.5 and 0.5 will give an image with complementary colors while 0 gives the original image.

    Returns

    Hue adjusted image.

    Return type

    PIL Image
    torchvision.transforms.functional.adjust_saturation(img, saturation_factor)[source]

    Adjust color saturation of an image.

    Parameters

    • img (PIL Image) – PIL Image to be adjusted.
    • saturation_factor (float) – How much to adjust the saturation. 0 will give a black and white image, 1 will give the original image while 2 will enhance the saturation by a factor of 2.

    Returns

    Saturation adjusted image.

    Return type

    PIL Image
    torchvision.transforms.functional.affine(img, angle, translate, scale, shear, resample=0, fillcolor=None)[source]

    Apply affine transformation on the image keeping image center invariant.

    Parameters

    • img (PIL Image) – PIL Image to be rotated.
    • angle (float or int) – rotation angle in degrees between -180 and 180, clockwise direction.
    • translate (list or tuple of integers) – horizontal and vertical translations (post-rotation translation)
    • scale (float) – overall scale
    • shear (float or tuple or list) – shear angle value in degrees between -180 to 180, clockwise direction. If a tuple or list is specified, the first value corresponds to a shear parallel to the x axis, while the second value corresponds to a shear parallel to the y axis.
    • resample (PIL.Image.NEAREST or PIL.Image.BILINEAR or PIL.Image.BICUBIC, optional) – An optional resampling filter. See filters for more information. If omitted, or if the image has mode “1” or “P”, it is set to PIL.Image.NEAREST.
    • fillcolor (int) – Optional fill color for the area outside the transform in the output image. (Pillow>=5.0.0)
    +torchvision.transforms.functional.crop(img, i, j, h, w)[source]
    +

    Crop the given PIL Image.

    +
    +
    Parameters
    +
      +
    • img (PIL Image) – Image to be cropped.

    • +
    • i (int) – i in (i,j) i.e coordinates of the upper left corner.

    • +
    • j (int) – j in (i,j) i.e coordinates of the upper left corner.

    • +
    • h (int) – Height of the cropped image.

    • +
    • w (int) – Width of the cropped image.

    • +
    +
    +
    Returns
    +

    Cropped image.

    +
    +
    Return type
    +

    PIL Image

    +
    +
    +
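For illustration (the 100×80 blank image is our own; note that PIL reports sizes as (width, height)):

>>> from PIL import Image
>>> from torchvision.transforms import functional as F
>>> img = Image.new('RGB', (100, 80))      # width 100, height 80
>>> patch = F.crop(img, 10, 20, 30, 40)    # i=10 (row), j=20 (column), h=30, w=40
>>> patch.size
(40, 30)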
torchvision.transforms.functional.erase(img, i, j, h, w, v, inplace=False)[source]

Erase the input Tensor Image with the given value.

Parameters
• img (Tensor Image) – Tensor image of size (C, H, W) to be erased.
• i (int) – i in (i, j), i.e. the coordinates of the upper left corner.
• j (int) – j in (i, j), i.e. the coordinates of the upper left corner.
• h (int) – Height of the erased region.
• w (int) – Width of the erased region.
• v – Erasing value.
• inplace (bool, optional) – If True, the operation is performed in place. Default: False.

Returns
Erased image.

Return type
Tensor Image
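A small sketch (the random tensor image is our own test input):

>>> import torch
>>> from torchvision.transforms import functional as F
>>> img = torch.rand(3, 32, 32)                    # (C, H, W) tensor image
>>> out = F.erase(img, i=4, j=4, h=8, w=8, v=0.0)  # zero out an 8x8 block
>>> out[:, 4:12, 4:12].abs().sum().item()          # the erased block is all zeros
0.0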
torchvision.transforms.functional.five_crop(img, size)[source]

Crop the given PIL Image into four corners and the central crop.

Note

This transform returns a tuple of images and there may be a mismatch in the number of inputs and targets your Dataset returns.

Parameters
size (sequence or int) – Desired output size of the crop. If size is an int instead of a sequence like (h, w), a square crop (size, size) is made.

Returns
tuple (tl, tr, bl, br, center) – the corresponding top left, top right, bottom left, bottom right and center crops.

Return type
tuple
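For illustration (the 256×256 blank image is ours):

>>> from PIL import Image
>>> from torchvision.transforms import functional as F
>>> img = Image.new('RGB', (256, 256))
>>> tl, tr, bl, br, center = F.five_crop(img, 224)  # five 224x224 crops
>>> center.size
(224, 224)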
torchvision.transforms.functional.hflip(img)[source]

Horizontally flip the given PIL Image.

Parameters
img (PIL Image) – Image to be flipped.

Returns
Horizontally flipped image.

Return type
PIL Image
torchvision.transforms.functional.normalize(tensor, mean, std, inplace=False)[source]

Normalize a tensor image with mean and standard deviation.

Note

This transform acts out of place by default, i.e., it does not mutate the input tensor.

See Normalize for more details.

Parameters
• tensor (Tensor) – Tensor image of size (C, H, W) to be normalized.
• mean (sequence) – Sequence of means for each channel.
• std (sequence) – Sequence of standard deviations for each channel.
• inplace (bool, optional) – Bool to make this operation in-place.

Returns
Normalized Tensor image.

Return type
Tensor
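A minimal sketch of the channel-wise computation (the random input is ours):

>>> import torch
>>> from torchvision.transforms import functional as F
>>> t = torch.rand(3, 8, 8)   # values in [0, 1)
>>> out = F.normalize(t, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
>>> # out = (t - 0.5) / 0.5 per channel, i.e. values in (-1, 1); t itself is left untouched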
torchvision.transforms.functional.pad(img, padding, fill=0, padding_mode='constant')[source]

Pad the given PIL Image on all sides with the specified padding mode and fill value.

Parameters
• img (PIL Image) – Image to be padded.
• padding (int or tuple) – Padding on each border. If a single int is provided, it is used to pad all borders. If a tuple of length 2 is provided, it is the padding on left/right and top/bottom respectively. If a tuple of length 4 is provided, it is the padding for the left, top, right and bottom borders respectively.
• fill – Pixel fill value for constant fill. Default is 0. If a tuple of length 3, it is used to fill the R, G, B channels respectively. This value is only used when padding_mode is constant.
• padding_mode – Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
  • constant: pads with a constant value; this value is specified with fill.
  • edge: pads with the last value at the edge of the image.
  • reflect: pads with reflection of the image (without repeating the last value on the edge). Padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode will result in [3, 2, 1, 2, 3, 4, 3, 2].
  • symmetric: pads with reflection of the image (repeating the last value on the edge). Padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode will result in [2, 1, 1, 2, 3, 4, 4, 3].

Returns
Padded image.

Return type
PIL Image
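For illustration of the size arithmetic (the 4×4 blank image is ours):

>>> from PIL import Image
>>> from torchvision.transforms import functional as F
>>> img = Image.new('RGB', (4, 4))
>>> F.pad(img, padding=2, fill=(255, 0, 0)).size             # constant red border, 2 px all around
(8, 8)
>>> F.pad(img, padding=(1, 2), padding_mode='reflect').size  # 1 px left/right, 2 px top/bottom
(6, 8)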
torchvision.transforms.functional.perspective(img, startpoints, endpoints, interpolation=3)[source]

Perform a perspective transform of the given PIL Image.

Parameters
• img (PIL Image) – Image to be transformed.
• startpoints – List containing the [top-left, top-right, bottom-right, bottom-left] corners of the original image.
• endpoints – List containing the [top-left, top-right, bottom-right, bottom-left] corners of the transformed image.
• interpolation – Default: Image.BICUBIC.

Returns
Perspectively transformed Image.

Return type
PIL Image
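A minimal sketch (the corner coordinates below are our own made-up mapping):

>>> from PIL import Image
>>> from torchvision.transforms import functional as F
>>> img = Image.new('RGB', (100, 100))
>>> start = [[0, 0], [99, 0], [99, 99], [0, 99]]  # original corners as (x, y)
>>> end = [[10, 5], [95, 0], [99, 99], [0, 90]]   # where those corners should land
>>> out = F.perspective(img, start, end)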
torchvision.transforms.functional.resize(img, size, interpolation=2)[source]

Resize the input PIL Image to the given size.

Parameters
• img (PIL Image) – Image to be resized.
• size (sequence or int) – Desired output size. If size is a sequence like (h, w), the output size will be matched to this. If size is an int, the smaller edge of the image will be matched to this number, maintaining the aspect ratio; i.e., if height > width, then the image will be rescaled to \(\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)\).
• interpolation (int, optional) – Desired interpolation. Default is PIL.Image.BILINEAR.

Returns
Resized image.

Return type
PIL Image
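For illustration of both forms of size (the 400×200 blank image is ours; PIL sizes are (width, height)):

>>> from PIL import Image
>>> from torchvision.transforms import functional as F
>>> img = Image.new('RGB', (400, 200))   # width 400, height 200
>>> F.resize(img, 100).size              # int: smaller edge -> 100, aspect ratio kept
(200, 100)
>>> F.resize(img, (50, 80)).size         # (h, w) sequence: exact output size
(80, 50)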
torchvision.transforms.functional.resized_crop(img, i, j, h, w, size, interpolation=2)[source]

Crop the given PIL Image and resize it to the desired size.

Notably used in RandomResizedCrop.

Parameters
• img (PIL Image) – Image to be cropped.
• i (int) – i in (i, j), i.e. the coordinates of the upper left corner.
• j (int) – j in (i, j), i.e. the coordinates of the upper left corner.
• h (int) – Height of the cropped image.
• w (int) – Width of the cropped image.
• size (sequence or int) – Desired output size. Same semantics as resize.
• interpolation (int, optional) – Desired interpolation. Default is PIL.Image.BILINEAR.

Returns
Cropped image.

Return type
PIL Image
torchvision.transforms.functional.rotate(img, angle, resample=False, expand=False, center=None)[source]

Rotate the image by angle.

Parameters
• img (PIL Image) – PIL Image to be rotated.
• angle (float or int) – Rotation angle in degrees, counter-clockwise.
• resample (PIL.Image.NEAREST or PIL.Image.BILINEAR or PIL.Image.BICUBIC, optional) – An optional resampling filter. See filters for more information. If omitted, or if the image has mode “1” or “P”, it is set to PIL.Image.NEAREST.
• expand (bool, optional) – Optional expansion flag. If true, expands the output image to make it large enough to hold the entire rotated image. If false or omitted, makes the output image the same size as the input image. Note that the expand flag assumes rotation around the center and no translation.
• center (2-tuple, optional) – Optional center of rotation. Origin is the upper left corner. Default is the center of the image.
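A quick sketch of the expand flag (the blank image is ours):

>>> from PIL import Image
>>> from torchvision.transforms import functional as F
>>> img = Image.new('RGB', (100, 50))
>>> F.rotate(img, 90).size               # canvas size is kept, corners are cut off
(100, 50)
>>> F.rotate(img, 90, expand=True).size  # canvas grows to hold the whole result
(50, 100)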
torchvision.transforms.functional.ten_crop(img, size, vertical_flip=False)[source]

Crop the given PIL Image into four corners and the central crop, plus the flipped version of these (horizontal flipping is used by default).

Note

This transform returns a tuple of images and there may be a mismatch in the number of inputs and targets your Dataset returns.

Parameters
• size (sequence or int) – Desired output size of the crop. If size is an int instead of a sequence like (h, w), a square crop (size, size) is made.
• vertical_flip (bool) – Use vertical flipping instead of horizontal.

Returns
tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip) – the corresponding top left, top right, bottom left, bottom right and center crops, and the same for the flipped image.

Return type
tuple
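Because ten crops are returned, a common pattern is to stack them into a batch; a minimal sketch (the blank image is ours):

>>> import torch
>>> from PIL import Image
>>> from torchvision.transforms import functional as F
>>> img = Image.new('RGB', (256, 256))
>>> crops = F.ten_crop(img, 224)                         # 5 crops + their flips
>>> batch = torch.stack([F.to_tensor(c) for c in crops])
>>> batch.shape
torch.Size([10, 3, 224, 224])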
torchvision.transforms.functional.to_grayscale(img, num_output_channels=1)[source]

Convert the image to a grayscale version of the image.

Parameters
img (PIL Image) – Image to be converted to grayscale.

Returns
Grayscale version of the image:
• if num_output_channels = 1, the returned image is single channel;
• if num_output_channels = 3, the returned image is 3-channel with r = g = b.

Return type
PIL Image
torchvision.transforms.functional.to_pil_image(pic, mode=None)[source]

Convert a tensor or an ndarray to a PIL Image.

See ToPILImage for more details.

Parameters
• pic (Tensor or numpy.ndarray) – Image to be converted to PIL Image.
• mode (PIL.Image mode) – Color space and pixel depth of the input data (optional).

Returns
Image converted to PIL Image.

Return type
PIL Image
torchvision.transforms.functional.to_tensor(pic)[source]

Convert a PIL Image or numpy.ndarray to a tensor.

See ToTensor for more details.

Parameters
pic (PIL Image or numpy.ndarray) – Image to be converted to tensor.

Returns
Converted image.

Return type
Tensor
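A minimal round-trip sketch (the random uint8 array is our own test input):

>>> import numpy as np
>>> from torchvision.transforms import functional as F
>>> arr = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)  # H x W x C
>>> t = F.to_tensor(arr)     # float tensor, C x H x W, values scaled to [0.0, 1.0]
>>> t.shape, t.dtype
(torch.Size([3, 32, 32]), torch.float32)
>>> pil = F.to_pil_image(t)  # and back to a PIL Image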
torchvision.transforms.functional.vflip(img)[source]

Vertically flip the given PIL Image.

Parameters
img (PIL Image) – Image to be flipped.

Returns
Vertically flipped image.

Return type
PIL Image
diff --git a/docs/stable/torchvision/utils.html b/docs/stable/torchvision/utils.html
new file mode 100644
index 000000000000..fda029d0a6ff
--- /dev/null
+++ b/docs/stable/torchvision/utils.html

    torchvision.utils

torchvision.utils.make_grid(tensor, nrow=8, padding=2, normalize=False, range=None, scale_each=False, pad_value=0)[source]

Make a grid of images.

Parameters
• tensor (Tensor or list) – 4D mini-batch Tensor of shape (B x C x H x W) or a list of images all of the same size.
• nrow (int, optional) – Number of images displayed in each row of the grid. The final grid size is (B / nrow, nrow). Default: 8.
• padding (int, optional) – Amount of padding. Default: 2.
• normalize (bool, optional) – If True, shift the image to the range (0, 1), by the min and max values specified by range. Default: False.
• range (tuple, optional) – Tuple (min, max) where min and max are numbers; these numbers are used to normalize the image. By default, min and max are computed from the tensor.
• scale_each (bool, optional) – If True, scale each image in the batch of images separately rather than over the (min, max) of all images. Default: False.
• pad_value (float, optional) – Value for the padded pixels. Default: 0.

    Example

    +

    See this notebook here

    +
    + +
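In addition, a minimal sketch of the grid arithmetic (the random batch is ours):

>>> import torch
>>> from torchvision.utils import make_grid
>>> batch = torch.rand(16, 3, 28, 28)   # 16 RGB images
>>> grid = make_grid(batch, nrow=4, padding=2, normalize=True)
>>> grid.shape                          # 4 x 4 tiles of 28 px plus 2 px padding: (28+2)*4+2 = 122
torch.Size([3, 122, 122])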
torchvision.utils.save_image(tensor, filename, nrow=8, padding=2, normalize=False, range=None, scale_each=False, pad_value=0)[source]

Save a given Tensor into an image file.

Parameters
• tensor (Tensor or list) – Image to be saved. If given a mini-batch tensor, saves the tensor as a grid of images by calling make_grid.
• **kwargs – Other arguments are documented in make_grid.
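For illustration (the random batch and the output filename sample_grid.png are our own choices):

>>> import torch
>>> from torchvision.utils import save_image
>>> batch = torch.rand(8, 3, 64, 64)
>>> save_image(batch, 'sample_grid.png', nrow=4)  # writes a 2 x 4 grid to disk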
diff --git a/docs/stable/type_info.html b/docs/stable/type_info.html
new file mode 100644
index 000000000000..f68e32742160
--- /dev/null
+++ b/docs/stable/type_info.html

    Type Info

The numerical properties of a torch.dtype can be accessed through either torch.finfo or torch.iinfo.

    torch.finfo

class torch.finfo

A torch.finfo is an object that represents the numerical properties of a floating point torch.dtype (i.e. torch.float32, torch.float64, and torch.float16). This is similar to numpy.finfo.

A torch.finfo provides the following attributes:

Name   Type    Description
bits   int     The number of bits occupied by the type.
eps    float   The smallest representable number such that 1.0 + eps != 1.0.
max    float   The largest representable number.
min    float   The smallest representable number (typically -max).
tiny   float   The smallest positive representable number.

Note

The constructor of torch.finfo can be called without argument, in which case the class is created for the PyTorch default dtype (as returned by torch.get_default_dtype()).
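A short sketch (the values shown assume the float32 default dtype):

>>> import torch
>>> torch.finfo(torch.float32).eps   # machine epsilon for float32, i.e. 2**-23
1.1920928955078125e-07
>>> torch.finfo().bits               # no argument: uses the default dtype
32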
    torch.iinfo

class torch.iinfo

A torch.iinfo is an object that represents the numerical properties of an integer torch.dtype (i.e. torch.uint8, torch.int8, torch.int16, torch.int32, and torch.int64). This is similar to numpy.iinfo.

A torch.iinfo provides the following attributes:

Name   Type   Description
bits   int    The number of bits occupied by the type.
max    int    The largest representable number.
min    int    The smallest representable number.
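For illustration:

>>> import torch
>>> torch.iinfo(torch.uint8).max                                # unsigned 8-bit
255
>>> torch.iinfo(torch.int16).min, torch.iinfo(torch.int16).max  # signed 16-bit
(-32768, 32767)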

From 01f3fd32c6b12ad54bc074ac6e68065f09d74c9b Mon Sep 17 00:00:00 2001
From: pytorchbot
Date: Thu, 1 Aug 2019 20:20:00 +0000
Subject: [PATCH 04/12] auto-generating sphinx docs

---
 docs/stable/_sources/data.rst.txt | 7 ++++---
 docs/stable/data.html             | 7 ++++---
 docs/stable/objects.inv           | Bin 11478 -> 11478 bytes
 docs/stable/searchindex.js        | 2 +-
 4 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/docs/stable/_sources/data.rst.txt b/docs/stable/_sources/data.rst.txt
index e074943fe544..3bd5022c5843 100644
--- a/docs/stable/_sources/data.rst.txt
+++ b/docs/stable/_sources/data.rst.txt
@@ -172,9 +172,10 @@
 better to not use automatic batching (where :attr:`collate_fn` is
 used to collate the samples), but let the data loader directly
 return each member of the :attr:`dataset` object.
-When both :attr:`batch_size` and :attr:`batch_sampler` are ``None``, automatic
-batching is disabled. Each sample obtained from the :attr:`dataset` is
-processed with the function passed as the :attr:`collate_fn` argument.
+When both :attr:`batch_size` and :attr:`batch_sampler` are ``None`` (default
+value for :attr:`batch_sampler` is already ``None``), automatic batching is
+disabled. Each sample obtained from the :attr:`dataset` is processed with the
+function passed as the :attr:`collate_fn` argument.
 **When automatic batching is disabled**, the default :attr:`collate_fn` simply
 converts NumPy arrays into PyTorch Tensors, and keeps everything else untouched.

diff --git a/docs/stable/data.html b/docs/stable/data.html
index fcda05deba1a..57ab98b5e25e 100644
--- a/docs/stable/data.html
+++ b/docs/stable/data.html
@@ -413,9 +413,10 @@

 Disable automatic batching … (where collate_fn is used to collate the samples), but let the data loader directly return each member of the dataset object.
-When both batch_size and batch_sampler are None, automatic batching is disabled. Each sample obtained from the dataset is processed with the function passed as the collate_fn argument.
+When both batch_size and batch_sampler are None (default value for batch_sampler is already None), automatic batching is disabled. Each sample obtained from the dataset is processed with the function passed as the collate_fn argument.
 When automatic batching is disabled, the default collate_fn simply converts NumPy arrays into PyTorch Tensors, and keeps everything else untouched.
 In this case, loading from a map-style dataset is roughly equivalent with:

diff --git a/docs/stable/objects.inv b/docs/stable/objects.inv
index 2b1eff0f3d26d62cc9ebe3e40d9b3f03cbf61b87..ff0260ee16298bb3ffa9c711fa936650760f69bb 100644
GIT binary patch

diff --git a/docs/stable/searchindex.js b/docs/stable/searchindex.js
index 5ba2bc0613b6..d150e5032aa9 100644
--- a/docs/stable/searchindex.js
+++ b/docs/stable/searchindex.js
@@ -1 +1 @@
,4,1,""],ger:[43,4,1,""],get_default_dtype:[43,4,1,""],get_num_interop_threads:[43,4,1,""],get_num_threads:[43,4,1,""],get_rng_state:[43,4,1,""],gt:[43,4,1,""],hamming_window:[43,4,1,""],hann_window:[43,4,1,""],histc:[43,4,1,""],hub:[17,0,0,"-"],ifft:[43,4,1,""],index_select:[43,4,1,""],initial_seed:[43,4,1,""],inverse:[43,4,1,""],irfft:[43,4,1,""],is_floating_point:[43,4,1,""],is_storage:[43,4,1,""],is_tensor:[43,4,1,""],isfinite:[43,4,1,""],isinf:[43,4,1,""],isnan:[43,4,1,""],jit:[19,0,0,"-"],kthvalue:[43,4,1,""],le:[43,4,1,""],lerp:[43,4,1,""],linspace:[43,4,1,""],load:[43,4,1,""],log10:[43,4,1,""],log1p:[43,4,1,""],log2:[43,4,1,""],log:[43,4,1,""],logdet:[43,4,1,""],logspace:[43,4,1,""],logsumexp:[43,4,1,""],lstsq:[43,4,1,""],lt:[43,4,1,""],lu:[43,4,1,""],lu_solve:[43,4,1,""],lu_unpack:[43,4,1,""],manual_seed:[43,4,1,""],masked_select:[43,4,1,""],matmul:[43,4,1,""],matrix_power:[43,4,1,""],matrix_rank:[43,4,1,""],max:[43,4,1,""],mean:[43,4,1,""],median:[43,4,1,""],meshgrid:[43,4,1,""],min:[43,4,1,""],mm:[43,4,1,""],mode:[43,4,1,""],mul:[43,4,1,""],multinomial:[43,4,1,""],multiprocessing:[21,0,0,"-"],mv:[43,4,1,""],mvlgamma:[43,4,1,""],narrow:[43,4,1,""],ne:[43,4,1,""],neg:[43,4,1,""],nn:[22,0,0,"-"],nonzero:[43,4,1,""],norm:[43,4,1,""],normal:[43,4,1,""],numel:[43,4,1,""],ones:[43,4,1,""],ones_like:[43,4,1,""],onnx:[36,0,0,"-"],optim:[37,0,0,"-"],orgqr:[43,4,1,""],ormqr:[43,4,1,""],pinverse:[43,4,1,""],pow:[43,4,1,""],prod:[43,4,1,""],qr:[43,4,1,""],rand:[43,4,1,""],rand_like:[43,4,1,""],randint:[43,4,1,""],randint_like:[43,4,1,""],randn:[43,4,1,""],randn_like:[43,4,1,""],randperm:[43,4,1,""],range:[43,4,1,""],reciprocal:[43,4,1,""],remainder:[43,4,1,""],renorm:[43,4,1,""],repeat_interleave:[43,4,1,""],reshape:[43,4,1,""],rfft:[43,4,1,""],roll:[43,4,1,""],rot90:[43,4,1,""],round:[43,4,1,""],rsqrt:[43,4,1,""],save:[43,4,1,""],seed:[43,4,1,""],set_default_dtype:[43,4,1,""],set_default_tensor_type:[43,4,1,""],set_flush_denormal:[43,4,1,""],set_num_interop_threads:[43,4,1,""],set_num_threads:[43,4,1,""],set_printoptions:[43,4,1,""],set_rng_state:[43,4,1,""],sigmoid:[43,4,1,""],sign:[43,4,1,""],sin:[43,4,1,""],sinh:[43,4,1,""],slogdet:[43,4,1,""],solve:[43,4,1,""],sort:[43,4,1,""],sparse_coo_tensor:[43,4,1,""],split:[43,4,1,""],sqrt:[43,4,1,""],squeeze:[43,4,1,""],stack:[43,4,1,""],std:[43,4,1,""],std_mean:[43,4,1,""],stft:[43,4,1,""],sum:[43,4,1,""],svd:[43,4,1,""],symeig:[43,4,1,""],t:[43,4,1,""],take:[43,4,1,""],tan:[43,4,1,""],tanh:[43,4,1,""],tensor:[43,4,1,""],tensordot:[43,4,1,""],topk:[43,4,1,""],trace:[43,4,1,""],transpose:[43,4,1,""],trapz:[43,4,1,""],triangular_solve:[43,4,1,""],tril:[43,4,1,""],tril_indices:[43,4,1,""],triu:[43,4,1,""],triu_indices:[43,4,1,""],trunc:[43,4,1,""],unbind:[43,4,1,""],unique:[43,4,1,""],unique_consecutive:[43,4,1,""],unsqueeze:[43,4,1,""],var_mean:[43,4,1,""],where:[43,4,1,""],zeros:[43,4,1,""],zeros_like:[43,4,1,""]},torchvision:{get_image_backend:[45,4,1,""],set_image_backend:[45,4,1,""]}},objnames:{"0":["py","module","Python module"],"1":["py","class","Python class"],"2":["py","method","Python method"],"3":["py","attribute","Python attribute"],"4":["py","function","Python function"],"5":["std","envvar","environment 
variable"]},objtypes:{"0":"py:module","1":"py:class","2":"py:method","3":"py:attribute","4":"py:function","5":"std:envvar"},terms:{"00000e":43,"0000e":[42,43],"041m":1,"048m":1,"0545e":42,"0949e":42,"10k":44,"10x7":22,"13x12":22,"1428e":43,"148m":1,"1921e":43,"1_batch_16":41,"1e6":37,"1hr":4,"1st":[15,26],"1x1":46,"20l":22,"224x224":46,"2gb":17,"2nd":[15,22,23,26,42],"2x3":[22,38],"32x4d":46,"32x8d":46,"3493e":43,"3842e":43,"3rd":[26,37,42],"3x4":22,"3xhxw":41,"4064e":43,"427l":44,"483m":1,"4842e":42,"4th":[26,44],"4us":1,"50k":44,"50x":46,"54_":41,"5751e":43,"5765e":42,"5955e":43,"5c106cde":[17,20],"5mb":46,"5x2":38,"5x7":22,"5x7x9":22,"60k":44,"640l":44,"6503e":43,"6531e":43,"727m":1,"7x7":22,"7x7x7":22,"7x9x8":22,"8000e":43,"816u":1,"8182e":42,"88131e":43,"9073e":[22,43],"abstract":[13,15],"boolean":[1,8,15,19,22,23,29,37,40,42,43,44,47],"break":[4,15,19,34,37,43],"byte":[8,15,19,37,39,42,43],"case":[1,2,8,13,14,17,21,22,23,25,26,27,28,30,32,34,36,37,38,41,42,43,44,47,49],"catch":19,"char":[39,42],"ciss\u00e9":22,"class":[1,8,13,14,15,19,21,22,23,29,30,32,33,34,36,37,38,39,40,41,42,43,44,46,47,49],"const":[31,36],"default":[1,3,7,8,14,17,20,21,22,23,24,25,27,28,29,30,35,36,37,38,39,41,42,43,44,46,47,48,49],"enum":[14,36],"export":[1,8,14,16,19,22,29,36],"final":[14,15,22,43,46,47,48],"float":[1,13,15,19,21,22,23,30,33,36,37,39,40,41,42,43,47,48,49],"function":[3,4,7,8,13,17,18,20,21,24,25,26,27,28,29,30,31,33,37,40,41,42,43,44,45],"herv\u00e9":22,"import":[1,4,5,7,8,13,14,19,21,22,25,28,29,30,31,32,33,36,37,41,42,43,44,46,47],"int":[8,13,14,15,19,21,22,23,35,36,37,38,39,40,41,42,43,44,47,48,49],"j\u00e9gou":22,"long":[4,5,13,21,22,23,26,29,30,32,36,39,40,41,42,43],"new":[1,3,5,8,13,14,15,17,19,21,22,25,28,29,31,32,35,37,39,41,42,43],"return":[0,1,3,7,8,13,14,15,16,17,19,20,21,22,23,24,27,28,29,31,35,36,37,38,39,40,42,43,44,46,47,49],"short":[19,22,23,26,39,40,42,43,47],"static":[1,19,31,36,39],"super":[13,19,22,29,36],"switch":[9,10,13,21,23,25,42,43,46],"throw":[22,42,43],"true":[1,3,7,8,11,13,14,15,17,19,20,21,22,23,25,26,28,29,30,31,33,36,37,38,39,40,41,42,43,44,46,47,48],"try":[2,4,11,14,17,19,22,23,30,32,36,37],"var":[1,22,42,43],"void":[31,43],"while":[5,13,14,15,19,22,23,25,30,32,37,41,42,43,47],Abs:36,And:[22,35,43,47],But:[1,4,19],For:[1,2,3,4,5,7,8,13,14,15,17,19,22,23,25,26,27,28,30,32,36,37,38,39,40,41,42,43,44,46,47],Going:46,Has:[22,23,43],Its:[22,37],NFS:14,NOT:[19,36,38,43],Not:[19,29],One:[14,19,22,23,26,27,31,33,36,37,41,43,44,46],Ops:[2,28,42],PRs:[4,5],RHS:43,Such:[7,13,43],That:[43,47],The:[1,3,5,7,8,13,14,15,16,17,19,20,21,22,23,24,26,27,28,30,31,32,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49],Then:[1,26,34,36,37,47],There:[1,4,5,14,17,19,22,25,28,29,30,31,32,33,34,35,36,42,43],These:[7,13,14,15,19,22,29,36,38,40,43,44,46],Use:[8,13,14,22,23,32,41,42,43,47],Used:[13,43],Useful:[8,22],Uses:8,Using:[13,15,19,22,32],WITH:36,Will:[6,14,19,43,47],With:[13,15,22,23,28,36,37,41],__background__:46,__call__:47,__config__:[18,27],__constants__:19,__dict__:37,__file__:[17,35],__getitem__:[13,44],__init__:[1,13,15,19,22,29,30,36,41],__iter__:13,__len__:[13,44],__main__:[13,26,32,35],__name__:[13,32,35],__new__:19,_back:[22,23],_bottom:[22,23],_call:15,_cat:15,_channel:[22,23],_class:22,_compilation_unit:19,_cpp_modul:19,_dependentproperti:15,_dim:22,_direct:22,_ext:35,_extra_fil:[19,31],_factor:22,_featur:[22,23],_fft:43,_force_outplac:19,_fork:27,_formatt:43,_forward_cl:1,_frames_up:19,_front:[22,23],_glibcxx_use_cxx11_abi:43,_greaterthan:15,_greaterthaneq:15,_halfopeninterv:
15,_if_scalar_type_a:36,_in:24,_index:22,_indic:[38,43],_instanc:15,_integerinterv:15,_interv:15,_invers:15,_key_padding_mask:22,_layer:22,_left:[22,23],_length:[22,43],_lessthan:15,_like:42,_load_from_state_dict:22,_log_api_usage_onc:31,_mask:22,_metadata:22,_module_class:19,_nnz:38,_onnx_master_opset:36,_onnx_stable_opset:36,_out:24,_pad:22,_qualified_nam:19,_random_sampl:22,_rcb:19,_resnet18:17,_retain_param_nam:36,_right:[22,23],_sampl:43,_scalar:36,_shape:22,_size:22,_slope:[22,23,24],_stack:15,_stacklevel:23,_tensor:42,_top:[22,23],_valu:[22,38,43],_wait:27,_weight:22,a3c:32,a_big:43,a_dict:19,a_i:22,a_l:43,a_lu:43,a_tupl:19,a_u:43,aaa:41,abc:22,abi:7,abil:[5,31],abl:[4,19,22,36,43],abnorm:[21,32],about:[1,5,8,13,19,22,27,29,30,31,32,36,42,47],abov:[1,15,17,19,22,26,27,28,29,36,37,43,44,47],abridg:30,abruptli:21,abs:[15,22,23,36,37,42,43],abs_:42,absolut:[1,5,7,22,23,42,43,47],abstransform:15,acc:46,acceler:[4,22,37],accept:[1,4,5,14,19,22,29,36,37,40,41,42,43],access:[5,13,14,21,22,25,28,30,31,40,42,49],accident:4,accimag:45,accommod:22,accompani:4,accomplish:4,accord:[22,24,35,36,37,43,44,46],accordingli:[42,44,46],accoridng:22,account:[2,22],accumul:[1,19,22,30,42,43],accumulategrad:1,accur:[8,36,43],accuraci:[41,46],achiev:[13,15,22,23,31,36],aco:[36,42,43],acos_:42,acquaint:4,across:[1,8,13,14,19,22,23,28,30,31,32,33,39,41,42,43,44],act:[15,22,47],action:[5,15,28,44],activ:[1,3,4,8,21,28,29],actual:[1,13,17,19,22,25,27,28,29,32,35,36,37],actual_input_1:36,acycl:25,adadelta:37,adagrad:[22,37],adam:[5,6,15,37],adamax:37,adamw:37,adapt:[19,22,23,37,43],adaptive_avg_pool1d:36,adaptive_avg_pool2d:36,adaptive_avg_pool3d:36,adaptive_max_pool1d:36,adaptive_max_pool2d:36,adaptive_max_pool3d:36,adaptiveavgpool1d:23,adaptiveavgpool2d:23,adaptiveavgpool3d:23,adaptivelogsoftmaxwithloss:19,adaptivemaxpool1d:23,adaptivemaxpool2d:23,adaptivemaxpool3d:23,add:[1,4,8,17,19,22,23,26,29,31,36,37,38,41,42,43,46],add_:[1,26,38,42],add_argu:28,add_audio:41,add_bias_kv:22,add_custom_scalar:41,add_embed:41,add_figur:41,add_graph:41,add_histogram:41,add_imag:41,add_mesh:41,add_modul:22,add_param_group:37,add_pr_curv:41,add_scalar:41,add_text:41,add_video:41,add_zero_attn:22,addbmm:[42,43],addbmm_:42,addcdiv:[42,43],addcdiv_:42,addcmul:[42,43],addcmul_:42,added:[4,7,22,23,31,36,37,38,41,42,43],adding:[13,14,17,19,22,29,36,42,43],addit:[1,4,5,7,15,19,22,23,27,28,29,31,32,33,35,37,38,42,43],addition:[1,13,14,15,22,30,42,43,47],additionali:22,addmm:[36,38,42,43],addmm_:42,addmv:[42,43],addmv_:42,addr:[42,43],addr_:42,address:[1,13,14,21,42,46],adher:5,adjac:[22,43],adjust:[22,47],adjust_bright:47,adjust_contrast:47,adjust_gamma:47,adjust_hu:47,adjust_satur:47,admit:28,adopt:5,advanc:[3,22,25,32,36,41],advantag:[14,22,30],adventur:36,adversari:22,advic:4,advis:[32,43],advisori:4,aeroplan:46,affect:[1,4,8,22,23,39,43],affin:[15,22,23,25,42,47],affinetransform:15,aforement:32,after:[4,7,8,13,14,17,19,21,22,24,28,30,31,32,34,37,40,41,42,43,47],afterward:[1,22],again:[3,13,14,43,44],against:[1,2,14,19,43,47],aggreg:[22,23,46],aggress:[1,25],ahead:4,aid:[4,25],aidan:22,ail:6,ailzhang:6,aim:4,airplan:46,aka:1,akin:31,alban:6,alband:6,alex:6,alfredo:6,algorithm:[4,11,12,15,22,23,43],alia:[15,42],alias:29,alican:6,alicanb:6,align:[22,23],align_corn:[22,23],aliv:[30,32],all:[1,3,4,5,7,8,13,14,15,17,19,21,22,23,25,27,28,29,30,32,33,35,36,37,38,39,40,41,42,43,44,46,47,48],all_gath:14,all_gather_multigpu:14,all_reduc:14,all_reduce_multigpu:14,allclos:[1,42,43],alloc:[1,2,8,15,21,25,28,30,40,42,43],allow:[1,4,5,7,13,14,15,1
7,19,22,25,26,27,28,31,32,36,37,40,41,42,43,44],allow_unreach:1,allow_unus:1,almost:[35,43,44],alon:19,along:[7,8,13,14,15,17,19,22,23,26,30,31,37,42,43],alpha:[15,22,23,36,37,38,42,43],alpha_f:36,alphabet:[23,43,44],alphadropout:23,alreadi:[8,14,17,19,20,22,29,32,36,37,39,42,43,44],also:[1,3,4,5,7,8,13,14,15,17,19,21,22,23,24,25,27,28,29,30,31,32,33,35,36,37,38,41,42,43,44,47],altern:[13,17,19,22,23,35,43],although:[4,15,22],alwai:[1,8,13,14,19,21,22,26,27,28,29,31,36,38,40,42,43],amazonaw:[20,35],ambigu:[15,22],among:[8,13,14,15,22,36,43],amount:[1,2,4,8,22,25,28,30,47,48],amplitud:37,amsgrad:37,an_error:19,anaconda:35,analog:[37,43],analogu:19,analyt:[1,15],anchor:[22,23],angl:[22,47],ani:[1,2,3,4,5,8,13,14,15,19,21,22,23,25,27,28,29,31,32,33,36,37,41,42,43,47],anm:43,ann_fil:44,anneal:37,annfil:44,annot:[1,19,44],annotation_path:44,anoth:[4,8,13,14,19,22,27,28,29,32,35,36,42,43],another_input:22,anothermodel:36,answer:[4,5,22],anticip:3,anymor:[1,14,22,42],anyon:5,anyth:[3,4,14,19],aoa:35,apaszk:[5,6],api:[1,5,8,17,19,21,28,36,38,41,42,44],aplli:47,appear:[2,14,15,22,29,37,43],append:[1,14,19,22,32,35,41,42,43],appl:46,appli:[1,3,14,15,19,21,22,23,25,29,36,37,42,43,47],applic:[8,14,15,22,25,27,28,42,47],apply_:42,apprear:43,appreci:4,approach:[14,19,21,22,43],appropri:[4,14,15,19,22,43,46],approv:5,approxim:[1,22,23,29,37],arang:[13,22,23,36,41,42,43],arbitrari:[1,14,19,22,23,25,31,42,43],arccosin:43,architechtur:22,architectur:[22,43,45,46],archiv:[19,31],arcsin:43,arctang:43,area:[4,5,23,47],arg0:1,arg1:1,arg:[1,2,3,7,14,15,17,21,22,23,28,32,34,36,39,42,43,44,47],arg_constraint:15,argmax:[22,36,42,43],argmin:[36,42,43],argpars:28,argsort:[42,43],argument:[1,2,3,7,8,13,14,15,17,19,21,22,23,26,28,29,30,31,36,37,39,40,41,42,43,44,48,49],argumentpars:28,ari:36,aris:15,arithmet:43,armand:22,around:[1,4,5,8,14,19,21,28,42,47],arrai:[13,22,23,36,39,41,42,43,44],arrang:44,array_lik:[42,43],art:43,articul:5,artifact:31,artifici:1,arxiv:[22,46,47],as_strid:[42,43],as_tensor:[41,42,43],as_tupl:43,asap:21,ascend:43,ascent:15,ascii:[8,43],asd932_:44,asgd:37,ashish:22,asin:[36,42,43],asin_:42,ask:[4,14,18],aspect:[4,47],assembl:13,assert:[13,15,36],assert_allclos:19,assign:[4,13,14,19,22,29,30,36,41,44],assign_x:19,associ:[1,8,19,22,23,40,42,43],assum:[13,14,15,19,22,23,29,31,36,37,43,47],assumpt:[22,47],ast_1:[22,23],ast_2:[22,23],astyp:36,asuhan:6,async:[14,39,42],async_op:14,asynchron:[2,22,27,39,41,42],atan2:[42,43],atan2_:42,atan:[36,42,43],atan_:42,aten:[19,27,35,43],aten_thread:27,atol:[1,19,29,42,43],atom:33,atomicadd:33,attach:17,attempt:[19,28,35,43],attend:22,attent:[4,22,35],attn:22,attn_mask:22,attn_output:22,attn_output_weight:22,attr1:36,attr1_f:36,attr2:36,attr2_i:36,attr:[15,22,23,36,43],attribut:[1,13,14,18,22,25,28,29,36,42,49],audio:[41,44],aug_add_x:19,augment:47,auto:[14,22,41],autoencod:15,autograd:[2,3,4,15,18,22,23,30,31,36,42,43],autograd_tensor:1,autom:[19,36],automat:[7,8,14,22,25,26,28,29,32,36,41,42,43],aux_logit:46,aux_loss:46,auxiliari:[17,31,46],avaialbl:17,avail:[7,8,13,14,17,19,21,22,23,28,29,35,36,43,44],averag:[1,14,22,23,37],avg:[1,47],avg_pool1d:36,avg_pool2d:36,avg_pool3d:36,avgpool1d:23,avgpool2d:23,avgpool3d:23,avmgithub:6,avoid:[5,13,15,22,23,30,33,41,42,43,47],awai:23,awar:[4,46],axbc:22,axes:36,axi:[36,42,43,47],b_hf:22,b_hg:22,b_hh:22,b_hi:22,b_hn:22,b_ho:22,b_hr:22,b_hz:22,b_if:22,b_ig:22,b_ih:22,b_ii:22,b_in:22,b_io:22,b_ir:22,b_iz:22,back:[17,32,36,38,43,47],backbon:46,backcompat:26,backend:[1,9,10,11,19,22,23,27,28,33,36,42,43,45],backend_str:14,bac
kground:[9,10,11,22,23,32,42,43,44],backpack:46,backprop:43,backpropag:[1,15,30,37],backward:[1,3,5,10,15,22,23,24,29,30,32,33,37,38,42,43],bad:21,baddbmm:[42,43],baddbmm_:42,bag:[22,23],bai:6,balanc:[43,44],ball:46,balnta:22,banana:46,bar:[4,19,20,46],bare:7,barrier:14,bartlett:43,bartlett_window:43,base:[1,4,5,7,8,13,15,19,22,25,27,36,37,41,42,43,44,47],base_distribut:15,base_lr:37,base_momentum:37,base_se:13,basebal:46,basedistribut:15,basep:15,baseq:15,basi:[15,31,37],basic:[4,22,37,41],bat:46,batch1:[42,43],batch2:[42,43],batch:[15,22,23,28,30,32,33,36,37,41,43,44,46,47,48],batch_first:[22,30],batch_ndx:13,batch_sampl:13,batch_shap:15,batch_siz:[13,15,22,41,44],batchmean:[22,23],batchnorm1d:23,batchnorm2d:23,batchnorm3d:23,batchnorm:[22,36],batchnormnd:22,batchsampl:13,batchsiz:[22,23],batchwis:22,bbb:41,bceloss:23,bcewithlogitsloss:23,bckenstler:37,bddppq:6,beam:19,bear:46,becaus:[1,2,4,13,15,19,21,22,26,28,30,31,35,36,41,42,43],becom:[1,4,5,13,15,22,23,36,42,43],bed:46,bedroom_train:44,been:[1,8,14,15,21,22,27,32,35,37,41,43,46],befor:[1,4,8,13,14,15,19,21,22,23,25,27,28,29,31,35,36,37,38,41,42,43],beforehand:4,begin:[4,8,22,31,36,37,42,43],behav:[7,19,42],behavior:[4,7,14,17,19,22,23,26,28,36,37,42,43,46],behaviour:[1,9,10,23,42,43],behind:44,being:[1,5,13,15,19,22,23,29,32,36,42,43,47],belong:[3,8,14,15,28,37,47],below:[1,7,13,14,15,19,21,22,23,28,29,32,35,36,43,47],ben:22,bench:46,benchmark:33,benefit:[4,14,21,37],bengio:24,bernoulli:[22,23,42,43],bernoulli_:[42,43],besid:41,bessel:43,best:[1,4,13,14,18,19,21,30,37,43],beta:[22,23,36,37,38,42,43],better:[4,5,8,13,19,22,23,27,35,41,43],between:[1,4,8,14,15,19,21,22,23,28,32,33,37,39,41,42,43,44,46,47],bewar:4,beyond:[5,30,37,43],bfg:37,bfloat16:[39,42],bia:[5,22,23,29,41],bias:[22,43],bias_hh:22,bias_hh_l:22,bias_ih:22,bias_ih_l:22,bicub:[22,23,47],bicycl:46,bidirect:[22,36],big:[4,43],bij:43,biject:15,biject_to:15,bik:43,bilinear:[43,47],bin:[41,42,43],binari:[15,19,22,23,31,35,36,41,42,43,44,46],bincount:[33,42,43],bind:[7,8,36],bird:46,bit:[4,35,40,42,43,49],bitwis:[14,43],bitwise_not:[42,43],bitwise_not_:42,bjk:43,bl_flip:47,bla:27,black:47,blackman:43,blackman_window:43,blank:[22,23],blob:[31,36,41],blobnam:41,block0:[19,36],block1:19,block:[4,8,13,14,19,21,22,23,36,46],blog:4,blow:30,blue:44,bmm:[42,43],board:5,boat:46,bodi:19,boil:4,book:46,bool:[1,3,8,13,14,15,17,19,20,21,22,23,36,37,39,40,41,42,43,44,46,47,48],booltensor:[40,42],bootcamp:4,bootstrap:35,border:[23,47],both:[1,8,13,14,15,19,22,23,26,29,33,36,38,42,43,44,46,47],bottl:46,bottleneck:[18,46],bottom:[1,23,47],bound:[2,22,23,24,34,37,42,43],boundari:[22,23,37,44],bowl:46,box:46,bozkurt:6,bptt:30,br_flip:47,branch:[4,17,19,46],brand:14,bregman:15,breviti:[1,36],brief:21,bright:[44,47],brightness_factor:47,broadcast:[8,14,15,18,22,36,42,43],broadcast_buff:22,broadcast_coalesc:8,broadcast_multigpu:14,broadcast_tensor:43,broadcast_warn:26,broader:[5,31,43],broccoli:46,broken:4,brokenpipeerror:35,brown:44,bucket:22,bucket_cap_mb:22,buf:22,buffer:[1,2,8,13,19,22,25,29,43],buffer_s:8,bug:[5,32],bugfix:4,build:[7,14,15,19,23,25,41,47],build_directori:7,build_ext:7,buildextens:7,built:[4,14,19,27,32,37,43],builtin:43,bulk:13,bump:22,bundl:31,bus:46,byclass:44,bymerg:44,bypass:28,byte_arrai:43,bytecod:13,bytesio:[19,43],bytetensor:[8,22,40,42,43],bz2:44,c10:31,c10_log_api_usage_onc:31,c99:35,c_0:22,c_1:22,c_j:22,c_n:22,c_t:22,cach:[8,15,20,21,22,30,43,46],cache_s:15,caffe2:[36,41],cake:46,calcul:[1,3,13,22,23,26,35,37,43],calculate_gain:24,call:[1,7,8,13,14,15,17,21,22,23,2
7,28,29,30,31,32,35,36,37,40,41,42,43,46,48,49],callabl:[13,15,17,19,37,42,43,44],callback:31,caller:28,camera:[31,41],can:[1,2,3,4,5,7,8,11,12,13,14,15,16,17,19,21,22,23,25,26,27,28,29,30,31,32,33,34,35,36,37,38,40,41,42,43,44,46,47,49],candid:1,cannot:[1,13,15,17,19,22,23,35,38,39,42,43],cap:44,capabl:[8,14,31,43],capac:28,capacit:15,captur:[8,19,36],car:46,card:35,cardin:15,care:[4,7,15,21,22,28,30,32,38,43],carlo:15,carri:26,carrier:15,carrot:46,cartesian:[15,43],cartesian_prod:43,cast:[1,22,23,36,39,42,43],cat:[15,19,22,36,38,42,43,44,46],categor:[4,23],categori:[15,43,44,46],categorynam:41,cattransform:15,cauchi:[42,43],cauchy_:[42,43],caught:21,caus:[1,3,13,14,19,21,23,26,30,32,35,36,42,43],caveat:[21,28],ccc:41,cdf:15,cdot:[22,23,43],ceil:[13,22,23,36,42,43],ceil_:42,ceil_mod:[22,23],cell:[22,46],center:[23,37,41,42,43,47],center_flip:47,centercrop:47,central:[31,47],cerr:31,certain:[13,14,19,22,23,26,31,38,43],certainli:43,chain:[1,13,15,22,25,42,43,47],chain_matmul:43,chaindataset:13,chair:46,challeng:4,chanan:[5,6],chanc:[4,15],chang:[1,4,8,15,19,21,22,23,25,26,28,35,36,37,38,39,41,42,43,46,47],channel:[5,13,22,23,24,36,41,44,46,47],charact:[23,43],chart:41,chartensor:[40,42],chartnam:41,cheap:[15,22],cheaper:13,check:[2,7,8,13,14,15,17,22,29,30,31,36,37,41,42,43,44],check_compiler_abi_compat:7,check_input:19,check_model:36,check_reduct:22,check_sparse_nnz:1,check_toler:19,check_trac:19,checker:[19,36],checkout:36,checkpoint:[1,17,18,20,22,43],checkpoint_sequenti:3,child:[13,21,22,35],children:[21,22],chintala:[5,6],choic:[19,22,27,36],choleski:[15,42,43],cholesky_invers:[42,43],cholesky_solv:[42,43],choos:[1,24,41],chosen:[43,47],christian:6,chrome:1,chunk:[3,8,13,19,22,42,43],chunk_siz:8,church_train:44,chw:41,cifar100:44,cifar10:44,cifar:45,circleci:4,circular:23,circumst:[11,19,22,23],cityscap:45,claim:4,clamp:[23,36,42,43],clamp_:42,clamp_max:36,clamp_min:36,class_i:44,class_index:[13,44],class_x:44,classif:[22,23,24,44,45],classifi:[25,36,37,41],classmethod:[8,22],clean:[8,14,17,21],cleaner:25,clear:[17,22,28,37],click:43,clip:[22,44],clip_valu:22,clock:46,clockwis:47,clone:[1,13,21,23,38,39,42,43],cloned_coeffici:43,close:[8,29,41],closest:[23,43],cloud:41,clp:44,clr:[37,43],cluster:[22,41],clutter:41,cmake:35,cmake_gener:35,cmake_include_path:35,cmdclass:7,cmyk:47,cnn:[22,25],coalesc:[8,38,42],coars:44,coco:[45,46],coco_instance_category_nam:46,coco_person_keypoint_nam:46,coco_util:46,cococapt:44,cocodetect:44,code:[1,2,5,7,13,14,15,17,22,26,29,30,31,32,33,35,36,37,38,40,42,43],codebas:5,codec:43,codomain:15,coeffici:[37,43],cohes:5,col2im:22,col:[43,44],colesburi:[5,6],collat:13,collate_wrapp:13,collect:[1,4,13,37,41,43,44],color:[22,41,44,47],colorjitt:47,colors_tensor:41,column:[1,22,23,24,42,43,47],com:[4,5,20,35,36],combin:[13,19,22,23,28,36,43],combinations_with_replac:43,come:[4,13,22,31,44],comm:8,comma:[14,43],command:[1,2,35],comment:[4,19,29,41],commit:[4,5,17,33],committ:5,common:[13,22,28,30,32,43,44,45,47],commonli:[14,15,37,40],commun:[4,5,18],compani:5,compar:[1,3,13,19,22,29,35,41,43],comparison:29,compat:[7,13,15,19,21,39,42,43,44],compil:[7,19,27,31,35],compilationunit:19,compiled_with_cxx11_abi:43,complementari:[43,47],complet:[4,8,14,21,25,33,43,47],complex:[4,22,32,43,47],complic:[2,26],compon:[4,14,15,22,31,43],compos:[15,19,22,23,36,41,43,47],composetransform:15,composit:[15,19],compris:3,comput:[3,4,8,13,14,15,19,22,23,25,27,28,29,30,33,36,37,38,42,45,46,47,48],compute_uv:[42,43],compute_z:27,concat:[22,36],concatdataset:13,concaten:[7,8,13,22,23,
43],concentr:15,concentrarion:15,concentration0:15,concentration1:15,concept:[4,36,40],conceptu:[1,25],concern:[13,21],concret:[15,19,22,23,32],concurr:[27,28],cond:36,conda:[35,36,43],condit:[1,12,19,22,29,36,41,42,43],condition:1,conduct:[5,22],confer:5,confid:[4,41],config:35,config_dict:41,configur:[0,4,13,14,22,28,35,41,43,46],confirm:[4,19,36],conform:22,conjug:[37,43],conjunct:[13,23],connect:[14,21,22,25,46],connectionist:[22,23],conquer:43,consecut:[14,42,43],consensu:4,consid:[17,19,22,23,26,29,30,37,42,43,44],consider:[4,22],consist:[13,19,36,37,43,45],consol:41,constant:[13,22,23,29,36,37,43,47],constant_:24,constantpad2d:23,constantpadnd:36,constrain:[15,22],constraint:22,constraint_registri:15,constraintregistri:15,construct:[1,13,15,22,25,32,38,40,41,42,43,46],construct_transform:15,constructor:[7,13,22,28,38,42,46,49],consum:[13,16,21,32,36,41],consumpt:[1,41],contain:[1,3,7,8,13,14,15,19,23,25,29,30,31,36,37,39,40,41,42,43,44,46,47],content:[4,19,20,21,37,41,42,43],contenti:5,context:[1,8,21,22,28,29,31,32,36,43],contigu:[22,23,39,42,43],continu:[13,15,19,22,36,42,43],continuum:35,contract:43,contrail:44,contrain:43,contrari:[4,27],contrast:[15,37,46,47],contrast_factor:47,contribut:[1,5,18,22,23,43],contributor:[4,5],control:[13,19,22,25,27,28,32,36,43,47],conv1:[19,22,41],conv2:[19,22],conv2d:[19,36,41],conv4:22,conv5:22,conv:[19,22,24,36],conveni:[4,7,17,19,28,29,31],convent:[1,20,22,36,42,43],converg:37,convers:[4,25,36,42,45],convert:[1,13,19,22,29,36,41,42,43,47],convert_sync_batchnorm:22,convolut:[24,27],convolv:[22,23],convtranspos:22,convtranspose1d:23,convtranspose2d:23,convtranspose3d:23,coo:[38,40,42,43],cooldown:37,coordin:[4,15,38,41,42,43,47],cope:32,copi:[4,8,13,14,19,21,22,26,28,32,39,42,43],copy_:[1,19,22,28,39,42],core:[4,19,27,36],corner:[22,23,47],corpor:[4,5],correct:[2,4,14,15,19,22,39,42,43,47],correctli:[3,14,19,22,23,32,36],correl:[1,15,22],correspond:[1,4,8,13,15,17,19,22,23,29,31,36,37,39,41,42,43,47],corrupt:[22,32,44],cos:[22,36,37,41,42,43],cos_:42,cosh:[42,43],cosh_:42,cosin:[22,23,37,43],cosineannealinglr:37,cosineembeddingloss:23,cost:[1,2,11,22,23,43],couch:46,could:[2,4,8,13,15,21,35,43],couldn:[35,36],count:[1,8,15,43],count_include_pad:[22,23],counter:[1,8,21,22,25,47],counterpart:43,coupl:[31,33],cours:[2,17,37],courtesi:15,cov_diag:15,cov_factor:15,covari:[15,22,47],covariance_matrix:15,cover:[29,31,44],coverag:4,cow:46,cpp:[4,5,7,43],cpp_extens:[18,29],cpp_sourc:7,cppdoc:4,cppextens:7,cprofil:2,cpu:[1,2,8,14,18,19,21,22,23,28,32,33,35,36,39,40,42,43],cpu_model:19,cpu_tim:1,cpu_time_tot:1,cpuhrsch:6,crack:4,crash:[21,41],crcv:44,creat:[1,3,4,7,8,13,14,15,21,22,25,28,32,36,39,41,42,43,44,49],create_extens:35,create_graph:[1,42],creation:[1,8,13,19,21,22,28,42,44],creator:25,crelu:22,criterion:[22,23,30],critic:22,crop:[46,47],cross:[4,15,22,23,28,35,42,43],crossentropyloss:23,crossmaplrn2d:19,csrc:[35,36],ctc_loss:[22,33],ctcloss:23,ctx:[1,29],cube:[22,43],cubla:8,cublashandle_t:8,cuda0:[28,42],cuda100:35,cuda101:35,cuda1:40,cuda2:28,cuda80:35,cuda90:35,cuda92:35,cuda:[1,2,3,7,9,10,11,13,14,18,19,22,23,29,33,36,37,39,40,42,43,46],cuda_extens:7,cuda_hom:7,cuda_launch_block:28,cuda_prefix:35,cuda_runtim:7,cuda_sourc:7,cuda_tim:1,cuda_time_tot:1,cuda_visible_devic:[8,28],cudaev:1,cudaeventsynchron:8,cudaextens:7,cudart:[7,35],cudastreamsynchron:8,cudastreamwaitev:8,cudnn:[11,12,22,23,46],cufft:43,cufft_plan_cach:28,cuh:7,cultur:5,cumprod:[42,43],cumsum:[42,43],cumul:[15,22,23,43],cup:46,cur:37,curl:35,current:[1,3,5,7,8,13,14,19,21,22,23,
28,31,33,35,36,37,38,39,40,41,42,43,44],current_blas_handl:8,current_datetime_hostnam:41,current_devic:[8,40],current_stream:8,curv:41,custom:[7,13,14,21,22,31,35,37,42],custom_decod:22,custom_encod:22,custom_loop:36,custom_op:36,cut:4,cutoff:[22,43],cxx:7,cycl:37,cycle_momentum:37,cyclic:[37,47],cycliclr:37,d_1:[22,23],d_2:[22,23],d_k:[22,23],d_model:22,daemon:21,dag:1,dai:4,dampen:37,dark:47,darker:47,dart:44,data1:44,data2:44,data:[1,4,12,14,15,18,19,21,22,23,25,26,28,29,31,32,34,35,36,37,38,39,40,41,42,43,44,47],data_load:[32,37,44],data_parallel:30,data_ptr:[39,42],data_sourc:13,databas:[13,44],dataformat:41,dataload:[13,22,28,30,35,37,41,42,44],dataparallel:[14,30,32],dataset:[18,22,30,31,35,37,41,45,46,47],dataset_it:13,datasetfold:45,datatyp:[22,36,43],datetim:14,datset:44,david:[6,22],dcgan:36,ddp:22,ddp_sync_bn_network:22,deactiv:47,deadlock:[14,22],deal:[4,21,30,43,47],dealloc:[21,28,30],debug:[1,2,13,14,25,27,35,36],decai:[22,37],decemb:43,decent:13,decid:[2,4,44],decis:19,declar:[1,7,13,19,36],decod:[16,22,43],decoder_lay:22,decomposit:[15,43],deconvolut:[22,23],decor:[1,15,19],decoupl:[22,37],decreas:[15,22,37,43],decreasingli:22,deep:[4,5,18,22,24,37,46],deeper:46,deeplabv3_resnet101:46,deeplabv3_resnet50:46,def:[1,13,15,17,19,22,27,29,30,32,35,36,37,42,47],default_gener:43,default_load:44,default_stream:8,defin:[1,7,13,15,17,21,22,23,35,36,37,38,42,43,44,47],define_macro:35,definit:[4,13,15,17,19,22,23,36,41,43,46],degre:[15,22,43,47],del:[21,30],delet:[14,17,21],deliv:5,delta:[15,22,24,37],delv:24,demand:[8,31],demonstr:22,denomin:[22,37,43],denorm:43,denot:[1,15,19,22,37],dens:[22,38,40,42,43,46],dense_dim:[38,42,43],densenet121:46,densenet161:46,densenet169:46,densenet201:46,densenet:36,densiti:15,depend:[1,2,3,13,14,15,17,19,21,22,23,27,28,33,36,37,38,41,42,43,46],dependent_properti:15,deploi:[4,31],deploy:18,deprec:[14,22,23,26,36,39,42,43,47],depth:[8,22,23,46,47],depthwis:22,dequant:42,deriv:[1,5,19,22,29,42,43],derivedp:15,derivedq:15,descend:[22,42,43],descent:[15,37],describ:[3,4,8,13,19,22,23,24,30,31,36,42,43,46],descript:[0,4,7,19,28,29,31,36,49],descriptor:[13,22,36,44],deseri:[20,43],design:[1,4,5,13,15,17,20,46],desir:[8,13,14,15,22,23,28,36,38,39,42,43,47],desmaison:6,despit:19,destin:[8,14,22,39,42,43],destroi:22,destructor:21,det:[15,42,43],detach:[1,19,22,23,30,42,43],detach_:[1,42],detail:[0,1,4,8,13,15,19,22,23,29,30,31,37,38,41,42,43,46,47],detect:[3,7,14,21,36,45],detect_anomali:1,detector:22,determin:[1,5,7,8,13,15,22,23,28,33,41,43,47],determinist:[3,11,15,19,22,23,33,37,43],dev:5,dev_idx:14,develop:[28,31,36],deviat:[15,22,24,42,43,47],devic:[1,3,8,14,19,22,23,30,33,36,37,39,42,43],device_count:[8,14],device_id:[22,23,43],device_of:8,devito:6,df1:15,df2:15,dfrac:[22,23,42],diag:[15,42,43],diag_emb:[42,43],diagflat:[42,43],diagn:15,diagnost:19,diagon:[15,23,42,43],dict:[15,20,22,29,36,37,41,43,46],dictat:22,dictionari:[7,13,15,22,23,36,37,41,44,46],did:[1,4,19],didn:[25,29,35,37],dies:21,dieterich:6,diff:[4,19],differ:[1,3,7,8,13,14,15,17,19,21,22,23,26,27,28,29,32,33,35,36,37,38,40,41,42,43,44,46],differenti:[15,22,23,25,29,30,42],difficult:[1,4],difficulti:[4,24],digamma:[42,43],digamma_:42,digit:[20,31,43,44],dilat:[22,23,36],dim0:[42,43],dim1:[42,43],dim2:[42,43],dim:[8,15,19,22,23,30,36,38,42,43],dim_arang:36,dim_feedforward:22,dimems:42,dimens:[1,8,13,15,19,22,23,24,26,30,38,40,41,42,43],dimension:[1,15,22,23,24,26,39,40,42,43],dims_i:36,dine:46,diningt:46,dir:[17,36,44],dirac:24,dirac_:24,direct:[4,5,22,25,29,43,47],directli:[4,5,7,13,14,15,1
9,22,23,28,31,32,36,38,41,43],directori:[7,14,20,31,34,41,44,46],dirnam:17,dirti:25,disabl:[22,28],disable_cuda:28,disable_jit_exampl:19,disadvantag:19,discard:[17,19],discourag:[1,8,25],discov:14,discrep:43,discret:[15,22,23,42,43],discrimin:22,discuss:[5,15],disjoint:19,disk:[1,13,41,43],dispatch:[14,36],displai:[20,23,36,46,48],displaystyl:43,dissimilar:22,dist:[14,15,42,43],distanc:[37,43,44],distinct:43,distort:47,distortion_scal:47,distribut:[13,18,24,38,41,42,43],distributed_test:14,distributeddataparallel:[13,14],distributedsampl:13,div:[22,36,38,42,43],div_:[38,42],div_valu:22,diverg:[19,22,23,36],divid:[3,8,22,23,43],dividend:43,divis:[13,22,23,43],divisor:[22,23,42,43],divisor_overrid:[22,23],dlibenzi:6,dll:35,dlpack:18,dltensor:16,dmytro:[5,6],dnn:27,do_constant_fold:36,doc:[2,21,22,29,36,41],doc_str:36,docstr:[7,17],document:[8,13,14,17,19,21,22,29,30,36,43,48],doe:[1,2,3,4,5,8,14,15,17,19,21,22,23,26,27,28,32,36,38,42,43,45,47],doesn:[1,3,4,8,13,14,19,22,23,26,29,31,32,35,36,37,43],dog:[44,46],doing:[4,13,19,23,35],domain:[5,15],don:[1,2,4,14,17,21,22,23,25,29,30,32,35,36,37,42,43,47],done:[13,15,19,21,22,30,33,36,42,43,47],donut:46,dot:[22,42,43,47],doubl:[1,22,23,29,39,40,42,43],doubler:1,doubletensor:[40,42,43],dow:41,down:[1,4,13,15,23,32,41],download:[20,35,41,44,46],downsampl:22,doxygen:4,dp_m:30,dpotri:43,draw:[13,41,42,43],drawn:[13,24,42,43],drier:46,drive:[5,14],driven:5,drop:[13,22,32,44],drop_last:13,dropout:[3,36],dset:44,dst1:8,dst2:8,dst:14,dst_tensor:14,dst_type:22,dtype:[12,13,19,22,23,28,29,36,38,39,41,42,43,47,49],due:[2,3,4,15,19,22,28,33,43],dummi:13,dummy_input1:36,dummy_input2:36,dummy_input:36,dump:35,dump_patch:22,duplic:[13,22,30,38,42,43],dure:[1,3,7,14,19,22,23,27,28,31,36,38,42,43,46],dynam:[7,13,19,24,36,37,43],dynamic_ax:36,dynamic_threshold:37,dzhulgakov:[5,6],each:[1,3,7,8,13,14,15,16,17,19,22,23,24,25,26,27,28,29,30,31,32,36,37,38,40,41,42,43,44,46,47,48],eager:43,eagerli:8,earli:[19,22],earlier:[1,30,36],eas:[1,27],easi:[13,19,30,31,32,36],easier:[4,13,19,22,26,29,43],easili:[4,9,10,14,22,23,37,41,42,43],ecosystem:31,edg:[1,23,47],edgeitem:43,edouard:22,edu:[22,44],edward:[5,6],effect:[1,4,7,13,17,19,22,23,28,37,39,41,42],effici:[1,13,15,22,25,29,38,40,42,43,46],eig:[42,43],eigenvalu:43,eigenvector:[42,43],eight:20,einstein:43,einsum:43,either:[1,7,13,14,15,17,19,22,23,24,26,28,29,31,32,36,37,42,43,49],elaps:8,elapsed_tim:8,eleg:32,element:[1,8,13,14,15,22,23,24,26,27,38,39,40,41,42,43,44,47],element_s:[39,42],elementari:43,elementwis:[8,22,23,43],elementwise_affin:22,eleph:46,elf:30,elif:19,elimin:[14,42,43],ell:22,ell_c:22,ellips:43,ellipsi:43,elman:22,els:[4,7,13,15,19,21,22,23,28,29,39,42,43,44,47],elsewher:[17,43],elu:36,elu_:23,embed:[27,36,41],embed_dim:22,embedding_bag:33,embedding_dim:[22,23],embedding_matrix:23,embedding_sum:22,embeddingbag:23,emerg:35,emit:[1,7,19,44],emit_nvtx:[1,2],emnist:45,empir:22,emploi:37,employe:5,empti:[14,19,22,23,24,26,28,36,38,42,43],empty_cach:[8,28],empty_lik:43,emptydatastructur:19,enabl:[1,12,13,14,22,26,27,28,31,35,37,42,43],enable_grad:[1,43],enable_tim:8,encod:[14,19,22,29,43],encoder_lay:22,encount:[14,22,23,43],encourag:5,end:[4,5,8,13,14,19,21,22,30,35,42,43],end_dim:[42,43],end_ev:8,endl:31,endocd:22,endpoint:47,enforc:22,enforce_sort:22,engin:[1,42,43],enhanc:47,enough:[19,21,25,29,35,37,43,47],enqueu:[8,28],ensur:[1,2,4,5,13,14,19,20,21,22,25,28,32,33,42,43,46],enter:14,entir:[3,4,7,13,19,22,23,30,31,34,47],entiti:44,entranc:4,entri:[1,14,15,25,37,38,41,43,44],entropi:[15,22,23],entrypoint:
21,entrypoint_nam:17,enumer:[13,15,22,28,35,41],enumerate_support:15,env:[14,15],enviro:4,environ:[4,7,15,17,19,20,27,28,35,46],environment:8,epoch:[13,37,41],eps:[1,22,23,29,37,43,49],epsilon:[22,23,43],eq_:42,equal:[8,14,15,22,23,26,41,42,43],equal_nan:[42,43],equat:[43,47],equival:[3,13,15,19,22,23,36,40,42,43],eras:47,erf:[36,42,43],erf_:42,erfc:[42,43],erfc_:42,erfinv:[42,43],erfinv_:42,errno:35,error:[1,4,13,15,19,21,22,23,25,28,29,36,42,43,46],especi:[5,13,14,23,25,36,42,43],essenti:[13,35],estim:[15,22,37,43],eta:37,eta_:37,eta_min:37,eta_t:37,etaminu:37,etapli:37,etc:[13,14,15,19,22,29,30,32,36,37,41,43],eth0:14,eth1:14,eth2:14,eth3:14,ethernet:14,euclidean:23,eval:[19,22,36,46],evalu:[2,15,22,23,25,29,37,43,44,46],even:[1,13,14,19,23,28,29,30,32,33,40,42,43],event:[1,15,19,21,41],event_dim:15,event_file_writ:41,event_nam:31,event_shap:15,eventfilewrit:41,eventlist:1,eventu:[1,4],everi:[1,13,14,15,22,23,25,28,29,36,37,39,41,42,43,44,46],everyon:4,everyth:[4,13,15,19,21],everytim:14,everywher:[23,43],evid:4,evolv:36,exact:[1,22,24,32,34,38,43],exactli:[1,7,14,15,22,23,25,28,36,43,44],examin:36,exampl:[1,3,4,7,8,13,14,15,17,19,20,22,23,24,25,26,27,28,29,30,31,32,35,37,38,40,41,42,43,44,46,47,48],example_forward_input:19,example_input:19,example_output:36,example_weight:19,except:[1,3,4,14,15,19,21,22,23,28,35,36,38,42,43,46,47],exchang:[14,43],exclud:[19,22,23,43,44],exclus:[13,14,15,25,43],execut:[1,2,3,4,7,8,13,14,19,22,25,26,27,30,32,33,35,36,43],exist:[1,4,5,7,13,14,15,17,19,21,22,26,31,36,42,43,44],exit:[1,2,21,22,32,36],exp1:41,exp2:41,exp:[1,15,22,23,36,42,43],exp_:42,exp_famili:15,exp_rang:37,expand:[1,5,15,17,26,36,42,43,47],expand_a:[29,36,42,43],expans:47,expect:[1,4,13,14,19,22,23,30,37,41,43,44,46,47],expens:[2,13,15,31],experi:[22,41],experiment:[35,36,38,40],expert:5,explain:[17,28],explan:29,explicit:[28,36,43],explicitli:[8,14,19,23,28,31,36,38,43],explod:[22,41],explor:17,expm1:[42,43],expm1_:42,expon:[15,22,23,42,43],exponenti:[22,42,43],exponential_:[42,43],exponentiallr:37,export_chrome_trac:1,export_param:36,export_raw_ir:36,expos:[1,28,31],express:[25,42,43],exptransform:15,ext:[20,44],ext_modul:7,extend:[1,15,18,22,31,32,36],extens:[7,15,43,44],extension_kernel:7,extent:27,extern:[19,27,35],extra:[4,14,19,22,29,30,31,43,44],extra_cflag:7,extra_compile_arg:[7,35],extra_cuda_cflag:7,extra_fil:19,extra_include_path:7,extra_ldflag:7,extra_repr:[22,29],extract:[22,23],extrafilesmap:[19,31],extrem:[2,41],extrud:30,eye:[15,43],eye_:24,eyes:4,ezyang:[5,6],f_t:22,face:41,facebook:5,faces_tensor:41,facil:[21,43],facilit:[17,19,43],fact:[1,19,29,42,43],factor:[15,22,23,24,37,43,47],factori:[1,15,28],fail:[1,4,15,19,21,32,35,36,43],failur:[1,5,14,15,19,21,22],fake:44,fakedata:45,fall:[4,22,23,36,43],fallback:14,fals:[1,3,7,8,13,14,15,17,19,21,22,23,25,30,33,36,37,39,41,42,43,44,46,47,48],famili:15,familiar:[19,25],fan:24,fan_in:24,fan_out:24,fang:6,faq:[13,18,22],far:23,fashion:[4,14,19,42,45],fashionmnist:44,fast:[4,13,22,28,40,43],fast_forward:43,faster:[13,22,23,28,45],fasterrcnn_resnet50_fpn:46,fastest:[22,43],fatal:[21,32],favor:[8,22,23,43,47],favour:43,fcn:46,fcn_resnet101:46,fcn_resnet50:46,fcntl:14,featur:[5,18,19,22,23,29,30,36,41],featuredropout:36,fed:41,fedyunin:6,feed:[19,30],feedback:4,feedforward:[22,24],feel:4,feng:6,fetch:13,few:[1,4,17,25,30,35,42,43],fewer:[15,26,38,42,43,46],ffi:35,fft:[28,42,43],field:[4,14,22,23,32,36,46],figur:[4,27,36,41],file:[1,4,5,7,8,13,17,19,20,31,35,36,39,41,43,44,48],filenam:[7,19,20,39,41,48],filename_suffix:41,fileno:36,f
ilesytem:[17,20],fill:[14,22,23,24,28,42,43,47],fill_:[22,39,42,43],fill_diagonal_:42,fill_row_zero:19,fill_valu:[22,28,42,43],fillcolor:47,filter:[22,23,42,43,47],financi:5,find:[1,4,5,7,14,17,21,22,27,28,29,30,32,36,40,41,43],find_unused_paramet:22,fine:[7,14,17,19,21,22,25,37,44,47],finetun:[22,25],finish:[14,28,35],finit:[1,2,15,29,43],fire:[31,46],first:[1,2,3,4,5,7,8,13,14,15,19,20,21,22,23,28,30,32,34,35,36,37,38,41,42,43,44,47],fisher:15,fit:[1,37,42,43],five_crop:47,fivecrop:47,fix:[19,22,23,30,32,33,35,36,43,44],flag:[1,7,19,21,22,25,28,41,42,43,47],flat:[36,43],flatten:[24,36,42,43,47],flickr30k:44,flickr8k:44,flickr:45,flip:[42,43,47],float16:[12,22,40,42,43,49],float32:[13,22,23,36,40,42,43,49],float64:[22,40,42,43,49],floatstorag:39,floattensor:[1,14,22,38,40,42,43,46,47],floor:[22,23,36,42,43],floor_:42,flow:[19,23,25,36],flush:[1,19,41,43],flush_sec:41,fly:[13,44],fmod:[42,43],fmod_:42,focu:37,focus:19,fold:[19,36,44],folder:[4,7,13,17,41,44],folk:4,follow:[1,4,5,12,13,14,15,17,19,20,21,22,23,24,26,27,28,30,32,35,36,38,40,42,43,44,46,47,49],foo:[17,19,36],foo_forward:36,foomodel:36,foomodul:36,foral:42,forc:[1,7,8,17,19,28,41],force_reload:17,forev:21,forg:[35,36],forget:[19,22],forgotten:35,fork:[13,21,22,27,30,32,35,46],forkingpickl:35,forkserv:[21,22,32],form:[1,4,5,13,15,19,22,23,29,33,36,37,41,43],format:[1,12,17,19,22,29,36,38,40,41,42,43,44,46],former:22,formul:[22,23],formula:[1,15,22,23,29,37,43],forth:[17,37],fortun:30,forum:[4,5,30,32],forward:[1,3,7,19,21,22,23,24,25,27,28,29,30,33,36,38,43],found:[19,22,32,41,43,46],four:47,fourier:43,fp16:22,fp32:22,fpn:46,fps:41,frac:[15,22,23,24,37,42,43,47],frac_:42,fraction:[13,22,24,43,47],frame:[41,43,44],frames_per_clip:44,framework:[4,5,15,36,37],frank:15,free:[1,4,14,15,24,25,30,32,35],freed:[1,21,28,42],freedom:15,freez:[22,25],freeze_support:35,frequenc:[22,23,37,43],frequent:[4,18,22],fresh:17,frisbe:46,fritz:6,fritzo:6,fro:[42,43],frobeniu:43,from:[1,4,5,7,8,13,14,15,19,21,22,23,24,28,29,30,31,32,37,38,40,41,42,43,44,46,47,48],from_buff:39,from_dlpack:16,from_fil:39,from_ipc_handl:8,from_numpi:[42,43],from_pretrain:22,front:[22,42],frozen:[25,35,37],full:[13,14,15,17,19,22,23,36,43,44],full_lik:[36,43],fulli:[13,14,22,25,28,29],func:[1,19,42],functioneventavg:1,functor:15,fundament:4,further:[1,5,7,14,22,32,33,41,43],furthermor:[7,22,33],fuse:47,fusion:19,fut:27,futur:[1,8,19,23,27,36,37,38,42],g_cpu:43,g_cpu_oth:43,g_cuda:43,g_t:22,gain:[5,24,47],galleri:4,gamma:[22,37,43,47],gamma_:43,gan:22,gap:43,garbag:13,gate:[22,23],gather:[8,14,30,31,36,42,43],gather_list:14,gaussian:[15,23],gchanan:[5,6],ge_:42,gel:[42,43],gemm:[27,36],gen_non_contig_grad_output:1,gener:[1,4,7,13,14,15,19,22,23,24,28,30,31,33,35,36,37,38,40,41,42,44,45,46],generate_square_subsequent_mask:22,geometr:[23,42,43],geometri:[28,43],geometric_:[42,43],geq:[22,23,24,43],geqrf:[42,43],ger:[42,43],gesdd:43,gesvd:43,get:[1,7,8,13,14,19,21,22,25,29,30,31,36,37,41,42,43,44,45],get_all_sharing_strategi:21,get_backend:14,get_context:32,get_default_dtyp:[43,49],get_devic:[38,40,42],get_device_cap:8,get_device_nam:8,get_image_backend:45,get_info:[42,43],get_input:36,get_lr:37,get_num_interop_thread:[27,43],get_num_thread:[27,43],get_rank:14,get_rng_stat:[8,43],get_rng_state_al:8,get_sharing_strategi:21,get_stat:43,get_worker_info:13,get_world_s:14,getenv:31,getsourc:31,gil:[13,14,28],gimelshein:6,giraff:46,girshick:22,github:[4,5,17,29,36,37],give:[1,2,4,13,17,19,21,22,25,28,29,36,37,43,44,47],given:[1,4,5,7,8,13,14,15,19,20,21,22,23,24,27,29,31,36,37,38,4
1,42,43,44,47,48],glass:46,global:[3,13,14,15,19,31,32,41,43,46],global_step:41,globalcontext:35,gloo:[14,22],gloo_socket_ifnam:14,glorot:24,glove:46,glu:36,goe:[22,30],going:[4,14,21,25,27,31,35,41],goldsborough:6,gomez:22,good:[4,7,17,21,22,27,31,43],govern:[4,18],gpu1:22,gpu:[1,2,4,8,12,13,18,19,28,33,35,37,39,42,43,46],gpu_model:19,gpudirect:14,grad:[1,3,15,19,22,32,38,42],grad_bia:29,grad_fn:[1,25,38,42],grad_input:[22,29,35],grad_output:[1,22,29,35],grad_tensor:1,grad_vari:1,grad_weight:29,gradcheck:[1,29,43],gradgradcheck:1,gradient:[3,13,14,15,19,22,23,25,29,30,37,38,42],graham:22,grai:47,grain:[14,25,47],grangier:22,grant:5,graph:[1,3,15,22,25,29,36,41,42,43],graphic:35,graphroot:1,grave:22,grayscal:[41,47],great:4,greater:[2,22,23,25,36,43],greater_than:15,greater_than_eq:15,greaterthan:15,greaterthaneq:15,greg:[5,6],gregori:6,grep:30,grid:[23,41,43,48],grid_i:43,grid_x:43,gross:[5,6],ground:[4,41,46],group:[1,5,17,20,21,22,23,36,37,41,42,43],group_by_input_shap:1,group_nam:14,grow:[4,38],gru:19,gt_:42,gtcoars:44,gtfine:44,guarante:[3,13,14,15,22,27,33],guard:32,guid:[1,13,18],guidanc:4,guidelin:[5,46],gumbel:23,h_0:22,h_1:22,h_i:22,h_n:22,h_t:22,hack:4,had:[4,19],hadamard:22,hair:46,half:[15,22,23,37,39,40,42,43],half_cauchi:15,half_norm:15,half_open_interv:15,halfopeninterv:15,halftensor:[40,42],ham:[23,43],hamiltonian:15,hamming_window:43,hand:[1,2,19,22,36,43],handbag:46,handi:28,handl:[1,3,8,13,14,17,21,22,23,28,30,32,36,42,43,44],handler:31,hang:22,hann:43,hann_window:43,happen:[1,4,5,14,15,21,22,29,30,32,35,42],happi:4,hard:[1,4,19,22,23,25,36],harder:22,hardshrink:42,hardtanh:36,hardtanh_:23,has:[1,3,4,5,8,12,13,14,15,19,21,22,23,25,26,27,29,31,32,35,36,37,38,39,40,41,42,43,44,46,47],has_enumerate_support:15,has_rsampl:15,hash:20,hasn:37,hat:22,have:[1,3,4,5,8,13,14,15,17,19,21,22,23,24,25,26,28,29,30,31,32,33,35,36,37,38,40,41,42,43,44,46,47],head:22,head_1:22,head_bia:22,head_h:22,head_i:22,header:[1,7,35],health:5,healthi:[4,5],heart:13,heavi:[1,14,25,35],heavili:[2,29,37],hei:4,height:[22,23,36,47],held:8,hellemn:6,hello:19,help:[1,2,4,5,13,17,22,25,26,28,36,43],helper:[3,14,17,19,22,28,36],henc:[22,28,43,44],here:[1,4,5,13,14,15,17,19,22,23,29,30,31,35,36,42,43,44,46,48],hessian:24,heurist:[7,13],hflip:47,hidden:[3,22,28,41],hidden_s:22,hierarch:41,high:[2,4,15,21,22,41,42,43],higher:[1,4,8,14,22,29,42],highest:[22,43],highli:[5,17,22,36],hing:22,hingeembeddingloss:23,hinton:37,his:37,histc:[42,43],histogram:[41,43],histor:27,histori:[1,29,30,37],history_s:37,hit:3,hmc:15,hmdb51:45,hmdb:44,hold:[1,22,26,29,30,32,37,40,42,43,47],holist:4,hood:[1,21,32],hook:[1,22,31,42],hop:43,hop_length:[42,43],horizont:47,horizontal:47,hors:46,host:[13,14,22,28,39,42],hot:[15,23,46],houseroad:6,how:[3,4,5,13,14,19,20,21,22,23,29,30,32,36,41,43,46,47],howev:[2,3,4,5,8,13,14,15,19,22,28,32,33,34,35,36,38,42,43,44],hspmm:38,hsv:47,html:[2,4,37,41],http:[2,4,17,20,22,35,36,37,41,44,46,47],hub:[18,20],hub_dir:17,hubconf:17,huber:22,hue:47,hue_factor:47,human:[0,24,36,44],hundr:31,hwc:41,hybrid:38,hydrant:46,hyper:19,hyperbol:43,i_0:43,i_d:43,i_n:43,i_t:22,icdf:15,icml_2006:22,idea:[4,22,31],ident:[1,7,13,14,15,23,24,33,38,42,43],identifi:[4,14,21,22,26,31,41,43],identity_transform:15,idiom:35,ids:[22,23],idx:[13,22],iff:15,ifft:[42,43],ignor:[4,8,19,22,23,29,37,42,43],ignore_index:[22,23],ignored_cod:19,illia:22,im2col:22,imag:[13,22,23,41,44,45,46,48],image_s:44,image_set:44,imagefold:45,imagenet:[14,24,45,46],imagenet_data:44,imagenet_root:44,imaginari:43,imbalanc:22,img:[41,44,47],img_
batch:41,img_height:47,img_hwc:41,img_tensor:41,img_width:47,immedi:[4,5,22],impact:33,imper:14,implement:[1,3,8,13,14,15,19,21,22,23,25,27,29,30,31,32,35,36,37,38,42,43,44,46],impli:21,implic:19,implicit:[22,23,36],implicit_cast:36,implicitcasttyp:36,implicitli:[4,19,22,43],importerror:35,impos:21,improb:13,improv:[5,12,14,22,29,36,37,46],in1:22,in1_featur:22,in2:22,in2_featur:22,in_channel:22,in_featur:[22,29],inaccuraci:1,inc:19,incas:42,incept:[36,47],inception_v3:46,includ:[1,2,3,4,5,7,13,14,17,19,21,22,23,27,28,30,31,33,42,43,44,46],include_path:7,inclus:[15,42,43],incom:[21,22,23],incompat:[5,7,26],incomplet:13,inconsist:43,incorrect:[2,19,23,28,42,43],increas:[1,4,8,11,15,22,23,25,28,37,47],increment:[1,19,22,25],incur:[3,32,43],inde:19,independ:[5,8,13,14,19,22,23,42],index:[8,13,15,18,19,21,22,23,25,28,36,37,38,40,41,42,44],index_add:42,index_add_:[33,42],index_copi:[36,42],index_copy_:42,index_fil:[36,42],index_fill_:42,index_put:42,index_put_:42,index_select:[36,42,43],indic:[1,8,13,14,15,19,22,23,29,36,37,38,41,42,43],individu:[4,5,13,19,22,31,33,42,43],induc:[9,10,23,42,43],inf:[15,22,43],infer:[1,18,19,23,36,38,42,43,46],inferencesess:36,infin:[22,37],infiniband:[14,22],infinit:[13,22,23],influenc:5,info:[8,14,18,43],inform:[1,2,4,13,14,19,22,27,29,31,36,40,41,42,43,44,46,47],infrastructur:5,infti:[22,23],ingredi:22,inherit:[19,29,32],init:[8,14,18,22,31],init_method:[14,22],init_process_group:[14,22],init_weight:22,initi:[2,8,13,19,22,23,24,28,29,31,35,37,42,43,46],initial_accumulator_valu:37,initial_se:[8,13,43],inject:31,inlin:[7,19,27],inline_extens:7,inner:[36,43],innermost:15,inp:[1,13,22],inp_unf:22,inplac:[22,23,36,47],inplace_assign:36,inplaceindexedassign:36,inplaceindexedassignmentonnx:36,input1:[22,23,35,36],input2:[22,23,35,36,42,43],input3:[42,43],input:[1,3,5,8,12,13,14,15,19,22,23,24,25,27,28,29,30,31,36,37,38,42,43,44,46,47],input_1:36,input_2:36,input_3x3:22,input_data:36,input_featur:29,input_length:[22,23,30],input_nam:36,input_s:22,input_tensor_list:14,input_to_model:41,input_tupl:19,input_var:[3,22],insensit:22,insert:[15,19,22,43],insid:[1,4,19,28,31],insight:4,inspect:[1,31],inspir:37,inst:44,instal:[1,7,14,17,36,41,43,44],instanc:[13,15,19,21,22,23,30,43,44,45],instance_norm:36,instancenorm1d:23,instancenorm2d:23,instancenorm3d:23,instancenorm:22,instantan:8,instanti:[17,19,22,23,29],instead:[1,3,5,13,14,15,19,22,23,30,32,35,36,37,38,42,43,47],instruct:[2,19,36],instrument:31,insuffici:8,int16:[40,42,43,49],int32:[22,40,42,43,49],int64:[22,23,28,36,40,42,43,49],int64tensor:46,int8:[40,42,43,49],int_:43,int_a:43,int_b:43,int_repr:42,integ:[8,13,14,15,19,22,23,36,37,40,42,43,47,49],integer_interv:15,integergreaterthan:15,integr:[13,22,31,36,37,42,43],intel:[35,45],intel_openmp:35,intens:[37,47],intent:[4,19],inter:[27,43],interact:[1,5,8,36,41],interchang:[15,19],interconnect:14,interest:[4,5,18,44],interfac:[29,36,37,41],intermedi:[3,19,22,23,25,30,36],intermediari:15,intern:[5,13,15,22,25,27,28,36,38,43,44,46],internet:[4,44],interop:43,interoper:27,interpol:[22,43,47],interpret:[13,14,21,23,27,38,43],interprocess:8,interrupt:21,interv:[15,43,47],intra:27,introduc:[15,17,22,26,42,43],introduct:[26,41],inttensor:[40,42,43],intuit:36,inv:[15,43],invalid:43,invari:[15,22,38,43,47],invers:[15,22,23,37,42,43],inverse_indic:43,invert:[15,22,43,46],invest:5,investig:4,invis:28,invoc:[3,19,29,31,36],invok:[19,22,27,31,37],involv:[4,5,13,28,30,33],iotamudelta:6,iou:46,ipc:8,ipc_collect:8,ipc_handl:8,ipp:45,irecv:14,irfft:[42,43],irrelev:1,irrespect:[28,43],is_
avail:[8,28,43],is_coalesc:38,is_complet:14,is_contigu:42,is_cuda:[39,42],is_floating_point:[40,42,43],is_in_onnx_export:36,is_initi:14,is_leaf:[1,42],is_mpi_avail:14,is_nccl_avail:14,is_pin:[13,39,42],is_python_modul:7,is_set_to:42,is_shar:[39,42],is_sign:42,is_spars:[39,42],is_storag:43,is_tensor:43,is_train:[1,43],is_valid_fil:44,isend:14,isfinit:43,isinf:43,isinst:15,isn:[4,13,28],isnan:[36,43],isol:21,issu:[5,21,22,28,32,35,36],itch:4,item:[13,17,22,33,41,42,43,44],iter:[4,8,14,15,21,22,25,26,37,41],iter_end:13,iter_start:13,iterabledataset:[13,31],itertool:[15,43],its:[1,2,4,5,7,8,13,14,15,19,21,22,25,26,28,29,30,35,36,37,38,40,42,43,44,46,47],itself:[3,4,19,21,22,23],ivalu:31,jacobian:[1,15,43],jakob:22,jang:15,jess:6,jit:[7,18,27,31,36,43],jitter:47,job:[14,22,31,37,41],johann:6,johnson:6,join:[4,14,17,21,32],jointli:[15,22],jone:22,joulin:22,journal:43,jpeg:31,json:[19,31,44],juggl:3,jump:[40,42],junji:6,just:[1,4,7,15,17,19,21,22,28,31,36,42,43],k_0:43,kaiming_normal_:24,kaiming_uniform_:24,kaiser:22,kdim:22,keep:[1,4,13,17,19,21,22,23,25,28,30,32,37,41,43,47],keep_var:22,keepdim:[22,23,42,43],kei:[1,13,14,19,22,31,36,37,39,41,42,43],kept:[21,22],kernel:[2,7,8,22,23,29,36],kernel_s:[22,23,41],kernel_shap:36,key_averag:1,key_padding_mask:22,keyboard:46,keypoint:45,keypointrcnn_resnet50_fpn:46,keyword:[1,17,19,22,36,37,41,43],kill:[21,30],kind:[14,22,29,32,36],kinet:45,kinetics400:44,kite:46,kl_diverg:15,kl_normal_norm:15,kl_version1:15,kl_version2:15,kldivloss:23,kmnist:45,knife:46,know:[3,4,19,25,36],known:[4,14,19,21,22,24,27,28,36,43,47],knuth:4,kth:43,kthvalu:[42,43],kullback:[15,22,23],kuzushiji:44,kw_i:22,kwarg:[1,3,7,17,22,23,34,36,39,42,43,44,46,47,48],kwlist:41,l1loss:23,l_1:22,l_c:22,l_n:22,l_p:23,lab:44,label:[4,13,22,23,32,41,44,46],label_img:41,lambd:[22,23,37,42,47],lambda1:37,lambda2:37,lambda:[1,13,15,22,23,37,42,43,47],lambdalr:37,langl:15,languag:[7,22,23,30,36],laptop:46,larg:[4,13,18,21,22,23,27,28,30,38,42,43,44,46,47],larger:[1,5,22,23,30,31,41,42,43,46,47],largest:[19,23,42,43,49],last:[1,3,13,19,22,23,25,37,43,46,47],last_epoch:37,later:[1,4,19,22,27,28,34,43],latest:[4,14,15,17,36],latin1:43,latter:[22,32],launch:[2,13,22,25,27,28],law:[22,47],layer:[14,23,24,25,29,30,37,46],layer_count:36,layer_norm:36,layernorm:23,layout:[17,19,20,38,41,42,43],lazi:37,lazili:8,lbfg:37,lbrace:43,lceil:43,ldot:[15,22,43],le_:42,lead:[1,4,35,42,43],leadership:5,leaf:[1,42,43],leak:21,leaki:[22,23,24],leaky_relu:[24,36],leaky_relu_:23,leakyrelu:23,learn:[4,15,18,22,24,44,46],learnabl:[22,23],learned_0:36,learned_14:36,learned_15:36,learned_1:36,learned_2:36,learned_3:36,learned_:36,least:[15,17,22,24,26,30,39,42,43,46],leav:[1,19,25,42,43,44],left:[19,22,23,36,42,43,47],left_ankl:46,left_ear:46,left_elbow:46,left_ey:46,left_hip:46,left_kne:46,left_should:46,left_wrist:46,leftimg8bit:44,legaci:[23,40],legitim:22,leibler:[15,22,23],lemma:15,len:[13,14,19,22,23,38,41,43,44],length:[1,8,13,14,15,19,22,23,26,30,36,42,43,47],leq:[22,23,43],lerp:[42,43],lerp_:42,less:[1,4,8,13,15,17,22,29,32,43,46],less_than:15,lesser:27,let:[1,4,13,15,28,29,32,35,41,42],letter:[43,44],level:[1,4,13,19,21,22,24,27,41,42,43,46],lexicograph:43,lfloor:[22,23,43],lib64:7,lib:[35,43],libari:35,libenzi:6,librari:[2,5,7,13,18,27,29,30,31,32,33,35,36,43,45],lie:[22,23,41],lies:44,lifetim:4,light:[41,46],lighter:47,like:[1,2,3,4,5,7,8,13,14,15,17,19,21,22,23,27,28,29,30,31,32,35,36,42,43,47],likelihood:[15,22,23],limit:[13,21,22,25],line:[1,2,22,26,35,36,43],line_search_fn:37,linear:[8,19,24,25,28,29,30,36,4
2,43,46],linearfunct:29,linearli:[22,23,30],lineartransform:47,liner:22,linewidth:43,link:[7,15,22,23,31],linker:7,linspac:43,linux:[14,17,20],list:[1,3,4,5,7,13,14,15,17,22,23,29,35,36,37,38,39,40,41,42,43,44,46,47,48],listconstruct:19,listofproperti:41,literatur:22,littl:29,live:[19,22,30,37],llion:22,load:[1,7,19,20,22,31,34,35,36,37,43,44,45,46],load_inlin:7,load_nvprof:1,load_state_dict:[17,22,34,37,43],load_state_dict_from_url:[17,20],load_url:[20,46],loadabl:17,loadann:44,loaded_weight:42,loader:[13,44],loc:[15,43],local:[14,17,19,21,22,23,30,41,44],local_rank:22,locallr_0:41,localresponsenorm:23,locat:[1,7,8,15,17,19,20,22,23,37,38,41,42,43,44,46,47],lock:[4,13,14,15,28,32],log10:[42,43],log10_:42,log1p:[42,43],log1p_:42,log2:[36,42,43],log2_:42,log:[7,13,15,22,23,36,41,42,43],log_:[42,43],log_abs_det_jacobian:15,log_dir:41,log_input:[22,23],log_norm:15,log_normal_:[42,43],log_pob:22,log_prob:[15,22,23],log_sigmoid:36,log_softmax:[22,36],logabsdet:43,logarithm:[22,23,43],logdet:[42,43],logdir:41,logic:[3,13,29],logist:[15,22],logit:[15,22,23],logsoftmax:23,logspac:43,logsumexp:[36,42,43],longer:1,longest:[22,30],longtensor:[22,23,38,40,42,43],look:[2,4,5,15,19,22,23,31,32,35,36,43],lookup:[15,22,23,27],loop:[8,19,27,30,36,41,47],loop_count:36,loop_in_traced_fn:19,loop_rang:36,loopmodel2:36,loopmodel:36,loos:31,lorentz:15,loss:[15,30,37,41,44,46],loss_fn:[32,37],lost:[22,43],lot:[4,21,32,41],low:[4,15,21,22,42,43],lower:[1,8,14,15,19,22,23,24,25,33,37,43],lower_bound:15,lower_choleski:15,lower_triangular:15,lowercas:14,lowercholeski:15,lowercholeskytransform:15,lowest:43,lowrank_multivariate_norm:15,lppool1d:23,lppool2d:23,lr_0:41,lr_decai:37,lr_lambda:37,lr_schedul:37,lrelu:22,lrn:22,lru:[28,43],lstm:[3,36,41],lstsq:[42,43],lsun:45,lt_:42,lu_data:[42,43],lu_pivot:[42,43],lu_solv:[42,43],lu_unpack:43,lukasz:22,lvert:[22,23,43],macbook:41,machin:[14,22,31],maco:21,maddison:15,made:[1,5,19,22,35,37,41,47],mae:22,magma:[35,43],magma_2:35,magma_hom:35,magnitud:[22,24,43],mai:[1,2,4,7,8,9,10,11,13,14,15,19,22,23,26,28,30,35,36,37,38,39,42,43,47],main:[13,14,15,21,23,25,34,35,41,42,43],main_tag:41,mainli:[15,22,23],mainta:47,maintain:[4,14,15,22],major:[4,8,22,23,36,38],make:[1,2,3,7,8,11,13,14,15,17,19,21,22,23,25,26,28,29,30,32,33,35,36,37,40,41,42,43,44,47,48],make_grid:[41,48],manag:[1,4,22,30,31,36,43],mandatorili:13,mani:[1,4,5,13,14,19,22,24,25,26,27,29,31,33,40,41,42,43,45],manipul:30,manner:[3,26,42],mantissa:42,manual:[13,14,19,21,22,23,28,30,33,35,36,41],manual_se:[8,33,43],manual_seed_al:8,map:[7,15,19,22,23,35,36,39,43],map_:42,map_loc:[19,20,22,43],margin:[22,23,41],marginrankingloss:23,mark:[8,19,22,25,42],marker:8,market:[4,5],marten:24,mask:[22,42,43],masked_fil:[36,42],masked_fill_:42,masked_scatt:42,masked_scatter_:42,masked_select:[42,43],maskrcnn_resnet50_fpn:46,mass:15,master:[4,17,36],master_addr:14,master_port:14,mat1:[38,42,43],mat2:[38,42,43],mat:[38,41,42,43,44],match:[1,8,14,15,19,22,23,26,36,37,40,42,43,44,47],math:[13,19,22,23],mathbb:22,mathbf:[15,22],mathbin:43,mathcal:[22,24,43],mathemat:[22,23,43],mathrm:[15,22,43],matmul:[22,42,43],matplotlib:41,matric:[15,23,38,42,43],matrix:[15,22,23,24,38,41,42,43,47],matrix_pow:[42,43],matrix_rank:43,matter:[1,2,5,19,25,43],max:[13,14,19,22,23,26,30,36,37,42,43,47,48,49],max_:22,max_bin:41,max_ev:37,max_indic:43,max_it:37,max_lr:37,max_memory_alloc:[8,28],max_memory_cach:[8,28],max_momentum:37,max_norm:[22,23],max_queu:41,max_siz:28,max_val:[22,23],max_valu:22,maxim:[22,37,43],maximum:[8,15,22,23,28,37,43,47],maxnorm:
[42,43],maxpool1d:[23,36],maxpool2d:[23,36],maxpool3d:[23,36],maxpool:[22,36],maxunpool1d:23,maxunpool2d:23,maxunpool3d:23,may04_22:41,mayb:4,mean:[1,3,4,8,13,14,15,19,21,22,23,24,30,33,35,36,37,42,43,46,47],mean_vector:47,meant:[1,22],meantim:[22,23],measur:[8,15,22,23,31,37],mechan:[18,21,31],median:[15,42,43],medium:4,meet:28,megabyt:22,member:[4,5,13,14,19,22,30],membership:5,memo:22,memoiz:15,memori:[1,3,15,16,19,21,22,23,25,32,37,39,40,41,42,43,46],memory_alloc:[8,28],memory_cach:[8,28],memory_effici:46,memory_key_padding_mask:22,memory_mask:22,mendoza:6,mention:[17,19,28],mere:4,merg:[4,5,13,22],mesh:41,meshgrid:43,messag:[4,8,17,19,30,36,37],messmer:6,meta:41,metadata:[19,41,43],metadata_head:41,meter:46,meth:43,method:[1,4,7,8,13,14,15,17,21,22,23,24,28,29,30,32,36,37,38,40,41,42,43,44,46],metric:[8,37,41],michael:6,microwav:46,middl:36,might:[1,2,5,17,19,22,25,27,28,31,42,43,44],mileston:37,millisecond:8,min:[13,14,22,23,36,37,42,43,47,48,49],min_indic:43,min_lr:37,min_siz:46,min_val:[22,23],min_valu:22,min_x:43,mind:22,minfunc:37,mini:[13,22,23,46,48],minibatch:[13,22,23,43],minim:[1,4,17,32,37,43],minimum:[7,22,37,43,46],minkowski:23,minlength:[42,43],minor:[5,8],minu:43,minut:[4,14,41],mismatch:[19,30,43,47],miss:[22,35,36],missing_kei:22,mistak:30,mix:[7,15,22,27,36],mkl:[27,35,43],mkl_2018:35,mkl_fft:35,mkl_num_thread:27,mkl_thread:27,mkldnn:42,mkldnn_thread:27,mmap:21,mnasnet0_5:46,mnasnet0_75:46,mnasnet1_0:46,mnasnet1_3:46,mnist:[41,45],mnist_train:41,mnt:14,mobil:46,mobilenet_v2:46,mobilenetv2:46,mod:19,mode:[1,2,13,14,15,19,22,23,24,30,33,36,37,42,43,44,46,47],model:[1,2,3,8,14,18,19,20,22,23,25,27,28,32,33,36,37,41,43,45,47],model_dir:20,model_zoo:[18,46],moder:3,modif:[1,42,43],modifi:[1,13,19,22,23,25,28,36,37,42],modul:[1,3,7,17,18,21,23,25,27,28,30,31,32,35,36,41,42,43,46,47],module_kwarg:23,modulelist:19,modulu:43,momemtum:22,moment:[1,21,36,37],momentum:[22,23,25,37],monitor:[8,28,37,43],monoton:15,mont:15,moor:43,more:[1,2,5,7,8,13,14,15,19,20,21,22,23,24,25,27,28,29,30,31,36,37,38,40,41,42,43,44,46,47],moreov:[42,43],most:[1,2,4,8,13,14,15,17,19,21,22,23,25,28,32,37,38,40,42,43],mostli:[4,15],motion:44,motiv:4,motorbik:46,motorcycl:46,mountain:44,mous:46,moustapha:22,move:[3,19,20,21,22,23,28,32,37,39,41,42,43],moviepi:41,mpi:14,mrshenli:6,mseloss:23,msg:8,msys2:35,much:[1,2,4,5,13,22,28,42,47],mul:[1,19,36,38,42,43],mul_:[38,42],mulconst:29,mult:13,multi:[2,8,19,36,40,42,43],multicast:14,multidimension:22,multihead:22,multihead_attn:22,multilabelmarginloss:23,multilabelsoftmarginloss:23,multilay:22,multilin:41,multilinear:43,multimarginloss:23,multinomi:[42,43],multipl:[8,13,14,15,17,19,21,22,23,27,28,29,31,32,35,37,38,43,44,47],multipli:[22,23,38,43,46,47],multiplicand:43,multiprocess:[13,14,18,22,44],multiprocessing_context:13,multisteplr:37,multivari:[15,43],multivariate_norm:15,must:[1,7,8,13,14,15,19,21,22,23,24,26,29,32,33,36,37,39,42,43,47],mutabl:19,mutat:[19,42,47],mutual:[13,14],mvlgamma:[42,43],mvlgamma_:42,mvn:15,my_api:31,my_constraint:15,my_dict:19,my_experi:41,my_factori:15,my_imag:41,my_image_batch:41,my_image_hwc:41,my_lib:35,my_lib_add_backward_cuda:35,my_lib_add_forward_cuda:35,my_list:19,my_lstm:30,my_mesh:41,my_modul:19,my_module_inst:19,my_paramet:19,my_registri:15,my_script_modul:19,my_segmentation_transform:47,my_submodul:19,my_transform:15,my_variable_nam:19,myconstraint:15,myconstraintclass:15,myfunc:1,myiterabledataset:13,mymodel:32,mymodul:[19,22,30],mypi:19,myscriptmodul:19,mytransform:15,n_0:23,n_1:43,n_2:43,n_class:22,n_d:43,n_fft:
[42,43],n_i:[22,43],n_iter:41,n_k:[23,43],n_power_iter:22,n_t:22,naiv:13,name:[1,7,8,14,15,17,19,20,21,22,24,31,36,39,41,43,44,45,49],named_buff:22,named_children:22,named_modul:22,named_paramet:22,namedtupl:[13,22,43],namespac:19,nan:[1,43],narrow:[36,42,43],narrow_copi:[38,42],nasdaq:41,natalia:6,nativ:[19,21],natur:[1,2,4,15,22,33,43],nbatch:22,nccl2:22,nccl:22,nccl_debug:14,nccl_debug_subsi:14,nccl_socket_ifnam:14,nchannel:22,nchw:41,ncrop:47,ndarrai:[36,42,43,47],ndim:42,ndimens:42,ne_:42,nearest:[22,23,47],nearli:[1,32,42],necessari:[1,7,13,19,22,25,26,28,35,40,42,43],necessarili:[14,15,22,28,36,43],need:[1,4,5,8,13,14,15,19,21,22,23,25,28,29,30,31,32,33,35,36,37,38,39,42,43,44],need_weight:22,needs_input:31,needs_input_grad:[1,29],neeraj:6,neerajprad:6,neg:[8,13,15,19,22,23,24,36,42,43,47],neg_:42,negative_binomi:15,negative_slop:[22,23,24],neglig:[1,36],negoti:5,neighbor:[22,43],neighborhood:22,neighbour:[22,23],neither:[13,14],nelement:[22,42],neq:[22,43],nest:[1,8,19,22,42],nesterov:37,net:[19,22,28,41],netlifi:4,network:[4,15,19,22,23,24,25,28,36,37,47],neural:[4,19,22,24,28,37,46],neuron:22,never:[1,3,4,14,22,25,42],new_:[28,42],new_data:36,new_empti:42,new_ful:[28,42],new_group:[14,22],new_lr:37,new_on:42,new_stat:[8,43],new_strategi:21,new_tensor:[28,42],new_zero:42,newer:[27,28],newli:[4,25],next:[1,13,14,15,21,22,23,27,32,36,40,41,42,43,44],next_stat:15,nfs:14,ngimel:6,nhead:22,nhwc:41,nice:[1,22],niederreit:43,nielsen:15,nightli:41,niki:22,nine:[40,42],ninja:[7,35],nist:44,nll:22,nllloss:23,nlp:22,nnz:[1,38,42,43],no_grad:[1,3,36,43],no_sync:22,noam:22,noarch:35,nock:15,node:[14,22,36],non:[1,3,7,14,15,19,21,24,26,28,29,30,33,41,42,43,47],non_block:[22,28,39,42],noncontigu:1,nondet_tol:1,nondetermin:1,nondeterminist:[9,10,11,22,23,42,43],none:[1,7,8,13,14,15,19,20,21,22,23,24,28,29,32,36,37,38,39,41,42,43,44,46,47,48],nonexist:19,nonlinear:24,nonlinearli:4,nonneg:15,nonnegative_integ:15,nonzero:[1,36,42,43],noordhui:6,noplot:4,nor:[13,14,22],norm:[22,23,36,37,42,43],norm_typ:[22,23],normal:[1,17,19,24,28,37,41,42,43,46,47,48],normal_:[24,28,42,43],normalized_shap:[22,23],nose:46,notabl:47,notat:[42,43],note:[1,7,8,9,10,11,13,14,15,16,17,18,19,21,22,23,25,26,27,29,31,32,36,37,38,41,42,43,44,47],notebook:[4,48],noth:[4,7,8],notic:[19,22,43],notifi:5,notimplementederror:15,notion:[13,22],now:[1,3,22,26,28,29,36,37,42,43],nproc:21,nrow:48,nsdf3:44,nthread:44,nuanc:4,nuc:43,nuclear:43,num:[22,43],num_channel:22,num_class:[23,44,46],num_decoder_lay:22,num_direct:22,num_embed:[22,23],num_encoder_lay:22,num_featur:[22,23],num_group:22,num_head:22,num_keypoint:46,num_lay:[22,36],num_lin:44,num_output_channel:47,num_paramet:22,num_process:32,num_replica:13,num_sampl:[13,42,43],num_threshold:41,num_work:[13,35,44],number:[1,2,3,4,13,14,15,19,21,22,23,26,27,28,29,33,36,37,38,39,41,42,43,44,46,47,48,49],number_of_vertic:41,numel:[42,43],numer:[13,15,19,22,23,29,36,37,42,43,49],numpi:[13,26,30,35,36,41,42,43,44,47,49],nvcc:7,nvidia:[1,14,28,30,35,43],nvprof:[1,2],nvtx:[1,2],nvvp:1,o_t:22,obermey:6,obj:[8,19,35,43],object:[1,8,13,14,15,16,19,20,21,22,25,27,28,29,30,32,35,36,37,39,40,41,42,43,44,45,47,49],observ:[22,23],obtain:[1,13,14,15,21,22,23,27,42,43,46],obviou:[30,38],obvious:4,occas:[1,4,25],occasion:38,occupi:[8,22,23,28,49],occur:[8,13,19,22,23,28,30,36,42],occurr:43,odd:15,off:[1,4,8,9,10,22,23,27,31,42,43],offici:[5,14,22,35,46],offlin:[19,47],offset:[22,23,42,43,44,47],often:[1,2,4,7,13,14,15,19,22,23,30,31,36,37,41,42,43],old:[25,35,37,43],older:28,omagma:35,omega:43,omega_1:
43,omega_d:43,omega_i:43,omit:[3,7,14,22,35,36,43,47],omkl:35,omp:27,omp_num_thread:27,onc:[1,4,13,14,16,21,22,25,27,28,29,31,36,37,41,43],one:[1,2,3,4,7,8,13,14,15,19,21,22,23,26,27,28,29,31,32,33,35,36,37,39,40,41,42,43,44,45,46,47],one_hot_categor:15,ones:[1,14,15,19,22,23,26,28,29,36,37,42,43],ones_:24,ones_lik:[28,36,43],onesid:[42,43],onfunctionent:31,onfunctionexit:31,onli:[1,2,3,4,5,8,13,14,15,16,19,21,22,23,24,25,27,29,30,31,32,34,35,36,37,38,41,42,43,46,47],onlin:37,only_input:1,onnx:[18,22],onnx_aten:36,onnx_aten_fallback:36,onnx_model:36,onnxruntim:36,onto:[8,19,21,30,43],opaqu:14,open:[1,5,15,19,21,35,43],openbla:35,openmp:[27,35],oper:[2,3,4,5,8,9,10,11,13,15,22,23,26,27,28,29,30,32,33,37,38,40,42,45,47],operand:43,operator_export_typ:36,operatorexporttyp:36,operatornam:[22,43],opinion:4,opnam:36,oppos:47,ops:[1,14,19,27,28,29,36,42,43],opset:36,opset_vers:36,opt:43,optim:[1,4,7,14,15,18,19,22,24,25,27,30,32,36],optimiz:[19,36],optimum:37,option:[1,3,7,8,13,14,15,17,20,22,23,24,29,30,33,36,38,40,41,42,43,44,46,47,48],optional_unwrap:19,orang:46,ord:43,order:[1,3,4,14,15,17,21,22,26,28,29,33,36,37,42,43,46,47],ordereddict:22,ordin:[40,42],ordinari:8,org:[2,4,17,22,35,41,46,47],organ:[4,5,31],orgqr:[42,43],orient:36,origin:[1,13,19,21,22,28,31,32,36,37,39,42,43,47],orign:47,orion:6,orionr:6,ormqr:[42,43],ort:36,ort_sess:36,orthogon:[24,43],orthogonal_:24,orthonorm:43,ossci:35,other:[1,2,4,5,7,8,13,15,19,21,23,25,26,27,28,29,30,32,33,34,36,37,41,42,46,47,48],otherwis:[1,4,5,7,14,22,23,32,39,42,43,44,46],otim:[23,43],our:[4,19,29,32,36,38],out:[1,4,5,17,19,21,22,23,25,26,32,36,38,40,41,42,43,47],out_caffe2:36,out_channel:22,out_featur:[22,29],out_j:22,out_ort:36,out_padh:23,out_padt:23,out_padw:23,out_unf:22,outer:[43,46],outlier:22,output1:[22,36],output2:22,output:[1,2,3,4,8,13,14,15,19,22,23,25,27,29,30,35,36,37,38,41,42,43,44,46,47],output_2d:22,output_4d:22,output_devic:[22,23],output_featur:29,output_nam:36,output_pad:[22,23],output_ratio:22,output_s:[22,23],output_tensor_list:14,outsid:[5,13,19,23,28,47],oven:46,over:[1,5,13,14,15,19,21,22,23,26,27,32,36,37,38,42,43,44,47,48],overal:[5,14,25,32,47],overall_end:13,overall_start:13,overflow:[23,43],overhead:[1,2,14,31,42],overheard:44,overlap:[1,13,22,28],overparameter:15,overrid:[7,14,15,22,23,36,37,41,43],overridden:[1,7,22],overrit:13,overshoot:23,overview:[21,25],overwhelm:4,overwrit:[13,22,25],owen:43,own:[4,5,14,15,22,28,36,43],owner:17,ownership:[4,5],p1d:23,p2d:23,p3d:23,p_c:22,p_i:22,p_tensor:42,pace:4,pack:[22,30,35,43],pack_padded_sequ:30,packag:[4,8,15,17,18,22,37,41,43,45],packagesnotfounderror:35,packed_input:30,packed_output:30,packedsequ:12,pad:[13,30,33,36,41,43,47,48],pad_if_need:47,pad_mod:[42,43],pad_packed_sequ:30,pad_valu:48,padded_input:30,padding_idx:[22,23],padding_input:30,padding_mod:[22,23,47],padding_valu:22,padh:23,padt:23,padw:23,page:[4,13,22,28],pai:35,pair:[19,22,23,37,38,41,43],pairwis:[15,22],pairwisedist:23,paper:[4,22,37,46],parallel:[0,13,14,22,23,27,28,33,35,47],parallel_info:[0,27],parallelli:44,param1:15,param2:15,param:[1,15,22,24,25,36,37,46],param_byt:37,param_group:37,param_shap:15,paramet:[1,3,7,8,13,14,15,16,17,20,21,23,24,25,29,31,32,34,36,38,39,41,42,43,44,45,46,47,48],parameter:[15,42],parameteriz:15,parametr:[15,29],parent:[21,35,41],park:46,parmar:22,pars:[1,14],parse_arg:[28,36],parser:28,part:[2,3,4,5,7,14,15,19,20,22,25,30,36,37,38,43,44],parti:[5,17],partial:[15,22,23,36,43],particip:[13,14,22],particular:[4,13,19,22,27,28,30,31,33,36,42,43,44],particularli:[13,19,22],p
artit:22,partli:5,partner:4,pascal:[44,46],pass:[1,3,4,7,13,14,15,17,19,21,22,24,25,27,28,31,36,37,38,41,42,43,44,46,47],past:[14,30,46],paszk:[5,6],patch:[4,22],path:[1,2,7,14,17,19,25,34,44],path_importer_cach:17,path_to_hub_dir:17,pathwai:19,patienc:37,pattern:[14,19,22,28,29,30],pdb:19,pdf:[22,46,47],pdist:22,peak:[8,37],peer:[4,14,28],penalti:37,pend:41,penros:43,peopl:4,per:[7,8,13,14,22,23,27,31,33,41,43,46],per_index_weight:22,per_sample_weight:[22,23],per_work:13,perform:[1,3,11,12,13,14,15,19,21,22,23,24,25,28,29,33,37,38,39,40,41,42,43,47],period:[32,37,43],permit:38,permut:[13,36,42,43],perplex:15,persist:[3,4,12,22,35],person:[4,5,18,45],perspect:47,perturb:[1,43],peter:6,peterjc123:[6,35],phase:35,phenomenon:30,phi:23,phone:46,photo:44,phototour:45,php:44,phy:43,pic:47,pick:47,pickl:[13,21,22,43],pickle_load_arg:43,pickle_modul:43,pickle_protocol:43,pid:30,piec:4,pieter:6,pietern:6,pil:[44,45],pillow:[41,47],pin:[22,39,42,43],pin_memori:[13,28,39,42,43],pinvers:[42,43],pip:[35,41],pipelin:47,pivot:[42,43],pixel:[22,23,44,47,48],pixel_shuffl:[22,36],pixelshuffl:23,pixelwis:46,pizza:46,pjh5:6,pkg:35,place:[4,8,13,14,19,22,23,28,31,36,39,42,47],placehold:22,plai:14,plain:[7,22],plan:[4,14,22,43],plane:[22,23,43,44],plant:46,platform:[7,33,43,46],pleas:[1,2,4,5,9,10,11,14,15,19,22,23,29,35,36,37,41,42,43,44],plenti:30,plot:41,plu:47,plume:44,pmf:15,png:44,point:[1,4,5,8,13,19,22,23,25,33,37,40,41,42,43,44,49],pointer:8,pointwis:[15,26],poisson:[22,23],poissonnllloss:23,poli:44,polici:[15,37],policy_network:15,polosukhin:22,polygon:44,polymorph:19,pool:[27,29,32,33],pop:[8,22],popul:[1,15,42],popular:45,popularli:47,port:14,portion:[22,23,37,43],pos_weight:[22,23],posit:[1,13,15,17,22,23,36,42,43,47,49],positive_definit:15,positive_integ:15,positivedefinit:15,possess:5,possibl:[5,7,13,15,19,21,22,23,24,25,29,32,35,40,42,43,44],post:[4,30,35,46,47],postprocess:46,pot:46,potenti:[11,14,21,22,23,25,36],potential_energi:15,pottedpl:46,pow:[1,36,42,43],pow_:42,powbackward0:1,power:[22,23,37,43,47],powertransform:15,pr_curv:41,practic:[13,15,18,19,21,46],pradhan:6,pre:[1,17,22,36,37,42,44,46],preced:27,precis:[1,7,15,22,36,41,43,46],precision_matrix:15,precompil:31,predict:[22,41,46],predict_net:36,preemptiv:22,prefer:[5,13,22],preferr:43,prefix:[17,22,38],prelu:36,prepar:36,prepend:[7,13,22,26,43],preprocess:[42,46],presenc:5,present:[5,14,20,21,22,25,40,43,44,46],preserv:[13,19,22,24,28,42,47],preserve_rng_st:3,pressur:[1,25],pretrain:[17,22,25,36,46],pretrained_backbon:46,pretti:[19,43],prevent:[4,8,13,14,21,22,23,38,43],previou:[14,22,35,42,43],previous:[1,19,26,28,42],prim:19,primari:5,primarili:[15,42],primit:[14,19,27],print:[1,13,17,19,22,23,27,28,29,36,37,41,42,43,44],printable_graph:36,printer:19,prior:[4,5,26,37],prioriti:[4,8],pro:[35,41],prob:15,probabl:[13,21,22,23,29,35,36,41,42,43,47],problem:[4,14,21,22,30,32,33,35,43],problemat:[4,19],proce:28,procedur:[19,44],proceed:8,process:[1,7,8,14,15,17,19,21,22,23,27,28,31,32,33,35,38,39,44,46],process_group:22,process_id:22,processgroup:14,prod:[22,36,42,43],prod_:[22,43],prod_d:22,produc:[4,7,8,13,19,21,22,23,26,28,31,35,36,38,43],producer_info:31,product:[1,14,15,19,22,23,31,42,43,47],prof:1,profil:[2,43],program:[1,2,8,13,14,19,25,28,30,31,32,35,41],programm:19,progress:[8,17,20,37,46],project:[4,17,34],projector:41,promot:22,prompt:35,prone:[21,32],propag:[15,21,36,38,42],proper:[22,28,35],properli:[4,22,32,40,43],properti:[1,13,15,19,22,23,28,37,40,49],proport:[22,47],proportion:[22,23],propos:[5,6,37],protobuf:36,protocol
:[13,35,43],prototyp:40,prove:21,proven:[4,22],provid:[1,4,5,7,13,14,15,17,19,21,22,23,28,36,37,38,39,40,41,42,43,46,47,49],pseudo:43,pseudoinvers:15,pseudorandom:33,psi:43,pth:[17,19,20],publish:4,puhrsch:6,pull:5,purchas:5,pure:19,purg:41,purge_step:41,purpos:[14,22,42,43],push:[4,5,8],pushcallback:31,put:[4,13,17,21,22,28,32,37,42,43,44],put_:42,pybind11:[7,19],pyc:31,pycapsul:16,pyplot:41,python2:[14,43],python3:[14,43],python:[1,2,5,7,8,13,14,17,21,22,23,25,26,27,28,29,30,31,32,36,38,42,43,47],pytorch:[0,1,2,7,8,13,15,17,19,20,22,26,27,28,30,31,32,35,37,40,41,42,43,44,46,49],pytorch_jit:19,q_scale:42,q_zero_point:42,qmnist:45,qscheme:42,qtensor:42,quad:22,quadrat:30,qualiti:[4,44],quantiti:37,quantiz:[1,42],quasirandom:43,queri:[8,13,22,28],question:[4,18],queu:[8,28],queue:[4,21,41],queue_2:21,quick:[1,4],quickli:4,quit:[4,30],qw_i:22,r_t:22,racket:46,rais:[1,4,15,19,21,25,28,42,43,47],raise_except:1,ram:[41,43],rand:[1,19,23,36,41,42,43,46],rand_lik:43,randint:[22,23,38,41,42,43,47],randint_lik:43,randn:[1,19,22,23,25,26,28,29,36,38,40,41,42,43],randn_lik:[36,43],random:[15,17,22,23,33,36,41,42,44,46,47],random_:[22,23,42,43],random_devic:43,random_offset:44,random_split:13,randomaffin:47,randomappli:47,randomchoic:47,randomcrop:[44,47],randomeras:47,randomgrayscal:47,randomhorizontalflip:47,randomli:[1,13,22,23,31,44,47],randomord:47,randomperspect:47,randomresizedcrop:47,randomrot:47,randomsampl:13,randomsizedcrop:47,randomverticalflip:47,randperm:43,rang:[1,8,13,14,15,19,22,23,30,32,36,37,41,42,43,44,46,47,48],range_pop:8,range_push:8,rangl:15,rank:[13,14,15,22,32,43],rapidli:30,rare:4,rate:[15,22,31,41,46],rather:[1,3,7,19,23,26,36,41,42,43,48],ratio:[15,22,47],raw:[19,22,36],rbrace:43,rceil:43,rcond:43,rdinat:[38,43],reach:[4,5,13,32,37],reachabl:14,read:[13,14,19,26,28,31,36,37,42,43],readabl:[0,13,36],readi:[4,7,22,43],readlin:[19,43],real:[13,15,17,22,43,47],real_vector:15,realiti:2,realli:[1,4,25,43],realloc:43,realvector:15,rearrang:[22,23],reason:[13,14,19,23,25,36,40],rebas:4,reblitz:6,rebuild:4,rebuilt:4,recal:[22,29,41,46],receiv:[1,4,13,14,15,21,22,32],recent:[1,4],recip:22,reciproc:[36,42,43],reciprocal_:42,recogn:13,recognit:[44,46],recommend:[1,13,14,17,19,21,22,24,25,27,28,29,32,36,42,43],recomput:[3,22,37],reconstruct:[8,22],record:[1,8,19,22,25,36,41,42,43],record_ev:8,record_shap:1,recordfunct:31,recov:[22,43],recreat:25,rectangl:47,rectifi:[22,23,24],recurr:[14,19,28,37],recurs:[15,19,22],recv:14,redistribut:35,reduc:[1,8,14,21,22,23,35,37,38,42,43],reduce_add:8,reduce_multigpu:14,reduce_op:14,reducelronplateau:37,reduceop:14,reduct:[14,22,23],redund:[14,43],reevalu:37,refactor:[4,34,35],refcount:[21,32],refer:[8,13,14,15,17,18,21,22,23,25,29,30,32,41,42,43,45,46],referenc:[19,25,43],reflect:[19,22,23,30,42,43,47],reflection_pad:36,reflectionpad2d:23,reflector:43,refriger:46,regard:[19,22,23,43],region:[15,19,21,22,23,28,43,47],regist:[1,15,21,22,29,31,36,42,43],register_backward_hook:22,register_buff:[19,22,29],register_custom_op_symbol:36,register_forward_hook:22,register_forward_pre_hook:22,register_hook:[1,22,42],register_kl:15,register_packag:43,register_paramet:[22,29],registr:22,regress:[4,22,46],regular:[1,2,22,31,36,37],regularli:4,reimplement:22,reinforc:15,reiniti:17,reinterpret:[15,42],reinterpreted_batch_ndim:15,rel:[1,5,7,15,22,27,28,31,37,43],relat:[4,5,13],relationship:1,relative_path_to_checkpoint:17,relative_to:35,relax:[15,19],relaxed_bernoulli:15,relaxed_categor:15,releas:[4,8,14,17,21,22,23,28,33,35,36,41,43],relev:[5,42],reli:[5,13,22,33],
reload:17,relu1:22,relu2:22,relu:[19,24,36],relu_:23,rem:35,remain:[1,15,21,30,43,44],remaind:[42,43],remainder_:42,remap:[19,20,43],rememb:[30,32],remot:[13,14,46],remov:[1,5,14,19,22,23,42,43],removablehandl:22,render:[4,41],renorm:[22,23,42,43],renorm_:42,rep:36,repackag:30,reparameter:[15,22],reparametr:[15,23],reparametriz:15,repeat:[15,23,36,42,43,47],repeat_interleav:[42,43],repeatedli:[28,38,43],repetit:43,repl:1,replac:[7,13,19,22,25,31,32,35,36,42,43],replic:[13,22,23],replica:[13,14,22],replication_pad:36,replicationpad2d:23,repo:[4,17,35,37,46],repo_nam:17,repo_own:17,report:[1,2,5,28,46],repositori:[5,17,29,32],repr:43,repres:[1,8,13,15,16,19,22,25,29,31,36,37,38,40,43,49],represent:[19,22,36,38,42,49],reproduc:[4,9,10,11,17,18,22,23,37,42,43],request:[5,14,25,28],requir:[1,3,5,7,13,14,15,17,19,21,22,23,25,27,28,29,30,31,32,36,37,41,42,43,44,46],require_grad:1,require_grad_:42,requires_grad:[1,15,22,23,29,38,42,43],requires_grad_:[1,22,23,38,42,43],rerun:3,res:43,resampl:47,rescal:[22,23,47],research:[4,17],reset:[8,22,36,43],reset_max_memory_alloc:8,reset_max_memory_cach:8,reshap:[22,36,38,41,42,43,47],reshape_a:[36,42],reshuffl:13,resid:[14,22,28,42,43],residu:[43,46],resili:37,resiz:[22,23,39,42,43,46,47],resize_:[1,19,39,42,43],resize_as_:[1,42],resizeas_:38,resized_crop:47,resnet101:46,resnet152:46,resnet18:[17,19,20,25,46],resnet34:46,resnet50:[17,41,46],resnet:[17,19,36,41],resnext101_32x8d:46,resnext50_32x4d:46,resolut:[22,23],resolv:[4,5,15,19,22,35,36],resourc:[13,21,44],respect:[1,14,15,22,37,39,42,43,44,47],respond:4,respons:[2,4,5,14,15,22,23,28],rest:[4,13,38],restart:[21,37,41],restor:[3,34,43],restrict:[13,19,22],restructur:4,result:[1,2,4,7,8,13,14,15,19,22,23,24,25,26,27,28,29,30,33,36,37,38,40,41,42,43,46,47],result_avg:47,resum:[37,41],retain:[1,21,32,42,43],retain_grad:[1,42],retain_graph:[1,42],rethink:46,retreiv:3,retriev:[1,13,22,23,31],return_count:[42,43],return_indic:[22,23],return_invers:[42,43],return_typ:43,reus:[1,14,25,47],reveal:38,revers:[15,19,22,25,42,43,47],revert:[5,22],review:5,reward:15,rewrit:25,rfft:[42,43],rfloor:[22,23,43],rgb:[22,41,46,47],rgba:47,rho:37,riba:22,richard:[6,15],richardson:6,right:[4,14,17,21,22,23,36,37,43,47],right_ankl:46,right_ear:46,right_elbow:46,right_ey:46,right_hip:46,right_kne:46,right_should:46,right_wrist:46,risk:4,riski:4,rmsprop:37,rng:[3,8,13,30,33,43],rnn:[19,30,36,41],robin:14,robust:21,roll:[42,43],roof:1,root:[25,38,43,44],ross:22,rot90:[42,43],rotat:[15,43,47],rough:4,roughli:[13,43],round:[14,36,42,43],round_:42,roundtrip:4,routin:43,row:[13,23,38,41,42,43,48],row_limit:1,rpn:46,rprop:37,rrelu:36,rrelu_:23,rsampl:15,rsqrt:[42,43],rsqrt_:42,rst:4,rsub:36,rtol:[1,19,42,43],rule:[1,14,15,19,22,25,26,42,43],run:[1,2,3,4,13,14,19,21,22,25,27,28,30,31,32,33,35,36,37,41,43],run_14h:41,run_fn:[1,3],runnabl:41,running_mean:[22,23],running_var:[22,23],runtim:[1,3,7,14,21,32],runtimeerror:[1,19,26,35,36,42,43],runtimewarn:15,rv0:19,rv1:19,rvert:43,rvert_p:[22,23],s_min:22,s_n:22,sacrif:46,safe:[8,19,22,31],safest:[7,38],sai:[4,19,30,36,42,44],sam:[5,6],same:[1,4,7,8,13,14,15,17,19,21,22,23,25,26,27,28,30,31,32,33,36,38,39,41,42,43,46,47,48],sampl:[13,15,22,23,24,31,33,41,42,44,47],sample_input_cpu:19,sample_input_gpu:19,sample_n:15,sample_r:41,sample_shap:15,sandwich:46,sane:43,satisfi:[1,12,15,22,37,42,43],satur:[28,47],saturation_factor:47,save:[1,3,4,14,19,20,22,25,32,36,37,41,42,43,44,48],save_for_backward:[1,29],save_imag:48,saved_tensor:[1,25,29],saved_weight:42,sax:24,sbd:45,sbdataset:44,sbu:45,sbucapt
ionedphotodataset:44,scalar:[1,19,22,23,24,36,37,38,41,42,43],scalar_valu:41,scale:[4,13,15,18,22,23,24,30,37,42,43,46,47,48],scale_each:48,scale_factor:[22,23],scale_fn:37,scale_grad_by_freq:[22,23],scale_mod:37,scale_tril:15,scatter:[8,14,22,30,36,42],scatter_:[36,42],scatter_add:[36,42],scatter_add_:[33,42],scatter_list:14,scenario:[13,28,36],scene:41,schedul:[31,37],schema:19,scheme:42,schmidtm:37,sci_mod:43,scientif:43,scipi:[23,41,44],scissor:46,scope:[4,19,22,30,36],score:[22,46],scrambl:43,scratch:[4,25],script:[2,13,14,17,22,27,31],script_method:[19,36],scripted_fn:19,scripted_modul:19,scriptmodul:[19,36],scrutini:4,search:[4,19,46],seat:5,sebastian:6,second:[1,3,7,19,22,23,30,33,34,35,38,41,43,47],section:[4,13,15,19,21,22,29,32,41,42],see:[1,2,3,4,5,7,8,9,10,11,13,15,17,19,20,21,22,23,25,28,29,30,31,32,35,36,38,40,42,43,46,47,48],seed:[8,13,30,33,43,44],seed_al:8,seek:[19,43],seem:[4,36,47],seen:[1,15,22,37,42,43],segfault:21,segment:[3,44,45,47],select:[8,11,12,13,19,21,22,23,27,28,36,42,43,44,46,47],self:[1,13,19,22,25,26,27,29,30,36,37,39,42,43],self_cpu_time_tot:1,selu:36,semant:[5,8,18,19,36,43,44,45,47],semi:[22,24],semidefinit:43,send:[4,8,13,14,21,32,35,43],sender:14,sens:[2,15,43],sensit:[22,36],sent:[8,14,21,32,43],separ:[1,7,13,14,17,19,22,23,27,37,41,43,48],seq:[1,22,42,43],seq_len:22,sequenc:[1,8,13,15,19,22,23,28,30,36,37,42,43,47],sequenti:[3,13,21,36],sequentialsampl:13,sequnc:22,seri:22,serial:[13,18,19,20,28,31,32],serializ:[19,36],seriou:[21,34],serr:44,serv:[4,14],server:[4,13],sess:36,set:[0,1,3,5,7,8,11,13,14,15,17,19,20,21,22,23,24,26,27,28,29,30,31,32,33,35,36,37,41,42,43,44,46,47],set_:[1,42],set_default_dtyp:43,set_default_tensor_typ:43,set_detect_anomali:1,set_devic:[8,22,40],set_dir:17,set_flush_denorm:43,set_grad_en:[1,43],set_image_backend:45,set_num_interop_thread:[27,43],set_num_thread:[27,43],set_printopt:43,set_rng_stat:[8,43],set_rng_state_al:8,set_sharing_strategi:21,set_start_method:32,set_stat:43,set_trac:19,set_train:36,setapiusagehandl:31,setapiusagelogg:31,setexportmoduleextrafileshook:31,setsamplingprob:31,setup:7,setuptool:7,sever:[14,19,22,23,27,28,31,37,43,47],sgd:[13,22,25,37],sgdr:37,sgn:43,sha256:20,shadow:47,shall:22,shallow:22,shamelessli:43,shape:[1,8,15,19,22,23,25,26,30,36,38,41,42,43,44,46,47,48],shape_as_tensor:36,shard:[4,13],share:[1,4,7,8,13,15,16,22,27,32,35,36,39,42,43],share_memori:32,share_memory_:[21,39,42],shared_memori:21,sharedfil:14,shazeer:22,shear:47,sheep:46,shell:7,shen:6,shi:22,shift:[22,42,43,47,48],ship:27,shippabl:4,shm_open:21,shorter:36,shortest:22,shorttensor:[40,42],should:[1,2,3,4,5,7,8,13,14,15,17,19,20,21,22,23,28,29,30,32,33,35,36,37,38,41,42,43,44,46,47],shouldn:38,shout:36,show:[0,2,4,13,14,17,27,28,37,41],showcas:[5,22,28,32],shown:[8,19,29,30],shrinkag:[22,23],shuffl:[13,41,44],shufflenet_v2_x0_5:46,shufflenet_v2_x1_0:46,shufflenet_v2_x1_5:46,shufflenet_v2_x2_0:46,shufflenetv2:46,shut:13,side:[1,7,17,19,22,23,36,37,43,47],sigma:[15,22,23,42],sigmoid:[15,24,36,42,43],sigmoid_:42,sigmoidtransform:15,sign:[4,15,36,40,42,43,46],sign_:42,signal:[21,22,23,32,43],signal_2d:22,signal_4d:22,signal_ndim:[42,43],signal_s:[42,43],signatur:[1,13,22,42,43],signific:[1,25,28,37],significantli:22,silent:[8,19,22,43],sim:[22,23,43],similar:[4,13,19,21,22,23,27,29,38,42,43,44,49],similarli:[4,19,22,30,36,43],simon:6,simpl:[17,19,22,23,29,30,31,33,36],simplecustombatch:13,simplequeu:32,simpler:29,simplest:22,simplex:15,simpli:[1,7,13,15,19,22,25,38],simplifi:[19,22,37],simultan:25,sin:[7,36,41,42,43],sin_:42,sin_
add:7,sinc:[4,8,13,14,15,19,22,23,29,30,31,35,36,37,38,43,47],sine:43,singl:[7,14,15,17,19,21,22,23,25,27,28,29,32,36,37,39,40,42,43,47],singleton:[15,22,26,42,43],singular:43,sinh:[42,43],sinh_:42,sink:46,site:4,situat:[15,21,32],size:[1,4,8,13,14,15,19,22,23,25,26,28,29,30,31,36,37,38,39,41,42,43,44,46,47,48],size_averag:[22,23],sizedim:42,sizeof:39,skateboard:46,skew:[1,2],ski:46,skip:[29,37],sky:44,slack:4,slice:[19,22,23,36,42],slide:[22,23,43],slightli:[5,13,17,46],slogdet:[42,43],slope:[22,24],slow:[32,41],slower:[2,14,23,46],small:[1,4,5,8,13,14,15,19,22,23,28,29,30,43],smaller:[13,37,42,43,47],smallest:[38,43,49],smart:29,smessmer:6,smi:[8,28,30],smnt:44,smoke:44,smooth:[22,36,37],smoother:17,smoothl1loss:23,snd_tensor:41,snedecor:15,snippet:17,snow:44,snowboard:46,snowi:44,sobol:43,soboleng:43,sobolengin:43,socket:21,sofa:46,soft:[22,23,46],softmarginloss:23,softmax:[15,36],softmaxtransform:15,softplu:36,softshrinkag:22,softwar:37,solid:47,solut:[4,24,32,43],solv:[4,35,42,43],solver:43,some:[1,3,4,5,8,11,14,15,17,19,21,22,23,25,28,29,30,31,32,33,34,35,36,37,38,41,42,43,44,46,47],some_dict:19,someon:4,someth:[4,19,21,35,43],sometim:[4,19,21,22,23,30,32,43],somewher:31,sophist:37,sort:[1,22,30,42,43],sort_bi:1,sorted_indic:22,soumith:[5,6],sound:41,sourc:[0,1,2,3,7,8,13,14,15,17,19,21,22,23,24,30,31,33,36,37,38,39,41,42,43,44,45,46,47,48],space:[13,15,19,22,23,43,47],spadd:38,span:[8,22,42],spars:[1,18,24,37,40,42,43],sparse_:24,sparse_coo:[38,40,42,43],sparse_coo_tensor:[38,42,43],sparse_dim:[38,42,43],sparse_grad:43,sparse_mask:[38,42],sparseadam:[22,37],sparseaddmmbackward:38,sparsedim:42,sparsefloattensor:38,sparsetensor:[1,38,42,43],sparsiti:24,spatia:23,spatial:[22,23],spatio:22,spawn:[13,22,32,35],spawncontext:21,speak:[38,43],special:[4,22,29,31,41,43],specif:[1,3,4,5,7,8,14,15,17,19,20,22,28,32,33,34,36,37,41,42,43],specifi:[1,7,8,13,14,15,17,19,20,22,23,28,29,31,35,36,37,38,39,41,42,43,44,45,47,48],specifii:36,spectral:22,speed:[4,22,27,28,30,33,43],spend:[2,4],spent:[1,2,14],sphinx:4,split:[4,13,19,22,23,36,42,43,44],split_siz:[42,43],split_size_or_sect:43,spmm:38,sponsorship:5,spoon:46,sport:46,spotri:43,spread:[8,28],sqrt:[22,24,36,38,42,43],sqrt_:42,squar:[22,23,37,38,43,47],squeez:[29,36,38,42,43],squeeze_:42,squeezenet1_0:46,squeezenet1_1:46,src:[8,14,22,42,43],src_key_padding_mask:22,src_mask:22,src_tensor:14,src_vocab:22,srinivasan:6,ssa:19,sse3:43,ssnl:6,sspaddmm:38,sspmm:38,stabil:[22,37,43],stabl:[15,22,35,36,43],stack:[8,13,15,22,28,36,43,47],stacktransform:15,stage:4,stagnat:37,stai:[4,22,32],stand:19,standalon:19,standard:[4,15,19,22,24,27,32,36,42,43,44,47],star:22,start:[1,2,5,8,13,14,21,22,23,26,28,30,32,35,36,37,42,43],start_dim:[42,43],startpoint:47,startup:[2,27],stash:[1,3,29],stat:22,state:[1,3,8,13,15,19,22,28,32,35,37,43],state_dict:[17,20,22,32,34,36,37],statement:[25,29,32,36],staticmethod:[1,29],statist:[8,15,22,30],statu:[21,43],std:[7,24,31,35,42,43,46,47],std_mean:43,stddev:15,stderr:[20,46],stdin:1,stdout:37,step:[2,5,7,13,14,15,19,22,23,28,30,32,33,35,41,42,43,44],step_between_clip:44,step_siz:37,step_size_down:37,step_size_up:37,steplr:37,stft:[42,43],stick:15,stickbreakingtransform:15,still:[1,14,15,19,21,22,28,30,35,37,43],stirl:[22,23],stl10:45,stl10_binari:44,stochast:[13,15,22,37],stop:[8,15,22,37,43,46],storag:[1,8,18,19,20,21,22,25,28,32,40,42,43],storage_offset:[42,43],storage_typ:42,storageshar:35,store:[1,3,7,14,17,19,22,30,31,38,41,42,43,44],store_tru:28,str:[1,7,14,19,21,22,23,37,39,41,42,44,47],straight:23,strategi:[4,13,14,19,
22],stream:[13,44],strict:[19,22],strictli:[5,13,22,25],stride:[1,22,23,36,40,41,42,43],strike:4,string:[0,1,7,8,14,17,19,20,22,23,31,36,39,40,41,42,43,44,45],stringio:[19,43],strip:[23,36],strip_doc_str:36,strive:4,strong:5,strong_wolf:37,strongli:[5,17,22,27],struct:31,structur:[4,5,13,19,22,28,29,32,34,35,36,41,42,43],student:15,studio:35,style:[19,43],styliz:22,sub:[19,22,36,38,42,43],sub_:[38,42],subclass:[1,7,13,15,19,22,29,42,44],subdir:44,subfold:7,subgradi:37,subgraph:22,subject:43,submatrix:15,submit:8,submodul:[19,22,36],subpackag:46,subprocess:[13,30,32],subsequ:[4,7,19,22],subset:[13,14,19,22,36,46],subsetrandomsampl:13,subspac:[22,42,43],substanti:5,substitut:40,subsystem:4,subtl:[4,22],subtleti:[13,22,30],subtli:37,subtract:[23,42,47],subtyp:19,succe:[14,35],succeed:43,success:[5,15,43],successfulli:[21,22,43],succinct:17,suffici:[7,15,17,36,43],suffix:[41,42],sugar:19,suggest:[5,17,19,22,30],suhan:6,suit:[19,36],suitabl:[13,15,37],suitcas:46,suitibl:41,sum:[1,8,13,14,15,23,28,29,36,37,38,42,43],sum_:[22,43],sum_i:22,sum_j:[22,23,43],sum_pair:19,sum_to_s:42,summar:[2,43],summari:[1,41,43,46],summarywrit:41,summat:43,sunset:[5,6],suo:6,superresolut:36,supervis:22,suppli:[3,4,7],support:[1,4,5,7,8,13,14,15,17,19,21,22,23,25,26,27,32,35,37,38,40,41,42,43,44,45],suppos:[13,38,43,47],sure:[1,4,13,14,19,22,25,30,35,36,37,41,43],surfboard:46,surg:43,surpass:24,surpris:17,surrog:15,surround:19,sutskev:37,svd:[42,43,47],svhn:45,svi:15,swap:[22,23,42,43],symbol:[35,36],symbolic_fn:36,symbolic_foo_forward:36,symbolic_help:36,symbolic_nam:36,symbolic_opset10:36,symbolic_opset9:36,symbolic_opset:36,symeig:[42,43],symmetr:[43,47],symmetri:43,sync:22,sync_bn_modul:22,sync_bn_network:22,synchron:[2,8,22,27,28,32],syntact:5,syntax:19,sys:17,system:[4,7,8,19,22,25,28,31,35,43],t4d:23,t_max:37,tabl:[1,14,19,22,23,46],tag:[1,4,14,17,31,41,43],tag_nam:17,tag_scalar_dict:41,taiwan:41,take:[1,2,4,5,7,8,13,15,17,19,21,22,23,27,29,30,33,35,36,40,41,42,43,44],taken:[15,22,23,28,30,31,36,43,44],talk:31,tall:42,tan:[36,41,42,43],tan_:42,tangent:43,tanh:[24,36,42,43],tanh_:42,tanx:41,tape:4,tar:44,tarbal:44,target:[22,23,32,37,41,42,44,46,47],target_length:[22,23],target_n:22,target_transform:44,target_typ:44,task:[1,4,22,27,35,46,47],tau:[23,43],tbb:27,tdr:35,team:[4,5],technic:[4,5,30],techniqu:22,teddi:46,tell:[1,4,19,42,43],temperatur:[15,23],tempor:[22,23],temporari:[7,22,30],temporarili:36,ten:[19,41],ten_crop:47,tencrop:47,tend:4,teng:6,tenni:46,tensor1:[42,43],tensor2:[42,43],tensor:[3,4,7,8,13,14,15,16,18,19,22,23,24,25,26,27,28,29,30,32,33,35,36,37,38,39,41,44,45,46,48],tensor_a:43,tensor_b:43,tensor_list:14,tensorboard:18,tensordataset:13,tensordot:43,tensorflow:[15,41],term:[5,15,22,23,30,37,42,43],termin:[21,37],terminolog:22,test10k:44,test50k:44,test:[7,19,21,28,29,41,43,44,46,47],text:[4,15,22,23,24,41,42,43,47],text_str:41,texttt:[42,43],tgt:[13,22],tgt_key_padding_mask:22,tgt_mask:22,tgt_vocab:22,thalloc:35,than:[1,2,3,5,7,8,13,14,17,19,22,23,24,25,26,28,29,30,32,33,36,37,38,41,42,43,44,45,46,47,48],thank:[15,29],thc:35,thc_state:35,thcstate:35,thcudacheck:35,thcudatensor:35,thcudatensor_cadd:35,thcudatensor_fil:35,thcudatensor_issamesizea:35,thcudatensor_resizea:35,the_model:34,thei:[1,3,4,5,8,13,14,15,19,21,22,23,28,29,32,35,36,37,38,42,43,44,46,47],them:[1,3,4,13,14,17,19,21,22,23,25,26,29,30,31,35,37,38,41,42,43,44],themodelclass:34,themselv:[1,43],therebi:13,therefor:[1,3,13,14,15,19,22,23,30,31,37,38,42,43,47],theta:[15,23],thi:[1,2,3,4,5,7,8,9,10,11,13,14,15,17,19,21,22,23,24,25,
27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49],thin:43,thing:[1,4,23,25,30,32,38],think:4,third:[15,22,43],thoma:6,those:[1,2,8,13,19,22,23,28,37,43],though:[14,19,32],thrash:14,thread:[1,8,13,14,18,19,22,31,32,36,43],three:[14,19,22,36,37,41,44],threej:41,threshold:[36,37,41,43,46],threshold_:23,threshold_mod:37,through:[1,3,4,5,13,15,17,19,21,22,23,30,35,42,43,49],throughout:22,thrown:[42,43],thtensor:42,thtensorrandom:43,thu:[1,13,14,15,19,22,23,30,31,36,42,43],thumb:14,tie:[15,46],tile:42,time:[1,2,4,7,8,13,14,15,19,21,22,23,24,25,27,30,31,32,35,36,37,38,41,42,43,46,47],timedelta:14,timelin:[1,2],timeout:[13,14,21],tini:[42,49],tip:4,tl_flip:47,tmp:[1,7],to_dens:38,to_dlpack:16,to_grayscal:47,to_mkldnn:42,to_pil_imag:47,to_spars:[38,42],to_tensor:47,toaster:46,todens:38,togeth:[13,14,15,22,30,31,41,43,47],toilet:46,token:17,tol:43,toler:[1,19,37,43],tolerance_chang:37,tolerance_grad:37,tolist:[39,42],too:[4,22,23,30,32,35,38],tool:[1,2,5,17,19,35],toothbrush:46,top:[1,13,15,21,22,23,29,43,44,46,47],topic:[5,31],topilimag:47,topk:[36,42,43],topolog:1,torch:[5,11,12,18,19,25,26,27,28,30,31,32,33,34,35,44,45,46],torch_14808_1591070686:35,torch_extens:7,torch_extensions_dir:7,torch_hom:[17,20],torch_model_zoo:46,torch_shm_manag:21,torchscript:[18,36],torchvis:[17,18,19,25,36,41],toronto:22,total:[1,2,4,13,17,22,23,37,43],total_averag:1,total_count:15,total_length:[22,30],total_loss:30,totensor:[41,44,47],touch:[4,36],toward:[5,36,43],tr_flip:47,trace:[1,4,13,25,28,42,43],trace_modul:19,trace_nam:1,traceabl:19,traceback:[1,21],traced_bar:19,traced_cpu:19,traced_fn:19,traced_foo:19,traced_gpu:19,tracer:36,tracerwarn:19,track:[1,3,8,21,22,28,29,30,31,42],track_running_stat:22,tracker:[4,5],trade:[3,22],traffic:46,trail:[22,24,26,29],train2017:46,train:[8,13,14,17,19,22,23,24,25,30,36,37,41,44,46,47],train_batch:37,train_extra:44,train_load:28,train_nov:44,trainabl:37,trainload:41,trainset:41,trainval:44,tranform:15,transb:36,transfer:[13,14,21,28],transform:[13,18,23,25,41,43,44,45,46],transform_input:46,transform_to:15,transformation_matrix:47,transformed_distribut:15,transformer_decod:22,transformer_encod:22,transformer_model:22,transit:19,translat:47,transpos:[22,23,25,36,38,42,43],transpose_:[1,38,42],transposed_data:13,trapezoid:43,trapz:43,travers:[22,29],treat:[15,19,22,23,37,40,42,43],tree:[4,19,22,44],tri:[4,19,21,22,42],triag:5,trial:15,triangl:41,triangular2:37,triangular:[15,23,37,43],triangular_solv:[42,43],trick:[15,22,23,31,46],tricki:25,trigger:[1,4,5,31,42,43],tril:[42,43],tril_:42,tril_indic:43,trilinear:[22,23],trim:43,tripl:23,triplet:22,triplet_loss:22,tripletmarginloss:23,triu:[42,43],triu_:42,triu_indic:43,trivial:43,trou:22,troubleshoot:4,truck:46,truli:36,trunc:[42,43],trunc_:42,truncat:[30,43],truth:[41,46],tseq:15,tune:[14,37],tup:19,tupl:[1,3,8,13,21,22,23,29,36,37,38,41,42,43,44,47,48],tuple_or_list:19,turn:[7,13,19,22,36],tutori:[5,29,31,36],tvmonitor:46,twice:[30,46],two:[1,2,7,8,13,14,15,17,19,22,23,25,26,27,28,29,33,34,35,37,38,41,42,43,44,46],twse:41,txhxwxc:44,txt:19,type:[1,7,8,14,15,17,18,22,23,27,28,31,35,36,38,39,40,41,42,43,44,46,47],type_a:[36,42],type_p:15,type_q:15,typic:[7,13,15,19,22,27,49],typo:4,ubc:37,ucf101:45,ucf:44,uint8:[40,41,42,43,47,49],uint8_t:42,uint8tensor:46,ultim:[5,7],umbrella:46,unabl:[4,37],unbalanc:22,unbatch:43,unbias:[42,43],unbind:[36,42,43],unchang:[22,42,43,47],uncoalesc:[38,43],uncondition:17,unconstrain:15,undefin:[14,19,28,42,43],under:[1,2,13,21,22,23,25,28,32,35,41,43],underli:[8,15,19,23,30,42,43],un
derscor:[17,38,42],understand:[4,5,22,24,25,41],understood:43,undertak:4,underwai:1,undesir:[11,22,23],undetermin:33,unequ:22,unexpect:[14,19,22],unexpected_kei:22,unexpectedli:42,unfold:[19,36,42],unfortun:[1,3,5,22],unicodedecodeerror:43,uniform:[22,24,42,43],uniform_:[24,29,42,43],uniformli:[15,43,47],uniniti:[42,43],uniqu:[14,19,20,42,43],unique_consecut:[42,43],unit:[22,23,43],unit_interv:15,unitari:43,unitriangular:[42,43],univari:15,univers:19,unix:[13,21],unlabel:44,unless:[1,2,4,13,14,22,25,28,42,43],unlik:[4,19,21,22,32,42],unmask:22,unnecessari:28,unnorm:[22,23],unnot:21,unoccupi:8,unord:22,unpack:[22,29,30,43],unpack_data:43,unpack_pivot:43,unpickl:[13,43],unpool:22,unpooled_output:22,unreduc:22,unrel:4,unresolv:35,unrol:[19,36],unseg:22,unsign:[40,42],unsort:22,unsorted_indic:22,unspecifi:[14,42,43],unsqueez:[22,29,36,41,42,43],unsqueeze_:42,unstabl:[15,23,43],unsuccess:14,unsupport:19,until:[4,8,14,21,23,25,28,30,41],untouch:13,untrack:19,untrain:36,unus:[8,22,28],unused_argument1:22,unused_argument2:22,unusu:4,upcal:19,upcom:14,updat:[1,5,17,19,22,32,35,36,37,41,42],upgrad:37,upon:[13,21],upper:[15,22,23,24,37,42,43,47],upper_bound:15,uppercas:14,ups:4,upsample_nearest1d:36,upsample_nearest2d:36,upsample_nearest3d:36,upsample_trilinear:23,upscal:22,upscale_factor:[22,23],upstream:35,url:[1,14,17,20],usa:41,usag:[1,2,4,8,13,15,25,30,36,41,42,43],use:[1,3,4,5,7,8,13,15,17,19,21,22,23,24,25,27,28,29,30,31,32,33,35,36,41,42,43,44,46],use_cuda:1,use_gpu:19,use_input_stat:23,use_mkldnn:27,use_openmp:27,use_tbb:27,used:[1,2,4,7,8,12,13,14,15,17,19,20,22,23,24,27,28,29,31,32,34,35,36,37,38,40,41,42,43,44,45,46,47,48],useful:[1,4,13,15,17,19,22,23,25,27,29,31,37,43,47],user:[1,3,8,13,14,15,17,21,22,28,31,32,36,41,42,43,47],userwarn:[26,36],uses:[1,2,8,13,14,17,19,22,23,27,28,29,30,31,35,37,43,44,45],using:[1,3,4,5,7,9,10,11,13,14,15,17,18,19,21,22,23,24,25,27,28,29,30,31,32,33,35,36,37,41,42,43,44,46,47],usp:45,usual:[1,4,7,13,19,22,27,30,31,36,41,42,43],uszkoreit:22,util:[4,8,18,26,27,28,29,30,31,36,37,44,45,46],v100:[12,22,46],v_1:22,v_2:22,val2017:46,val:[24,42,44],val_loss:37,valid:[1,14,15,19,22,36,37,43,44],valid_fil:44,validate_arg:15,valu:[1,3,4,5,7,13,14,15,17,20,21,22,23,24,25,27,28,29,30,33,36,37,38,41,42,43,44,46,47,48],valueerror:22,var1:37,var2:37,var_mean:43,vari:[22,37],variabl:[3,7,8,15,17,20,22,27,28,30,33,35,36,37,41,42,43,46],variabletyp:36,varianc:[15,22,24,33,37,43],variant:[31,37,43],variat:15,variou:[3,5,7,13,21,32,34,37],vase:46,vaswani:22,vc2017:35,vdim:22,vec1:[42,43],vec2:[42,43],vec:[22,42,43],vector:[1,15,22,23,41,42,43,44,47],vehicl:5,vein:19,veloc:37,verbos:[7,36,37,41],veri:[1,2,4,19,21,22,25,29,30,32,35,36,37,46],verifi:[7,19,20,29,36],verify_ninja_avail:7,versa:[22,39,42,43],version:[3,15,17,19,22,23,25,26,28,29,31,35,36,37,42,43,44,47],versu:[4,22],vert:[22,23],vertex:41,vertic:[41,47],vertical_flip:47,vertices_tensor:41,vflip:47,vgg11:46,vgg11_bn:46,vgg13:46,vgg13_bn:46,vgg16:46,vgg16_bn:46,vgg19:46,vgg19_bn:46,vgg:36,via:[1,4,7,8,13,14,15,19,21,22,24,30,32,36,37,40,43],vice:[22,39,42,43],vid_tensor:41,video:[22,41,44],videoclip:44,viehmann:6,view:[1,4,5,13,19,21,22,23,26,36,40,42,43,44,47],view_a:42,violat:5,virtual:36,vishwak:6,vishwakftw:6,visibl:[8,14,22,46],vision:[5,17,45,46],visual:[1,22,35,41],vitali:6,vitalyfedyunin:6,voc2012:44,voc:[45,46],vocdetect:44,vocsegment:44,volumetr:[22,23],vs2017:35,vs2017_runtim:35,vw_i:22,vychisl:43,w_hf:22,w_hg:22,w_hi:22,w_hn:22,w_ho:22,w_hr:22,w_hz:22,w_if:22,w_ig:22,w_ii:22,w_in:22,w_io:22,w_ir:22,w_iz:22,
w_j:22,w_n:22,w_y:27,w_z:27,wai:[1,3,4,5,7,13,14,15,19,21,22,23,29,30,32,33,34,35,36,37,38,42,43,44,46],wait:[1,8,14,21,22,27,37],wait_ev:8,wait_stream:[8,28],walltim:41,wang:6,want:[4,8,13,14,17,19,22,23,25,28,29,36,37,38,42,43],warm:37,warmup:1,warn:[26,36,43],wasn:43,wast:4,weaker:15,weight:[13,17,19,23,24,25,29,30,36,37,41,42,43,46],weight_decai:37,weight_g:22,weight_hh:22,weight_hh_l:22,weight_ih:22,weight_ih_l:22,weight_u:22,weight_v:22,weighted_kernel_sum:19,weightedrandomsampl:13,weird:[22,46],welcom:4,well:[1,4,7,13,14,19,22,23,25,32,33,36,38,41,43,46],were:[1,14,19,22,29,36,38,42,43],what:[1,3,4,5,15,17,19,22,25,29,36,37,38,44],whatev:[42,43],wheel:35,when:[1,2,4,5,7,8,9,10,11,13,14,15,17,19,21,22,23,25,26,28,29,30,31,32,33,34,35,36,37,38,41,42,43,44,46,47],whenev:[5,21,22,23],where:[1,2,4,5,7,13,14,15,19,20,21,22,23,24,25,26,28,33,36,37,38,40,41,42,43,44,46,48],wherev:5,whether:[1,4,5,7,13,14,15,17,20,22,23,28,29,36,37,38,39,41,42,43,44],which:[1,2,4,5,7,8,13,15,17,19,20,21,22,23,25,26,27,28,30,31,33,35,36,37,38,39,40,41,42,43,44,46,47,49],whilst:[15,28],white:47,whiten:47,who:4,whole:[13,14,22,32],whose:[13,15,22,25,36,41,43,44],why:[4,36],wide:27,wide_resnet101_2:46,wide_resnet50_2:46,width:[15,22,23,36,43,47],wikipedia:23,willing:5,win:43,win_length:[42,43],window:[13,18,22,23,42,43],window_length:43,wine:46,wip:4,wise:[14,15,22,23,27,43],wish:19,wit:19,with_cuda:[7,35],with_replac:43,within:[1,3,5,8,13,14,15,19,22,27,28,31,36,41,43],without:[1,3,4,5,8,13,15,17,19,21,22,23,26,28,32,36,41,42,43,46,47,49],won:[3,17,22,23,25,29,36,43],woodburi:15,word:[1,14,19,22,23,30],word_language_model:36,work:[1,3,4,5,7,8,14,15,17,19,21,22,23,25,27,28,29,32,35,38,42,43],worker:[4,13,14,22,44],worker_id:13,worker_info:13,worker_init_fn:[13,30],workflow:17,workground:35,workload:[13,14,31],workspac:[7,36],world:[14,22],world_siz:[14,22],worth:17,would:[1,3,5,13,14,15,19,22,26,27,28,36,38,42,43],wrap:[1,13,19,22,35,37,42],wrapper:[8,14,19,21,22,29],write:[4,5,19,23,25,28,30,36,37,38,41,42,43],writer:41,written:[1,19,22,36,37,39,41,43],wrong:[32,35,37],wrote:4,www:[22,37,41,44],x86:43,x86_x64:35,x_0:43,x_1:[22,23,43],x_2:[22,23,43],x_3:43,x_clone:21,x_cpu:28,x_cpu_long:28,x_gpu:28,x_i:[22,23,43],x_j:[22,23],x_n:22,x_t:22,xavier_normal_:24,xavier_uniform_:24,xcosx:41,xdg_cache_hom:[17,20],xing:43,xml:44,xsinx:41,xxx:44,xxy:44,xxz:44,y_cpu:28,y_cpu_long:28,y_gpu:28,y_hard:23,y_i:[22,43],y_n:22,y_soft:23,yang:[5,6],ycbcr:47,year:44,yes:4,yet:[8,43],yf225:6,yield:[13,22,43],yinghai:6,you:[1,2,3,4,5,7,8,11,13,14,15,17,19,21,22,23,25,26,28,29,30,31,32,33,35,36,37,38,40,41,42,43,44,46,47],your:[1,2,4,7,8,13,14,15,19,21,22,25,26,28,29,30,31,33,35,36,37,38,41,42,43,46,47],yourself:[32,35],z_t:22,zach:6,zdevito:6,zebra:46,zero:[1,8,15,19,21,22,23,24,28,35,36,38,41,42,43,47],zero_:[1,23,38,42],zero_grad:[22,30,32,37],zero_infin:[22,23],zero_point:42,zeros_:24,zeros_lik:[28,36,43],zhang:6,zhong:47,zip:[13,31],zipf:22,zou3519:6,zou:6},titles:["torch.__config__","Automatic differentiation package - torch.autograd","torch.utils.bottleneck","torch.utils.checkpoint","PyTorch Contribution Guide","PyTorch Governance","PyTorch Governance | Persons of Interest","torch.utils.cpp_extension","torch.cuda","<no title>","<no title>","<no title>","<no title>","torch.utils.data","Distributed communication package - torch.distributed","Probability distributions - torch.distributions","torch.utils.dlpack","torch.hub","PyTorch documentation","TorchScript","torch.utils.model_zoo","Multiprocessing package - 
torch.multiprocessing","torch.nn","torch.nn.functional","torch.nn.init","Autograd mechanics","Broadcasting semantics","CPU threading and TorchScript inference","CUDA semantics","Extending PyTorch","Frequently Asked Questions","Features for large-scale deployments","Multiprocessing best practices","Reproducibility","Serialization semantics","Windows FAQ","torch.onnx","torch.optim","torch.sparse","torch.Storage","Tensor Attributes","torch.utils.tensorboard","torch.Tensor","torch","torchvision.datasets","torchvision","torchvision.models","torchvision.transforms","torchvision.utils","Type Info"],titleterms:{"case":[4,19],"default":[13,19],"function":[1,5,14,15,19,22,23,36,38,47],"import":[17,35],"new":4,"return":30,Adding:[4,29,36],One:35,Ops:43,The:4,Use:[19,28],__config__:0,about:4,access:19,activ:[22,23],adaptive_avg_pool1d:23,adaptive_avg_pool2d:23,adaptive_avg_pool3d:23,adaptive_max_pool1d:23,adaptive_max_pool2d:23,adaptive_max_pool3d:23,adaptiveavgpool1d:22,adaptiveavgpool2d:22,adaptiveavgpool3d:22,adaptivelogsoftmaxwithloss:22,adaptivemaxpool1d:22,adaptivemaxpool2d:22,adaptivemaxpool3d:22,adjust:37,affine_grid:23,agnost:28,alexnet:[36,46],algorithm:37,alpha_dropout:23,alphadropout:22,amd:6,anomali:1,api:[27,31],approach:34,arithmet:19,ask:[19,30,36],asynchron:[14,28,32],aten:36,attach:31,attribut:[19,40],autograd:[1,6,25,29],automat:[1,13,19],avg_pool1d:23,avg_pool2d:23,avg_pool3d:23,avgpool1d:22,avgpool2d:22,avgpool3d:22,avoid:[4,32],backend:14,backward:[25,26],basic:14,batch:13,batch_norm:23,batchnorm1d:22,batchnorm2d:22,batchnorm3d:22,bceloss:22,bcewithlogitsloss:22,behavior:13,bernoulli:15,best:[28,32,34],beta:15,bilinear:[22,23],binary_cross_entropi:23,binary_cross_entropy_with_logit:23,binomi:15,bla:43,bottleneck:2,broadcast:26,broken:35,buffer:[28,32],bug:4,build:[4,6,27,31,35],builtin:19,c10:6,cach:[17,28],call:19,caption:44,cast:19,categor:15,cauchi:15,celu:[22,23],cffi:35,chang:5,channel:35,characterist:46,check:[1,19,25],checkpoint:3,chi2:15,choos:14,cifar:44,cityscap:44,classif:46,claus:35,clip_grad_norm_:22,clip_grad_value_:22,closur:37,cnn:46,coco:44,code:[4,19,28],codebas:4,collate_fn:13,collect:[8,14],come:14,common:[4,14,31],commun:[8,14],comparison:[19,43],compat:26,compon:35,comput:[1,43],consider:31,constant:19,constantpad1d:22,constantpad2d:22,constantpad3d:22,constraint:15,construct:[19,37],contain:22,contribut:4,controversi:5,conv1d:[22,23],conv2d:[22,23],conv3d:[22,23],conv_transpose1d:23,conv_transpose2d:23,conv_transpose3d:23,convers:47,convolut:[22,23,46],convtranspose1d:22,convtranspose2d:22,convtranspose3d:22,core:5,correct:[1,25],cosine_embedding_loss:23,cosine_similar:23,cosineembeddingloss:22,cosinesimilar:22,cpp:35,cpp_extens:7,cpu:[6,27],creat:19,creation:43,cross_entropi:23,crossentropyloss:22,ctc_loss:23,ctcloss:22,cuda:[6,8,21,28,30,32,35],cudnn:33,cufft:28,custom:[29,36],data:[13,30],data_parallel:23,dataload:6,dataparallel:[22,23,28],dataset:[13,44],datasetfold:44,deadlock:32,debug:19,decis:5,deeplabv3:46,defin:19,densenet:46,deploy:31,deprec:1,deriv:15,descriptor:21,detect:[1,44,46],develop:[4,5],devic:[28,40],dict:19,differenti:1,dirichlet:15,disabl:[1,13,19,43],discuss:4,distanc:[22,23],distribut:[6,14,15,22,23],distributeddataparallel:22,diverg:15,dlpack:16,doc:4,document:[4,18],doesn:30,down:35,download:17,driver:35,dropout2d:[22,23],dropout3d:[22,23],dropout:[22,23],dtype:40,edg:19,elu:[22,23],embed:[22,23],embedding_bag:23,embeddingbag:22,emnist:44,encod:25,end:36,engin:6,entrypoint:17,environ:[14,31],error:[30,35],event:8,exampl:36,exclud:25
,execut:28,exponenti:15,exponentialfamili:15,express:19,extend:29,extens:[8,29,31,35],fakedata:44,faq:[5,35],fashion:44,faster:46,featur:[4,31],fight:32,file:[14,21],file_descriptor:21,file_system:21,finfo:49,fishersnedecor:15,fix:4,flatten:22,fleet:31,flickr:44,fold:[22,23],found:35,fractionalmaxpool2d:22,freed:30,frequent:[19,30,36],from:[17,25,35,36],fulli:46,gamma:15,gelu:23,gener:[6,8,26,43,47],geometr:15,get:4,glu:23,googlenet:46,govern:[5,6],gpu:[14,22,23,30],gradient:[1,43],graph:19,grid_sampl:23,group:14,groupnorm:22,gru:22,grucel:22,guid:[4,5],gumbel:15,gumbel_softmax:23,halfcauchi:15,halfnorm:15,hardshrink:[22,23],hardtanh:[22,23],hinge_embedding_loss:23,hingeembeddingloss:22,hip:6,histori:25,hmdb51:44,hogwild:32,how:[17,25,37],hub:17,ident:[22,30],iinfo:49,imag:47,imagefold:44,imagenet:44,implement:[4,17],improv:4,incept:46,includ:35,independ:15,index:43,indic:18,infer:27,info:49,init:24,initi:14,inspect:19,instal:35,instanc:46,instance_norm:23,instancenorm1d:22,instancenorm2d:22,instancenorm3d:22,instead:28,interest:6,interfac:14,interpol:23,interpret:19,ipc:35,isn:30,issu:4,iter:13,jit:[6,19],join:43,kei:[5,35],keypoint:46,kinet:44,kl_div:23,kldivloss:22,kmnist:44,known:17,l1_loss:23,l1loss:22,languag:19,lapack:43,laplac:15,larg:31,launch:14,layer:22,layer_norm:23,layernorm:22,layout:40,leaky_relu:23,leakyrelu:22,learn:37,level:6,limit:[17,36],linear:[22,23],list:19,liter:19,load:[13,17],loader:30,local:[1,43],local_response_norm:23,localresponsenorm:22,log:31,log_softmax:23,logic:[17,19],logitrelaxedbernoulli:15,lognorm:15,logsigmoid:[22,23],logsoftmax:22,lookup:19,loss:[22,23],lowrankmultivariatenorm:15,lp_pool1d:23,lp_pool2d:23,lppool1d:22,lppool2d:22,lstm:22,lstmcell:22,lsun:44,maintain:[5,6],make:[4,5],manag:[8,21,28],map:13,margin_ranking_loss:23,marginrankingloss:22,mask:46,math:43,max_pool1d:23,max_pool2d:23,max_pool3d:23,max_unpool1d:23,max_unpool2d:23,max_unpool3d:23,maxpool1d:22,maxpool2d:22,maxpool3d:22,maxunpool1d:22,maxunpool2d:22,maxunpool3d:22,mechan:25,memori:[8,13,28,30],metadata:31,method:19,mistak:4,mix:19,mkldnn:6,mnasnet:46,mnist:44,mobilenet:46,model:[17,30,31,34,46],model_zoo:20,moder:5,modul:[6,19,22,29],moduledict:22,modulelist:22,more:4,mse_loss:23,mseloss:22,multi:[13,14,22,23],multi_margin_loss:23,multiheadattent:22,multilabel_margin_loss:23,multilabel_soft_margin_loss:23,multilabelmarginloss:22,multilabelsoftmarginloss:22,multimarginloss:22,multinomi:15,multiprocess:[6,21,28,32,35],multivariatenorm:15,mutat:43,nccl:14,negativebinomi:15,network:[14,30,46],nll_loss:23,nllloss:22,non:[13,22,23,36],nonlinear:22,normal:[15,22,23],notic:17,number:[8,30],numer:1,numpi:33,nvidia:8,nvtx:8,object:46,one_hot:23,onehotcategor:15,onlin:4,onnx:[6,36],open:4,oper:[1,14,19,25,31,35,36,43],optim:37,option:[19,27,35,37],order:13,other:[14,22,43],out:30,overview:4,pack_padded_sequ:22,pack_sequ:22,packag:[1,14,21,35],packedsequ:22,pad:[22,23],pad_packed_sequ:22,pad_sequ:22,pairwise_dist:23,pairwisedist:22,parallel:[30,43],paramet:[19,22,37],parameterdict:22,parameterlist:22,parameters_to_vector:22,pareto:15,particip:4,pass:32,pathwis:15,pdist:23,peopl:5,per:37,perform:6,person:[6,46],philosophi:5,phototour:44,pil:47,pin:[13,28],pipe:35,pixel_shuffl:23,pixelshuffl:22,place:[1,25,26,43],plan:28,platform:13,point:[14,31],pointwis:43,poisson:15,poisson_nll_loss:23,poissonnllloss:22,pool:[22,23],ppc:6,practic:[28,32,34],prelu:[22,23],probabl:15,process:[4,5,13],profil:[1,31],project:5,promot:4,properli:30,propos:4,protect:35,publish:17,pull:4,python:[4,19,35],pytorch:[4,5
,6,14,18,29,33,36],qmnist:44,quasi:43,question:[19,30,36],queue:32,random:[8,13,30,43],rate:37,readabl:4,recommend:34,recurr:[22,30],reduct:43,refer:19,refin:19,reflectionpad1d:22,reflectionpad2d:22,registri:15,relaxedbernoulli:15,relaxedonehotcategor:15,relu6:[22,23],relu:[22,23],remove_spectral_norm:22,remove_weight_norm:22,replicationpad1d:22,replicationpad2d:22,replicationpad3d:22,report:[4,30],reproduc:33,request:4,requires_grad:25,resnet:46,resnext:46,resolut:19,reus:32,review:4,rng:6,rnn:22,rnncell:22,robust:4,rocm:6,rrelu:[22,23],run:17,runtim:[27,30,46],sampl:43,sampler:13,save:[17,31,34],sbd:44,sbu:44,scale:31,score:15,script:[19,35,36],segment:46,selu:[22,23],semant:[26,28,34,46],sequenti:22,serial:[34,43],share:[14,21],shufflenet:46,shut:35,sigmoid:[22,23],simd:6,singl:13,slice:43,smooth_l1_loss:23,smoothl1loss:22,soft_margin_loss:23,softmarginloss:22,softmax2d:22,softmax:[22,23],softmin:[22,23],softplu:[22,23],softshrink:[22,23],softsign:[22,23],sourc:[4,35],spars:[22,23,38],spawn:[14,21],specif:13,spectral:43,spectral_norm:22,speed:35,squeezenet:46,start:4,statement:19,step:37,stl10:44,storag:39,strategi:21,stream:[8,28],studentt:15,style:13,subgraph:25,submit:4,subprocess:21,subscript:19,sum:22,support:36,svhn:44,syncbatchnorm:22,synchron:14,system:[14,21],tabl:18,take:37,tanh:[22,23],tanhshrink:[22,23],tcp:14,tenet:5,tensor:[1,21,40,42,43,47],tensorboard:41,ternari:19,test:4,thread:27,threshold:[22,23],through:32,tip:32,tool:8,torch:[0,1,2,3,6,7,8,13,14,15,16,17,20,21,22,23,24,29,36,37,38,39,40,41,42,43,47,49],torchscript:[19,27,31],torchvis:[44,45,46,47,48],trace:[19,36],tracer:19,train:32,transform:[15,22,47],transformeddistribut:15,transformerdecod:22,transformerdecoderlay:22,transformerencod:22,transformerencoderlay:22,triag:4,triplet_margin_loss:23,tripletmarginloss:22,tupl:19,tutori:4,type:[13,19,49],ucf101:44,uncontroversi:5,unfold:[22,23],uniform:15,upsampl:[22,23],upsample_bilinear:23,upsample_nearest:23,upsamplingbilinear2d:22,upsamplingnearest2d:22,usag:[31,35],use:[14,37],user:19,usp:44,util:[2,3,7,13,14,16,20,22,41,43,48],valu:19,variabl:[1,14,19],vector_to_paramet:22,vgg:46,vision:[22,23],voc:44,warn:19,weibul:15,weight:22,weight_norm:22,where:17,which:14,why:35,wide:[31,46],win:35,window:[6,35],without:35,work:[13,30],worker:30,write:29,xla:6,zeropad2d:22}}) \ No newline at end of file 
+Search.setIndex({docnames:["__config__","autograd","bottleneck","checkpoint","community/contribution_guide","community/governance","community/persons_of_interest","cpp_extension","cuda","cuda_deterministic","cuda_deterministic_backward","cudnn_deterministic","cudnn_persistent_rnn","data","distributed","distributions","dlpack","hub","index","jit","model_zoo","multiprocessing","nn","nn.functional","nn.init","notes/autograd","notes/broadcasting","notes/cpu_threading_torchscript_inference","notes/cuda","notes/extending","notes/faq","notes/large_scale_deployments","notes/multiprocessing","notes/randomness","notes/serialization","notes/windows","onnx","optim","sparse","storage","tensor_attributes","tensorboard","tensors","torch","torchvision/datasets","torchvision/index","torchvision/models","torchvision/transforms","torchvision/utils","type_info"],envversion:{"sphinx.domains.c":1,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":1,"sphinx.domains.javascript":1,"sphinx.domains.math":2,"sphinx.domains.python":1,"sphinx.domains.rst":1,"sphinx.domains.std":1,"sphinx.ext.intersphinx":1,"sphinx.ext.todo":1,"sphinx.ext.viewcode":1,sphinx:56},filenames:["__config__.rst","autograd.rst","bottleneck.rst","checkpoint.rst","community/contribution_guide.rst","community/governance.rst","community/persons_of_interest.rst","cpp_extension.rst","cuda.rst","cuda_deterministic.rst","cuda_deterministic_backward.rst","cudnn_deterministic.rst","cudnn_persistent_rnn.rst","data.rst","distributed.rst","distributions.rst","dlpack.rst","hub.rst","index.rst","jit.rst","model_zoo.rst","multiprocessing.rst","nn.rst","nn.functional.rst","nn.init.rst","notes/autograd.rst","notes/broadcasting.rst","notes/cpu_threading_torchscript_inference.rst","notes/cuda.rst","notes/extending.rst","notes/faq.rst","notes/large_scale_deployments.rst","notes/multiprocessing.rst","notes/randomness.rst","notes/serialization.rst","notes/windows.rst","onnx.rst","optim.rst","sparse.rst","storage.rst","tensor_attributes.rst","tensorboard.rst","tensors.rst","torch.rst","torchvision/datasets.rst","torchvision/index.rst","torchvision/models.rst","torchvision/transforms.rst","torchvision/utils.rst","type_info.rst"],objects:{"":{"PYTORCH_JIT=1":[19,5,1,"-"],torch:[43,0,0,"-"],torchvision:[45,0,0,"-"]},"torch.ByteTensor":{all:[42,2,1,""],any:[42,2,1,""]},"torch.FloatStorage":{"byte":[39,2,1,""],"char":[39,2,1,""],"double":[39,2,1,""],"float":[39,2,1,""],"int":[39,2,1,""],"long":[39,2,1,""],"new":[39,2,1,""],"short":[39,2,1,""],bfloat16:[39,2,1,""],bool:[39,2,1,""],clone:[39,2,1,""],copy_:[39,2,1,""],cpu:[39,2,1,""],cuda:[39,2,1,""],data_ptr:[39,2,1,""],device:[39,3,1,""],dtype:[39,3,1,""],element_size:[39,2,1,""],fill_:[39,2,1,""],from_buffer:[39,2,1,""],from_file:[39,2,1,""],half:[39,2,1,""],is_cuda:[39,3,1,""],is_pinned:[39,2,1,""],is_shared:[39,2,1,""],is_sparse:[39,3,1,""],pin_memory:[39,2,1,""],resize_:[39,2,1,""],share_memory_:[39,2,1,""],size:[39,2,1,""],tolist:[39,2,1,""],type:[39,2,1,""]},"torch.Tensor":{"byte":[42,2,1,""],"char":[42,2,1,""],"double":[42,2,1,""],"float":[42,2,1,""],"int":[42,2,1,""],"long":[42,2,1,""],"short":[42,2,1,""],"var":[42,2,1,""],T:[42,3,1,""],abs:[42,2,1,""],abs_:[42,2,1,""],acos:[42,2,1,""],acos_:[42,2,1,""],add:[42,2,1,""],add_:[42,2,1,""],addbmm:[42,2,1,""],addbmm_:[42,2,1,""],addcdiv:[42,2,1,""],addcdiv_:[42,2,1,""],addcmul:[42,2,1,""],addcmul_:[42,2,1,""],addmm:[42,2,1,""],addmm_:[42,2,1,""],addmv:[42,2,1,""],addmv_:[42,2,1,""],addr:[42,2,1,""],addr_:[42,2,1,""],allclose:[42,2,1,""],apply_:
[42,2,1,""],argmax:[42,2,1,""],argmin:[42,2,1,""],argsort:[42,2,1,""],as_strided:[42,2,1,""],asin:[42,2,1,""],asin_:[42,2,1,""],atan2:[42,2,1,""],atan2_:[42,2,1,""],atan:[42,2,1,""],atan_:[42,2,1,""],backward:[42,2,1,""],baddbmm:[42,2,1,""],baddbmm_:[42,2,1,""],bernoulli:[42,2,1,""],bernoulli_:[42,2,1,""],bfloat16:[42,2,1,""],bincount:[42,2,1,""],bitwise_not:[42,2,1,""],bitwise_not_:[42,2,1,""],bmm:[42,2,1,""],bool:[42,2,1,""],cauchy_:[42,2,1,""],ceil:[42,2,1,""],ceil_:[42,2,1,""],cholesky:[42,2,1,""],cholesky_inverse:[42,2,1,""],cholesky_solve:[42,2,1,""],chunk:[42,2,1,""],clamp:[42,2,1,""],clamp_:[42,2,1,""],clone:[42,2,1,""],contiguous:[42,2,1,""],copy_:[42,2,1,""],cos:[42,2,1,""],cos_:[42,2,1,""],cosh:[42,2,1,""],cosh_:[42,2,1,""],cpu:[42,2,1,""],cross:[42,2,1,""],cuda:[42,2,1,""],cumprod:[42,2,1,""],cumsum:[42,2,1,""],data_ptr:[42,2,1,""],dense_dim:[42,2,1,""],dequantize:[42,2,1,""],det:[42,2,1,""],detach:[42,2,1,""],detach_:[42,2,1,""],device:[42,3,1,""],diag:[42,2,1,""],diag_embed:[42,2,1,""],diagflat:[42,2,1,""],diagonal:[42,2,1,""],digamma:[42,2,1,""],digamma_:[42,2,1,""],dim:[42,2,1,""],dist:[42,2,1,""],div:[42,2,1,""],div_:[42,2,1,""],dot:[42,2,1,""],eig:[42,2,1,""],element_size:[42,2,1,""],eq:[42,2,1,""],eq_:[42,2,1,""],equal:[42,2,1,""],erf:[42,2,1,""],erf_:[42,2,1,""],erfc:[42,2,1,""],erfc_:[42,2,1,""],erfinv:[42,2,1,""],erfinv_:[42,2,1,""],exp:[42,2,1,""],exp_:[42,2,1,""],expand:[42,2,1,""],expand_as:[42,2,1,""],expm1:[42,2,1,""],expm1_:[42,2,1,""],exponential_:[42,2,1,""],fft:[42,2,1,""],fill_:[42,2,1,""],fill_diagonal_:[42,2,1,""],flatten:[42,2,1,""],flip:[42,2,1,""],floor:[42,2,1,""],floor_:[42,2,1,""],fmod:[42,2,1,""],fmod_:[42,2,1,""],frac:[42,2,1,""],frac_:[42,2,1,""],gather:[42,2,1,""],ge:[42,2,1,""],ge_:[42,2,1,""],gels:[42,2,1,""],geometric_:[42,2,1,""],geqrf:[42,2,1,""],ger:[42,2,1,""],get_device:[42,2,1,""],grad:[42,3,1,""],gt:[42,2,1,""],gt_:[42,2,1,""],half:[42,2,1,""],hardshrink:[42,2,1,""],histc:[42,2,1,""],ifft:[42,2,1,""],index_add:[42,2,1,""],index_add_:[42,2,1,""],index_copy:[42,2,1,""],index_copy_:[42,2,1,""],index_fill:[42,2,1,""],index_fill_:[42,2,1,""],index_put:[42,2,1,""],index_put_:[42,2,1,""],index_select:[42,2,1,""],indices:[42,2,1,""],int_repr:[42,2,1,""],inverse:[42,2,1,""],irfft:[42,2,1,""],is_contiguous:[42,2,1,""],is_cuda:[42,3,1,""],is_floating_point:[42,2,1,""],is_leaf:[42,2,1,""],is_pinned:[42,2,1,""],is_set_to:[42,2,1,""],is_shared:[42,2,1,""],is_signed:[42,2,1,""],is_sparse:[42,2,1,""],item:[42,2,1,""],kthvalue:[42,2,1,""],le:[42,2,1,""],le_:[42,2,1,""],lerp:[42,2,1,""],lerp_:[42,2,1,""],log10:[42,2,1,""],log10_:[42,2,1,""],log1p:[42,2,1,""],log1p_:[42,2,1,""],log2:[42,2,1,""],log2_:[42,2,1,""],log:[42,2,1,""],log_:[42,2,1,""],log_normal_:[42,2,1,""],logdet:[42,2,1,""],logsumexp:[42,2,1,""],lstsq:[42,2,1,""],lt:[42,2,1,""],lt_:[42,2,1,""],lu:[42,2,1,""],lu_solve:[42,2,1,""],map_:[42,2,1,""],masked_fill:[42,2,1,""],masked_fill_:[42,2,1,""],masked_scatter:[42,2,1,""],masked_scatter_:[42,2,1,""],masked_select:[42,2,1,""],matmul:[42,2,1,""],matrix_power:[42,2,1,""],max:[42,2,1,""],mean:[42,2,1,""],median:[42,2,1,""],min:[42,2,1,""],mm:[42,2,1,""],mode:[42,2,1,""],mul:[42,2,1,""],mul_:[42,2,1,""],multinomial:[42,2,1,""],mv:[42,2,1,""],mvlgamma:[42,2,1,""],mvlgamma_:[42,2,1,""],narrow:[42,2,1,""],narrow_copy:[42,2,1,""],ndim:[42,3,1,""],ndimension:[42,2,1,""],ne:[42,2,1,""],ne_:[42,2,1,""],neg:[42,2,1,""],neg_:[42,2,1,""],nelement:[42,2,1,""],new_empty:[42,2,1,""],new_full:[42,2,1,""],new_ones:[42,2,1,""],new_tensor:[42,2,1,""],new_zeros:[42,2,
1,""],nonzero:[42,2,1,""],norm:[42,2,1,""],normal_:[42,2,1,""],numel:[42,2,1,""],numpy:[42,2,1,""],orgqr:[42,2,1,""],ormqr:[42,2,1,""],permute:[42,2,1,""],pin_memory:[42,2,1,""],pinverse:[42,2,1,""],pow:[42,2,1,""],pow_:[42,2,1,""],prod:[42,2,1,""],put_:[42,2,1,""],q_scale:[42,2,1,""],q_zero_point:[42,2,1,""],qr:[42,2,1,""],qscheme:[42,2,1,""],random_:[42,2,1,""],reciprocal:[42,2,1,""],reciprocal_:[42,2,1,""],register_hook:[42,2,1,""],remainder:[42,2,1,""],remainder_:[42,2,1,""],renorm:[42,2,1,""],renorm_:[42,2,1,""],repeat:[42,2,1,""],repeat_interleave:[42,2,1,""],requires_grad:[42,2,1,""],requires_grad_:[42,2,1,""],reshape:[42,2,1,""],reshape_as:[42,2,1,""],resize_:[42,2,1,""],resize_as_:[42,2,1,""],retain_grad:[42,2,1,""],rfft:[42,2,1,""],roll:[42,2,1,""],rot90:[42,2,1,""],round:[42,2,1,""],round_:[42,2,1,""],rsqrt:[42,2,1,""],rsqrt_:[42,2,1,""],scatter:[42,2,1,""],scatter_:[42,2,1,""],scatter_add:[42,2,1,""],scatter_add_:[42,2,1,""],select:[42,2,1,""],set_:[42,2,1,""],share_memory_:[42,2,1,""],sigmoid:[42,2,1,""],sigmoid_:[42,2,1,""],sign:[42,2,1,""],sign_:[42,2,1,""],sin:[42,2,1,""],sin_:[42,2,1,""],sinh:[42,2,1,""],sinh_:[42,2,1,""],size:[42,2,1,""],slogdet:[42,2,1,""],solve:[42,2,1,""],sort:[42,2,1,""],sparse_dim:[42,2,1,""],sparse_mask:[42,2,1,""],split:[42,2,1,""],sqrt:[42,2,1,""],sqrt_:[42,2,1,""],squeeze:[42,2,1,""],squeeze_:[42,2,1,""],std:[42,2,1,""],stft:[42,2,1,""],storage:[42,2,1,""],storage_offset:[42,2,1,""],storage_type:[42,2,1,""],stride:[42,2,1,""],sub:[42,2,1,""],sub_:[42,2,1,""],sum:[42,2,1,""],sum_to_size:[42,2,1,""],svd:[42,2,1,""],symeig:[42,2,1,""],t:[42,2,1,""],t_:[42,2,1,""],take:[42,2,1,""],tan:[42,2,1,""],tan_:[42,2,1,""],tanh:[42,2,1,""],tanh_:[42,2,1,""],to:[42,2,1,""],to_mkldnn:[42,2,1,""],to_sparse:[42,2,1,""],tolist:[42,2,1,""],topk:[42,2,1,""],trace:[42,2,1,""],transpose:[42,2,1,""],transpose_:[42,2,1,""],triangular_solve:[42,2,1,""],tril:[42,2,1,""],tril_:[42,2,1,""],triu:[42,2,1,""],triu_:[42,2,1,""],trunc:[42,2,1,""],trunc_:[42,2,1,""],type:[42,2,1,""],type_as:[42,2,1,""],unbind:[42,2,1,""],unfold:[42,2,1,""],uniform_:[42,2,1,""],unique:[42,2,1,""],unique_consecutive:[42,2,1,""],unsqueeze:[42,2,1,""],unsqueeze_:[42,2,1,""],values:[42,2,1,""],view:[42,2,1,""],view_as:[42,2,1,""],where:[42,2,1,""],zero_:[42,2,1,""]},"torch._C":{Generator:[43,1,1,""]},"torch._C.Generator":{device:[43,3,1,""],get_state:[43,2,1,""],initial_seed:[43,2,1,""],manual_seed:[43,2,1,""],seed:[43,2,1,""],set_state:[43,2,1,""]},"torch.__config__":{parallel_info:[0,4,1,""],show:[0,4,1,""]},"torch.autograd":{Function:[1,1,1,""],backward:[1,4,1,""],detect_anomaly:[1,1,1,""],enable_grad:[1,1,1,""],grad:[1,4,1,""],gradcheck:[1,4,1,""],gradgradcheck:[1,4,1,""],no_grad:[1,1,1,""],set_detect_anomaly:[1,1,1,""],set_grad_enabled:[1,1,1,""]},"torch.autograd.Function":{backward:[1,2,1,""],forward:[1,2,1,""]},"torch.autograd.profiler":{emit_nvtx:[1,1,1,""],load_nvprof:[1,4,1,""],profile:[1,1,1,""]},"torch.autograd.profiler.profile":{export_chrome_trace:[1,2,1,""],key_averages:[1,2,1,""],self_cpu_time_total:[1,2,1,""],table:[1,2,1,""],total_average:[1,2,1,""]},"torch.cuda":{Event:[8,1,1,""],Stream:[8,1,1,""],current_blas_handle:[8,4,1,""],current_device:[8,4,1,""],current_stream:[8,4,1,""],default_stream:[8,4,1,""],device:[8,1,1,""],device_count:[8,4,1,""],device_of:[8,1,1,""],empty_cache:[8,4,1,""],get_device_capability:[8,4,1,""],get_device_name:[8,4,1,""],get_rng_state:[8,4,1,""],get_rng_state_all:[8,4,1,""],init:[8,4,1,""],initial_seed:[8,4,1,""],ipc_collect:[8,4,1,""],is_available:[8,4,1,""
],manual_seed:[8,4,1,""],manual_seed_all:[8,4,1,""],max_memory_allocated:[8,4,1,""],max_memory_cached:[8,4,1,""],memory_allocated:[8,4,1,""],memory_cached:[8,4,1,""],reset_max_memory_allocated:[8,4,1,""],reset_max_memory_cached:[8,4,1,""],seed:[8,4,1,""],seed_all:[8,4,1,""],set_device:[8,4,1,""],set_rng_state:[8,4,1,""],set_rng_state_all:[8,4,1,""],stream:[8,4,1,""],synchronize:[8,4,1,""]},"torch.cuda.Event":{elapsed_time:[8,2,1,""],from_ipc_handle:[8,2,1,""],ipc_handle:[8,2,1,""],query:[8,2,1,""],record:[8,2,1,""],synchronize:[8,2,1,""],wait:[8,2,1,""]},"torch.cuda.Stream":{query:[8,2,1,""],record_event:[8,2,1,""],synchronize:[8,2,1,""],wait_event:[8,2,1,""],wait_stream:[8,2,1,""]},"torch.cuda.comm":{broadcast:[8,4,1,""],broadcast_coalesced:[8,4,1,""],gather:[8,4,1,""],reduce_add:[8,4,1,""],scatter:[8,4,1,""]},"torch.cuda.nvtx":{mark:[8,4,1,""],range_pop:[8,4,1,""],range_push:[8,4,1,""]},"torch.distributed":{Backend:[14,1,1,""],ReduceOp:[14,1,1,""],all_gather:[14,4,1,""],all_gather_multigpu:[14,4,1,""],all_reduce:[14,4,1,""],all_reduce_multigpu:[14,4,1,""],barrier:[14,4,1,""],broadcast:[14,4,1,""],broadcast_multigpu:[14,4,1,""],gather:[14,4,1,""],get_backend:[14,4,1,""],get_rank:[14,4,1,""],get_world_size:[14,4,1,""],init_process_group:[14,4,1,""],irecv:[14,4,1,""],is_initialized:[14,4,1,""],is_mpi_available:[14,4,1,""],is_nccl_available:[14,4,1,""],isend:[14,4,1,""],launch:[14,0,0,"-"],new_group:[14,4,1,""],recv:[14,4,1,""],reduce:[14,4,1,""],reduce_multigpu:[14,4,1,""],reduce_op:[14,1,1,""],scatter:[14,4,1,""],send:[14,4,1,""]},"torch.distributions":{constraint_registry:[15,0,0,"-"],constraints:[15,0,0,"-"],kl:[15,0,0,"-"],transforms:[15,0,0,"-"]},"torch.distributions.bernoulli":{Bernoulli:[15,1,1,""]},"torch.distributions.bernoulli.Bernoulli":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,3,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.beta":{Beta:[15,1,1,""]},"torch.distributions.beta.Beta":{arg_constraints:[15,3,1,""],concentration0:[15,2,1,""],concentration1:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.binomial":{Binomial:[15,1,1,""]},"torch.distributions.binomial.Binomial":{arg_constraints:[15,3,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,3,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.categorical":{Categorical:[15,1,1,""]},"torch.distributions.categorical.Categorical":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,3,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.cauchy":{Cauchy:[15,1,1,""]},"torch.distributions.cauchy.Cauchy":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.chi2":{Chi2:[15,1,1,""]},"torch.distributions.chi2.Chi2":{arg_cons
traints:[15,3,1,""],df:[15,2,1,""],expand:[15,2,1,""]},"torch.distributions.constraint_registry":{ConstraintRegistry:[15,1,1,""]},"torch.distributions.constraint_registry.ConstraintRegistry":{register:[15,2,1,""]},"torch.distributions.constraints":{Constraint:[15,1,1,""],cat:[15,3,1,""],dependent_property:[15,3,1,""],greater_than:[15,3,1,""],greater_than_eq:[15,3,1,""],half_open_interval:[15,3,1,""],integer_interval:[15,3,1,""],interval:[15,3,1,""],less_than:[15,3,1,""],stack:[15,3,1,""]},"torch.distributions.constraints.Constraint":{check:[15,2,1,""]},"torch.distributions.dirichlet":{Dirichlet:[15,1,1,""]},"torch.distributions.dirichlet.Dirichlet":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.distribution":{Distribution:[15,1,1,""]},"torch.distributions.distribution.Distribution":{arg_constraints:[15,2,1,""],batch_shape:[15,2,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],event_shape:[15,2,1,""],expand:[15,2,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],perplexity:[15,2,1,""],rsample:[15,2,1,""],sample:[15,2,1,""],sample_n:[15,2,1,""],stddev:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.exp_family":{ExponentialFamily:[15,1,1,""]},"torch.distributions.exp_family.ExponentialFamily":{entropy:[15,2,1,""]},"torch.distributions.exponential":{Exponential:[15,1,1,""]},"torch.distributions.exponential.Exponential":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],stddev:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.fishersnedecor":{FisherSnedecor:[15,1,1,""]},"torch.distributions.fishersnedecor.FisherSnedecor":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.gamma":{Gamma:[15,1,1,""]},"torch.distributions.gamma.Gamma":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.geometric":{Geometric:[15,1,1,""]},"torch.distributions.geometric.Geometric":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.gumbel":{Gumbel:[15,1,1,""]},"torch.distributions.gumbel.Gumbel":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],stddev:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.half_cauchy":{HalfCauchy:[15,1,1,""]},"torch.distributions.half_cauchy.HalfCauchy":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],scale:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.half_normal":{HalfNormal:[15,1,1,""]},"torch.distributions.half_normal.HalfNormal":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],scale:[15,2,1,""],support:[15,3,1,
""],variance:[15,2,1,""]},"torch.distributions.independent":{Independent:[15,1,1,""]},"torch.distributions.independent.Independent":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,2,1,""],has_rsample:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],sample:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.kl":{kl_divergence:[15,4,1,""],register_kl:[15,4,1,""]},"torch.distributions.laplace":{Laplace:[15,1,1,""]},"torch.distributions.laplace.Laplace":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],stddev:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.log_normal":{LogNormal:[15,1,1,""]},"torch.distributions.log_normal.LogNormal":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],loc:[15,2,1,""],mean:[15,2,1,""],scale:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.lowrank_multivariate_normal":{LowRankMultivariateNormal:[15,1,1,""]},"torch.distributions.lowrank_multivariate_normal.LowRankMultivariateNormal":{arg_constraints:[15,3,1,""],covariance_matrix:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],precision_matrix:[15,3,1,""],rsample:[15,2,1,""],scale_tril:[15,3,1,""],support:[15,3,1,""],variance:[15,3,1,""]},"torch.distributions.multinomial":{Multinomial:[15,1,1,""]},"torch.distributions.multinomial.Multinomial":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],logits:[15,2,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,2,1,""],sample:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.multivariate_normal":{MultivariateNormal:[15,1,1,""]},"torch.distributions.multivariate_normal.MultivariateNormal":{arg_constraints:[15,3,1,""],covariance_matrix:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],precision_matrix:[15,3,1,""],rsample:[15,2,1,""],scale_tril:[15,3,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.negative_binomial":{NegativeBinomial:[15,1,1,""]},"torch.distributions.negative_binomial.NegativeBinomial":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.normal":{Normal:[15,1,1,""]},"torch.distributions.normal.Normal":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],sample:[15,2,1,""],stddev:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.one_hot_categorical":{OneHotCategorical:[15,1,1,""]},"torch.distributions.one_hot_categorical.OneHotCategorical":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,3,1,""],log_prob:[15,2,1,""],logits:[15,2,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,2,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.pareto":{Pareto:[15,1,1,""]},"torch.distributions.pareto.Pareto":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],mean:[15,2,1,""],suppor
t:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.poisson":{Poisson:[15,1,1,""]},"torch.distributions.poisson.Poisson":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.relaxed_bernoulli":{LogitRelaxedBernoulli:[15,1,1,""],RelaxedBernoulli:[15,1,1,""]},"torch.distributions.relaxed_bernoulli.LogitRelaxedBernoulli":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],rsample:[15,2,1,""],support:[15,3,1,""]},"torch.distributions.relaxed_bernoulli.RelaxedBernoulli":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],logits:[15,2,1,""],probs:[15,2,1,""],support:[15,3,1,""],temperature:[15,2,1,""]},"torch.distributions.relaxed_categorical":{RelaxedOneHotCategorical:[15,1,1,""]},"torch.distributions.relaxed_categorical.RelaxedOneHotCategorical":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],logits:[15,2,1,""],probs:[15,2,1,""],support:[15,3,1,""],temperature:[15,2,1,""]},"torch.distributions.studentT":{StudentT:[15,1,1,""]},"torch.distributions.studentT.StudentT":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.transformed_distribution":{TransformedDistribution:[15,1,1,""]},"torch.distributions.transformed_distribution.TransformedDistribution":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,2,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],rsample:[15,2,1,""],sample:[15,2,1,""],support:[15,2,1,""]},"torch.distributions.transforms":{AbsTransform:[15,1,1,""],AffineTransform:[15,1,1,""],CatTransform:[15,1,1,""],ComposeTransform:[15,1,1,""],ExpTransform:[15,1,1,""],LowerCholeskyTransform:[15,1,1,""],PowerTransform:[15,1,1,""],SigmoidTransform:[15,1,1,""],SoftmaxTransform:[15,1,1,""],StackTransform:[15,1,1,""],StickBreakingTransform:[15,1,1,""],Transform:[15,1,1,""]},"torch.distributions.transforms.Transform":{inv:[15,2,1,""],log_abs_det_jacobian:[15,2,1,""],sign:[15,2,1,""]},"torch.distributions.uniform":{Uniform:[15,1,1,""]},"torch.distributions.uniform.Uniform":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],stddev:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.weibull":{Weibull:[15,1,1,""]},"torch.distributions.weibull.Weibull":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],mean:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.hub":{help:[17,4,1,""],list:[17,4,1,""],load:[17,4,1,""],set_dir:[17,4,1,""]},"torch.jit":{ScriptModule:[19,1,1,""],load:[19,4,1,""],save:[19,4,1,""],script:[19,4,1,""],trace:[19,4,1,""]},"torch.multiprocessing":{SpawnContext:[21,1,1,""],get_all_sharing_strategies:[21,4,1,""],get_sharing_strategy:[21,4,1,""],set_sharing_strategy:[21,4,1,""],spawn:[21,4,1,""]},"torch.multiprocessing.SpawnContext":{join:[21,2,1,""]},"torch.nn":{AdaptiveAvgPool1d:[22,1,1,""],AdaptiveAvgPool2d:[22,1,1,""],AdaptiveAvgPool3d:[22,1,1,""],AdaptiveLogSoftmaxWithLoss:[22,1,1,""],AdaptiveMaxPool1d:[22,1,1,""],AdaptiveMaxPool2d:[22,1,1,""],AdaptiveMaxPool3d:[22,1,1,""],AlphaDropout:[22,1,1,""],AvgPool1d:[22,1,1,""],AvgPool2d:[22,1,1,""],AvgPool3d:[22,1,1,""],BCELoss:[22,1,1,""],BCE
WithLogitsLoss:[22,1,1,""],BatchNorm1d:[22,1,1,""],BatchNorm2d:[22,1,1,""],BatchNorm3d:[22,1,1,""],Bilinear:[22,1,1,""],CELU:[22,1,1,""],CTCLoss:[22,1,1,""],ConstantPad1d:[22,1,1,""],ConstantPad2d:[22,1,1,""],ConstantPad3d:[22,1,1,""],Conv1d:[22,1,1,""],Conv2d:[22,1,1,""],Conv3d:[22,1,1,""],ConvTranspose1d:[22,1,1,""],ConvTranspose2d:[22,1,1,""],ConvTranspose3d:[22,1,1,""],CosineEmbeddingLoss:[22,1,1,""],CosineSimilarity:[22,1,1,""],CrossEntropyLoss:[22,1,1,""],DataParallel:[22,1,1,""],Dropout2d:[22,1,1,""],Dropout3d:[22,1,1,""],Dropout:[22,1,1,""],ELU:[22,1,1,""],Embedding:[22,1,1,""],EmbeddingBag:[22,1,1,""],Fold:[22,1,1,""],FractionalMaxPool2d:[22,1,1,""],GRU:[22,1,1,""],GRUCell:[22,1,1,""],GroupNorm:[22,1,1,""],Hardshrink:[22,1,1,""],Hardtanh:[22,1,1,""],HingeEmbeddingLoss:[22,1,1,""],Identity:[22,1,1,""],InstanceNorm1d:[22,1,1,""],InstanceNorm2d:[22,1,1,""],InstanceNorm3d:[22,1,1,""],KLDivLoss:[22,1,1,""],L1Loss:[22,1,1,""],LPPool1d:[22,1,1,""],LPPool2d:[22,1,1,""],LSTM:[22,1,1,""],LSTMCell:[22,1,1,""],LayerNorm:[22,1,1,""],LeakyReLU:[22,1,1,""],Linear:[22,1,1,""],LocalResponseNorm:[22,1,1,""],LogSigmoid:[22,1,1,""],LogSoftmax:[22,1,1,""],MSELoss:[22,1,1,""],MarginRankingLoss:[22,1,1,""],MaxPool1d:[22,1,1,""],MaxPool2d:[22,1,1,""],MaxPool3d:[22,1,1,""],MaxUnpool1d:[22,1,1,""],MaxUnpool2d:[22,1,1,""],MaxUnpool3d:[22,1,1,""],Module:[22,1,1,""],ModuleDict:[22,1,1,""],ModuleList:[22,1,1,""],MultiLabelMarginLoss:[22,1,1,""],MultiLabelSoftMarginLoss:[22,1,1,""],MultiMarginLoss:[22,1,1,""],MultiheadAttention:[22,1,1,""],NLLLoss:[22,1,1,""],PReLU:[22,1,1,""],PairwiseDistance:[22,1,1,""],Parameter:[22,1,1,""],ParameterDict:[22,1,1,""],ParameterList:[22,1,1,""],PixelShuffle:[22,1,1,""],PoissonNLLLoss:[22,1,1,""],RNN:[22,1,1,""],RNNCell:[22,1,1,""],RReLU:[22,1,1,""],ReLU6:[22,1,1,""],ReLU:[22,1,1,""],ReflectionPad1d:[22,1,1,""],ReflectionPad2d:[22,1,1,""],ReplicationPad1d:[22,1,1,""],ReplicationPad2d:[22,1,1,""],ReplicationPad3d:[22,1,1,""],SELU:[22,1,1,""],Sequential:[22,1,1,""],Sigmoid:[22,1,1,""],SmoothL1Loss:[22,1,1,""],SoftMarginLoss:[22,1,1,""],Softmax2d:[22,1,1,""],Softmax:[22,1,1,""],Softmin:[22,1,1,""],Softplus:[22,1,1,""],Softshrink:[22,1,1,""],Softsign:[22,1,1,""],SyncBatchNorm:[22,1,1,""],Tanh:[22,1,1,""],Tanhshrink:[22,1,1,""],Threshold:[22,1,1,""],Transformer:[22,1,1,""],TransformerDecoder:[22,1,1,""],TransformerDecoderLayer:[22,1,1,""],TransformerEncoder:[22,1,1,""],TransformerEncoderLayer:[22,1,1,""],TripletMarginLoss:[22,1,1,""],Unfold:[22,1,1,""],Upsample:[22,1,1,""],UpsamplingBilinear2d:[22,1,1,""],UpsamplingNearest2d:[22,1,1,""],ZeroPad2d:[22,1,1,""]},"torch.nn.AdaptiveLogSoftmaxWithLoss":{log_prob:[22,2,1,""],predict:[22,2,1,""]},"torch.nn.Embedding":{from_pretrained:[22,2,1,""]},"torch.nn.EmbeddingBag":{from_pretrained:[22,2,1,""]},"torch.nn.Module":{"double":[22,2,1,""],"float":[22,2,1,""],add_module:[22,2,1,""],apply:[22,2,1,""],buffers:[22,2,1,""],children:[22,2,1,""],cpu:[22,2,1,""],cuda:[22,2,1,""],dump_patches:[22,3,1,""],eval:[22,2,1,""],extra_repr:[22,2,1,""],forward:[22,2,1,""],half:[22,2,1,""],load_state_dict:[22,2,1,""],modules:[22,2,1,""],named_buffers:[22,2,1,""],named_children:[22,2,1,""],named_modules:[22,2,1,""],named_parameters:[22,2,1,""],parameters:[22,2,1,""],register_backward_hook:[22,2,1,""],register_buffer:[22,2,1,""],register_forward_hook:[22,2,1,""],register_forward_pre_hook:[22,2,1,""],register_parameter:[22,2,1,""],requires_grad_:[22,2,1,""],state_dict:[22,2,1,""],to:[22,2,1,""],train:[22,2,1,""],type:[22,2,1,""],zero_grad:[22,2,1,""]},"torch.nn.Mo
duleDict":{clear:[22,2,1,""],items:[22,2,1,""],keys:[22,2,1,""],pop:[22,2,1,""],update:[22,2,1,""],values:[22,2,1,""]},"torch.nn.ModuleList":{append:[22,2,1,""],extend:[22,2,1,""],insert:[22,2,1,""]},"torch.nn.MultiheadAttention":{forward:[22,2,1,""]},"torch.nn.ParameterDict":{clear:[22,2,1,""],items:[22,2,1,""],keys:[22,2,1,""],pop:[22,2,1,""],update:[22,2,1,""],values:[22,2,1,""]},"torch.nn.ParameterList":{append:[22,2,1,""],extend:[22,2,1,""]},"torch.nn.SyncBatchNorm":{convert_sync_batchnorm:[22,2,1,""]},"torch.nn.Transformer":{forward:[22,2,1,""],generate_square_subsequent_mask:[22,2,1,""]},"torch.nn.TransformerDecoder":{forward:[22,2,1,""]},"torch.nn.TransformerDecoderLayer":{forward:[22,2,1,""]},"torch.nn.TransformerEncoder":{forward:[22,2,1,""]},"torch.nn.TransformerEncoderLayer":{forward:[22,2,1,""]},"torch.nn.functional":{adaptive_avg_pool1d:[23,4,1,""],adaptive_avg_pool2d:[23,4,1,""],adaptive_avg_pool3d:[23,4,1,""],adaptive_max_pool1d:[23,4,1,""],adaptive_max_pool2d:[23,4,1,""],adaptive_max_pool3d:[23,4,1,""],affine_grid:[23,4,1,""],alpha_dropout:[23,4,1,""],avg_pool1d:[23,4,1,""],avg_pool2d:[23,4,1,""],avg_pool3d:[23,4,1,""],batch_norm:[23,4,1,""],bilinear:[23,4,1,""],binary_cross_entropy:[23,4,1,""],binary_cross_entropy_with_logits:[23,4,1,""],celu:[23,4,1,""],conv1d:[23,4,1,""],conv2d:[23,4,1,""],conv3d:[23,4,1,""],conv_transpose1d:[23,4,1,""],conv_transpose2d:[23,4,1,""],conv_transpose3d:[23,4,1,""],cosine_embedding_loss:[23,4,1,""],cosine_similarity:[23,4,1,""],cross_entropy:[23,4,1,""],ctc_loss:[23,4,1,""],dropout2d:[23,4,1,""],dropout3d:[23,4,1,""],dropout:[23,4,1,""],elu:[23,4,1,""],elu_:[23,4,1,""],embedding:[23,4,1,""],embedding_bag:[23,4,1,""],fold:[23,4,1,""],gelu:[23,4,1,""],glu:[23,4,1,""],grid_sample:[23,4,1,""],gumbel_softmax:[23,4,1,""],hardshrink:[23,4,1,""],hardtanh:[23,4,1,""],hardtanh_:[23,4,1,""],hinge_embedding_loss:[23,4,1,""],instance_norm:[23,4,1,""],interpolate:[23,4,1,""],kl_div:[23,4,1,""],l1_loss:[23,4,1,""],layer_norm:[23,4,1,""],leaky_relu:[23,4,1,""],leaky_relu_:[23,4,1,""],linear:[23,4,1,""],local_response_norm:[23,4,1,""],log_softmax:[23,4,1,""],logsigmoid:[23,4,1,""],lp_pool1d:[23,4,1,""],lp_pool2d:[23,4,1,""],margin_ranking_loss:[23,4,1,""],max_pool1d:[23,4,1,""],max_pool2d:[23,4,1,""],max_pool3d:[23,4,1,""],max_unpool1d:[23,4,1,""],max_unpool2d:[23,4,1,""],max_unpool3d:[23,4,1,""],mse_loss:[23,4,1,""],multi_margin_loss:[23,4,1,""],multilabel_margin_loss:[23,4,1,""],multilabel_soft_margin_loss:[23,4,1,""],nll_loss:[23,4,1,""],normalize:[23,4,1,""],one_hot:[23,4,1,""],pad:[23,4,1,""],pairwise_distance:[23,4,1,""],pdist:[23,4,1,""],pixel_shuffle:[23,4,1,""],poisson_nll_loss:[23,4,1,""],prelu:[23,4,1,""],relu6:[23,4,1,""],relu:[23,4,1,""],relu_:[23,4,1,""],rrelu:[23,4,1,""],rrelu_:[23,4,1,""],selu:[23,4,1,""],sigmoid:[23,4,1,""],smooth_l1_loss:[23,4,1,""],soft_margin_loss:[23,4,1,""],softmax:[23,4,1,""],softmin:[23,4,1,""],softplus:[23,4,1,""],softshrink:[23,4,1,""],softsign:[23,4,1,""],tanh:[23,4,1,""],tanhshrink:[23,4,1,""],threshold:[23,4,1,""],threshold_:[23,4,1,""],triplet_margin_loss:[23,4,1,""],unfold:[23,4,1,""],upsample:[23,4,1,""],upsample_bilinear:[23,4,1,""],upsample_nearest:[23,4,1,""]},"torch.nn.init":{calculate_gain:[24,4,1,""],constant_:[24,4,1,""],dirac_:[24,4,1,""],eye_:[24,4,1,""],kaiming_normal_:[24,4,1,""],kaiming_uniform_:[24,4,1,""],normal_:[24,4,1,""],ones_:[24,4,1,""],orthogonal_:[24,4,1,""],sparse_:[24,4,1,""],uniform_:[24,4,1,""],xavier_normal_:[24,4,1,""],xavier_uniform_:[24,4,1,""],zeros_:[24,4,1,""]},"torch.nn.paralle
l":{DistributedDataParallel:[22,1,1,""],data_parallel:[23,4,1,""]},"torch.nn.parallel.DistributedDataParallel":{no_sync:[22,2,1,""]},"torch.nn.utils":{clip_grad_norm_:[22,4,1,""],clip_grad_value_:[22,4,1,""],parameters_to_vector:[22,4,1,""],remove_spectral_norm:[22,4,1,""],remove_weight_norm:[22,4,1,""],spectral_norm:[22,4,1,""],vector_to_parameters:[22,4,1,""],weight_norm:[22,4,1,""]},"torch.nn.utils.rnn":{PackedSequence:[22,4,1,""],pack_padded_sequence:[22,4,1,""],pack_sequence:[22,4,1,""],pad_packed_sequence:[22,4,1,""],pad_sequence:[22,4,1,""]},"torch.onnx":{"export":[36,4,1,""],is_in_onnx_export:[36,4,1,""],register_custom_op_symbolic:[36,4,1,""],set_training:[36,4,1,""]},"torch.onnx.operators":{shape_as_tensor:[36,4,1,""]},"torch.optim":{ASGD:[37,1,1,""],Adadelta:[37,1,1,""],Adagrad:[37,1,1,""],Adam:[37,1,1,""],AdamW:[37,1,1,""],Adamax:[37,1,1,""],LBFGS:[37,1,1,""],Optimizer:[37,1,1,""],RMSprop:[37,1,1,""],Rprop:[37,1,1,""],SGD:[37,1,1,""],SparseAdam:[37,1,1,""]},"torch.optim.ASGD":{step:[37,2,1,""]},"torch.optim.Adadelta":{step:[37,2,1,""]},"torch.optim.Adagrad":{step:[37,2,1,""]},"torch.optim.Adam":{step:[37,2,1,""]},"torch.optim.AdamW":{step:[37,2,1,""]},"torch.optim.Adamax":{step:[37,2,1,""]},"torch.optim.LBFGS":{step:[37,2,1,""]},"torch.optim.Optimizer":{add_param_group:[37,2,1,""],load_state_dict:[37,2,1,""],state_dict:[37,2,1,""],step:[37,2,1,""],zero_grad:[37,2,1,""]},"torch.optim.RMSprop":{step:[37,2,1,""]},"torch.optim.Rprop":{step:[37,2,1,""]},"torch.optim.SGD":{step:[37,2,1,""]},"torch.optim.SparseAdam":{step:[37,2,1,""]},"torch.optim.lr_scheduler":{CosineAnnealingLR:[37,1,1,""],CyclicLR:[37,1,1,""],ExponentialLR:[37,1,1,""],LambdaLR:[37,1,1,""],MultiStepLR:[37,1,1,""],ReduceLROnPlateau:[37,1,1,""],StepLR:[37,1,1,""]},"torch.optim.lr_scheduler.CyclicLR":{get_lr:[37,2,1,""]},"torch.optim.lr_scheduler.LambdaLR":{load_state_dict:[37,2,1,""],state_dict:[37,2,1,""]},"torch.quasirandom":{SobolEngine:[43,1,1,""]},"torch.quasirandom.SobolEngine":{draw:[43,2,1,""],fast_forward:[43,2,1,""],reset:[43,2,1,""]},"torch.sparse":{FloatTensor:[38,1,1,""],addmm:[38,4,1,""],mm:[38,4,1,""],sum:[38,4,1,""]},"torch.sparse.FloatTensor":{_indices:[38,2,1,""],_nnz:[38,2,1,""],_values:[38,2,1,""],add:[38,2,1,""],add_:[38,2,1,""],clone:[38,2,1,""],coalesce:[38,2,1,""],dim:[38,2,1,""],div:[38,2,1,""],div_:[38,2,1,""],get_device:[38,2,1,""],hspmm:[38,2,1,""],is_coalesced:[38,2,1,""],mm:[38,2,1,""],mul:[38,2,1,""],mul_:[38,2,1,""],narrow_copy:[38,2,1,""],resizeAs_:[38,2,1,""],size:[38,2,1,""],spadd:[38,2,1,""],spmm:[38,2,1,""],sspaddmm:[38,2,1,""],sspmm:[38,2,1,""],sub:[38,2,1,""],sub_:[38,2,1,""],t_:[38,2,1,""],toDense:[38,2,1,""],transpose:[38,2,1,""],transpose_:[38,2,1,""],zero_:[38,2,1,""]},"torch.torch":{default_generator:[43,3,1,""],device:[40,1,1,""],dtype:[40,1,1,""],finfo:[49,1,1,""],iinfo:[49,1,1,""],layout:[40,1,1,""]},"torch.utils":{data:[13,0,0,"-"],model_zoo:[20,0,0,"-"]},"torch.utils.checkpoint":{checkpoint:[3,4,1,""],checkpoint_sequential:[3,4,1,""]},"torch.utils.cpp_extension":{BuildExtension:[7,4,1,""],CUDAExtension:[7,4,1,""],CppExtension:[7,4,1,""],check_compiler_abi_compatibility:[7,4,1,""],include_paths:[7,4,1,""],load:[7,4,1,""],load_inline:[7,4,1,""],verify_ninja_availability:[7,4,1,""]},"torch.utils.data":{BatchSampler:[13,1,1,""],ChainDataset:[13,1,1,""],ConcatDataset:[13,1,1,""],DataLoader:[13,1,1,""],Dataset:[13,1,1,""],IterableDataset:[13,1,1,""],RandomSampler:[13,1,1,""],Sampler:[13,1,1,""],SequentialSampler:[13,1,1,""],Subset:[13,1,1,""],SubsetRandomSampler:[13,1,1,""],Te
nsorDataset:[13,1,1,""],WeightedRandomSampler:[13,1,1,""],get_worker_info:[13,4,1,""],random_split:[13,4,1,""]},"torch.utils.data.distributed":{DistributedSampler:[13,1,1,""]},"torch.utils.dlpack":{from_dlpack:[16,4,1,""],to_dlpack:[16,4,1,""]},"torch.utils.model_zoo":{load_url:[20,4,1,""]},"torch.utils.tensorboard.writer":{SummaryWriter:[41,1,1,""]},"torch.utils.tensorboard.writer.SummaryWriter":{__init__:[41,2,1,""],add_audio:[41,2,1,""],add_custom_scalars:[41,2,1,""],add_embedding:[41,2,1,""],add_figure:[41,2,1,""],add_graph:[41,2,1,""],add_histogram:[41,2,1,""],add_image:[41,2,1,""],add_images:[41,2,1,""],add_mesh:[41,2,1,""],add_pr_curve:[41,2,1,""],add_scalar:[41,2,1,""],add_scalars:[41,2,1,""],add_text:[41,2,1,""],add_video:[41,2,1,""],close:[41,2,1,""],flush:[41,2,1,""]},"torchvision.datasets":{CIFAR100:[44,1,1,""],CIFAR10:[44,1,1,""],Cityscapes:[44,1,1,""],CocoCaptions:[44,1,1,""],CocoDetection:[44,1,1,""],DatasetFolder:[44,1,1,""],EMNIST:[44,1,1,""],FakeData:[44,1,1,""],FashionMNIST:[44,1,1,""],Flickr30k:[44,1,1,""],Flickr8k:[44,1,1,""],HMDB51:[44,1,1,""],ImageFolder:[44,1,1,""],ImageNet:[44,1,1,""],KMNIST:[44,1,1,""],Kinetics400:[44,1,1,""],LSUN:[44,1,1,""],MNIST:[44,1,1,""],PhotoTour:[44,1,1,""],QMNIST:[44,1,1,""],SBDataset:[44,1,1,""],SBU:[44,1,1,""],STL10:[44,1,1,""],SVHN:[44,1,1,""],UCF101:[44,1,1,""],USPS:[44,1,1,""],VOCDetection:[44,1,1,""],VOCSegmentation:[44,1,1,""]},"torchvision.datasets.CIFAR10":{__getitem__:[44,2,1,""]},"torchvision.datasets.Cityscapes":{__getitem__:[44,2,1,""]},"torchvision.datasets.CocoCaptions":{__getitem__:[44,2,1,""]},"torchvision.datasets.CocoDetection":{__getitem__:[44,2,1,""]},"torchvision.datasets.DatasetFolder":{__getitem__:[44,2,1,""]},"torchvision.datasets.Flickr30k":{__getitem__:[44,2,1,""]},"torchvision.datasets.Flickr8k":{__getitem__:[44,2,1,""]},"torchvision.datasets.ImageFolder":{__getitem__:[44,2,1,""]},"torchvision.datasets.LSUN":{__getitem__:[44,2,1,""]},"torchvision.datasets.PhotoTour":{__getitem__:[44,2,1,""]},"torchvision.datasets.SBU":{__getitem__:[44,2,1,""]},"torchvision.datasets.STL10":{__getitem__:[44,2,1,""]},"torchvision.datasets.SVHN":{__getitem__:[44,2,1,""]},"torchvision.datasets.USPS":{__getitem__:[44,2,1,""]},"torchvision.datasets.VOCDetection":{__getitem__:[44,2,1,""]},"torchvision.datasets.VOCSegmentation":{__getitem__:[44,2,1,""]},"torchvision.models":{alexnet:[46,4,1,""],densenet121:[46,4,1,""],densenet161:[46,4,1,""],densenet169:[46,4,1,""],densenet201:[46,4,1,""],googlenet:[46,4,1,""],inception_v3:[46,4,1,""],mnasnet0_5:[46,4,1,""],mnasnet0_75:[46,4,1,""],mnasnet1_0:[46,4,1,""],mnasnet1_3:[46,4,1,""],mobilenet_v2:[46,4,1,""],resnet101:[46,4,1,""],resnet152:[46,4,1,""],resnet18:[46,4,1,""],resnet34:[46,4,1,""],resnet50:[46,4,1,""],resnext101_32x8d:[46,4,1,""],resnext50_32x4d:[46,4,1,""],shufflenet_v2_x0_5:[46,4,1,""],shufflenet_v2_x1_0:[46,4,1,""],shufflenet_v2_x1_5:[46,4,1,""],shufflenet_v2_x2_0:[46,4,1,""],squeezenet1_0:[46,4,1,""],squeezenet1_1:[46,4,1,""],vgg11:[46,4,1,""],vgg11_bn:[46,4,1,""],vgg13:[46,4,1,""],vgg13_bn:[46,4,1,""],vgg16:[46,4,1,""],vgg16_bn:[46,4,1,""],vgg19:[46,4,1,""],vgg19_bn:[46,4,1,""],wide_resnet101_2:[46,4,1,""],wide_resnet50_2:[46,4,1,""]},"torchvision.models.detection":{fasterrcnn_resnet50_fpn:[46,4,1,""],keypointrcnn_resnet50_fpn:[46,4,1,""],maskrcnn_resnet50_fpn:[46,4,1,""]},"torchvision.models.segmentation":{deeplabv3_resnet101:[46,4,1,""],deeplabv3_resnet50:[46,4,1,""],fcn_resnet101:[46,4,1,""],fcn_resnet50:[46,4,1,""]},"torchvision.transforms":{CenterCrop:[47,1,1,""],ColorJitte
r:[47,1,1,""],Compose:[47,1,1,""],FiveCrop:[47,1,1,""],Grayscale:[47,1,1,""],Lambda:[47,1,1,""],LinearTransformation:[47,1,1,""],Normalize:[47,1,1,""],Pad:[47,1,1,""],RandomAffine:[47,1,1,""],RandomApply:[47,1,1,""],RandomChoice:[47,1,1,""],RandomCrop:[47,1,1,""],RandomErasing:[47,1,1,""],RandomGrayscale:[47,1,1,""],RandomHorizontalFlip:[47,1,1,""],RandomOrder:[47,1,1,""],RandomPerspective:[47,1,1,""],RandomResizedCrop:[47,1,1,""],RandomRotation:[47,1,1,""],RandomSizedCrop:[47,1,1,""],RandomVerticalFlip:[47,1,1,""],Resize:[47,1,1,""],Scale:[47,1,1,""],TenCrop:[47,1,1,""],ToPILImage:[47,1,1,""],ToTensor:[47,1,1,""],functional:[47,0,0,"-"]},"torchvision.transforms.Normalize":{__call__:[47,2,1,""]},"torchvision.transforms.ToPILImage":{__call__:[47,2,1,""]},"torchvision.transforms.ToTensor":{__call__:[47,2,1,""]},"torchvision.transforms.functional":{adjust_brightness:[47,4,1,""],adjust_contrast:[47,4,1,""],adjust_gamma:[47,4,1,""],adjust_hue:[47,4,1,""],adjust_saturation:[47,4,1,""],affine:[47,4,1,""],crop:[47,4,1,""],erase:[47,4,1,""],five_crop:[47,4,1,""],hflip:[47,4,1,""],normalize:[47,4,1,""],pad:[47,4,1,""],perspective:[47,4,1,""],resize:[47,4,1,""],resized_crop:[47,4,1,""],rotate:[47,4,1,""],ten_crop:[47,4,1,""],to_grayscale:[47,4,1,""],to_pil_image:[47,4,1,""],to_tensor:[47,4,1,""],vflip:[47,4,1,""]},"torchvision.utils":{make_grid:[48,4,1,""],save_image:[48,4,1,""]},torch:{"var":[43,4,1,""],ByteTensor:[42,1,1,""],FloatStorage:[39,1,1,""],Tensor:[42,1,1,""],__config__:[0,0,0,"-"],abs:[43,4,1,""],acos:[43,4,1,""],add:[43,4,1,""],addbmm:[43,4,1,""],addcdiv:[43,4,1,""],addcmul:[43,4,1,""],addmm:[43,4,1,""],addmv:[43,4,1,""],addr:[43,4,1,""],allclose:[43,4,1,""],arange:[43,4,1,""],argmax:[43,4,1,""],argmin:[43,4,1,""],argsort:[43,4,1,""],as_strided:[43,4,1,""],as_tensor:[43,4,1,""],asin:[43,4,1,""],atan2:[43,4,1,""],atan:[43,4,1,""],autograd:[1,0,0,"-"],baddbmm:[43,4,1,""],bartlett_window:[43,4,1,""],bernoulli:[43,4,1,""],bincount:[43,4,1,""],bitwise_not:[43,4,1,""],blackman_window:[43,4,1,""],bmm:[43,4,1,""],broadcast_tensors:[43,4,1,""],cartesian_prod:[43,4,1,""],cat:[43,4,1,""],ceil:[43,4,1,""],chain_matmul:[43,4,1,""],cholesky:[43,4,1,""],cholesky_inverse:[43,4,1,""],cholesky_solve:[43,4,1,""],chunk:[43,4,1,""],clamp:[43,4,1,""],combinations:[43,4,1,""],compiled_with_cxx11_abi:[43,4,1,""],cos:[43,4,1,""],cosh:[43,4,1,""],cross:[43,4,1,""],cuda:[8,0,0,"-"],cumprod:[43,4,1,""],cumsum:[43,4,1,""],det:[43,4,1,""],diag:[43,4,1,""],diag_embed:[43,4,1,""],diagflat:[43,4,1,""],diagonal:[43,4,1,""],digamma:[43,4,1,""],dist:[43,4,1,""],distributed:[14,0,0,"-"],distributions:[15,0,0,"-"],div:[43,4,1,""],dot:[43,4,1,""],eig:[43,4,1,""],einsum:[43,4,1,""],empty:[43,4,1,""],empty_like:[43,4,1,""],eq:[43,4,1,""],equal:[43,4,1,""],erf:[43,4,1,""],erfc:[43,4,1,""],erfinv:[43,4,1,""],exp:[43,4,1,""],expm1:[43,4,1,""],eye:[43,4,1,""],fft:[43,4,1,""],flatten:[43,4,1,""],flip:[43,4,1,""],floor:[43,4,1,""],fmod:[43,4,1,""],frac:[43,4,1,""],from_numpy:[43,4,1,""],full:[43,4,1,""],full_like:[43,4,1,""],gather:[43,4,1,""],ge:[43,4,1,""],gels:[43,4,1,""],geqrf:[43,4,1,""],ger:[43,4,1,""],get_default_dtype:[43,4,1,""],get_num_interop_threads:[43,4,1,""],get_num_threads:[43,4,1,""],get_rng_state:[43,4,1,""],gt:[43,4,1,""],hamming_window:[43,4,1,""],hann_window:[43,4,1,""],histc:[43,4,1,""],hub:[17,0,0,"-"],ifft:[43,4,1,""],index_select:[43,4,1,""],initial_seed:[43,4,1,""],inverse:[43,4,1,""],irfft:[43,4,1,""],is_floating_point:[43,4,1,""],is_storage:[43,4,1,""],is_tensor:[43,4,1,""],isfinite:[43,4,1,""],isinf:[43,4,
1,""],isnan:[43,4,1,""],jit:[19,0,0,"-"],kthvalue:[43,4,1,""],le:[43,4,1,""],lerp:[43,4,1,""],linspace:[43,4,1,""],load:[43,4,1,""],log10:[43,4,1,""],log1p:[43,4,1,""],log2:[43,4,1,""],log:[43,4,1,""],logdet:[43,4,1,""],logspace:[43,4,1,""],logsumexp:[43,4,1,""],lstsq:[43,4,1,""],lt:[43,4,1,""],lu:[43,4,1,""],lu_solve:[43,4,1,""],lu_unpack:[43,4,1,""],manual_seed:[43,4,1,""],masked_select:[43,4,1,""],matmul:[43,4,1,""],matrix_power:[43,4,1,""],matrix_rank:[43,4,1,""],max:[43,4,1,""],mean:[43,4,1,""],median:[43,4,1,""],meshgrid:[43,4,1,""],min:[43,4,1,""],mm:[43,4,1,""],mode:[43,4,1,""],mul:[43,4,1,""],multinomial:[43,4,1,""],multiprocessing:[21,0,0,"-"],mv:[43,4,1,""],mvlgamma:[43,4,1,""],narrow:[43,4,1,""],ne:[43,4,1,""],neg:[43,4,1,""],nn:[22,0,0,"-"],nonzero:[43,4,1,""],norm:[43,4,1,""],normal:[43,4,1,""],numel:[43,4,1,""],ones:[43,4,1,""],ones_like:[43,4,1,""],onnx:[36,0,0,"-"],optim:[37,0,0,"-"],orgqr:[43,4,1,""],ormqr:[43,4,1,""],pinverse:[43,4,1,""],pow:[43,4,1,""],prod:[43,4,1,""],qr:[43,4,1,""],rand:[43,4,1,""],rand_like:[43,4,1,""],randint:[43,4,1,""],randint_like:[43,4,1,""],randn:[43,4,1,""],randn_like:[43,4,1,""],randperm:[43,4,1,""],range:[43,4,1,""],reciprocal:[43,4,1,""],remainder:[43,4,1,""],renorm:[43,4,1,""],repeat_interleave:[43,4,1,""],reshape:[43,4,1,""],rfft:[43,4,1,""],roll:[43,4,1,""],rot90:[43,4,1,""],round:[43,4,1,""],rsqrt:[43,4,1,""],save:[43,4,1,""],seed:[43,4,1,""],set_default_dtype:[43,4,1,""],set_default_tensor_type:[43,4,1,""],set_flush_denormal:[43,4,1,""],set_num_interop_threads:[43,4,1,""],set_num_threads:[43,4,1,""],set_printoptions:[43,4,1,""],set_rng_state:[43,4,1,""],sigmoid:[43,4,1,""],sign:[43,4,1,""],sin:[43,4,1,""],sinh:[43,4,1,""],slogdet:[43,4,1,""],solve:[43,4,1,""],sort:[43,4,1,""],sparse_coo_tensor:[43,4,1,""],split:[43,4,1,""],sqrt:[43,4,1,""],squeeze:[43,4,1,""],stack:[43,4,1,""],std:[43,4,1,""],std_mean:[43,4,1,""],stft:[43,4,1,""],sum:[43,4,1,""],svd:[43,4,1,""],symeig:[43,4,1,""],t:[43,4,1,""],take:[43,4,1,""],tan:[43,4,1,""],tanh:[43,4,1,""],tensor:[43,4,1,""],tensordot:[43,4,1,""],topk:[43,4,1,""],trace:[43,4,1,""],transpose:[43,4,1,""],trapz:[43,4,1,""],triangular_solve:[43,4,1,""],tril:[43,4,1,""],tril_indices:[43,4,1,""],triu:[43,4,1,""],triu_indices:[43,4,1,""],trunc:[43,4,1,""],unbind:[43,4,1,""],unique:[43,4,1,""],unique_consecutive:[43,4,1,""],unsqueeze:[43,4,1,""],var_mean:[43,4,1,""],where:[43,4,1,""],zeros:[43,4,1,""],zeros_like:[43,4,1,""]},torchvision:{get_image_backend:[45,4,1,""],set_image_backend:[45,4,1,""]}},objnames:{"0":["py","module","Python module"],"1":["py","class","Python class"],"2":["py","method","Python method"],"3":["py","attribute","Python attribute"],"4":["py","function","Python function"],"5":["std","envvar","environment 
variable"]},objtypes:{"0":"py:module","1":"py:class","2":"py:method","3":"py:attribute","4":"py:function","5":"std:envvar"},terms:{"00000e":43,"0000e":[42,43],"041m":1,"048m":1,"0545e":42,"0949e":42,"10k":44,"10x7":22,"13x12":22,"1428e":43,"148m":1,"1921e":43,"1_batch_16":41,"1e6":37,"1hr":4,"1st":[15,26],"1x1":46,"20l":22,"224x224":46,"2gb":17,"2nd":[15,22,23,26,42],"2x3":[22,38],"32x4d":46,"32x8d":46,"3493e":43,"3842e":43,"3rd":[26,37,42],"3x4":22,"3xhxw":41,"4064e":43,"427l":44,"483m":1,"4842e":42,"4th":[26,44],"4us":1,"50k":44,"50x":46,"54_":41,"5751e":43,"5765e":42,"5955e":43,"5c106cde":[17,20],"5mb":46,"5x2":38,"5x7":22,"5x7x9":22,"60k":44,"640l":44,"6503e":43,"6531e":43,"727m":1,"7x7":22,"7x7x7":22,"7x9x8":22,"8000e":43,"816u":1,"8182e":42,"88131e":43,"9073e":[22,43],"abstract":[13,15],"boolean":[1,8,15,19,22,23,29,37,40,42,43,44,47],"break":[4,15,19,34,37,43],"byte":[8,15,19,37,39,42,43],"case":[1,2,8,13,14,17,21,22,23,25,26,27,28,30,32,34,36,37,38,41,42,43,44,47,49],"catch":19,"char":[39,42],"ciss\u00e9":22,"class":[1,8,13,14,15,19,21,22,23,29,30,32,33,34,36,37,38,39,40,41,42,43,44,46,47,49],"const":[31,36],"default":[1,3,7,8,14,17,20,21,22,23,24,25,27,28,29,30,35,36,37,38,39,41,42,43,44,46,47,48,49],"enum":[14,36],"export":[1,8,14,16,19,22,29,36],"final":[14,15,22,43,46,47,48],"float":[1,13,15,19,21,22,23,30,33,36,37,39,40,41,42,43,47,48,49],"function":[3,4,7,8,13,17,18,20,21,24,25,26,27,28,29,30,31,33,37,40,41,42,43,44,45],"herv\u00e9":22,"import":[1,4,5,7,8,13,14,19,21,22,25,28,29,30,31,32,33,36,37,41,42,43,44,46,47],"int":[8,13,14,15,19,21,22,23,35,36,37,38,39,40,41,42,43,44,47,48,49],"j\u00e9gou":22,"long":[4,5,13,21,22,23,26,29,30,32,36,39,40,41,42,43],"new":[1,3,5,8,13,14,15,17,19,21,22,25,28,29,31,32,35,37,39,41,42,43],"return":[0,1,3,7,8,13,14,15,16,17,19,20,21,22,23,24,27,28,29,31,35,36,37,38,39,40,42,43,44,46,47,49],"short":[19,22,23,26,39,40,42,43,47],"static":[1,19,31,36,39],"super":[13,19,22,29,36],"switch":[9,10,13,21,23,25,42,43,46],"throw":[22,42,43],"true":[1,3,7,8,11,13,14,15,17,19,20,21,22,23,25,26,28,29,30,31,33,36,37,38,39,40,41,42,43,44,46,47,48],"try":[2,4,11,14,17,19,22,23,30,32,36,37],"var":[1,22,42,43],"void":[31,43],"while":[5,13,14,15,19,22,23,25,30,32,37,41,42,43,47],Abs:36,And:[22,35,43,47],But:[1,4,19],For:[1,2,3,4,5,7,8,13,14,15,17,19,22,23,25,26,27,28,30,32,36,37,38,39,40,41,42,43,44,46,47],Going:46,Has:[22,23,43],Its:[22,37],NFS:14,NOT:[19,36,38,43],Not:[19,29],One:[14,19,22,23,26,27,31,33,36,37,41,43,44,46],Ops:[2,28,42],PRs:[4,5],RHS:43,Such:[7,13,43],That:[43,47],The:[1,3,5,7,8,13,14,15,16,17,19,20,21,22,23,24,26,27,28,30,31,32,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49],Then:[1,26,34,36,37,47],There:[1,4,5,14,17,19,22,25,28,29,30,31,32,33,34,35,36,42,43],These:[7,13,14,15,19,22,29,36,38,40,43,44,46],Use:[8,13,14,22,23,32,41,42,43,47],Used:[13,43],Useful:[8,22],Uses:8,Using:[13,15,19,22,32],WITH:36,Will:[6,14,19,43,47],With:[13,15,22,23,28,36,37,41],__background__:46,__call__:47,__config__:[18,27],__constants__:19,__dict__:37,__file__:[17,35],__getitem__:[13,44],__init__:[1,13,15,19,22,29,30,36,41],__iter__:13,__len__:[13,44],__main__:[13,26,32,35],__name__:[13,32,35],__new__:19,_back:[22,23],_bottom:[22,23],_call:15,_cat:15,_channel:[22,23],_class:22,_compilation_unit:19,_cpp_modul:19,_dependentproperti:15,_dim:22,_direct:22,_ext:35,_extra_fil:[19,31],_factor:22,_featur:[22,23],_fft:43,_force_outplac:19,_fork:27,_formatt:43,_forward_cl:1,_frames_up:19,_front:[22,23],_glibcxx_use_cxx11_abi:43,_greaterthan:15,_greaterthaneq:15,_halfopeninterv:
15,_if_scalar_type_a:36,_in:24,_index:22,_indic:[38,43],_instanc:15,_integerinterv:15,_interv:15,_invers:15,_key_padding_mask:22,_layer:22,_left:[22,23],_length:[22,43],_lessthan:15,_like:42,_load_from_state_dict:22,_log_api_usage_onc:31,_mask:22,_metadata:22,_module_class:19,_nnz:38,_onnx_master_opset:36,_onnx_stable_opset:36,_out:24,_pad:22,_qualified_nam:19,_random_sampl:22,_rcb:19,_resnet18:17,_retain_param_nam:36,_right:[22,23],_sampl:43,_scalar:36,_shape:22,_size:22,_slope:[22,23,24],_stack:15,_stacklevel:23,_tensor:42,_top:[22,23],_valu:[22,38,43],_wait:27,_weight:22,a3c:32,a_big:43,a_dict:19,a_i:22,a_l:43,a_lu:43,a_tupl:19,a_u:43,aaa:41,abc:22,abi:7,abil:[5,31],abl:[4,19,22,36,43],abnorm:[21,32],about:[1,5,8,13,19,22,27,29,30,31,32,36,42,47],abov:[1,15,17,19,22,26,27,28,29,36,37,43,44,47],abridg:30,abruptli:21,abs:[15,22,23,36,37,42,43],abs_:42,absolut:[1,5,7,22,23,42,43,47],abstransform:15,acc:46,acceler:[4,22,37],accept:[1,4,5,14,19,22,29,36,37,40,41,42,43],access:[5,13,14,21,22,25,28,30,31,40,42,49],accident:4,accimag:45,accommod:22,accompani:4,accomplish:4,accord:[22,24,35,36,37,43,44,46],accordingli:[42,44,46],accoridng:22,account:[2,22],accumul:[1,19,22,30,42,43],accumulategrad:1,accur:[8,36,43],accuraci:[41,46],achiev:[13,15,22,23,31,36],aco:[36,42,43],acos_:42,acquaint:4,across:[1,8,13,14,19,22,23,28,30,31,32,33,39,41,42,43,44],act:[15,22,47],action:[5,15,28,44],activ:[1,3,4,8,21,28,29],actual:[1,13,17,19,22,25,27,28,29,32,35,36,37],actual_input_1:36,acycl:25,adadelta:37,adagrad:[22,37],adam:[5,6,15,37],adamax:37,adamw:37,adapt:[19,22,23,37,43],adaptive_avg_pool1d:36,adaptive_avg_pool2d:36,adaptive_avg_pool3d:36,adaptive_max_pool1d:36,adaptive_max_pool2d:36,adaptive_max_pool3d:36,adaptiveavgpool1d:23,adaptiveavgpool2d:23,adaptiveavgpool3d:23,adaptivelogsoftmaxwithloss:19,adaptivemaxpool1d:23,adaptivemaxpool2d:23,adaptivemaxpool3d:23,add:[1,4,8,17,19,22,23,26,29,31,36,37,38,41,42,43,46],add_:[1,26,38,42],add_argu:28,add_audio:41,add_bias_kv:22,add_custom_scalar:41,add_embed:41,add_figur:41,add_graph:41,add_histogram:41,add_imag:41,add_mesh:41,add_modul:22,add_param_group:37,add_pr_curv:41,add_scalar:41,add_text:41,add_video:41,add_zero_attn:22,addbmm:[42,43],addbmm_:42,addcdiv:[42,43],addcdiv_:42,addcmul:[42,43],addcmul_:42,added:[4,7,22,23,31,36,37,38,41,42,43],adding:[13,14,17,19,22,29,36,42,43],addit:[1,4,5,7,15,19,22,23,27,28,29,31,32,33,35,37,38,42,43],addition:[1,13,14,15,22,30,42,43,47],additionali:22,addmm:[36,38,42,43],addmm_:42,addmv:[42,43],addmv_:42,addr:[42,43],addr_:42,address:[1,13,14,21,42,46],adher:5,adjac:[22,43],adjust:[22,47],adjust_bright:47,adjust_contrast:47,adjust_gamma:47,adjust_hu:47,adjust_satur:47,admit:28,adopt:5,advanc:[3,22,25,32,36,41],advantag:[14,22,30],adventur:36,adversari:22,advic:4,advis:[32,43],advisori:4,aeroplan:46,affect:[1,4,8,22,23,39,43],affin:[15,22,23,25,42,47],affinetransform:15,aforement:32,after:[4,7,8,13,14,17,19,21,22,24,28,30,31,32,34,37,40,41,42,43,47],afterward:[1,22],again:[3,13,14,43,44],against:[1,2,14,19,43,47],aggreg:[22,23,46],aggress:[1,25],ahead:4,aid:[4,25],aidan:22,ail:6,ailzhang:6,aim:4,airplan:46,aka:1,akin:31,alban:6,alband:6,alex:6,alfredo:6,algorithm:[4,11,12,15,22,23,43],alia:[15,42],alias:29,alican:6,alicanb:6,align:[22,23],align_corn:[22,23],aliv:[30,32],all:[1,3,4,5,7,8,13,14,15,17,19,21,22,23,25,27,28,29,30,32,33,35,36,37,38,39,40,41,42,43,44,46,47,48],all_gath:14,all_gather_multigpu:14,all_reduc:14,all_reduce_multigpu:14,allclos:[1,42,43],alloc:[1,2,8,15,21,25,28,30,40,42,43],allow:[1,4,5,7,13,14,15,1
7,19,22,25,26,27,28,31,32,36,37,40,41,42,43,44],allow_unreach:1,allow_unus:1,almost:[35,43,44],alon:19,along:[7,8,13,14,15,17,19,22,23,26,30,31,37,42,43],alpha:[15,22,23,36,37,38,42,43],alpha_f:36,alphabet:[23,43,44],alphadropout:23,alreadi:[8,13,14,17,19,20,22,29,32,36,37,39,42,43,44],also:[1,3,4,5,7,8,13,14,15,17,19,21,22,23,24,25,27,28,29,30,31,32,33,35,36,37,38,41,42,43,44,47],altern:[13,17,19,22,23,35,43],although:[4,15,22],alwai:[1,8,13,14,19,21,22,26,27,28,29,31,36,38,40,42,43],amazonaw:[20,35],ambigu:[15,22],among:[8,13,14,15,22,36,43],amount:[1,2,4,8,22,25,28,30,47,48],amplitud:37,amsgrad:37,an_error:19,anaconda:35,analog:[37,43],analogu:19,analyt:[1,15],anchor:[22,23],angl:[22,47],ani:[1,2,3,4,5,8,13,14,15,19,21,22,23,25,27,28,29,31,32,33,36,37,41,42,43,47],anm:43,ann_fil:44,anneal:37,annfil:44,annot:[1,19,44],annotation_path:44,anoth:[4,8,13,14,19,22,27,28,29,32,35,36,42,43],another_input:22,anothermodel:36,answer:[4,5,22],anticip:3,anymor:[1,14,22,42],anyon:5,anyth:[3,4,14,19],aoa:35,apaszk:[5,6],api:[1,5,8,17,19,21,28,36,38,41,42,44],aplli:47,appear:[2,14,15,22,29,37,43],append:[1,14,19,22,32,35,41,42,43],appl:46,appli:[1,3,14,15,19,21,22,23,25,29,36,37,42,43,47],applic:[8,14,15,22,25,27,28,42,47],apply_:42,apprear:43,appreci:4,approach:[14,19,21,22,43],appropri:[4,14,15,19,22,43,46],approv:5,approxim:[1,22,23,29,37],arang:[13,22,23,36,41,42,43],arbitrari:[1,14,19,22,23,25,31,42,43],arccosin:43,architechtur:22,architectur:[22,43,45,46],archiv:[19,31],arcsin:43,arctang:43,area:[4,5,23,47],arg0:1,arg1:1,arg:[1,2,3,7,14,15,17,21,22,23,28,32,34,36,39,42,43,44,47],arg_constraint:15,argmax:[22,36,42,43],argmin:[36,42,43],argpars:28,argsort:[42,43],argument:[1,2,3,7,8,13,14,15,17,19,21,22,23,26,28,29,30,31,36,37,39,40,41,42,43,44,48,49],argumentpars:28,ari:36,aris:15,arithmet:43,armand:22,around:[1,4,5,8,14,19,21,28,42,47],arrai:[13,22,23,36,39,41,42,43,44],arrang:44,array_lik:[42,43],art:43,articul:5,artifact:31,artifici:1,arxiv:[22,46,47],as_strid:[42,43],as_tensor:[41,42,43],as_tupl:43,asap:21,ascend:43,ascent:15,ascii:[8,43],asd932_:44,asgd:37,ashish:22,asin:[36,42,43],asin_:42,ask:[4,14,18],aspect:[4,47],assembl:13,assert:[13,15,36],assert_allclos:19,assign:[4,13,14,19,22,29,30,36,41,44],assign_x:19,associ:[1,8,19,22,23,40,42,43],assum:[13,14,15,19,22,23,29,31,36,37,43,47],assumpt:[22,47],ast_1:[22,23],ast_2:[22,23],astyp:36,asuhan:6,async:[14,39,42],async_op:14,asynchron:[2,22,27,39,41,42],atan2:[42,43],atan2_:42,atan:[36,42,43],atan_:42,aten:[19,27,35,43],aten_thread:27,atol:[1,19,29,42,43],atom:33,atomicadd:33,attach:17,attempt:[19,28,35,43],attend:22,attent:[4,22,35],attn:22,attn_mask:22,attn_output:22,attn_output_weight:22,attr1:36,attr1_f:36,attr2:36,attr2_i:36,attr:[15,22,23,36,43],attribut:[1,13,14,18,22,25,28,29,36,42,49],audio:[41,44],aug_add_x:19,augment:47,auto:[14,22,41],autoencod:15,autograd:[2,3,4,15,18,22,23,30,31,36,42,43],autograd_tensor:1,autom:[19,36],automat:[7,8,14,22,25,26,28,29,32,36,41,42,43],aux_logit:46,aux_loss:46,auxiliari:[17,31,46],avaialbl:17,avail:[7,8,13,14,17,19,21,22,23,28,29,35,36,43,44],averag:[1,14,22,23,37],avg:[1,47],avg_pool1d:36,avg_pool2d:36,avg_pool3d:36,avgpool1d:23,avgpool2d:23,avgpool3d:23,avmgithub:6,avoid:[5,13,15,22,23,30,33,41,42,43,47],awai:23,awar:[4,46],axbc:22,axes:36,axi:[36,42,43,47],b_hf:22,b_hg:22,b_hh:22,b_hi:22,b_hn:22,b_ho:22,b_hr:22,b_hz:22,b_if:22,b_ig:22,b_ih:22,b_ii:22,b_in:22,b_io:22,b_ir:22,b_iz:22,back:[17,32,36,38,43,47],backbon:46,backcompat:26,backend:[1,9,10,11,19,22,23,27,28,33,36,42,43,45],backend_str:14,
background:[9,10,11,22,23,32,42,43,44],backpack:46,backprop:43,backpropag:[1,15,30,37],backward:[1,3,5,10,15,22,23,24,29,30,32,33,37,38,42,43],bad:21,baddbmm:[42,43],baddbmm_:42,bag:[22,23],bai:6,balanc:[43,44],ball:46,balnta:22,banana:46,bar:[4,19,20,46],bare:7,barrier:14,bartlett:43,bartlett_window:43,base:[1,4,5,7,8,13,15,19,22,25,27,36,37,41,42,43,44,47],base_distribut:15,base_lr:37,base_momentum:37,base_se:13,basebal:46,basedistribut:15,basep:15,baseq:15,basi:[15,31,37],basic:[4,22,37,41],bat:46,batch1:[42,43],batch2:[42,43],batch:[15,22,23,28,30,32,33,36,37,41,43,44,46,47,48],batch_first:[22,30],batch_ndx:13,batch_sampl:13,batch_shap:15,batch_siz:[13,15,22,41,44],batchmean:[22,23],batchnorm1d:23,batchnorm2d:23,batchnorm3d:23,batchnorm:[22,36],batchnormnd:22,batchsampl:13,batchsiz:[22,23],batchwis:22,bbb:41,bceloss:23,bcewithlogitsloss:23,bckenstler:37,bddppq:6,beam:19,bear:46,becaus:[1,2,4,13,15,19,21,22,26,28,30,31,35,36,41,42,43],becom:[1,4,5,13,15,22,23,36,42,43],bed:46,bedroom_train:44,been:[1,8,14,15,21,22,27,32,35,37,41,43,46],befor:[1,4,8,13,14,15,19,21,22,23,25,27,28,29,31,35,36,37,38,41,42,43],beforehand:4,begin:[4,8,22,31,36,37,42,43],behav:[7,19,42],behavior:[4,7,14,17,19,22,23,26,28,36,37,42,43,46],behaviour:[1,9,10,23,42,43],behind:44,being:[1,5,13,15,19,22,23,29,32,36,42,43,47],belong:[3,8,14,15,28,37,47],below:[1,7,13,14,15,19,21,22,23,28,29,32,35,36,43,47],ben:22,bench:46,benchmark:33,benefit:[4,14,21,37],bengio:24,bernoulli:[22,23,42,43],bernoulli_:[42,43],besid:41,bessel:43,best:[1,4,13,14,18,19,21,30,37,43],beta:[22,23,36,37,38,42,43],better:[4,5,8,13,19,22,23,27,35,41,43],between:[1,4,8,14,15,19,21,22,23,28,32,33,37,39,41,42,43,44,46,47],bewar:4,beyond:[5,30,37,43],bfg:37,bfloat16:[39,42],bia:[5,22,23,29,41],bias:[22,43],bias_hh:22,bias_hh_l:22,bias_ih:22,bias_ih_l:22,bicub:[22,23,47],bicycl:46,bidirect:[22,36],big:[4,43],bij:43,biject:15,biject_to:15,bik:43,bilinear:[43,47],bin:[41,42,43],binari:[15,19,22,23,31,35,36,41,42,43,44,46],bincount:[33,42,43],bind:[7,8,36],bird:46,bit:[4,35,40,42,43,49],bitwis:[14,43],bitwise_not:[42,43],bitwise_not_:42,bjk:43,bl_flip:47,bla:27,black:47,blackman:43,blackman_window:43,blank:[22,23],blob:[31,36,41],blobnam:41,block0:[19,36],block1:19,block:[4,8,13,14,19,21,22,23,36,46],blog:4,blow:30,blue:44,bmm:[42,43],board:5,boat:46,bodi:19,boil:4,book:46,bool:[1,3,8,13,14,15,17,19,20,21,22,23,36,37,39,40,41,42,43,44,46,47,48],booltensor:[40,42],bootcamp:4,bootstrap:35,border:[23,47],both:[1,8,13,14,15,19,22,23,26,29,33,36,38,42,43,44,46,47],bottl:46,bottleneck:[18,46],bottom:[1,23,47],bound:[2,22,23,24,34,37,42,43],boundari:[22,23,37,44],bowl:46,box:46,bozkurt:6,bptt:30,br_flip:47,branch:[4,17,19,46],brand:14,bregman:15,breviti:[1,36],brief:21,bright:[44,47],brightness_factor:47,broadcast:[8,14,15,18,22,36,42,43],broadcast_buff:22,broadcast_coalesc:8,broadcast_multigpu:14,broadcast_tensor:43,broadcast_warn:26,broader:[5,31,43],broccoli:46,broken:4,brokenpipeerror:35,brown:44,bucket:22,bucket_cap_mb:22,buf:22,buffer:[1,2,8,13,19,22,25,29,43],buffer_s:8,bug:[5,32],bugfix:4,build:[7,14,15,19,23,25,41,47],build_directori:7,build_ext:7,buildextens:7,built:[4,14,19,27,32,37,43],builtin:43,bulk:13,bump:22,bundl:31,bus:46,byclass:44,bymerg:44,bypass:28,byte_arrai:43,bytecod:13,bytesio:[19,43],bytetensor:[8,22,40,42,43],bz2:44,c10:31,c10_log_api_usage_onc:31,c99:35,c_0:22,c_1:22,c_j:22,c_n:22,c_t:22,cach:[8,15,20,21,22,30,43,46],cache_s:15,caffe2:[36,41],cake:46,calcul:[1,3,13,22,23,26,35,37,43],calculate_gain:24,call:[1,7,8,13,14,15,17,21,22,2
3,27,28,29,30,31,32,35,36,37,40,41,42,43,46,48,49],callabl:[13,15,17,19,37,42,43,44],callback:31,caller:28,camera:[31,41],can:[1,2,3,4,5,7,8,11,12,13,14,15,16,17,19,21,22,23,25,26,27,28,29,30,31,32,33,34,35,36,37,38,40,41,42,43,44,46,47,49],candid:1,cannot:[1,13,15,17,19,22,23,35,38,39,42,43],cap:44,capabl:[8,14,31,43],capac:28,capacit:15,captur:[8,19,36],car:46,card:35,cardin:15,care:[4,7,15,21,22,28,30,32,38,43],carlo:15,carri:26,carrier:15,carrot:46,cartesian:[15,43],cartesian_prod:43,cast:[1,22,23,36,39,42,43],cat:[15,19,22,36,38,42,43,44,46],categor:[4,23],categori:[15,43,44,46],categorynam:41,cattransform:15,cauchi:[42,43],cauchy_:[42,43],caught:21,caus:[1,3,13,14,19,21,23,26,30,32,35,36,42,43],caveat:[21,28],ccc:41,cdf:15,cdot:[22,23,43],ceil:[13,22,23,36,42,43],ceil_:42,ceil_mod:[22,23],cell:[22,46],center:[23,37,41,42,43,47],center_flip:47,centercrop:47,central:[31,47],cerr:31,certain:[13,14,19,22,23,26,31,38,43],certainli:43,chain:[1,13,15,22,25,42,43,47],chain_matmul:43,chaindataset:13,chair:46,challeng:4,chanan:[5,6],chanc:[4,15],chang:[1,4,8,15,19,21,22,23,25,26,28,35,36,37,38,39,41,42,43,46,47],channel:[5,13,22,23,24,36,41,44,46,47],charact:[23,43],chart:41,chartensor:[40,42],chartnam:41,cheap:[15,22],cheaper:13,check:[2,7,8,13,14,15,17,22,29,30,31,36,37,41,42,43,44],check_compiler_abi_compat:7,check_input:19,check_model:36,check_reduct:22,check_sparse_nnz:1,check_toler:19,check_trac:19,checker:[19,36],checkout:36,checkpoint:[1,17,18,20,22,43],checkpoint_sequenti:3,child:[13,21,22,35],children:[21,22],chintala:[5,6],choic:[19,22,27,36],choleski:[15,42,43],cholesky_invers:[42,43],cholesky_solv:[42,43],choos:[1,24,41],chosen:[43,47],christian:6,chrome:1,chunk:[3,8,13,19,22,42,43],chunk_siz:8,church_train:44,chw:41,cifar100:44,cifar10:44,cifar:45,circleci:4,circular:23,circumst:[11,19,22,23],cityscap:45,claim:4,clamp:[23,36,42,43],clamp_:42,clamp_max:36,clamp_min:36,class_i:44,class_index:[13,44],class_x:44,classif:[22,23,24,44,45],classifi:[25,36,37,41],classmethod:[8,22],clean:[8,14,17,21],cleaner:25,clear:[17,22,28,37],click:43,clip:[22,44],clip_valu:22,clock:46,clockwis:47,clone:[1,13,21,23,38,39,42,43],cloned_coeffici:43,close:[8,29,41],closest:[23,43],cloud:41,clp:44,clr:[37,43],cluster:[22,41],clutter:41,cmake:35,cmake_gener:35,cmake_include_path:35,cmdclass:7,cmyk:47,cnn:[22,25],coalesc:[8,38,42],coars:44,coco:[45,46],coco_instance_category_nam:46,coco_person_keypoint_nam:46,coco_util:46,cococapt:44,cocodetect:44,code:[1,2,5,7,13,14,15,17,22,26,29,30,31,32,33,35,36,37,38,40,42,43],codebas:5,codec:43,codomain:15,coeffici:[37,43],cohes:5,col2im:22,col:[43,44],colesburi:[5,6],collat:13,collate_wrapp:13,collect:[1,4,13,37,41,43,44],color:[22,41,44,47],colorjitt:47,colors_tensor:41,column:[1,22,23,24,42,43,47],com:[4,5,20,35,36],combin:[13,19,22,23,28,36,43],combinations_with_replac:43,come:[4,13,22,31,44],comm:8,comma:[14,43],command:[1,2,35],comment:[4,19,29,41],commit:[4,5,17,33],committ:5,common:[13,22,28,30,32,43,44,45,47],commonli:[14,15,37,40],commun:[4,5,18],compani:5,compar:[1,3,13,19,22,29,35,41,43],comparison:29,compat:[7,13,15,19,21,39,42,43,44],compil:[7,19,27,31,35],compilationunit:19,compiled_with_cxx11_abi:43,complementari:[43,47],complet:[4,8,14,21,25,33,43,47],complex:[4,22,32,43,47],complic:[2,26],compon:[4,14,15,22,31,43],compos:[15,19,22,23,36,41,43,47],composetransform:15,composit:[15,19],compris:3,comput:[3,4,8,13,14,15,19,22,23,25,27,28,29,30,33,36,37,38,42,45,46,47,48],compute_uv:[42,43],compute_z:27,concat:[22,36],concatdataset:13,concaten:[7,8,13,22,
23,43],concentr:15,concentrarion:15,concentration0:15,concentration1:15,concept:[4,36,40],conceptu:[1,25],concern:[13,21],concret:[15,19,22,23,32],concurr:[27,28],cond:36,conda:[35,36,43],condit:[1,12,19,22,29,36,41,42,43],condition:1,conduct:[5,22],confer:5,confid:[4,41],config:35,config_dict:41,configur:[0,4,13,14,22,28,35,41,43,46],confirm:[4,19,36],conform:22,conjug:[37,43],conjunct:[13,23],connect:[14,21,22,25,46],connectionist:[22,23],conquer:43,consecut:[14,42,43],consensu:4,consid:[17,19,22,23,26,29,30,37,42,43,44],consider:[4,22],consist:[13,19,36,37,43,45],consol:41,constant:[13,22,23,29,36,37,43,47],constant_:24,constantpad2d:23,constantpadnd:36,constrain:[15,22],constraint:22,constraint_registri:15,constraintregistri:15,construct:[1,13,15,22,25,32,38,40,41,42,43,46],construct_transform:15,constructor:[7,13,22,28,38,42,46,49],consum:[13,16,21,32,36,41],consumpt:[1,41],contain:[1,3,7,8,13,14,15,19,23,25,29,30,31,36,37,39,40,41,42,43,44,46,47],content:[4,19,20,21,37,41,42,43],contenti:5,context:[1,8,21,22,28,29,31,32,36,43],contigu:[22,23,39,42,43],continu:[13,15,19,22,36,42,43],continuum:35,contract:43,contrail:44,contrain:43,contrari:[4,27],contrast:[15,37,46,47],contrast_factor:47,contribut:[1,5,18,22,23,43],contributor:[4,5],control:[13,19,22,25,27,28,32,36,43,47],conv1:[19,22,41],conv2:[19,22],conv2d:[19,36,41],conv4:22,conv5:22,conv:[19,22,24,36],conveni:[4,7,17,19,28,29,31],convent:[1,20,22,36,42,43],converg:37,convers:[4,25,36,42,45],convert:[1,13,19,22,29,36,41,42,43,47],convert_sync_batchnorm:22,convolut:[24,27],convolv:[22,23],convtranspos:22,convtranspose1d:23,convtranspose2d:23,convtranspose3d:23,coo:[38,40,42,43],cooldown:37,coordin:[4,15,38,41,42,43,47],cope:32,copi:[4,8,13,14,19,21,22,26,28,32,39,42,43],copy_:[1,19,22,28,39,42],core:[4,19,27,36],corner:[22,23,47],corpor:[4,5],correct:[2,4,14,15,19,22,39,42,43,47],correctli:[3,14,19,22,23,32,36],correl:[1,15,22],correspond:[1,4,8,13,15,17,19,22,23,29,31,36,37,39,41,42,43,47],corrupt:[22,32,44],cos:[22,36,37,41,42,43],cos_:42,cosh:[42,43],cosh_:42,cosin:[22,23,37,43],cosineannealinglr:37,cosineembeddingloss:23,cost:[1,2,11,22,23,43],couch:46,could:[2,4,8,13,15,21,35,43],couldn:[35,36],count:[1,8,15,43],count_include_pad:[22,23],counter:[1,8,21,22,25,47],counterpart:43,coupl:[31,33],cours:[2,17,37],courtesi:15,cov_diag:15,cov_factor:15,covari:[15,22,47],covariance_matrix:15,cover:[29,31,44],coverag:4,cow:46,cpp:[4,5,7,43],cpp_extens:[18,29],cpp_sourc:7,cppdoc:4,cppextens:7,cprofil:2,cpu:[1,2,8,14,18,19,21,22,23,28,32,33,35,36,39,40,42,43],cpu_model:19,cpu_tim:1,cpu_time_tot:1,cpuhrsch:6,crack:4,crash:[21,41],crcv:44,creat:[1,3,4,7,8,13,14,15,21,22,25,28,32,36,39,41,42,43,44,49],create_extens:35,create_graph:[1,42],creation:[1,8,13,19,21,22,28,42,44],creator:25,crelu:22,criterion:[22,23,30],critic:22,crop:[46,47],cross:[4,15,22,23,28,35,42,43],crossentropyloss:23,crossmaplrn2d:19,csrc:[35,36],ctc_loss:[22,33],ctcloss:23,ctx:[1,29],cube:[22,43],cubla:8,cublashandle_t:8,cuda0:[28,42],cuda100:35,cuda101:35,cuda1:40,cuda2:28,cuda80:35,cuda90:35,cuda92:35,cuda:[1,2,3,7,9,10,11,13,14,18,19,22,23,29,33,36,37,39,40,42,43,46],cuda_extens:7,cuda_hom:7,cuda_launch_block:28,cuda_prefix:35,cuda_runtim:7,cuda_sourc:7,cuda_tim:1,cuda_time_tot:1,cuda_visible_devic:[8,28],cudaev:1,cudaeventsynchron:8,cudaextens:7,cudart:[7,35],cudastreamsynchron:8,cudastreamwaitev:8,cudnn:[11,12,22,23,46],cufft:43,cufft_plan_cach:28,cuh:7,cultur:5,cumprod:[42,43],cumsum:[42,43],cumul:[15,22,23,43],cup:46,cur:37,curl:35,current:[1,3,5,7,8,13,14,19,21,22,
[... minified contents of docs/1.1.0/searchindex.js — Sphinx-generated full-text search index (stemmed term → document-index mapping) for the archived v1.1.0 documentation; machine-generated, reproduced verbatim in the patch ...]
,underscor:[17,38,42],understand:[4,5,22,24,25,41],understood:43,undertak:4,underwai:1,undesir:[11,22,23],undetermin:33,unequ:22,unexpect:[14,19,22],unexpected_kei:22,unexpectedli:42,unfold:[19,36,42],unfortun:[1,3,5,22],unicodedecodeerror:43,uniform:[22,24,42,43],uniform_:[24,29,42,43],uniformli:[15,43,47],uniniti:[42,43],uniqu:[14,19,20,42,43],unique_consecut:[42,43],unit:[22,23,43],unit_interv:15,unitari:43,unitriangular:[42,43],univari:15,univers:19,unix:[13,21],unlabel:44,unless:[1,2,4,13,14,22,25,28,42,43],unlik:[4,19,21,22,32,42],unmask:22,unnecessari:28,unnorm:[22,23],unnot:21,unoccupi:8,unord:22,unpack:[22,29,30,43],unpack_data:43,unpack_pivot:43,unpickl:[13,43],unpool:22,unpooled_output:22,unreduc:22,unrel:4,unresolv:35,unrol:[19,36],unseg:22,unsign:[40,42],unsort:22,unsorted_indic:22,unspecifi:[14,42,43],unsqueez:[22,29,36,41,42,43],unsqueeze_:42,unstabl:[15,23,43],unsuccess:14,unsupport:19,until:[4,8,14,21,23,25,28,30,41],untouch:13,untrack:19,untrain:36,unus:[8,22,28],unused_argument1:22,unused_argument2:22,unusu:4,upcal:19,upcom:14,updat:[1,5,17,19,22,32,35,36,37,41,42],upgrad:37,upon:[13,21],upper:[15,22,23,24,37,42,43,47],upper_bound:15,uppercas:14,ups:4,upsample_nearest1d:36,upsample_nearest2d:36,upsample_nearest3d:36,upsample_trilinear:23,upscal:22,upscale_factor:[22,23],upstream:35,url:[1,14,17,20],usa:41,usag:[1,2,4,8,13,15,25,30,36,41,42,43],use:[1,3,4,5,7,8,13,15,17,19,21,22,23,24,25,27,28,29,30,31,32,33,35,36,41,42,43,44,46],use_cuda:1,use_gpu:19,use_input_stat:23,use_mkldnn:27,use_openmp:27,use_tbb:27,used:[1,2,4,7,8,12,13,14,15,17,19,20,22,23,24,27,28,29,31,32,34,35,36,37,38,40,41,42,43,44,45,46,47,48],useful:[1,4,13,15,17,19,22,23,25,27,29,31,37,43,47],user:[1,3,8,13,14,15,17,21,22,28,31,32,36,41,42,43,47],userwarn:[26,36],uses:[1,2,8,13,14,17,19,22,23,27,28,29,30,31,35,37,43,44,45],using:[1,3,4,5,7,9,10,11,13,14,15,17,18,19,21,22,23,24,25,27,28,29,30,31,32,33,35,36,37,41,42,43,44,46,47],usp:45,usual:[1,4,7,13,19,22,27,30,31,36,41,42,43],uszkoreit:22,util:[4,8,18,26,27,28,29,30,31,36,37,44,45,46],v100:[12,22,46],v_1:22,v_2:22,val2017:46,val:[24,42,44],val_loss:37,valid:[1,14,15,19,22,36,37,43,44],valid_fil:44,validate_arg:15,valu:[1,3,4,5,7,13,14,15,17,20,21,22,23,24,25,27,28,29,30,33,36,37,38,41,42,43,44,46,47,48],valueerror:22,var1:37,var2:37,var_mean:43,vari:[22,37],variabl:[3,7,8,15,17,20,22,27,28,30,33,35,36,37,41,42,43,46],variabletyp:36,varianc:[15,22,24,33,37,43],variant:[31,37,43],variat:15,variou:[3,5,7,13,21,32,34,37],vase:46,vaswani:22,vc2017:35,vdim:22,vec1:[42,43],vec2:[42,43],vec:[22,42,43],vector:[1,15,22,23,41,42,43,44,47],vehicl:5,vein:19,veloc:37,verbos:[7,36,37,41],veri:[1,2,4,19,21,22,25,29,30,32,35,36,37,46],verifi:[7,19,20,29,36],verify_ninja_avail:7,versa:[22,39,42,43],version:[3,15,17,19,22,23,25,26,28,29,31,35,36,37,42,43,44,47],versu:[4,22],vert:[22,23],vertex:41,vertic:[41,47],vertical_flip:47,vertices_tensor:41,vflip:47,vgg11:46,vgg11_bn:46,vgg13:46,vgg13_bn:46,vgg16:46,vgg16_bn:46,vgg19:46,vgg19_bn:46,vgg:36,via:[1,4,7,8,13,14,15,19,21,22,24,30,32,36,37,40,43],vice:[22,39,42,43],vid_tensor:41,video:[22,41,44],videoclip:44,viehmann:6,view:[1,4,5,13,19,21,22,23,26,36,40,42,43,44,47],view_a:42,violat:5,virtual:36,vishwak:6,vishwakftw:6,visibl:[8,14,22,46],vision:[5,17,45,46],visual:[1,22,35,41],vitali:6,vitalyfedyunin:6,voc2012:44,voc:[45,46],vocdetect:44,vocsegment:44,volumetr:[22,23],vs2017:35,vs2017_runtim:35,vw_i:22,vychisl:43,w_hf:22,w_hg:22,w_hi:22,w_hn:22,w_ho:22,w_hr:22,w_hz:22,w_if:22,w_ig:22,w_ii:22,w_in:22,w_io:22,w_ir:22,w_iz:
22,w_j:22,w_n:22,w_y:27,w_z:27,wai:[1,3,4,5,7,13,14,15,19,21,22,23,29,30,32,33,34,35,36,37,38,42,43,44,46],wait:[1,8,14,21,22,27,37],wait_ev:8,wait_stream:[8,28],walltim:41,wang:6,want:[4,8,13,14,17,19,22,23,25,28,29,36,37,38,42,43],warm:37,warmup:1,warn:[26,36,43],wasn:43,wast:4,weaker:15,weight:[13,17,19,23,24,25,29,30,36,37,41,42,43,46],weight_decai:37,weight_g:22,weight_hh:22,weight_hh_l:22,weight_ih:22,weight_ih_l:22,weight_u:22,weight_v:22,weighted_kernel_sum:19,weightedrandomsampl:13,weird:[22,46],welcom:4,well:[1,4,7,13,14,19,22,23,25,32,33,36,38,41,43,46],were:[1,14,19,22,29,36,38,42,43],what:[1,3,4,5,15,17,19,22,25,29,36,37,38,44],whatev:[42,43],wheel:35,when:[1,2,4,5,7,8,9,10,11,13,14,15,17,19,21,22,23,25,26,28,29,30,31,32,33,34,35,36,37,38,41,42,43,44,46,47],whenev:[5,21,22,23],where:[1,2,4,5,7,13,14,15,19,20,21,22,23,24,25,26,28,33,36,37,38,40,41,42,43,44,46,48],wherev:5,whether:[1,4,5,7,13,14,15,17,20,22,23,28,29,36,37,38,39,41,42,43,44],which:[1,2,4,5,7,8,13,15,17,19,20,21,22,23,25,26,27,28,30,31,33,35,36,37,38,39,40,41,42,43,44,46,47,49],whilst:[15,28],white:47,whiten:47,who:4,whole:[13,14,22,32],whose:[13,15,22,25,36,41,43,44],why:[4,36],wide:27,wide_resnet101_2:46,wide_resnet50_2:46,width:[15,22,23,36,43,47],wikipedia:23,willing:5,win:43,win_length:[42,43],window:[13,18,22,23,42,43],window_length:43,wine:46,wip:4,wise:[14,15,22,23,27,43],wish:19,wit:19,with_cuda:[7,35],with_replac:43,within:[1,3,5,8,13,14,15,19,22,27,28,31,36,41,43],without:[1,3,4,5,8,13,15,17,19,21,22,23,26,28,32,36,41,42,43,46,47,49],won:[3,17,22,23,25,29,36,43],woodburi:15,word:[1,14,19,22,23,30],word_language_model:36,work:[1,3,4,5,7,8,14,15,17,19,21,22,23,25,27,28,29,32,35,38,42,43],worker:[4,13,14,22,44],worker_id:13,worker_info:13,worker_init_fn:[13,30],workflow:17,workground:35,workload:[13,14,31],workspac:[7,36],world:[14,22],world_siz:[14,22],worth:17,would:[1,3,5,13,14,15,19,22,26,27,28,36,38,42,43],wrap:[1,13,19,22,35,37,42],wrapper:[8,14,19,21,22,29],write:[4,5,19,23,25,28,30,36,37,38,41,42,43],writer:41,written:[1,19,22,36,37,39,41,43],wrong:[32,35,37],wrote:4,www:[22,37,41,44],x86:43,x86_x64:35,x_0:43,x_1:[22,23,43],x_2:[22,23,43],x_3:43,x_clone:21,x_cpu:28,x_cpu_long:28,x_gpu:28,x_i:[22,23,43],x_j:[22,23],x_n:22,x_t:22,xavier_normal_:24,xavier_uniform_:24,xcosx:41,xdg_cache_hom:[17,20],xing:43,xml:44,xsinx:41,xxx:44,xxy:44,xxz:44,y_cpu:28,y_cpu_long:28,y_gpu:28,y_hard:23,y_i:[22,43],y_n:22,y_soft:23,yang:[5,6],ycbcr:47,year:44,yes:4,yet:[8,43],yf225:6,yield:[13,22,43],yinghai:6,you:[1,2,3,4,5,7,8,11,13,14,15,17,19,21,22,23,25,26,28,29,30,31,32,33,35,36,37,38,40,41,42,43,44,46,47],your:[1,2,4,7,8,13,14,15,19,21,22,25,26,28,29,30,31,33,35,36,37,38,41,42,43,46,47],yourself:[32,35],z_t:22,zach:6,zdevito:6,zebra:46,zero:[1,8,15,19,21,22,23,24,28,35,36,38,41,42,43,47],zero_:[1,23,38,42],zero_grad:[22,30,32,37],zero_infin:[22,23],zero_point:42,zeros_:24,zeros_lik:[28,36,43],zhang:6,zhong:47,zip:[13,31],zipf:22,zou3519:6,zou:6},titles:["torch.__config__","Automatic differentiation package - torch.autograd","torch.utils.bottleneck","torch.utils.checkpoint","PyTorch Contribution Guide","PyTorch Governance","PyTorch Governance | Persons of Interest","torch.utils.cpp_extension","torch.cuda","<no title>","<no title>","<no title>","<no title>","torch.utils.data","Distributed communication package - torch.distributed","Probability distributions - torch.distributions","torch.utils.dlpack","torch.hub","PyTorch documentation","TorchScript","torch.utils.model_zoo","Multiprocessing package - 
torch.multiprocessing","torch.nn","torch.nn.functional","torch.nn.init","Autograd mechanics","Broadcasting semantics","CPU threading and TorchScript inference","CUDA semantics","Extending PyTorch","Frequently Asked Questions","Features for large-scale deployments","Multiprocessing best practices","Reproducibility","Serialization semantics","Windows FAQ","torch.onnx","torch.optim","torch.sparse","torch.Storage","Tensor Attributes","torch.utils.tensorboard","torch.Tensor","torch","torchvision.datasets","torchvision","torchvision.models","torchvision.transforms","torchvision.utils","Type Info"],titleterms:{"case":[4,19],"default":[13,19],"function":[1,5,14,15,19,22,23,36,38,47],"import":[17,35],"new":4,"return":30,Adding:[4,29,36],One:35,Ops:43,The:4,Use:[19,28],__config__:0,about:4,access:19,activ:[22,23],adaptive_avg_pool1d:23,adaptive_avg_pool2d:23,adaptive_avg_pool3d:23,adaptive_max_pool1d:23,adaptive_max_pool2d:23,adaptive_max_pool3d:23,adaptiveavgpool1d:22,adaptiveavgpool2d:22,adaptiveavgpool3d:22,adaptivelogsoftmaxwithloss:22,adaptivemaxpool1d:22,adaptivemaxpool2d:22,adaptivemaxpool3d:22,adjust:37,affine_grid:23,agnost:28,alexnet:[36,46],algorithm:37,alpha_dropout:23,alphadropout:22,amd:6,anomali:1,api:[27,31],approach:34,arithmet:19,ask:[19,30,36],asynchron:[14,28,32],aten:36,attach:31,attribut:[19,40],autograd:[1,6,25,29],automat:[1,13,19],avg_pool1d:23,avg_pool2d:23,avg_pool3d:23,avgpool1d:22,avgpool2d:22,avgpool3d:22,avoid:[4,32],backend:14,backward:[25,26],basic:14,batch:13,batch_norm:23,batchnorm1d:22,batchnorm2d:22,batchnorm3d:22,bceloss:22,bcewithlogitsloss:22,behavior:13,bernoulli:15,best:[28,32,34],beta:15,bilinear:[22,23],binary_cross_entropi:23,binary_cross_entropy_with_logit:23,binomi:15,bla:43,bottleneck:2,broadcast:26,broken:35,buffer:[28,32],bug:4,build:[4,6,27,31,35],builtin:19,c10:6,cach:[17,28],call:19,caption:44,cast:19,categor:15,cauchi:15,celu:[22,23],cffi:35,chang:5,channel:35,characterist:46,check:[1,19,25],checkpoint:3,chi2:15,choos:14,cifar:44,cityscap:44,classif:46,claus:35,clip_grad_norm_:22,clip_grad_value_:22,closur:37,cnn:46,coco:44,code:[4,19,28],codebas:4,collate_fn:13,collect:[8,14],come:14,common:[4,14,31],commun:[8,14],comparison:[19,43],compat:26,compon:35,comput:[1,43],consider:31,constant:19,constantpad1d:22,constantpad2d:22,constantpad3d:22,constraint:15,construct:[19,37],contain:22,contribut:4,controversi:5,conv1d:[22,23],conv2d:[22,23],conv3d:[22,23],conv_transpose1d:23,conv_transpose2d:23,conv_transpose3d:23,convers:47,convolut:[22,23,46],convtranspose1d:22,convtranspose2d:22,convtranspose3d:22,core:5,correct:[1,25],cosine_embedding_loss:23,cosine_similar:23,cosineembeddingloss:22,cosinesimilar:22,cpp:35,cpp_extens:7,cpu:[6,27],creat:19,creation:43,cross_entropi:23,crossentropyloss:22,ctc_loss:23,ctcloss:22,cuda:[6,8,21,28,30,32,35],cudnn:33,cufft:28,custom:[29,36],data:[13,30],data_parallel:23,dataload:6,dataparallel:[22,23,28],dataset:[13,44],datasetfold:44,deadlock:32,debug:19,decis:5,deeplabv3:46,defin:19,densenet:46,deploy:31,deprec:1,deriv:15,descriptor:21,detect:[1,44,46],develop:[4,5],devic:[28,40],dict:19,differenti:1,dirichlet:15,disabl:[1,13,19,43],discuss:4,distanc:[22,23],distribut:[6,14,15,22,23],distributeddataparallel:22,diverg:15,dlpack:16,doc:4,document:[4,18],doesn:30,down:35,download:17,driver:35,dropout2d:[22,23],dropout3d:[22,23],dropout:[22,23],dtype:40,edg:19,elu:[22,23],embed:[22,23],embedding_bag:23,embeddingbag:22,emnist:44,encod:25,end:36,engin:6,entrypoint:17,environ:[14,31],error:[30,35],event:8,exampl:36,exclud:25
,execut:28,exponenti:15,exponentialfamili:15,express:19,extend:29,extens:[8,29,31,35],fakedata:44,faq:[5,35],fashion:44,faster:46,featur:[4,31],fight:32,file:[14,21],file_descriptor:21,file_system:21,finfo:49,fishersnedecor:15,fix:4,flatten:22,fleet:31,flickr:44,fold:[22,23],found:35,fractionalmaxpool2d:22,freed:30,frequent:[19,30,36],from:[17,25,35,36],fulli:46,gamma:15,gelu:23,gener:[6,8,26,43,47],geometr:15,get:4,glu:23,googlenet:46,govern:[5,6],gpu:[14,22,23,30],gradient:[1,43],graph:19,grid_sampl:23,group:14,groupnorm:22,gru:22,grucel:22,guid:[4,5],gumbel:15,gumbel_softmax:23,halfcauchi:15,halfnorm:15,hardshrink:[22,23],hardtanh:[22,23],hinge_embedding_loss:23,hingeembeddingloss:22,hip:6,histori:25,hmdb51:44,hogwild:32,how:[17,25,37],hub:17,ident:[22,30],iinfo:49,imag:47,imagefold:44,imagenet:44,implement:[4,17],improv:4,incept:46,includ:35,independ:15,index:43,indic:18,infer:27,info:49,init:24,initi:14,inspect:19,instal:35,instanc:46,instance_norm:23,instancenorm1d:22,instancenorm2d:22,instancenorm3d:22,instead:28,interest:6,interfac:14,interpol:23,interpret:19,ipc:35,isn:30,issu:4,iter:13,jit:[6,19],join:43,kei:[5,35],keypoint:46,kinet:44,kl_div:23,kldivloss:22,kmnist:44,known:17,l1_loss:23,l1loss:22,languag:19,lapack:43,laplac:15,larg:31,launch:14,layer:22,layer_norm:23,layernorm:22,layout:40,leaky_relu:23,leakyrelu:22,learn:37,level:6,limit:[17,36],linear:[22,23],list:19,liter:19,load:[13,17],loader:30,local:[1,43],local_response_norm:23,localresponsenorm:22,log:31,log_softmax:23,logic:[17,19],logitrelaxedbernoulli:15,lognorm:15,logsigmoid:[22,23],logsoftmax:22,lookup:19,loss:[22,23],lowrankmultivariatenorm:15,lp_pool1d:23,lp_pool2d:23,lppool1d:22,lppool2d:22,lstm:22,lstmcell:22,lsun:44,maintain:[5,6],make:[4,5],manag:[8,21,28],map:13,margin_ranking_loss:23,marginrankingloss:22,mask:46,math:43,max_pool1d:23,max_pool2d:23,max_pool3d:23,max_unpool1d:23,max_unpool2d:23,max_unpool3d:23,maxpool1d:22,maxpool2d:22,maxpool3d:22,maxunpool1d:22,maxunpool2d:22,maxunpool3d:22,mechan:25,memori:[8,13,28,30],metadata:31,method:19,mistak:4,mix:19,mkldnn:6,mnasnet:46,mnist:44,mobilenet:46,model:[17,30,31,34,46],model_zoo:20,moder:5,modul:[6,19,22,29],moduledict:22,modulelist:22,more:4,mse_loss:23,mseloss:22,multi:[13,14,22,23],multi_margin_loss:23,multiheadattent:22,multilabel_margin_loss:23,multilabel_soft_margin_loss:23,multilabelmarginloss:22,multilabelsoftmarginloss:22,multimarginloss:22,multinomi:15,multiprocess:[6,21,28,32,35],multivariatenorm:15,mutat:43,nccl:14,negativebinomi:15,network:[14,30,46],nll_loss:23,nllloss:22,non:[13,22,23,36],nonlinear:22,normal:[15,22,23],notic:17,number:[8,30],numer:1,numpi:33,nvidia:8,nvtx:8,object:46,one_hot:23,onehotcategor:15,onlin:4,onnx:[6,36],open:4,oper:[1,14,19,25,31,35,36,43],optim:37,option:[19,27,35,37],order:13,other:[14,22,43],out:30,overview:4,pack_padded_sequ:22,pack_sequ:22,packag:[1,14,21,35],packedsequ:22,pad:[22,23],pad_packed_sequ:22,pad_sequ:22,pairwise_dist:23,pairwisedist:22,parallel:[30,43],paramet:[19,22,37],parameterdict:22,parameterlist:22,parameters_to_vector:22,pareto:15,particip:4,pass:32,pathwis:15,pdist:23,peopl:5,per:37,perform:6,person:[6,46],philosophi:5,phototour:44,pil:47,pin:[13,28],pipe:35,pixel_shuffl:23,pixelshuffl:22,place:[1,25,26,43],plan:28,platform:13,point:[14,31],pointwis:43,poisson:15,poisson_nll_loss:23,poissonnllloss:22,pool:[22,23],ppc:6,practic:[28,32,34],prelu:[22,23],probabl:15,process:[4,5,13],profil:[1,31],project:5,promot:4,properli:30,propos:4,protect:35,publish:17,pull:4,python:[4,19,35],pytorch:[4,5
,6,14,18,29,33,36],qmnist:44,quasi:43,question:[19,30,36],queue:32,random:[8,13,30,43],rate:37,readabl:4,recommend:34,recurr:[22,30],reduct:43,refer:19,refin:19,reflectionpad1d:22,reflectionpad2d:22,registri:15,relaxedbernoulli:15,relaxedonehotcategor:15,relu6:[22,23],relu:[22,23],remove_spectral_norm:22,remove_weight_norm:22,replicationpad1d:22,replicationpad2d:22,replicationpad3d:22,report:[4,30],reproduc:33,request:4,requires_grad:25,resnet:46,resnext:46,resolut:19,reus:32,review:4,rng:6,rnn:22,rnncell:22,robust:4,rocm:6,rrelu:[22,23],run:17,runtim:[27,30,46],sampl:43,sampler:13,save:[17,31,34],sbd:44,sbu:44,scale:31,score:15,script:[19,35,36],segment:46,selu:[22,23],semant:[26,28,34,46],sequenti:22,serial:[34,43],share:[14,21],shufflenet:46,shut:35,sigmoid:[22,23],simd:6,singl:13,slice:43,smooth_l1_loss:23,smoothl1loss:22,soft_margin_loss:23,softmarginloss:22,softmax2d:22,softmax:[22,23],softmin:[22,23],softplu:[22,23],softshrink:[22,23],softsign:[22,23],sourc:[4,35],spars:[22,23,38],spawn:[14,21],specif:13,spectral:43,spectral_norm:22,speed:35,squeezenet:46,start:4,statement:19,step:37,stl10:44,storag:39,strategi:21,stream:[8,28],studentt:15,style:13,subgraph:25,submit:4,subprocess:21,subscript:19,sum:22,support:36,svhn:44,syncbatchnorm:22,synchron:14,system:[14,21],tabl:18,take:37,tanh:[22,23],tanhshrink:[22,23],tcp:14,tenet:5,tensor:[1,21,40,42,43,47],tensorboard:41,ternari:19,test:4,thread:27,threshold:[22,23],through:32,tip:32,tool:8,torch:[0,1,2,3,6,7,8,13,14,15,16,17,20,21,22,23,24,29,36,37,38,39,40,41,42,43,47,49],torchscript:[19,27,31],torchvis:[44,45,46,47,48],trace:[19,36],tracer:19,train:32,transform:[15,22,47],transformeddistribut:15,transformerdecod:22,transformerdecoderlay:22,transformerencod:22,transformerencoderlay:22,triag:4,triplet_margin_loss:23,tripletmarginloss:22,tupl:19,tutori:4,type:[13,19,49],ucf101:44,uncontroversi:5,unfold:[22,23],uniform:15,upsampl:[22,23],upsample_bilinear:23,upsample_nearest:23,upsamplingbilinear2d:22,upsamplingnearest2d:22,usag:[31,35],use:[14,37],user:19,usp:44,util:[2,3,7,13,14,16,20,22,41,43,48],valu:19,variabl:[1,14,19],vector_to_paramet:22,vgg:46,vision:[22,23],voc:44,warn:19,weibul:15,weight:22,weight_norm:22,where:17,which:14,why:35,wide:[31,46],win:35,window:[6,35],without:35,work:[13,30],worker:30,write:29,xla:6,zeropad2d:22}}) \ No newline at end of file From bbe3c71f23aca26ee31c6bac1bcf872735caa8b4 Mon Sep 17 00:00:00 2001 From: pytorchbot Date: Fri, 2 Aug 2019 00:20:15 +0000 Subject: [PATCH 05/12] auto-generating sphinx docs --- docs/stable/_modules/torch/jit.html | 43 ++++++++++++++++------------ docs/stable/objects.inv | Bin 11478 -> 11478 bytes 2 files changed, 25 insertions(+), 18 deletions(-) diff --git a/docs/stable/_modules/torch/jit.html b/docs/stable/_modules/torch/jit.html index 3b4db4b09840..a790d2bc70d6 100644 --- a/docs/stable/_modules/torch/jit.html +++ b/docs/stable/_modules/torch/jit.html @@ -1269,7 +1269,8 @@

    Source code for torch.jit

         # extract the necessary info from the closed over variables on the function
         # object
         rcb = _jit_internal.createResolutionCallbackFromClosure(fn)
    -    return torch.jit.script(fn, _rcb=rcb)
    +    qualified_name = _qualified_name(fn)
    +    return _compile_function(fn, qualified_name=qualified_name, _frames_up=1, _rcb=rcb)
     
     
     @contextlib.contextmanager
    @@ -1322,6 +1323,26 @@ 

    Source code for torch.jit

         _add_script_class(obj, qualified_name)
     
     
    +def _compile_function(fn, qualified_name, _frames_up, _rcb=None):
    +    ast = get_jit_def(fn)
    +    if _rcb is None:
    +        closure_rcb = _jit_internal.createResolutionCallbackFromClosure(fn)
    +        stack_rcb = _jit_internal.createResolutionCallback(_frames_up + 1)
    +
    +        def _rcb(name):
    +            # since type comments aren't captured in the function's closures,
    +            # we still need to try to the rcb based on stack frames if the
    +            # closure rcb fails
    +            result = closure_rcb(name)
    +            if result:
    +                return result
    +            return stack_rcb(name)
    +    script_fn = torch._C._jit_script_compile(qualified_name, ast, _rcb, get_default_args(fn))
    +    # Forward docstrings
    +    script_fn.__doc__ = fn.__doc__
    +    return script_fn
    +
    +
     
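For context: the `_rcb` fallback that `_compile_function` builds above exists because names used only in type comments are never captured in a function's closure, so the closure-derived callback can miss them and the stack-frame callback has to pick up the slack. A minimal sketch of that two-stage lookup using only the standard `inspect` module (`make_resolver` is a hypothetical stand-in for the `createResolutionCallbackFromClosure`/`createResolutionCallback` pair, not PyTorch code):

    import inspect

    def make_resolver(fn, frames_up=1):
        # Sketch only: resolve names the way _compile_function's _rcb does.
        closure_vars = inspect.getclosurevars(fn)
        caller_frame = inspect.stack()[frames_up].frame

        def resolve(name):
            # Names the function body actually references live in its
            # closure (nonlocals, globals, builtins), so try those first.
            for scope in (closure_vars.nonlocals, closure_vars.globals,
                          closure_vars.builtins):
                if name in scope:
                    return scope[name]
            # Names appearing only in type comments are not captured, so
            # fall back to the caller's frame, mirroring stack_rcb above.
            return caller_frame.f_locals.get(
                name, caller_frame.f_globals.get(name))

        return resolve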
[docs]def script(obj, optimize=None, _frames_up=0, _rcb=None):
    r"""
    Scripting a function or ``nn.Module`` will inspect the source code, compile
@@ -1395,9 +1416,11 @@

    Source code for torch.jit

         """
         if not _enabled:
             return obj
    +
         if optimize is not None:
        warnings.warn("`optimize` is deprecated and has no effect. Use `with torch.jit.optimized_execution()` instead")
     
    +    torch._C._clear_compilation_stack_DELETEME()
         if isinstance(obj, torch.nn.Module):
             return _convert_to_script_module(obj)
     
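The deprecation message above names `torch.jit.optimized_execution` as the replacement for the old `optimize` flag. Assuming the 1.2-era API that the warning refers to, the migration looks roughly like this sketch:

    import torch

    @torch.jit.script
    def double(x):
        return x * 2

    # Instead of script(fn, optimize=False), wrap the calls that should
    # run without the optimizing executor in the context manager.
    with torch.jit.optimized_execution(False):
        print(double(torch.ones(3)))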
    @@ -1418,23 +1441,7 @@ 

    Source code for torch.jit

             _compile_and_register_class(obj, _rcb, qualified_name)
             return obj
         else:
    -        ast = get_jit_def(obj)
    -        if _rcb is None:
    -            closure_rcb = _jit_internal.createResolutionCallbackFromClosure(obj)
    -            stack_rcb = _jit_internal.createResolutionCallback(_frames_up + 1)
    -
    -            def _rcb(name):
    -                # since type comments aren't captured in the function's closures,
    -                # we still need to try to the rcb based on stack frames if the
    -                # closure rcb fails
    -                result = closure_rcb(name)
    -                if result:
    -                    return result
    -                return stack_rcb(name)
    -        fn = torch._C._jit_script_compile(qualified_name, ast, _rcb, get_default_args(obj))
    -        # Forward docstrings
    -        fn.__doc__ = obj.__doc__
    -        return fn
+        return _compile_function(fn=obj, qualified_name=qualified_name, _frames_up=_frames_up + 1, _rcb=_rcb)
ScriptMethodStub = namedtuple('ScriptMethodStub', ('resolution_callback', 'def_', 'original_method'))

diff --git a/docs/stable/objects.inv b/docs/stable/objects.inv
index ff0260ee16298bb3ffa9c711fa936650760f69bb..79583e4e9175dad27d590ace0bdf02a5ad0c0ec5 100644
GIT binary patch
delta 18
ZcmcZ>c`b5+H@iuisga?f*~Xw29RNkY28{p!

delta 18
ZcmcZ>c`b5+H@kVFd2(8E(#D_`9RNzh2T=e3


From 78783bdbab47fc83bd3c355dba5011fd4ba86f63 Mon Sep 17 00:00:00 2001
From: pytorchbot
Date: Fri, 2 Aug 2019 05:54:49 +0000
Subject: [PATCH 06/12] auto-generating sphinx docs

---
 docs/stable/_modules/torch/hub.html           |  7 ++-
 docs/stable/_modules/torch/nn/functional.html | 26 ++++++-----
 docs/stable/nn.functional.html                | 26 ++++++-----
 docs/stable/objects.inv                       | Bin 11478 -> 11478 bytes
 docs/stable/searchindex.js                    |  2 +-
 docs/stable/torch.html                        | 41 +++++++++++++++---
 6 files changed, 70 insertions(+), 32 deletions(-)

diff --git a/docs/stable/_modules/torch/hub.html b/docs/stable/_modules/torch/hub.html
index 9659dfe8538f..c2854f9713d8 100644
--- a/docs/stable/_modules/torch/hub.html
+++ b/docs/stable/_modules/torch/hub.html
@@ -641,7 +641,12 @@

    Source code for torch.hub

         if content_length is not None and len(content_length) > 0:
             file_size = int(content_length[0])
     
    -    f = tempfile.NamedTemporaryFile(delete=False)
    +    # We deliberately save it in a temp file and move it after
+    # the download is complete. This prevents a local working checkpoint
+    # from being overridden by a broken download.
    +    dst_dir = os.path.dirname(dst)
    +    f = tempfile.NamedTemporaryFile(delete=False, dir=dst_dir)
    +
         try:
             if hash_prefix is not None:
                 sha256 = hashlib.sha256()
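The hub.html change above is the usual download-to-a-temp-file-then-rename pattern. A self-contained sketch of the same idea (`safe_download` and the urllib plumbing are illustrative assumptions, not the actual torch.hub implementation):

    import os
    import shutil
    import tempfile
    import urllib.request

    def safe_download(url, dst):
        dst_dir = os.path.dirname(os.path.abspath(dst))
        # Creating the temp file inside dst_dir keeps the final move on
        # the same filesystem, so it degrades to a cheap rename and a
        # half-written file can never replace a working checkpoint.
        f = tempfile.NamedTemporaryFile(delete=False, dir=dst_dir)
        try:
            with urllib.request.urlopen(url) as response:
                shutil.copyfileobj(response, f)
            f.close()
            shutil.move(f.name, dst)
        finally:
            if not f.closed:
                f.close()
            if os.path.exists(f.name):
                os.remove(f.name)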
diff --git a/docs/stable/_modules/torch/nn/functional.html b/docs/stable/_modules/torch/nn/functional.html
    index 1b1178548399..83c0519051c4 100644
    --- a/docs/stable/_modules/torch/nn/functional.html
    +++ b/docs/stable/_modules/torch/nn/functional.html
    @@ -2625,12 +2625,13 @@ 

    Source code for torch.nn.functional

                 ``'trilinear'``. Default: ``'nearest'``
             align_corners (bool, optional): Geometrically, we consider the pixels of the
                 input and output as squares rather than points.
    -            If set to ``False``, the input and output tensors are aligned by the
    -            center points of their corner pixels. If set to ``True``, the input and
    -            output tensors are aligned by the corner points of their corner
    -            pixels, and the interpolation uses edge value padding for out-of-boundary values.
    -            This only has effect when :attr:`mode` is ``'linear'``,
    -            ``'bilinear'``, ``'bicubic'`` or ``'trilinear'``.
    +            If set to ``True``, the input and output tensors are aligned by the
    +            center points of their corner pixels, preserving the values at the corner pixels.
    +            If set to ``False``, the input and output tensors are aligned by the corner
    +            points of their corner pixels, and the interpolation uses edge value padding
    +            for out-of-boundary values, making this operation *independent* of input size
    +            when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode`
    +            is ``'linear'``, ``'bilinear'``, ``'bicubic'`` or ``'trilinear'``.
                 Default: ``False``
     
         .. note::
    @@ -2678,12 +2679,13 @@ 

    Source code for torch.nn.functional

                 ``'trilinear'`` | ``'area'``. Default: ``'nearest'``
             align_corners (bool, optional): Geometrically, we consider the pixels of the
                 input and output as squares rather than points.
    -            If set to ``False``, the input and output tensors are aligned by the
    -            center points of their corner pixels. If set to ``True``, the input and
    -            output tensors are aligned by the corner points of their corner
    -            pixels, and the interpolation uses edge value padding for out-of-boundary values.
    -            This only has effect when :attr:`mode` is ``'linear'``,
    -            ``'bilinear'``, ``'bicubic'``, or ``'trilinear'``.
    +            If set to ``True``, the input and output tensors are aligned by the
    +            center points of their corner pixels, preserving the values at the corner pixels.
    +            If set to ``False``, the input and output tensors are aligned by the corner
    +            points of their corner pixels, and the interpolation uses edge value padding
    +            for out-of-boundary values, making this operation *independent* of input size
    +            when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode`
    +            is ``'linear'``, ``'bilinear'``, ``'bicubic'`` or ``'trilinear'``.
                 Default: ``False``
     
         .. note::
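Both hunks above rewrite the same `align_corners` paragraph, and the corrected wording is easy to verify numerically: `align_corners=True` maps output corner pixels onto input corner pixels and preserves their values exactly, while `align_corners=False` uses a sampling grid that depends only on `scale_factor`, so the interior samples differ between the two modes. A small check against the public `F.interpolate` API (a sketch; exact print formatting may vary):

    import torch
    import torch.nn.functional as F

    x = torch.tensor([[[1.0, 2.0, 3.0, 4.0]]])  # (N, C, W) = (1, 1, 4)

    print(F.interpolate(x, scale_factor=2, mode='linear', align_corners=True))
    # tensor([[[1.0000, 1.4286, 1.8571, 2.2857, 2.7143, 3.1429, 3.5714, 4.0000]]])
    print(F.interpolate(x, scale_factor=2, mode='linear', align_corners=False))
    # tensor([[[1.0000, 1.2500, 1.7500, 2.2500, 2.7500, 3.2500, 3.7500, 4.0000]]])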
diff --git a/docs/stable/nn.functional.html b/docs/stable/nn.functional.html
    index 1eaf31f530c0..d41d74514432 100644
    --- a/docs/stable/nn.functional.html
    +++ b/docs/stable/nn.functional.html
    @@ -2299,12 +2299,13 @@ 

interpolate
    'trilinear' | 'area'. Default: 'nearest'

[base85-encoded GIT binary patch for docs/stable/objects.inv (Bin 11478 -> 11478 bytes) omitted]
zjIt41(Ged$9qL~%&^+-VLSlOq(d;xVD+lV%LqQ%2B9Aim3JjM{S-9fqY#p;LbfSP5 zvGJ-O#qp|m3~YV{h3Wb6Bzl_q(MJgSV1aN$Xf3VUlAaU~s$@L|*{ez}zV(e5fy-Av z?cA1KwdYRn#G7x|7K!9u@@X;6JfqRLpwT-U(AfUK2R`in({KEWcW@B@Lm&ExTe)hb zyY4!(0u~9KIt86);PU0m-oP6khCb{v$hf9qP&6r+ExR5}dMa`I-4u+{>B*Wr^}qG3Joi$Q#5V#@U$JK#wVeb3LhP59 zcK{|({#DZXz`1z(v^&?g+Sz*PIe*4g&-9}KsN>9%PW7J@BFcr%{ zgM+mDu~)tosKB`HL7Hhy9;Rn1fp6%oiQanS`XVP#FqUXM%}m5HVr-k~9d*kNe9JM# z_@>^jI7^@x~Mk>RZrxT`CmZYpYNS^RnXk@(3d^?!`p6jqeg~G z@T{&b_XgwIW5hqy+wd9e50(zFak!_fjE;^LP7V{)wjXI+tKQpA+(H8KK8P7tbt3Fb zpJsd4FY<#@)%Bxo1mlCXzSoPkqihK#dSNhQl1RS!Td`t=E8zqhFSn?F%54w&K?Mf+ zRH@HR4;E}^0X>bA$0q`aAh0y z$JLU|qoTSKf%q)Ck{>+_O?;@nAO5Oh!b?%F|B>2@ytSD2?v$sOU9>GhVd1I%f42Dg z6!RXkW9N4F^wS62#*J7_i*WATdAEF7oan?bt}4Fx#V^PVYX-mRO>c1j_>ceW-ul+> zk67WtJ=r(ilwV1d0la6=o*ZsB9_WE2Y~Q}!tzW;seUfxGoR7M1)mrCt_CG0|9&|lp z7h!CybgMRP#ukLt(36mPent1(kk4z=i(lc~H!x{C@R-}a7U5|bxs2Y6K2N>RbjGOJ zvUatbTDu)dOud|fKYSE;EAZosxZEPBZy*$vj;kRT&|Z9rk9lzqa@-@!adr3Z-ELr@ zvFAHSlB9-v0J~-IQt0!YpuB$s?q=JTV_Y`h?NTmoW2(m*ZOxd7s0?jvL&B@`OSt zV@7X>uS@+aTvhSt6up=&($_Ez&-bXpJ>~j8dWBe?W*(?AmUXz_tqR=!q2qp4d~9%~ zn~Jnc4DVm=!^ekCxXC7Ya-if2g0Dj;kuuU!iOnEQ8QuU69y2&~!1Wb-#7e+pe%0PV zSM7;upbTd)Gxh^SQbd+dWh}u%x`ce; zBfT0d-vG--z`)4zStxTkdKBg88Sp&0DdeBvxQX#lKYi1FiPYoM69!xhEFar%zU0#8 z#e24)>NJgO_Uy|Xjn^ucg{y-`dBc4Hkhv;JgLyrSZfs5Pz*9p~km8}?dHg^wwlop-iw)K4 z9BF&7JXpkdFMJu-;;{gA)}V~nXq0l&%SklDbs~s}Xp5fTa}7KK!-%h!Hh12DbR`&C zxN_?wCj#X6_<^u;%XoqE<}+|gQO`K7Jl%Rn-moSfjc2{k$Y)L}I6>eMa$F7LigBmF zQ%eYqjMor*b^fhL*n7)ODhGV_&Hx~(ZfOMj_AMZy{nnC0IkeR7Hm~$v`G^NSr+AC} zc|jQ0oJkhFz}TqnM@G(~AJ5FXym08yp&SVszcei9U6~J%F+YT5(SA4WHetUtRphCa z*SX8bj%8U6V1~F29tkeP5l);u<))2+uyS<5Z9(|d*1Mig{8x>NV|glTN_>3!B^rg96RKOo8&n(H00}#lavw^V*lwO z?2#+G@iKTA9t!!)aCwl^gL>$}?6>FgIX5{rg}q#3uI~)eKpBqD^upk-F9SUR%#(&l z@7YtXGJe@jOu?A%4|P|@a{Dr@5KLiP%Xn$PFFE#&oOHWl*4QTLP8>gu`pvJ5)N@t~ zVSdZty<3U$5USXQGG5K|);EGW#UML{a`apraWg&rzJ6;|=s_9xb;7tR_L)^M_;M9K z<>lnblYakSvk8)=JJ)*})+%E=vQI&sMH%_dcuu z&48!rflJ75*j+%c8XBMReX)o}FFc)Wf5KQL$l?t*+)#vi#loluU;l>Jx%d9b``k_# z(L<+B`;$QA$!7R({?@Pg7Z;afW$|zS=7Vm>j_qDwv0wh!kG<`R@p?rvlF^~eYGM@( zYZ(itj|Ys+JJHD@&OForn9pM-tBS^Ya`8C3C7B0gn zGo<4!nQ@&#T}~|M}m-maOZY`#*j#yqt*1u~R&G5S6U<_U(l+C>ilVxu?9#X{ zTH2Ld$+#vi!Q+S9hX0)Oco3h;xM7M#8L=RY>vikax)Ucbt4*Mhv2pk3z6kHis}t<{ z$6If`H5YjJ@L~VFW5zQbd~fnWd~2cCAfeJ{!j4-Vf5kIPIx@AQv*(7C+C z(;-%3*)QV_1C_Q0m^3s5IdSmykl~^n{c)-DeZPafd>;)d-u>C&O*&Cq2JIUd#7nT? 
zjZ3mE0+*WiqfPpb$L_%wFnFqk1?It}HF`_vk?BJnZ`oxrLi`CBaV!@HKi2bl;OhpoWPXKRl_5lZm(VNHRMh&u5%G>q`+wZx(e$9ZZ zkj|IU{(z0oyTfgitlf+0F_(FqRRr_kOUym?kU z+W!(>Eb1Gd_HrXua@QevskNW{`m1YHTfX-1-wU4iVmbJnTeD`hjq--I0>1mZztit& z=5{=uFk)Xc7tS?#r928`bm-X{9mEr^jBD<%ro8KYf#@vBlJiCet2eF>=Auf}A2~oC zGG1iVD9;?8d8jkSqfu&5TE3NmOI`^q8=;7YtX!3VBQGdlNM`-exv~S}8i9rxX_MD| zw__X4=h0^{4p>m$3?ps+h|iD55T5t#;cEZ+GC8)3WWbX|xTl>TX5%>24Tx3H4N~($ zqo{3|Q5tTaCFh(Nae>*!iOELBwQVQirq@hvV#GqP!<7FnJ)6<1OB6dG!#y}O66p;Q zd>zL`@my8Hp+iR$?5__${3w(UfRa4+Oi&OzpYZR|=}4&%)4hL{Lv3gCEXqZAYKtPAg&dZaPlLL6meS=!i;&{mF!l|C zF}~d(3{`@>e`)&cIkquz$wL;-06^q}p7dUxtxH+I`Nd%Z`05`B6FTC3(#ns$_i?5c7^irBpI|eL zYl1waMJL^;U$FllKN#>iF$I(+660DQO`*(Yg4Il}5GS3eF=j4uW_k4tM&VYu85SYi zw!!o2X^6>N`8XU##J}|=!O=l{#nHi|ac3kou3=0YD3j)~1A#Uj8{F?+2U+I}F`VSc z*e=?lQ+kbh6S9DXc0*ys zkxL$Q1fS)RUJ2~S%^(#%DoPqD^afJM<;BPnw0WbB#nV6td?~@^$@|QphwGDU9uYEB_+jeZ*iMG<(EAM?i$-ofv?LYK* znB4MZVtT!a%hi>C|KGzz@?Ad_CYspN(Ke{8zD=WSD6DN_O9`%S4H)^6{>T_Iacp17 zG*5aWa2ocy41z#wh-ac5SLzQCRdbc0>=m8&t>v%^Cw-=EaE{% z7T;_5>ZhFh_U~m1tbl$aT%Y&KaOJ-o3-r{liZ7jVwas6M!a09aN8b6M043&97fn@-TO&g5+@>7wiqVdRPPJlUL`kj zc4>^aIM0In1!G)uyTQ->+)w%!g1Cp+zRYM1YqrTR{n9%ZcGFOJWnAyOwVqdb_Y@=L zqh~4%8Xw#~!L0<*)1m0Luj))PuGhf0w!qG?9QhI2@?66cPEcpkHBpoSDgKKW;p&=o z#2j}1K?+?R{qB$C2Jl>h zh5L|}FZEoj&DAz3K1;?heK^VPw`5%AxsH>iG`*fpm4+pE8vr`7e)|tTN~+G?0uLHN zeU5_zw?jQ2ga6$hg`tlZY1;+yLfLIqepNs1=Cy=Bzcrw?WG-nN<>DY`dgf=I33wsb zgN{xuGOpF%X)K<_I|ip6+!9n>VhdhoVjvPCHlWSM9l$ zY!~sMFn$vI&JTo-r149?&tPvC%gFvEknZ!wbzCvkSR@?PQ+)QG>k|jUQ?qm7$s_lt zbE_Qsj^8Hfhwp#FZ>aZ%VH#Jk3w7pman406+jqL_t(^ z87`HqJe5zHW|2R~9=UKzNh8}D)(eNTHaMY@lmgbiGn+j1M9Zrw913IhAsHCgDbPUR zizBUqgj@=Zo@)!tkH$QK27Rj_y7XTB?*HoCXFnWLD!hn};?&_(c7+$EVVF9v6g3C3 zdp`pO0TGth6E7Be;GR1Hy+i8k3Tx$R`6)`Tc=1z2Q|VaNtekTg{EG6pQek%BXP&8X zt@hFKq3Xx-$1wE^yCsA;YQxMxiIt~%6CD~U0uI)xCnfSZ@YDkV>y|shm2IE_g8e}I z`|I|-ig=T&%JvWMaqfS6qjO*X-0U(YS!hCbDe5ZJfZCE-+xwIiFITY2 zE{wYrWQ@q$%~La?Zm-)GqWmfI5LXWhOX>1ieUN=ldPx1W2!FOP^{(KysvTKPE*XQ<##=w$N51hEHJy7^f{|Mz?TvwKSAhDmTJ?mRuK{ zalIJoy+q;Cpe@B~3#Qqi!lZn%X9&H#A?dzrdfF)D+PA7S&8`xBdQJ#yH-)SI+0nEu zq@8oF)rlVXg7Zfvee(T*qQXVrUd4+>k5&c|rkC=Rf%K(Mgr)xHz8o*Q?F>1{>u$Y~ z9eb{=(z5rlFe;~s=!43qiJ{sgVWkIzDVkBglsR!YeIhs|(p4^{;Z^!qyvuWqV^TW?=`s>X z%ib~3E+(`**?XHs8m?!R&FX2zDSf&ngyD~U-8meA>gz)^gwLz$--pG1qwb^ez~Vm( z?$wK)1x3ArMWnDwaZ2&p>}8-R%qky-Q5gC000O?il>-?FxxiT-)Oi|ut|10ATFEPw zn%&3TjI`+L)Dhx(C8=IjkXUKd;iMcDUXa8Gsq?C?KJ#}0^Hp!Iz3g%k10Nyte&U{r zLTPRbJ_y>~o26&H5b)lgMd$2_GK* z2gezX6P)At)MBMPw5rRH*9K$HWS@s}c-PUr6M@cn*dYU^;%O z&Oh)^eDfafb$IAx5LQ_Og)kvi`4pC3ZC{-YH%+_r+ z7Tv1+>c5S6h)!6CC*4$5$wyqFnOU=m7&TB+F+_2lOuSSUQ5J7{F1o6I7?-9m!e0vO ztOm4V$katfKpoX4eLHO@(vzoFMjYwMfl_q*oBF#Zl{74cpL(u^%Rct|zvSGzei|B~-G8*A13!7rQk&l2RrL{C?nmr1hNZ+jMd0k3rVN|Z|(*HWU*n2@mNaG+e}$xF9| z*pA=MpiTjoGAr71&HUSasZpKBE{C2xSkPH*;bO;ChtpNGEULZNKuJZez2iFdM3Vw} z+%xZIB+!X>($3~n-tEq+z|(GNNr5(-!TyjsroR=UQJ4WnMZRr=vmGl#4vDAF6)% zTm-5g#?6LXlzu6!wHmO-bt`=>Z3{k2C#5-(o*W6+8;`2=yw!tBD;}hqXk7DyQ-HdU z$cPup=fn2~3Wws>sPL5k7ryxQ{SG|(JVB!!kbF|V3Pe}=C_I*_qE%HoYdGP4^EO4Q|>FLh4UE0DXr^W(uOq@RTKuk!hMm{@i8{T;#RUmEu9Nq~PU^E25h! 
z1&c!^*^1{{2B%G%-rJ5tuM6`{4IA+ygF_zRlvesI+SXHi{7_WxDRO*PCuE+-4mkH4 zKknRz-#xp*A?3?~N5-{0@bX-10Mh^{7VJ{7!e+awY(;pA^Yzw5b67m`O;9<))3x(& z+fAP985orU#&enX-+ixh|L2be88Bt_D>^iqshBy5vs*MsgLSFBtCT0!ooz==G9_y| zsX<1!cc?qk*_sR!?c=6kaukhk>^^U z6W5;W)PvLw?HY+LdmgPR8O$s_f$N4rv+`vaVe;T1eulj3#0tcbeh;2|d(pNiNB{Y=4wCA|aOB%`EKg}{ z+%4madN2wvox`>#EvEyod>Lg%KN)~=&9UKfrQelcRQnzOkQ>jp=-^sHIo#w_hzI@@ z$~4}f-b&z)_|-oAdM>##zi^DP$Cm*{xoA(@ehQjJP=Z6q=Z)?&BYs9hSQXDzBikj^ z5$mNKSBR!otaTHpcb^`_m0tNUTyfRqGp=V~g+pz*siEUp*~-YH|Lb?T@<0D|_C@2F zja%I)>K4mcB2{-SMIIGV#`wT>fobT8u?F>a8THmo9!u!k8Na0-R~=|wZdos?ck$HC zD`}5bCtU?U4^z{1_08}S%C0i-*|%XCAK$xl(NS&&&6qfP;)t6WSm{_V{s&QK+A4qL zUN?yGnKD0vcUa1BpiE*CG_id@$!0*A$nwKjM;Ry~4|&7zA6M+2o`b;YC75R-9j4GczdU>CHYG&8T6j0kzBMT1uv ze)1gB!`KRl7kO;>5+;p~Q%CVJm+OitC=HD%TQ-Fkr$Wz;Mk(t}#`W5`r*`G)kV;z) z#1B0r=P}dn$a9@C!g}ZMNQk_s-a=sgznY48GDua9dhCTAqNP%9`;h0_PVU%#*0|;i zr~K@=fbnV{d9DNjI(Sb)<3Xgr|GG*L5X#_}oQTfsTaqx-w*sd=JDI z9D8t1e-H2~ko)b3CyWjcxnqY9*K~9U_WwO_g7n}W1YgecmtEh;DOZMZJuy{wGuQ%k z>eMNGs=m+vOtQjT^1`~C!2}m`hPGFkv`-Y6$6ve7dnx zz8_T3S9;@1#uJkzw+iDPM~6vFjwKTVCyu-8Ic^Z*Im?$CCE_~<3%;f z;Q+T<0%mZ+(aH0pu4k~vU9JY6s#pOWpQ@ig_CQ~tC*0KXOOUT~kn1CEyU{@- zPd)A~^sIF3r<@oMUg~qKgUNC5GIPZ3J$=kEpOY_rliTv~KXKIuzvzy<=zB!kUzyjA z5Bv^f@T42tyxSf5v0wM{O_`d8Oihkg-O$jGTe))OynM&xG~{v5o;^99uJ%LE)n;I^ zug)dhJdMUAzHGPvRx13+Z)&X zN@JTKK9vl`%xr7Lu?#t|NQ~e!0eD&+P}&f2?fh$%g5`js^?HP z@POO3YnNM3v0QBoJIdLbs3RI>wj7B#Nu;dMgS~lYZ3SgTkL!b&tXRFqty&%@V{zqV z&vRc`lNnC-`Hl=^V&8Ubi`clWmfHYs{iG5ygRLYt-gr}3eTkm#-P^ah{i+vktKybC z>dy!(k^hZ1+)$gGD}q<`I^I_NoWQ#l15s7Lu(b~rldv8KTx6Q4Exc49{|QcIls1hgi!zP&Pb)|dXGKgL)Pxz@7CV_e8o1ua?9it`pTVP zaqL2n3Eyv_F9f%~6noC^aVz%UGP}&E&Xjg_Xr}?zo9b0@@k$RT>ty4`jY&*=WP6aMr(pt)a5q$jFF0d-kl`y#jiXa*mY)_l+-j%o4`)Ew6jC zbD#Pv*M~a4aSgo0_QV{@B9Bi544il-)IIHpA;=GH8T5(q@o{$pWO+V3*Q-|Hl$r$w z5WywrZVh1hdomhx%?4}=E|ucu>+7>;hZu;RTOBRKLPrr;&!NK7s|Io^U4mZz3s1Fw z=Bdd*JI1x>($l_kf1qauhgt^lPW&*xV+U%{W|mzGG*4$2-c{D6jBv(lOURlA11D(_th^_NazJSE&$1*R zsVH5}4_LX>X?sh4a!5-LbFP8xU6*K$Sov#6JoeF|m8BS!^;=HFILYVa(&~1~lAf*1 zs+}dDI)aXq?RXw@Sv-?J;g7&j5qNnQBMQ`2;h;yzCh+J7?LxNaI(k95^7O55_|LSv z^ep)(&lOFUXPdxSUdW@q(ny1hWoA3+Xw~Pv!nwD-(>?wApT^49OKi&Gd+A=r=KA#I zV*FM@t=yBF3|K_o$8gED%0&9<31nIc&Pa$R%Aoz$ zH(@f#38y8<`}F{dUQ??s5zJE=46wkird5`F|>C3SrT4 zeH2D+vEVf|I@L)?&3X>=)N4<|mw{QdEri(2OZ>_^Sff&W#||XJghspeVKw@Cq!bnN zqL)ZtEhF{W_IX>+wUmJjD0w|epPoy4%9w{dq@6DP+fs{nhUuQ4TmRW_$5#Y@Dq>10 z*@V0YF9dz6S8WnPuZ@3|nRT6cuEWx#F96GAJ=Yv8m4*nsmr^g<7Q*(@i(WJ+yfJO+ zlWCcUCVt#!zY0Hsj1dkTXW^{@OzEgS|5WhD{PZCiYY-Kn)&-Fd$-N?FKZwW4X!-Z^yVMkM!Epn@OF{7d@G` zJ}JL_uU|blyZd&yo}aHA8(+mQF<1|ppm(D4m5b_v&#Gg^&G)rge%g*_o3|0Z!c{+d zX4|DUg_Sp3X4a=p3ZZe$KJvm>hZ?x=E{xyD!ae`>$#mY+uAy@ISu*3=cq_PN!+Nec$uY;~`F^3nSd$)G za?+JiL}ORD!l7}^K~IJXCv#qj17QS9odn8P^juFh+hXLAk)QgfdAY|&&GzT}WLyb~ zxPDtmqjYw#CXVW|5&lwm`!zuQKz&H#EqaB$yv#;PZBb;Hs?V$6N}gA)OQ=$oqPN^m zy43EP5Egl^^?nX13SW~&hEd+dBcClOJku}WgCIKLskMKlQ9W4WI-NL1+=}|BoE)v> zq`K5ZgIfvMcHBlrpmE&}sO(Hb+UnDCCbdILxT;q^+k3^gadXou*;isogYQTX|z*rDZtvIDuPKXxwi@`#ksMp|Abr?}Rx1b37MJ!MR8p zqSI+zh~71Ng)}ZfS8IU6-iqhCS*8+3e(j22kH(Y0GTa)RoPF|4RCOH26NE1#xY4!{ z&Mr0UmjM$fH2x=T4IK1{bMPR~G~~G#E(P(z@*RIBA4@NAGm!GCRhfllm!2?$VrB`bkk}Llw_3fEOQxfn#{LCL`F!itztW=-^$38+=(tk zDKqM7r3?WUo8{nHC7&dJ?I(b36$dSK*T$p_6ill(jrmR%=|V-zK;feI0ep z2F1*N6@5)mSXP$r)ZvhpFDkbKDm&Bgo)at91IOTY+TZ4xOLv9)DxEX}>N>kRt@c*D zzc%1cxTM?&K*NS{nrVj6nu3wiml3LXU8#^V z&ta>cYvd=7IC&+F>#h$Ji#~oJ-ideYfN~C38B_Vt+LI;{@{VdhH8EIU+U}xG&OP8N zj~&2j6lb|hl$LmBD&1S-n#PvGMOn{kPnMgIQ{EwH!T+l)+S;NeN)7-7DQFtC@;;@; zwU*Uar=C9&KpU#Pc(kJifUUbj`*MKSBvUeM2WXODy~a}sX5AJ-d>Pl)+sN?QfOVn% 
zv?a)UcFbT=+O)k@mqoZsVV%{0^dOB1859a@Mp12{G`6i`od3)>X=Fw+m%R zNf)CzE)9%GYEWZljLk<^H>!`@%m4L$#H1r?kW?briknhn}cm zr1=Av#|iT1jal{ET8id(tS@=-@dBVPANw~~#OzCQ&8dV~=~OTcqZ496&ly)1Loe_!qsl)V~En8i4HP3I7Ex> z{rx`^(thZV!c{8|e9}8lVwY|5ECfjN;)Q3Pc_vr53l}cqy0glS75|c%(>D-=f?5w<8rqmc7z#h zg_xRwS1*EStvGYqp-Z@tiD`GKfTtcDJH=z1HZdI(osLL~WU>p7F)E{zO-)U?9t;A@ zPoHq`S-XqXiaU4gm|MGitp$`> zH+t&0J5#_HkHBJ{>z1)*ORi_=h@)wCxo?>pj`(F5M{98&Dc(zVV+X$F>uen7b6&bU z=5fc)j=0kWw1Hj-{wO7^fGxX-2}^{@YHxNlQW@T)9(YT8j~{g!W`~a-aibfL_#=yy zhMt`YjKpai2{wHJM<`+YLK%u@Y8+>ia@$ITB_-wqm`*CLwqEsYkLlIs^PFjAORq zu}e6MH))SD43Onx6F%MOnISjPf5_io!q*@NA=7N*;YYsV+jA0U44*{$)vm|O-ld}l z-3XrPJ>HCdz$wh}DddED6ol+(dMhUTGy$#KQ3%uG#G-3%sw+qZAeBQ8D7p$6hePTeje zz#2(~lua5LX)-JPC>O^S4}2-VU0{7~ z-WgIo`Ji+AqLHT+uXgC!vD~*4+X8jVZ6P$S>11gaRIf~D@7tkc`D!(t-M7p%)@$u) zM0q7oC}s$XVev7$h;MnD)xMN5wWmL22JfxVX~79y347r^M4DP9-moPj>$irGuUEa< zxexv!jFaPG#aw&liOF}KDEnXgv~!$1u`bZI1Is8YH`iz_^1DTCT&G76(NN>z@jT;l zJ|G-1gU=R3Z(bRDf;-8pJlT&gOk-e=+d}$aSWHKrE4@AN7}N&+zRIM}gC9B|R57b8 zoAQ+>dOfY4sWBg4#+lGXJXMf}=e$<&<)#ukc|ZCC?^9q}8rQs%m!7!4yo5d1ef<~^ z)~xeMdeMQ?d`dW{utGjTxA*i>k2lSwDHzuk7@a9T5qO+bKMtMm>48Fqx3o`QAPhFb zFL8Dsz5IQ{=Oluva^>=JlZeUztqe4X}9*Y;fZ;+}29 z^FRe<&|y2WJy^fxT~`Mh#s#4@&1Cl$0o36r)G+PKQ{K+Y&H za^SgzGDAm8p?%je}*l=9BV+v1P5{%!!jruk+yb8kwI>0l7@_H z?e)*-BG1fQYWCtQf2GTe@)~~5xF){5i0d~8gPq2;UAfHi#Etl~r+GYnpL4gp)MB-E zuexCU*)1VVt-a==gfTQ*scC0Rh|;sJZ@B}XdYsC7t2Zd}! z3dVIOpmt+g)o~|u+D&7(gfOl5TB~93EM81d!z_d=xz;nwrpexFZ0F&R#x+0swek8u zPPjCj3G!U?p2oV2{#U%oxzGJW_!ykdC?^i#zx+a-9NmE6nI0JCuSB|M0n{vkMOfG^ zTg;3L<)cwS*tCf=sP4nTg9l-7K+ndHjI7$U*{$j%uZ#T_#p6M(IFrQD5fq4ugnz>oHnvBb1N^&7y%o2TIPu_rAI$kqFL3B}qCvR|UKF+k zyww2#B>X`c&$v5kPa>cq{4G%mgHj|-U2GA}8x4g*n z#Z?C7aUMF^!>SEgSPTi5OQ5;oI)C4X2}s{en7Az4gg$%%-Z;q1@^jdq3Fw7U?dfja z8Sr7)`{&ExhkyZN-G+_L&$6X)98Virfwo(r_85(O|808*sgix^-6e>{uVeFDu2 zgrHOrJ zBuCM6t?bamr||Wn9{d9 zXk%r|!&TiD!bWZrnWau-Tb(#a>05Z9b&!nr#7-fu;qa#l? zFs@Go8XKX51@eTG&I?`@jO)8U8Vq(imbm4Q9&gEz(#31}Bky_E7}wB5OVG_4@O!T3 zd|F8CDB3!};)~QA#H4t@W9hT zNJB=0eJRJ{^PYw^2LK+vNSJfjQ zb#LEDXZ=R~m8I0P^q!1!$w!8G4cw^AQr244pzKj5wHKaCh-?eW0|$qAer?ZaTt;54 zjWkJ_*@kK_F0D%*NJlcI{q*V=I?_+xK5S9)(9%*h^3zhj`i5;ll{aD2KQa!+be0ze z9RK;~xMEe8M8=K=CYw(k@=`!2HNaIA50oR-GYx>I5IZQ-@JGnnJ;dRymRg*QOvz(N z5#o!yrD}J{a<*avnh`(mxkjGxgry`X{v;W7LjCZBd`HaY!ZuK#^QE z#;(QKbe;5Zy_73GNDr~R#LOk5ueMUw=`(R-e^6ggTIyEz3m)}L8$v4&@>1J;_(2;2 zS5XAnyEdS~;j!?av~w>sqV(xYW;UUCIR)dI zz?aqJ&Cwpd5R?D6ib95zRq9l#X}SDqA+anS8M6fnOI)cODm%md%Z+IoJ$tDLo)sFU zOfMM`&4qO3gz4bp;U~UiC}=Q~am}Sn#;IMXh$&0hv+RLh5kRlxPwnkP?H}qW^Zr@m z8XjIP!yi8w5@;etV}<-sxpTnK7eDEdnGKju#q@>qT3)eaHgl22U~Uc_G( z2U(E8Cpov(AX?fUk;*XRHF1_)Fh90C^N{f&IX3t?DI~i)M*>MBLfL*nedJ(!7GW(s{hM2lnTXcE^ ztNJTxs$O_M8#GEMT{;^rImx64Ia#+aG|FJNvPquOXCD>=CD+WG{7DwMD$RCK`*B-{ zfVh%HrniR&$XF+D)SI_`n^J{UYw=zSt-rHQsruMO6S8Dnce^Pen5Fb%2JPk{C9wh? 
zKtj%Q9Xk#7z} zIs^luHc^wDGm7-RUci<5QSW1r9JfUPGUHmj$WXSPYxM=6!u+LaZoMN!KKLbr)Z(pZ z9X}8faKflbgeDb?iS;w5J=eCb?|6O4kN=IvrW!{5o%9i7NZ0mjtDoyrHV$bfGmTS) z_Ex;=Q&)zYcXWi7U5O|Crb$1?!;U@Iphr;su-!;YvZO7ax?1C!vOjV%q~EYLT=g#< z#up&@5t;pQ0>*g0e=-p52jLf8rL&9T7E#!eaoy#{^lDgMNf~$T<{@QH1Cd}o7wW9> zB!dzS*qkvGJ5@*-Iy}>GzbxrCnprB(0qhGVm=j4tK+!+w$}sW9HRzKLI}h_rw}xoo z(x+~9%o+`Hf{t?wPqlxgqw?fYYwX}s{KR6OGPsnA-c6O&9+{{3S+524s5Ih79#4k* z*?5vh_#Au%GOj!Fri;itUp4sIY*c2+36(aD1-3IM3OeJO+d`P0TYd*VJ*?~w1g9fLmK-6P6#RZM}BGx4Z`YM#8!C=Qu;xV z6I$|28u7*BiuR$VX+%o{lg%f^y@Jm?N#)eZA@}+>ycQ?N47wM-==pBX-re4VowD)8 zFMh?n{U`o4_LZ-7Teod-uX)WY96jC(22`iKe*Lbn>cVYAs{2AlO@EbKXLL*g#!=5w zPI#Xh2Mm+0kt{JejWikLxk)APqwLj;Yv2-OTzBHRrlCr@^gbVcBG7B~U*(nz@4G$7 zHOodlr}$`imM_PxD)M}`nh){#MmK@9Ui+_t9^tWYMS3S-HvT7n0)Jnt+n|#b*~T@M z8I^rD64oeJ8eN$-zVlT`Mh-Hr2x=1Mw}rsK7Kj$Fzw>XLd)H5g@lfHy<5gi+=a4_y z653Ly_S4Fv(rSBB>Zs7_oATnvWd!tabH%s`RC$j1mg}2fES@NgW=qh`8VFx#gdt0i z!VOQY|4U)WU@Y`p^KCikBX3;C{1~84#d`vur7&g0hM4@n@*Ert;YFTn(BeyKy~myo zbV=J9*Gx$|8c6DlYv2=PT+?$+!&A%rww%O^ka?~->t>!b(8RCDff%P{W8LY467&e9 z3RA=zT=`;Z9!vmvAlQMFo=k;@*xIw@Ro>Et?75b=L*?LAWwQgIwua~!Cbm13IR{UE z$&2v#*Zm;ctI3*_dWfz4tB$lSiEYL(?yr_S30v~Q_?Nsc3`R%bA^+xA0!BI@D4o3` zX|^pyGORKZ&kmF}9q)xNqd&@Cj|{04U$itZ*?dymEBJihecx~|x#LB-9AEzOJKR0@ zd@UDt_Ut)#-KLH1;DJN#Lm&Ex`^=|5=l1N`<+g20yjrO91?@ieAD&a|GAnDrCyP&iYA}cuO%Cdqt21;m8|$QHT;@!9huP!jXZTn z9}$zcQTe4;?YX8w&JTVNzWS+fZS`OECw{%N1bS8P;AqQJT#f9}8TK;HTFr-e(jlxL z)*D}HzYdde8G>!mLR|{j4t$Igmz;hUPAk`R)k<|UTUv{!@y3QVSef-?Xeq_y3-AzwljH$@#UQzu9(@B{?}{W7wBwnO2Uj)N{=- zm0L2og@+z?^7_kv;M~9aZ()mwJlDIPW4ZD76VUxLZ~Mi%@WmQ{npuMG)&P|kN$^RUc{HeS4XPF*6x(2I)~EK zFnm5drq2&V^ky($d2=$_&~EfL(y65mDjMvxOp|R3xxQvV5vPH1ot0S%-tGZoI*cMu1lf_E8G6&U0-O@ZKhhaZ>zT-5?#+ zdzy5iJhQ#TqpswQ{Bt0ta@Ax>FA{R_qynb*nsInRsv^&|U2(8w6m80%6g>xa8tOU2 z1c|s+gupbnz7%^yZ+Grb-&ON=(|e9_q$x11mS3}WH0`B+mJ-SJCm8;O*iS3+=7rGO zKkJZ?84o6(5OY=j>9xN3=9_YP^N2Qh6f-GPIL+z4=@Qscmk}HpmT!T0jlQc0XONt7T170 z(v+cd%J=}w)DQd{yp#&vhNh|VY;^Ek!GvwyXWs7KPN5%8qg;L1s#QYY zoj7v}=i8%iqD;)U&+D$T8(#)puX2g8S+1o?7>%rJ(5m*1yUK0f?$1k}_HD_MfEC=g zBrh;neY^QYG)BX5uPnnfUhbSK;Eg9G@x-e}^s{Z9lx?(C6@2>hotZ~7KwC~uO!`yF z*j_=741X|$t35DI3!rof+csGzsCT9-A>UwFqYdb#228*5``pCy-{_`vG8n?9&|cFl z<8;2y`Zm&wt=KX*f%dJ!0o03ca6mh|DaZ=bmB3FQbs@Ghk!SzI@xDDc@x$Y-*%;`P z0RrR}XXAnZ-u0r?k!R{@_A|6=1t+!my2lZ_)mMJM>v`XQ_I+mRCw|pEG(G5U!an;R z^4n$5iCbMJJ126!f6s}+QsP_<^l)}a#Y3;2X6QML79M115S3^D(u>TWKLut?@u#p{Vmzqs?Di4zl>_svIHen*-=7 zFSVoEl?iPfSVF{+F)n$^JlBxzA`mTkq-;J#_*W>5hO~^+W?4>S;GP(taIbn5j3B_) zt()DO-}1fg)1SJ_eb?(>TbJ~aN6-cahKGk8KU}nOCHiNVfgkQ696Nf{T`bSI!Q;<3 zw7!SwqffhMo_x|xtDR%GE@c>#%MU*3c7Fb&?&7Hv?#v5b=g+Ahy99#>a=+%pakn)h zkA3CS4qDEQRr=i1plP8D!?^#<8Mg<4C9GaN8RPrTpLXkx9(Gt~b7yBt?syERK3hF< z&TT{Z6b!(F2q%r@haPh~Asdr0#Gicn88W+Qu zQGApOlLE;8GJKa{C&ETypgj$ocA*Db6M8WbogBH~jl2PTn5xpd+FgF;8CQ)HP=3U0 z#R}GVb6{N#oIdQ4NRPT|{iRX20U>8k9(N~_wjv^BhDV?Bqi=P9`X3xR%u&@(hWQPJ zu~B^gL4CI_K`nr*5G6PhhK7b*8IzLn^LWNS>j9pQ1(-T>)=lb#|5`GC*NczFQAbX$ zyyT`%pMyN%qg~T5Tmh6T!sR0e+~Ey}@eP1s|A8%r4Ieq^PNR&~%pPDUY;?%R4(Q3g z=;%9q=n(4rqT7bU1lS8tj!n66X--Yd~Y}X?XKj22DOZW!Dl)LWuVgGTg6Zja{ zxq>=m9(<3c@4^}0dYFdouo@rE+k~g{Cy%*{gX`T6+>fnT>z?ji>yDtWQRk!+@{oFN z|EKYhvKwx=VQxgaoO;Q)?sA3p>FwSI3caQ5WSnqN=Kek!vh>>0xYoc-V>lW;mHY0h zk!83A;-|)djn}>M)G-0rd60*l52Vw`xF#=JI@c$#iLd3)2F7(}=qR3E zQ48W-FL^SK%7)=uu5I?_xGk5ZYwuO>4AZ@gGKR^h=Y0k;$iWBUQ}NwgZl%&?~COA?%lQh)m+LZ2#-tDpZ`byk%1m9d+G(6fLZ%j z9(J{d#w)|U2fG4ZkS{^E;0t?FKFr*>tEbZJ;rh{K5#LEBT} z9^A%zu91fMS$+eBtNOoV=XMxJ2i!)U=^f$RIeaN|S$q*VhHcuk!EN5mA+rWMk@gE; z_;M{E-qB0?f)}9O4cvF%eaK^@yXBT!OlWs*IZ!|J&_nL}ojcuHo+Wy-B{A*ir}$tX0%oudc@xk;@tJRa!|I9q0Zu@ 
z`IZ|o$W#Ath3>I0`gH4d?Q`o?e)g%!V3aJw{xeUHyqrDcaeKG!3Sk@I4EX@a;)6aeDm$MW3M?3>Dyn1uj75jJ?Fv^x9sL0_vgKP!p;7Uz=WQL=lAh1 zd8{q_Z*p76?>flFsY7npdW=JS=Bm=!gC3`Zm8n9$n&rbd>Tx<==wP8PKZLqszuLJ! z>$~(Sw?vy`Bs99g!~d^ZwW{$M_|!3n!4Hjmdal{Fd^bp2 z;0&F*TY7(2gpYIW*|!&~px48*3kS;a!GOx~o5$4^0Y4VR7f1b3rfBz*_)#y7#Fw;@ zW+h~A@7}#`#}IT$giXr^+@|M1UgHb03iowFofy;aU4{1Dhx+|w7{j;Se7oEB9JD9p zmMhJ$5%zD{eOqz`>*kwp#>d&ZZ+0HX*LW8iB$n7hWAEDIr%wiejMPqwAmfCdUKy%dDwhGk zff}>v5e)DFZz7Bm#$Yw2WW3Ed;!% z9M;H!2gHLs)8Q%m&pg-KPr0UnaV<}#PZ<@EJ}A)a_#d@dmLqOd&o)f=tW$zD*r^E9 zG=!hQ0aHBDq!Y;c z0ca7FxB7JmZSCKt@%P|4oiw*bDPT$0aXXs$WIeY6c0#T)>B{uYUT74a$|FztVd!;1 z9B)gKFNS;hD&h}e(o@7cb^wzn$kD;a_yj#{(EhCW~E`CKW`vfi(?Wg}|P{infE1$uD zpgrEqSO#z4A}B6-=ByE{cwMDPG$}~w5HB@2WPr=aAP(thLJ*ZOgFUc3GC=H$ZSs7} zGc9~wm9LEJ_%%jF`z!O6VqyRuJue#YR0ke8`I7-;gQe^gW}kV_!H#^1K8<(rsWjOY z%<#oN9n{aygL9H`=lh-ek8gD=zwnX#S>;)}7H=pM^g7W?(haa>W!>nRULoFp>@Pwt zx4a~un;mQ=Pc(J~9H`^6uY$6x@FjnY;AuI&T~U8Fop9ApH2A6hv-pg9@eSpL9!92P z9jctf)*zdvB#vZBAG3)kmh=?uxfUJPo%pi(r1Y=gGri2Of5YqCd;jEp?#??u>;C4i zKj^k>-I95-87_F^8^6mRzxUbC-0eQ_fe*WrIP?0s&%I62SO2OymClieu;~XY(3Ub( zut(Ixu>;|VG14oVkV{wPN(}iVNat&%kTf!V2QO^~z>j((Gp>m%-s!z0P+p?plQ|_q zO!#LSIV)sbQ_gnmgX?gUpU{?{e88q?at~SrZ3pC7!Te=hD<8^poWM-2y)L9__g{LN z{7d(e?`{Bjo{Mo!o(P^t&>|=-JT?B8{;_Fn*lb7fF&+6Q$hbc8M0npDyleIW5AuiQhf zcckQOtEGHa+;!hS|E#FC#)Z^pn>#)!grwEM*g>#g$%B*|ze4xre+fSv;J)FZFNgT= z{$Jn;{mZNahCuw2hB~7ZWfo~}l;;4Ji3l>B!&CT=hJyCR)t1Mj=b2cfqvO#u`xOoH zlD@6XI4$!xFs^6yT$2X#rMDpi-s2~arwpPj-rUFlGC`=g+*3}0u^bwB{e%Vi(Aefc zZR?nL1oFoH$%HrlxN~p*S?9ijHsohxpM21*{`B9v>2KWS+<*R{M``=dD)6=w7bFnh zjPk}c6f%M3puF*P5Kf@p`McpoLV2<`7*7@zEnmuc{wtjO%!hpPDpn{OzFbAx1Q{s2 zr_yIQqjE3KMO>vJzx=5ktszc2jezOcXU{J8j(7Zo8yPu^z0)h^Wob9d ztdkqbtUTBJy5ML24l6pwI7#WUd#WvIT=N|W9v#V*jE#sa@)k?CSz2mWt-6RN+g^Ix zuGo0FgI5|*o`BvU5Ld?a#;qZYVon*OUZkNSqub+?c|fL5<%xux@onB0zZP2n{?=8n zJtX_mqEFD^PxYs5dwvoD2BgoF5Br#mYi$E-K(b*l`vQTqvOXKbLd@L${iwf`j@Psvr0=*QDbSd#@tonBya|U_=E&ZOV(MWA8bwswETFRWtl^sT3p(AQEtgG~(z52}|u@rkG<lLl20ID*;wd`c+QYp!cf;WzHK>nqq?NX7vK@GNrWCqF;4+e6gZ;7+cH zMJw-#r3Y3Uf>&H|P~=QqOJPNIMj})`U%xBxgg3rDZ#?1Rhkwzzp9}^}h3!hZoq!ZG z({@V3=4)w?ww_tP|NPeh^Sgc+3cBLln|>Pm$M=LX*}-3J!Sibc^xR0c6_#D&fFt>1 zxF#;Nu0@aG5jCSl{?vvVl!!MQke7jAmJ0gZG9v@C|0zKM#gy#=v9P7uy)y*p_YC)-FqGZQbgZV=(q?i*gi$g{-(9AVN_>zybTysTQ zCoz#k9LMY2z05^o002M$Nklxvq$Ne1D|3XQJ$~~dC@hi^#gNd@o=e zmU+dxsLJVj0^Kw2 zul=3d^TkiOHMia2dK4}^UDf8A+%eoM7HmiSf6tG@h2|+tMmFJv3aBg6(29{&!bQ8; zm2@=_lRs2W;@di~@fn_;7D77YVSYW769{YZ5uI)ZYn$%1`_?tUw!?8|9=NfV%E|Jz z!Oer48uf7%ONDVH!`OaWff|MU#4BxRtTOyRd_2U=^+An%DA_5R`lK{Jkdgyj?I?lm zynLpd;pCi(tz;wlO4f8K(V`<>mC`F?h58p{*71ApcMEo5GW>UWGaB9HTLP_I-Sr}UHEz=695zz zo|^yK4ULlhXu!5>IF-YXVK^jED`(6J$4sTAoRQY~Xi%fF zOC?z=I@>`@{@ld(vI}E5Z$Zm%XjDJ4a?E4o{Ad#934n)LZ){5%ljL4v_gCeRn5{u( zG`zA!eD1jy+&}#Nr`3m{riL8|2WPkkF#2nwFe$}&@IK+!ZvN%;Qu#nTyNf9 z`P=>~ItM|@M|kS|R|04OItPKmjlxLEH3tq@K1Ww^oeD<^_W~qpTqD zRsAIjJa!Cr5H3kEJquDoIS|n5DnTv*OkCw#Ux2cfJa&2sX*)=fo~-KHw5W8I2hFCl zB~u~9tI|~-eR1$3%Vaad33RGz zLs@GSS9SNLzj5xT-|GA)i)}i>yLP>_8epADDWESfWz&icf#w5$pHG`{qzF@nNxAl? 
z$KYM`xz5^_R6IW>SZDJSVPdf2hB2vzmhsd5X^_@%U)ZE6%At<@j?Zs&gK`@=D& zjJM^^AQw?zJ>b_m%QanR&K!I@lg6t zajxWZpA;QgF}lr+1hxd7S8UK}G9SLR31u*~1_urB6kY@kAW~pMX#KC!V<*o9I%mZO zBzV-Jbj*|lFi}NE%5$n*OM%pT#Sx8kQ={sWRzZS}sR~c#7v(2NNXfb>!K(o;VAXzQ zb_mLY+sPjd51g4SYS=vCxWAmzZkD|BnW&ZT@8ir5S( z)&XC${zX%KWtaJ^#Yt_?cv7?(S8Zb76DI|}m$J)uo+l8N#?!N|OhbIqiRGF+qV#-w znsO`8%mwlT z6L+-C{^*zd>)H?K6QuMmj0#Mm;7{KF0|D=EKjz&3{2u3iGkZj<1BDWB`nYdHBR36vCz{L7j`)I{iJW-UU>Vk4Vll`%-+tFZ~ z3v{om!W+h$_(FL4dhy7xV!(}KJ9rc041gbQsZ*{Qs0kMD38W?En*2lKn}Btdl?#Ea zuc#Mf8yd5*78dU!z`Ex@`w8jU{=znCXH+SY##M+jBb4h4_`K+X`EhWHmBtz>LOT#PIv5#qhwaa|u1@0K<#DNwgK~`^wo6$piv;?*nEKUN} zEu)B*!ZmSHIO)rB-HCEd+8KW&mg_9fz#)(DgcPoxFkHMOo&}wnat&WY3+gj=Vx=&P zckTX?YQUd!tyM2tuF01??O?-gL1upGZyV{ef7(si>W8wp#dfX^#{NkPl9P?pG9|_B z-T{UH-_ue|$WyMtGn32Lh4;Ik1MX0`w|=hVZ+_*JyhEi+iWf1-hy3RW55!xzB*fRa zKo=9dpWOD^#*gR>?G3p4Wjzp77ox#1Dn{(znqbBOjp)ywk7u;usqyMpUOv;QOY&-2F6uC>S}cY^sMgVT}Bc1G6dB4m51ZUPq-y; z^++N8OlB1g|BbId>^8x&{oeO|x7&WlZSMDe?+?>xSM)ivTub5VHa8ODvLaLIJhCrP z%>)VJXpm6{6=oD3Rpl*Dxt8K1zdYPxrHF6lqb1y1ZuTkDP;;#ow4d(dIn#r!l3L=9Ef#EJmQF6o#h%9 zuR0(eCPRc4KKK=_C?sVM5exjPj)yGnm8-ghfk1e(#0_G z2;(ZgHe2z19*_@odI#Too}^$-gmdE20MMa!(@V*CMcJ$#q;`4Vt@7$ZdH_&O2GoVF~aX{8`hWR|8ZV0rH zX3BLzt7eE<|0_AcI+C(ERn|?_gC+=_8Z6Xby{wAzg>_v7QanB@Op}LZZXu%2Nj!l^ zNpf$?>!1)5qQR?|yeYRdZdN3;v|K|mr?34Iv=@%SQgl!_AAo@zGz+FjlNs?(W2g3|@F)C%!9> z*;xXQJn~3g2tU8c4Us2LLP$&l96of&-53SkfjGZ{=ZhZs!!JJXjxAW@N+|E#BfH&3bnpr~ z%QMfyLed0E==6hF#;oGRhZWFgX{*`2<0*IcOwI3o0M?b$$A;a7Qy1Lu>9cO@sS!t8 z<&M1v+~g4`|A4-;BmUzB1E z>~KdHz~U5P0{&lT_wRSh5AAcXce`DuFSzq2uJHO#9mQZW>dVP`8=rs7O=7UT;rMa4 z5=du`4!b=$@p@qi=#$b9ybB}e-LAd+T>1E1xAo6{!;_!Dz`OJNKLVP;x-zm8*Xkq zd(Pd6cfYid&Rl>F`Avr!Qpv>FK9EF3Y;$?0LtbZe-u? zx;9ijtT?paA(tNZJ@bS+HCA@pwrz_c*DjY_15{#UrIh8GM0pe>6;@7)J`W|6M@VLX zIMx*+Gb+G3NWh9Jp~~>lxFXR^QDFFr^| zh7@z*F`npY@{p(JC?3<4?|r)35sh^{P<%_8_%FdYd1!Bq4{W+>d)5Q%KB70|NuD=Lx8hFJKjN?toj&_fRs+ z)i6+$z~qXPn9qYjJZtdblu42l-_eTf%+$270y=>KLukf>D-gL(+ zLI(xm5`{3W1!x!dS;*z_(JbAb15f&-=PX;5g)yLX^g37G4?*$#LvHDuK6j%@a}-~d zIy!#d4L%(Fzt_Iv-rBK)o?7@R9YZJ9j2mUCa=?K_r+jj+N0{GRs!LN*OI+$d4FMFP ziUD#0(qO})ntCxl;M(&70?C1^at5C=KJ3 zmLtzQ_bupO^X9l4dnYo~m3>vWYzsDnaTyimD2+hDK)sgY(tHUM4%I^m3dhA`P>>!2 z4}dM|cz_m3tnl=FF(_(GVAcG&ZiV5KEYWK>UDbeh31Q5WkUIpgTt_8>%FWw9=-elN z-MPQ~9p@hZvU5D=_f78!@`4Hw6$`5rP%){3pJ>HHc$Je?PP7Qb%L9f}IpX>C&pzVZ z*FGJr*OWt2tkQUpE2C>}ar0h+Ts#2&A*7aThH!vaA?ufNo%a>wK%vbq@?}qa^6h}Imm$mu<+>NXLsX>cdrjq; z@YthIxLdd0?C+^muU)$umZ(1W#Vae#7Nerbo#b`Lt9XtVBE9$%&?P|C3Am3`U}4m{@+ zP{O;GEy_DjN3jx(vx0^o3=Y_(H`3Jtg@|kp_A9@LjSC$VG z&41OA@dUVM8GDUy8dpph1_EKg$PLz1 z9<*7SiUSpm*S-TM_B@A4)Yrmsjy%$ln;oM((vrKI7nMkcN7tQ-+=kj_%;zF(KBIE| z;HN?it1Md@QK>nJyoR*so`|(tP>MV|h z*=j~n`l<_`zu2Y5=M_V{4e^{4GoXWFPsNb z<`*Nz%zzJk;Jxk>pZIgHM4vl%-ZKdOstF(e!#{EV>6bs^PY2;?BA@)^U%>jj%I)2| z-)&gGuHm!2SFS`=mu+Y)ER>t%R!q;%sZON9K)zOK)aaL$ZK7jb>rxBOVU7<*Opw%=g+Sw9wcthlx1(?FH2~4UE)6JYs z@Ed})tjHJ2UjtGH0VxeGW$Ju#S-;wmv-~7_YNMucC4^0Vs~v3|(b1Lk)sEsni{JDf z>B+~NRy(_4?ASrnW#lC(0pxr22_2D1+8~qp<)#B2+lKLF6(qj;0qQi$(&(% z;Zsp`pBCu*6(|yf)w)7{;6EG2w;iq@{4nl+&Xs@Rdrte>`dLA$U`GhX+Kbf{ZTkFtrY@fWcoGT>Vxko4_|bU2}B z2v&yEyp`bM$nx}YMUa5;wTcP7FC)f{x-ufambyXKF zuBRCyxBo@GmAJweF{_@JkVq-?A=Q0*LY-ldZkh6V2EmL`uDMM8p%49t_lce}2R_DT zhDF6%@V2+T*)twpL0Kuktj#O7L-jmvSxSX4Vi{UbNAW0zxxb zDR82ZUbFNNCsVF5leR#51mf~beS~yJu^5~knD3~tj2sQiTv93)-Vowuzn~`{J}rys z9Rx)${J1>`NAK*_=AYr}-9?mZ=JBil@PePqW^!XhL9ot7IKbw<1J z)7||FpC#9Hugme{$K9DTaGB#u!4w01>d6g9cfaWOKwQNBlO5lJpATS=`&QtuKMKG7 zDlCP)XuGvHtZ>&sxjyjZLkR0}>wtH*QgS;Hryp`=E2&|#^6qE88ng=*RD*s;Sc zyAWj1*zkz==_h>)gWUH3Hkh~Kr1%#xVLdR}=hNZ4D&)u0 
z&phKQ@k^)NM=^ME$W8VRxH0T$xedxxANt|5-}owgwfo(ggZuqHm4j!ohaxA9gV=v@ zVRRSjJq+`c*t@eO?%#R#n_qL|bAw@|4CBDysbRO~#8HQJAGZ_xL(Y{#9dhqOPv1g! z5qnOqhx^L1$G_k%3@&ibKKm?m#EWiZB+6!6xB~Zs*zZuD2&<}N*Pn8e>W4AzOE0~I z#iGvD6YklVbK(&8XqY54C?<+882nzWT;Ga`7o?TjfmMIqTOrd=DwKzvnSlri<6$0`l8FwfpZh?xS{DI7x?Kx-&u0%jKj$9;Sly_9eesI z$3qZy?b?NUnXJ?83Xm*>LK(Al>()ZJmkX(L?w#hGQORxxoS`gPvbY`E<$sScK07U> zg)&trW7hI#mx$=Ys-4Fvp(rdf1%OJ8lt9cNTY^5Tm1in5pZR3Kx$pbJ zRlig~2*hVzskrjP1@s}4RquL)TVPo!T&ZMoVn9o^tlKCD9!KGE*XZ&cNW&+jmk8Zt(iI^Nt%5w&2dfO576lpD zhZTz3Z@=9wI*oS(uAb$|(!+bh3fA&V|sTX{M+B^`XMuKTd>r-ady3b ziI#nhO{+H6%FHwYv_y8rFO&rbG1jq6pGCKXGMm!bj}7v4Kh52bF|;qp_T_7XOC?Wy zp>d0T{Ow@>d^d>`T{i7{#tp8+xX2C5G?@7MLmRWa+&s^5(Hbc@mJFetho18(EV^a8 zTa>ov8PwN30g0e<$}~ObroNZMoI0Vp$;jskc!9?q<@|E*p6hrFL+K(~+}p!}eGDTK4Z83VQL4D1|wm(rI~EJcU|xR#?^D0dkd(T#qn^~!ZrD$z;i zeCw-0=&oEJjtj~N;D*XKf!4m&o5Q`V(Q+l>22Be}SMDyxHN?-lH8>P;3lUw>q;DIQ zI-aIMMJEL=$-&}~0qcw)h0WhX0hje!@1swqA86+U3s81WEaMQACI`z_iY4;O|$5s&zycn+P)7-!C=|2f!@?Ygs%=aW$F_?GpIp4ATTCoxT@_8!S ziS(~QIMhL|S=Oi_S;V?#eIo#%^sH;d_nR?)@%O`-#vl1uO@0#3(&V%9PU8dltU#ZvauP^oj`hO+EWYJxqCovk zGR;ozl?URGmkD+}3L%~UiY{YnGSNAe#u%c@a`T>jQRQpIZVXYs-1RiRkoc*f7#EF? zxIGVk+6^Ar>)az>2+~ub zm!e!}iVf;3y3LSxT7up)edU2+3Rh%3i=1%NfkXIBuON^tM|&%$`0(JK5R(%a^2+c3 zQWzMQuXFAlKUkw0^HY&xCq5?z`Gg;TBFIum}$>?1lM zLz6?E*GwxLLEi^pJJNDZi03(1Bs=U^=mJLluXvKYxZ&2?JNw1OT4?%#`b})SoHc^;Bz5r0yZe!7BW}|FH8P z7<&AHdP>4+^+it^UlfxQd5-twJC19%J~io*o2Yn`anrWbJLwL@;h3gL7@s==wg|t$xHB+TB;}t-}l|q?z&b&e^ z&CKW8D40>O4!-c1>p6jCB<^bG*_kOwsiKg&yMD~q8%mcw_T z(<@~~mOqB4AQ}p{&r+bY3@IF1t+^a(!(|0fy$M$Wi|rw$S(6^7r8={g=crWbL<*5= zxMD!9D1N8@xEk<8RDxR1y6$}*%i;K^#jY)&jD$;3u2uh3V7L@YP*`}%`9(c?K>8{N z@&OsfVSFi1qQRs8WNjSU9YWEs-Ctv%r8SF+%Fq2K+B;e}6%SW`-Zr1b zL%e4{5dTs^W16fJDTBnh2A5O=s8P67VO4>k2@db2NYH;xYPT`7|cjbv3 zJXJ_aQ!JlHW!f~t@JczPyzymrk9V@L)=dEN5-Sj9mB9k&g;B^11eQfEY@HY^)DD3P z`tIpk+bAsW%6K-eueTV+4;cO4|5A(f$iLR^c_)QF`cu2*>+0Y0DILEle>m34XeJp; zT*)6DshG7P-f4U$9zpLceSTe8;Q_1oFXO4-F_yhAPGweLUQ@28uo-)zfUjBs-z|MH zH#@A9IMQmRTvO=D3PWX(aiut_6Sm5a($vgTmJG&;f_2`*{}L$BYAt+AQ=N%QAJbgE zHn>MT_T`WthNbcJOy5I-aSNf~>C>`$voBLJj%e_nkUgWbOMy~d1dK4-r^0DLijXZk z-^vS~4kcFRa3t62-YZx))mz|@C1p@IGdF|RQq^NSO1nH{M4p>HlRjySpMFw)l zwF5B9=M~R7ix`m-P@5_J>^17Q4&t-$^{!LPHRJOPV+&NS$+LvR`@(g{!#F?wiE#h3 zAFX|AI)r4r)}UNV8PUMEa7mb0vV8RMz6MiL12kxq^4vl!e{gdgH-g`=G^EK%ruwuL z3*yUiJp#)%pusI9#LoGRJju4DVP)f1xW&Q6$mYSuvf)y$Y#4E~=hz=<-6qg~ma>d} zpFCf5ykVbI|7H0F%(GVN;xVjxmF0mr%4e#eWyl@e(6BKrpmqRpYw0hVminylxsf{! 
z(>r@5jtnpi*Z9kF!d1irzZS~6;~pegsriC+%LxD{fE?@d+LL2Q#t(?&#~zebyNo>) zwRW^hSRIKSuR*zP(rk<)MUJyqc6hmk*v`{5-PoZ-Ljsrh6s$NOwMWaVuY7Y;D%tFq zac;UZ;63`qaJ6O0D8zJi3W`y#OZ-3!%fWap&qGaxL44*#mTM`yN|)9X3OWl>i7chc zHT~rXY!6wkWo>2}@-vg)B7HA2%k^9zwdoJqEwfyAVr3!0bdX~HT0zS?L`20WFWEs< z<&x#ART_N9ft-T`CxdG?yNmqfJ$)P;(CrvnMKW>vzzd$Vc^JOG`UIQGr#O7 zI10`USL&L74XV4yHHK%i3*4`?Eya>$ZI-&d{Lq?5V^n%4pf*8q4nvs_Dd zF*5mqJVXoT%8h{^X?);@aLPqler35PZf3cLWgC#Wh4`lyKjKyPdqUQan#k&a74W3I z+5X1&qA6NZj430eOiAGuy;fJRTrUb3BcXi$f%G*_$l577HV@+GG@ruleW)mLUHLi|g$Tr&@{h~;mKFJ3%g-#&da1I?Kas8)Ob_w1^J`lj!^eb0Lz&2) zIbdS9Jpntp>wt>so@YbQ@^#^=&7j)sqzLDtiwp&!P5{XS64H>hL3IYS%yO-FqLp5e z*P7)TLb(%=J^P?!Iem!5Q+e{VE*51=pFEiKo`10hw= zLTG2doaA%TGZls?o3g5vbtcPo(NCbO9jp=fjeir%*ZZKRKN7waF8pk-g*?l((xr05Pr5Q5{m-ae z>uX|Cd@L<{FB&$C_Y^Gr+%e&iFV>W4KJye44l38dD!x7nzi4GFT{ugO8q{qPg`!DN zT{CS7zRcPW;VvPq0XxKM_~jX(6%Fv#0?~k(6CmYUXToMe&EU!(l|dTx&Fi{b#b=q+ zjV+P*HDRy#;;D}QDsRY46`#pD!ze4DBZY7jzI_CC&?scuE7y^& zsQ9t-(=|YlXK7J+Sia?J118f#nwfIlF8>k|N=Gy~ify^exi`ND^pKz5{YY&f+4WSw zrX^m3ik9?c$q_wSq`kWb6mo);Yu;zd^%*{)t?)04huXxy$oWBDyLJna;LI%7Xr~mc zaxET8arCVBtWQ}vvJc2C64vxw)n*dRnFf+bXyIk=V|F^VT&on@UmNZ;5#*#o{HB$S zd*3STrq}Gl5aJ=$C6yMo5qX7^ZVmYMM4YA+ue~Tz10_^*<`&YfuaT#(Px#m>*OGfN z;lj$bcv1aFS6YknAo4Kto}EP0RE7bEt8AK>jliWtik!mo(#Q*=oM0I#3q^x@mAq43 zWO1V`Cyx4Mn$HY(hTUWv8llJ*0m)KLx)g^isqKLEqtyiA(uGnh1llA`C053sZ~RNZ zWWAmME`k2ac|d;2R!V`udxpLK-OjxUV;k@P<`10ve||Y^!Xyo~rFb!qu4KH*P8{wT zi2RusrDd4n>2xjii)cDv;472g3fS}ummreWQOY#^WC@vD2n1p37Q!@A<=QLZ z7eZXsWq4}*Mf|SaLgaIuDc2D4DOk4<@lcARXYm`$B+Ip2WuzDicgnAkud_VF1k{E~ zPtUq4yi?^`@jv+U7?i$RJKe@B*Ku<#X>${tj?q(E_8RGAZXx0!^1l@AMbTn7?R97~ zHZ$(|VHy5U9~hr&^}}X~Q4Y#-EgV^dnMbYK#dn2QA+z$u{wLYjRPL%ifUiE;Oc7^) z;1RFnOZtR!uLwWxLdcV+;Z6Q8l_~Kct=9z7m%Nj;GrYmLp&RA_{7jHS7M^C_ui8S# z4i@#fRy$~-p_1{mnLOYJrF04t>##w&K2|%ONtSCWuB$f&YWsJFE8}LC>os{P1!dQc zBi^us#$j8$_5IHM?Ef9&J^Xp+9{W~WU;9UQ;3t0FxvlpA zFKhV5IITid_o_=hw~8}M-=Sr$!Bo{ik4|Ek3bzn3B|Dx#_j;#rT^TNGD&0c%#ii%9 zSc)nFqRWnYkr6Hw^3OPZW8hK9_j1;nTQC6#5V|lK`WB&*% z*85A-AJNDV65&AQ^QV3_%=kHw{OX5l%W}880)tfaU6a3>EcQ!Q2a1|xunn|qXe|k{ zm}yDQ;uRS1CuKP=IuNLws^eD5byt>a3T_PM1X-@N1Q|l7{#R|NL0r!bDaE!;-z&cj z_)P*7UtLW7i*)Q{?p1HC)kPkuNT;@;@l?PfJ*^(erDnS8FfV^?!VVrhw#46f>B$EJdk`f


zjIt41(Ged$9qL~%&^+-VLSlOq(d;xVD+lV%LqQ%2B9Aim3JjM{S-9fqY#p;LbfSP5 zvGJ-O#qp|m3~YV{h3Wb6Bzl_q(MJgSV1aN$Xf3VUlAaU~s$@L|*{ez}zV(e5fy-Av z?cA1KwdYRn#G7x|7K!9u@@X;6JfqRLpwT-U(AfUK2R`in({KEWcW@B@Lm&ExTe)hb zyY4!(0u~9KIt86);PU0m-oP6khCb{v$hf9qP&6r+ExR5}dMa`I-4u+{>B*Wr^}qG3Joi$Q#5V#@U$JK#wVeb3LhP59 zcK{|({#DZXz`1z(v^&?g+Sz*PIe*4g&-9}KsN>9%PW7J@BFcr%{ zgM+mDu~)tosKB`HL7Hhy9;Rn1fp6%oiQanS`XVP#FqUXM%}m5HVr-k~9d*kNe9JM# z_@>^jI7^@x~Mk>RZrxT`CmZYpYNS^RnXk@(3d^?!`p6jqeg~G z@T{&b_XgwIW5hqy+wd9e50(zFak!_fjE;^LP7V{)wjXI+tKQpA+(H8KK8P7tbt3Fb zpJsd4FY<#@)%Bxo1mlCXzSoPkqihK#dSNhQl1RS!Td`t=E8zqhFSn?F%54w&K?Mf+ zRH@HR4;E}^0X>bA$0q`aAh0y z$JLU|qoTSKf%q)Ck{>+_O?;@nAO5Oh!b?%F|B>2@ytSD2?v$sOU9>GhVd1I%f42Dg z6!RXkW9N4F^wS62#*J7_i*WATdAEF7oan?bt}4Fx#V^PVYX-mRO>c1j_>ceW-ul+> zk67WtJ=r(ilwV1d0la6=o*ZsB9_WE2Y~Q}!tzW;seUfxGoR7M1)mrCt_CG0|9&|lp z7h!CybgMRP#ukLt(36mPent1(kk4z=i(lc~H!x{C@R-}a7U5|bxs2Y6K2N>RbjGOJ zvUatbTDu)dOud|fKYSE;EAZosxZEPBZy*$vj;kRT&|Z9rk9lzqa@-@!adr3Z-ELr@ zvFAHSlB9-v0J~-IQt0!YpuB$s?q=JTV_Y`h?NTmoW2(m*ZOxd7s0?jvL&B@`OSt zV@7X>uS@+aTvhSt6up=&($_Ez&-bXpJ>~j8dWBe?W*(?AmUXz_tqR=!q2qp4d~9%~ zn~Jnc4DVm=!^ekCxXC7Ya-if2g0Dj;kuuU!iOnEQ8QuU69y2&~!1Wb-#7e+pe%0PV zSM7;upbTd)Gxh^SQbd+dWh}u%x`ce; zBfT0d-vG--z`)4zStxTkdKBg88Sp&0DdeBvxQX#lKYi1FiPYoM69!xhEFar%zU0#8 z#e24)>NJgO_Uy|Xjn^ucg{y-`dBc4Hkhv;JgLyrSZfs5Pz*9p~km8}?dHg^wwlop-iw)K4 z9BF&7JXpkdFMJu-;;{gA)}V~nXq0l&%SklDbs~s}Xp5fTa}7KK!-%h!Hh12DbR`&C zxN_?wCj#X6_<^u;%XoqE<}+|gQO`K7Jl%Rn-moSfjc2{k$Y)L}I6>eMa$F7LigBmF zQ%eYqjMor*b^fhL*n7)ODhGV_&Hx~(ZfOMj_AMZy{nnC0IkeR7Hm~$v`G^NSr+AC} zc|jQ0oJkhFz}TqnM@G(~AJ5FXym08yp&SVszcei9U6~J%F+YT5(SA4WHetUtRphCa z*SX8bj%8U6V1~F29tkeP5l);u<))2+uyS<5Z9(|d*1Mig{8x>NV|glTN_>3!B^rg96RKOo8&n(H00}#lavw^V*lwO z?2#+G@iKTA9t!!)aCwl^gL>$}?6>FgIX5{rg}q#3uI~)eKpBqD^upk-F9SUR%#(&l z@7YtXGJe@jOu?A%4|P|@a{Dr@5KLiP%Xn$PFFE#&oOHWl*4QTLP8>gu`pvJ5)N@t~ zVSdZty<3U$5USXQGG5K|);EGW#UML{a`apraWg&rzJ6;|=s_9xb;7tR_L)^M_;M9K z<>lnblYakSvk8)=JJ)*})+%E=vQI&sMH%_dcuu z&48!rflJ75*j+%c8XBMReX)o}FFc)Wf5KQL$l?t*+)#vi#loluU;l>Jx%d9b``k_# z(L<+B`;$QA$!7R({?@Pg7Z;afW$|zS=7Vm>j_qDwv0wh!kG<`R@p?rvlF^~eYGM@( zYZ(itj|Ys+JJHD@&OForn9pM-tBS^Ya`8C3C7B0gn zGo<4!nQ@&#T}~|M}m-maOZY`#*j#yqt*1u~R&G5S6U<_U(l+C>ilVxu?9#X{ zTH2Ld$+#vi!Q+S9hX0)Oco3h;xM7M#8L=RY>vikax)Ucbt4*Mhv2pk3z6kHis}t<{ z$6If`H5YjJ@L~VFW5zQbd~fnWd~2cCAfeJ{!j4-Vf5kIPIx@AQv*(7C+C z(;-%3*)QV_1C_Q0m^3s5IdSmykl~^n{c)-DeZPafd>;)d-u>C&O*&Cq2JIUd#7nT? 
zjZ3mE0+*WiqfPpb$L_%wFnFqk1?It}HF`_vk?BJnZ`oxrLi`CBaV!@HKi2bl;OhpoWPXKRl_5lZm(VNHRMh&u5%G>q`+wZx(e$9ZZ zkj|IU{(z0oyTfgitlf+0F_(FqRRr_kOUym?kU z+W!(>Eb1Gd_HrXua@QevskNW{`m1YHTfX-1-wU4iVmbJnTeD`hjq--I0>1mZztit& z=5{=uFk)Xc7tS?#r928`bm-X{9mEr^jBD<%ro8KYf#@vBlJiCet2eF>=Auf}A2~oC zGG1iVD9;?8d8jkSqfu&5TE3NmOI`^q8=;7YtX!3VBQGdlNM`-exv~S}8i9rxX_MD| zw__X4=h0^{4p>m$3?ps+h|iD55T5t#;cEZ+GC8)3WWbX|xTl>TX5%>24Tx3H4N~($ zqo{3|Q5tTaCFh(Nae>*!iOELBwQVQirq@hvV#GqP!<7FnJ)6<1OB6dG!#y}O66p;Q zd>zL`@my8Hp+iR$?5__${3w(UfRa4+Oi&OzpYZR|=}4&%)4hL{Lv3gCEXqZAYKtPAg&dZaPlLL6meS=!i;&{mF!l|C zF}~d(3{`@>e`)&cIkquz$wL;-06^q}p7dUxtxH+I`Nd%Z`05`B6FTC3(#ns$_i?5c7^irBpI|eL zYl1waMJL^;U$FllKN#>iF$I(+660DQO`*(Yg4Il}5GS3eF=j4uW_k4tM&VYu85SYi zw!!o2X^6>N`8XU##J}|=!O=l{#nHi|ac3kou3=0YD3j)~1A#Uj8{F?+2U+I}F`VSc z*e=?lQ+kbh6S9DXc0*ys zkxL$Q1fS)RUJ2~S%^(#%DoPqD^afJM<;BPnw0WbB#nV6td?~@^$@|QphwGDU9uYEB_+jeZ*iMG<(EAM?i$-ofv?LYK* znB4MZVtT!a%hi>C|KGzz@?Ad_CYspN(Ke{8zD=WSD6DN_O9`%S4H)^6{>T_Iacp17 zG*5aWa2ocy41z#wh-ac5SLzQCRdbc0>=m8&t>v%^Cw-=EaE{% z7T;_5>ZhFh_U~m1tbl$aT%Y&KaOJ-o3-r{liZ7jVwas6M!a09aN8b6M043&97fn@-TO&g5+@>7wiqVdRPPJlUL`kj zc4>^aIM0In1!G)uyTQ->+)w%!g1Cp+zRYM1YqrTR{n9%ZcGFOJWnAyOwVqdb_Y@=L zqh~4%8Xw#~!L0<*)1m0Luj))PuGhf0w!qG?9QhI2@?66cPEcpkHBpoSDgKKW;p&=o z#2j}1K?+?R{qB$C2Jl>h zh5L|}FZEoj&DAz3K1;?heK^VPw`5%AxsH>iG`*fpm4+pE8vr`7e)|tTN~+G?0uLHN zeU5_zw?jQ2ga6$hg`tlZY1;+yLfLIqepNs1=Cy=Bzcrw?WG-nN<>DY`dgf=I33wsb zgN{xuGOpF%X)K<_I|ip6+!9n>VhdhoVjvPCHlWSM9l$ zY!~sMFn$vI&JTo-r149?&tPvC%gFvEknZ!wbzCvkSR@?PQ+)QG>k|jUQ?qm7$s_lt zbE_Qsj^8Hfhwp#FZ>aZ%VH#Jk3w7pman406+jqL_t(^ z87`HqJe5zHW|2R~9=UKzNh8}D)(eNTHaMY@lmgbiGn+j1M9Zrw913IhAsHCgDbPUR zizBUqgj@=Zo@)!tkH$QK27Rj_y7XTB?*HoCXFnWLD!hn};?&_(c7+$EVVF9v6g3C3 zdp`pO0TGth6E7Be;GR1Hy+i8k3Tx$R`6)`Tc=1z2Q|VaNtekTg{EG6pQek%BXP&8X zt@hFKq3Xx-$1wE^yCsA;YQxMxiIt~%6CD~U0uI)xCnfSZ@YDkV>y|shm2IE_g8e}I z`|I|-ig=T&%JvWMaqfS6qjO*X-0U(YS!hCbDe5ZJfZCE-+xwIiFITY2 zE{wYrWQ@q$%~La?Zm-)GqWmfI5LXWhOX>1ieUN=ldPx1W2!FOP^{(KysvTKPE*XQ<##=w$N51hEHJy7^f{|Mz?TvwKSAhDmTJ?mRuK{ zalIJoy+q;Cpe@B~3#Qqi!lZn%X9&H#A?dzrdfF)D+PA7S&8`xBdQJ#yH-)SI+0nEu zq@8oF)rlVXg7Zfvee(T*qQXVrUd4+>k5&c|rkC=Rf%K(Mgr)xHz8o*Q?F>1{>u$Y~ z9eb{=(z5rlFe;~s=!43qiJ{sgVWkIzDVkBglsR!YeIhs|(p4^{;Z^!qyvuWqV^TW?=`s>X z%ib~3E+(`**?XHs8m?!R&FX2zDSf&ngyD~U-8meA>gz)^gwLz$--pG1qwb^ez~Vm( z?$wK)1x3ArMWnDwaZ2&p>}8-R%qky-Q5gC000O?il>-?FxxiT-)Oi|ut|10ATFEPw zn%&3TjI`+L)Dhx(C8=IjkXUKd;iMcDUXa8Gsq?C?KJ#}0^Hp!Iz3g%k10Nyte&U{r zLTPRbJ_y>~o26&H5b)lgMd$2_GK* z2gezX6P)At)MBMPw5rRH*9K$HWS@s}c-PUr6M@cn*dYU^;%O z&Oh)^eDfafb$IAx5LQ_Og)kvi`4pC3ZC{-YH%+_r+ z7Tv1+>c5S6h)!6CC*4$5$wyqFnOU=m7&TB+F+_2lOuSSUQ5J7{F1o6I7?-9m!e0vO ztOm4V$katfKpoX4eLHO@(vzoFMjYwMfl_q*oBF#Zl{74cpL(u^%Rct|zvSGzei|B~-G8*A13!7rQk&l2RrL{C?nmr1hNZ+jMd0k3rVN|Z|(*HWU*n2@mNaG+e}$xF9| z*pA=MpiTjoGAr71&HUSasZpKBE{C2xSkPH*;bO;ChtpNGEULZNKuJZez2iFdM3Vw} z+%xZIB+!X>($3~n-tEq+z|(GNNr5(-!TyjsroR=UQJ4WnMZRr=vmGl#4vDAF6)% zTm-5g#?6LXlzu6!wHmO-bt`=>Z3{k2C#5-(o*W6+8;`2=yw!tBD;}hqXk7DyQ-HdU z$cPup=fn2~3Wws>sPL5k7ryxQ{SG|(JVB!!kbF|V3Pe}=C_I*_qE%HoYdGP4^EO4Q|>FLh4UE0DXr^W(uOq@RTKuk!hMm{@i8{T;#RUmEu9Nq~PU^E25h! 
z1&c!^*^1{{2B%G%-rJ5tuM6`{4IA+ygF_zRlvesI+SXHi{7_WxDRO*PCuE+-4mkH4 zKknRz-#xp*A?3?~N5-{0@bX-10Mh^{7VJ{7!e+awY(;pA^Yzw5b67m`O;9<))3x(& z+fAP985orU#&enX-+ixh|L2be88Bt_D>^iqshBy5vs*MsgLSFBtCT0!ooz==G9_y| zsX<1!cc?qk*_sR!?c=6kaukhk>^^U z6W5;W)PvLw?HY+LdmgPR8O$s_f$N4rv+`vaVe;T1eulj3#0tcbeh;2|d(pNiNB{Y=4wCA|aOB%`EKg}{ z+%4madN2wvox`>#EvEyod>Lg%KN)~=&9UKfrQelcRQnzOkQ>jp=-^sHIo#w_hzI@@ z$~4}f-b&z)_|-oAdM>##zi^DP$Cm*{xoA(@ehQjJP=Z6q=Z)?&BYs9hSQXDzBikj^ z5$mNKSBR!otaTHpcb^`_m0tNUTyfRqGp=V~g+pz*siEUp*~-YH|Lb?T@<0D|_C@2F zja%I)>K4mcB2{-SMIIGV#`wT>fobT8u?F>a8THmo9!u!k8Na0-R~=|wZdos?ck$HC zD`}5bCtU?U4^z{1_08}S%C0i-*|%XCAK$xl(NS&&&6qfP;)t6WSm{_V{s&QK+A4qL zUN?yGnKD0vcUa1BpiE*CG_id@$!0*A$nwKjM;Ry~4|&7zA6M+2o`b;YC75R-9j4GczdU>CHYG&8T6j0kzBMT1uv ze)1gB!`KRl7kO;>5+;p~Q%CVJm+OitC=HD%TQ-Fkr$Wz;Mk(t}#`W5`r*`G)kV;z) z#1B0r=P}dn$a9@C!g}ZMNQk_s-a=sgznY48GDua9dhCTAqNP%9`;h0_PVU%#*0|;i zr~K@=fbnV{d9DNjI(Sb)<3Xgr|GG*L5X#_}oQTfsTaqx-w*sd=JDI z9D8t1e-H2~ko)b3CyWjcxnqY9*K~9U_WwO_g7n}W1YgecmtEh;DOZMZJuy{wGuQ%k z>eMNGs=m+vOtQjT^1`~C!2}m`hPGFkv`-Y6$6ve7dnx zz8_T3S9;@1#uJkzw+iDPM~6vFjwKTVCyu-8Ic^Z*Im?$CCE_~<3%;f z;Q+T<0%mZ+(aH0pu4k~vU9JY6s#pOWpQ@ig_CQ~tC*0KXOOUT~kn1CEyU{@- zPd)A~^sIF3r<@oMUg~qKgUNC5GIPZ3J$=kEpOY_rliTv~KXKIuzvzy<=zB!kUzyjA z5Bv^f@T42tyxSf5v0wM{O_`d8Oihkg-O$jGTe))OynM&xG~{v5o;^99uJ%LE)n;I^ zug)dhJdMUAzHGPvRx13+Z)&X zN@JTKK9vl`%xr7Lu?#t|NQ~e!0eD&+P}&f2?fh$%g5`js^?HP z@POO3YnNM3v0QBoJIdLbs3RI>wj7B#Nu;dMgS~lYZ3SgTkL!b&tXRFqty&%@V{zqV z&vRc`lNnC-`Hl=^V&8Ubi`clWmfHYs{iG5ygRLYt-gr}3eTkm#-P^ah{i+vktKybC z>dy!(k^hZ1+)$gGD}q<`I^I_NoWQ#l15s7Lu(b~rldv8KTx6Q4Exc49{|QcIls1hgi!zP&Pb)|dXGKgL)Pxz@7CV_e8o1ua?9it`pTVP zaqL2n3Eyv_F9f%~6noC^aVz%UGP}&E&Xjg_Xr}?zo9b0@@k$RT>ty4`jY&*=WP6aMr(pt)a5q$jFF0d-kl`y#jiXa*mY)_l+-j%o4`)Ew6jC zbD#Pv*M~a4aSgo0_QV{@B9Bi544il-)IIHpA;=GH8T5(q@o{$pWO+V3*Q-|Hl$r$w z5WywrZVh1hdomhx%?4}=E|ucu>+7>;hZu;RTOBRKLPrr;&!NK7s|Io^U4mZz3s1Fw z=Bdd*JI1x>($l_kf1qauhgt^lPW&*xV+U%{W|mzGG*4$2-c{D6jBv(lOURlA11D(_th^_NazJSE&$1*R zsVH5}4_LX>X?sh4a!5-LbFP8xU6*K$Sov#6JoeF|m8BS!^;=HFILYVa(&~1~lAf*1 zs+}dDI)aXq?RXw@Sv-?J;g7&j5qNnQBMQ`2;h;yzCh+J7?LxNaI(k95^7O55_|LSv z^ep)(&lOFUXPdxSUdW@q(ny1hWoA3+Xw~Pv!nwD-(>?wApT^49OKi&Gd+A=r=KA#I zV*FM@t=yBF3|K_o$8gED%0&9<31nIc&Pa$R%Aoz$ zH(@f#38y8<`}F{dUQ??s5zJE=46wkird5`F|>C3SrT4 zeH2D+vEVf|I@L)?&3X>=)N4<|mw{QdEri(2OZ>_^Sff&W#||XJghspeVKw@Cq!bnN zqL)ZtEhF{W_IX>+wUmJjD0w|epPoy4%9w{dq@6DP+fs{nhUuQ4TmRW_$5#Y@Dq>10 z*@V0YF9dz6S8WnPuZ@3|nRT6cuEWx#F96GAJ=Yv8m4*nsmr^g<7Q*(@i(WJ+yfJO+ zlWCcUCVt#!zY0Hsj1dkTXW^{@OzEgS|5WhD{PZCiYY-Kn)&-Fd$-N?FKZwW4X!-Z^yVMkM!Epn@OF{7d@G` zJ}JL_uU|blyZd&yo}aHA8(+mQF<1|ppm(D4m5b_v&#Gg^&G)rge%g*_o3|0Z!c{+d zX4|DUg_Sp3X4a=p3ZZe$KJvm>hZ?x=E{xyD!ae`>$#mY+uAy@ISu*3=cq_PN!+Nec$uY;~`F^3nSd$)G za?+JiL}ORD!l7}^K~IJXCv#qj17QS9odn8P^juFh+hXLAk)QgfdAY|&&GzT}WLyb~ zxPDtmqjYw#CXVW|5&lwm`!zuQKz&H#EqaB$yv#;PZBb;Hs?V$6N}gA)OQ=$oqPN^m zy43EP5Egl^^?nX13SW~&hEd+dBcClOJku}WgCIKLskMKlQ9W4WI-NL1+=}|BoE)v> zq`K5ZgIfvMcHBlrpmE&}sO(Hb+UnDCCbdILxT;q^+k3^gadXou*;isogYQTX|z*rDZtvIDuPKXxwi@`#ksMp|Abr?}Rx1b37MJ!MR8p zqSI+zh~71Ng)}ZfS8IU6-iqhCS*8+3e(j22kH(Y0GTa)RoPF|4RCOH26NE1#xY4!{ z&Mr0UmjM$fH2x=T4IK1{bMPR~G~~G#E(P(z@*RIBA4@NAGm!GCRhfllm!2?$VrB`bkk}Llw_3fEOQxfn#{LCL`F!itztW=-^$38+=(tk zDKqM7r3?WUo8{nHC7&dJ?I(b36$dSK*T$p_6ill(jrmR%=|V-zK;feI0ep z2F1*N6@5)mSXP$r)ZvhpFDkbKDm&Bgo)at91IOTY+TZ4xOLv9)DxEX}>N>kRt@c*D zzc%1cxTM?&K*NS{nrVj6nu3wiml3LXU8#^V z&ta>cYvd=7IC&+F>#h$Ji#~oJ-ideYfN~C38B_Vt+LI;{@{VdhH8EIU+U}xG&OP8N zj~&2j6lb|hl$LmBD&1S-n#PvGMOn{kPnMgIQ{EwH!T+l)+S;NeN)7-7DQFtC@;;@; zwU*Uar=C9&KpU#Pc(kJifUUbj`*MKSBvUeM2WXODy~a}sX5AJ-d>Pl)+sN?QfOVn% 
zv?a)UcFbT=+O)k@mqoZsVV%{0^dOB1859a@Mp12{G`6i`od3)>X=Fw+m%R zNf)CzE)9%GYEWZljLk<^H>!`@%m4L$#H1r?kW?briknhn}cm zr1=Av#|iT1jal{ET8id(tS@=-@dBVPANw~~#OzCQ&8dV~=~OTcqZ496&ly)1Loe_!qsl)V~En8i4HP3I7Ex> z{rx`^(thZV!c{8|e9}8lVwY|5ECfjN;)Q3Pc_vr53l}cqy0glS75|c%(>D-=f?5w<8rqmc7z#h zg_xRwS1*EStvGYqp-Z@tiD`GKfTtcDJH=z1HZdI(osLL~WU>p7F)E{zO-)U?9t;A@ zPoHq`S-XqXiaU4gm|MGitp$`> zH+t&0J5#_HkHBJ{>z1)*ORi_=h@)wCxo?>pj`(F5M{98&Dc(zVV+X$F>uen7b6&bU z=5fc)j=0kWw1Hj-{wO7^fGxX-2}^{@YHxNlQW@T)9(YT8j~{g!W`~a-aibfL_#=yy zhMt`YjKpai2{wHJM<`+YLK%u@Y8+>ia@$ITB_-wqm`*CLwqEsYkLlIs^PFjAORq zu}e6MH))SD43Onx6F%MOnISjPf5_io!q*@NA=7N*;YYsV+jA0U44*{$)vm|O-ld}l z-3XrPJ>HCdz$wh}DddED6ol+(dMhUTGy$#KQ3%uG#G-3%sw+qZAeBQ8D7p$6hePTeje zz#2(~lua5LX)-JPC>O^S4}2-VU0{7~ z-WgIo`Ji+AqLHT+uXgC!vD~*4+X8jVZ6P$S>11gaRIf~D@7tkc`D!(t-M7p%)@$u) zM0q7oC}s$XVev7$h;MnD)xMN5wWmL22JfxVX~79y347r^M4DP9-moPj>$irGuUEa< zxexv!jFaPG#aw&liOF}KDEnXgv~!$1u`bZI1Is8YH`iz_^1DTCT&G76(NN>z@jT;l zJ|G-1gU=R3Z(bRDf;-8pJlT&gOk-e=+d}$aSWHKrE4@AN7}N&+zRIM}gC9B|R57b8 zoAQ+>dOfY4sWBg4#+lGXJXMf}=e$<&<)#ukc|ZCC?^9q}8rQs%m!7!4yo5d1ef<~^ z)~xeMdeMQ?d`dW{utGjTxA*i>k2lSwDHzuk7@a9T5qO+bKMtMm>48Fqx3o`QAPhFb zFL8Dsz5IQ{=Oluva^>=JlZeUztqe4X}9*Y;fZ;+}29 z^FRe<&|y2WJy^fxT~`Mh#s#4@&1Cl$0o36r)G+PKQ{K+Y&H za^SgzGDAm8p?%je}*l=9BV+v1P5{%!!jruk+yb8kwI>0l7@_H z?e)*-BG1fQYWCtQf2GTe@)~~5xF){5i0d~8gPq2;UAfHi#Etl~r+GYnpL4gp)MB-E zuexCU*)1VVt-a==gfTQ*scC0Rh|;sJZ@B}XdYsC7t2Zd}! z3dVIOpmt+g)o~|u+D&7(gfOl5TB~93EM81d!z_d=xz;nwrpexFZ0F&R#x+0swek8u zPPjCj3G!U?p2oV2{#U%oxzGJW_!ykdC?^i#zx+a-9NmE6nI0JCuSB|M0n{vkMOfG^ zTg;3L<)cwS*tCf=sP4nTg9l-7K+ndHjI7$U*{$j%uZ#T_#p6M(IFrQD5fq4ugnz>oHnvBb1N^&7y%o2TIPu_rAI$kqFL3B}qCvR|UKF+k zyww2#B>X`c&$v5kPa>cq{4G%mgHj|-U2GA}8x4g*n z#Z?C7aUMF^!>SEgSPTi5OQ5;oI)C4X2}s{en7Az4gg$%%-Z;q1@^jdq3Fw7U?dfja z8Sr7)`{&ExhkyZN-G+_L&$6X)98Virfwo(r_85(O|808*sgix^-6e>{uVeFDu2 zgrHOrJ zBuCM6t?bamr||Wn9{d9 zXk%r|!&TiD!bWZrnWau-Tb(#a>05Z9b&!nr#7-fu;qa#l? zFs@Go8XKX51@eTG&I?`@jO)8U8Vq(imbm4Q9&gEz(#31}Bky_E7}wB5OVG_4@O!T3 zd|F8CDB3!};)~QA#H4t@W9hT zNJB=0eJRJ{^PYw^2LK+vNSJfjQ zb#LEDXZ=R~m8I0P^q!1!$w!8G4cw^AQr244pzKj5wHKaCh-?eW0|$qAer?ZaTt;54 zjWkJ_*@kK_F0D%*NJlcI{q*V=I?_+xK5S9)(9%*h^3zhj`i5;ll{aD2KQa!+be0ze z9RK;~xMEe8M8=K=CYw(k@=`!2HNaIA50oR-GYx>I5IZQ-@JGnnJ;dRymRg*QOvz(N z5#o!yrD}J{a<*avnh`(mxkjGxgry`X{v;W7LjCZBd`HaY!ZuK#^QE z#;(QKbe;5Zy_73GNDr~R#LOk5ueMUw=`(R-e^6ggTIyEz3m)}L8$v4&@>1J;_(2;2 zS5XAnyEdS~;j!?av~w>sqV(xYW;UUCIR)dI zz?aqJ&Cwpd5R?D6ib95zRq9l#X}SDqA+anS8M6fnOI)cODm%md%Z+IoJ$tDLo)sFU zOfMM`&4qO3gz4bp;U~UiC}=Q~am}Sn#;IMXh$&0hv+RLh5kRlxPwnkP?H}qW^Zr@m z8XjIP!yi8w5@;etV}<-sxpTnK7eDEdnGKju#q@>qT3)eaHgl22U~Uc_G( z2U(E8Cpov(AX?fUk;*XRHF1_)Fh90C^N{f&IX3t?DI~i)M*>MBLfL*nedJ(!7GW(s{hM2lnTXcE^ ztNJTxs$O_M8#GEMT{;^rImx64Ia#+aG|FJNvPquOXCD>=CD+WG{7DwMD$RCK`*B-{ zfVh%HrniR&$XF+D)SI_`n^J{UYw=zSt-rHQsruMO6S8Dnce^Pen5Fb%2JPk{C9wh? 
zKtj%Q9Xk#7z} zIs^luHc^wDGm7-RUci<5QSW1r9JfUPGUHmj$WXSPYxM=6!u+LaZoMN!KKLbr)Z(pZ z9X}8faKflbgeDb?iS;w5J=eCb?|6O4kN=IvrW!{5o%9i7NZ0mjtDoyrHV$bfGmTS) z_Ex;=Q&)zYcXWi7U5O|Crb$1?!;U@Iphr;su-!;YvZO7ax?1C!vOjV%q~EYLT=g#< z#up&@5t;pQ0>*g0e=-p52jLf8rL&9T7E#!eaoy#{^lDgMNf~$T<{@QH1Cd}o7wW9> zB!dzS*qkvGJ5@*-Iy}>GzbxrCnprB(0qhGVm=j4tK+!+w$}sW9HRzKLI}h_rw}xoo z(x+~9%o+`Hf{t?wPqlxgqw?fYYwX}s{KR6OGPsnA-c6O&9+{{3S+524s5Ih79#4k* z*?5vh_#Au%GOj!Fri;itUp4sIY*c2+36(aD1-3IM3OeJO+d`P0TYd*VJ*?~w1g9fLmK-6P6#RZM}BGx4Z`YM#8!C=Qu;xV z6I$|28u7*BiuR$VX+%o{lg%f^y@Jm?N#)eZA@}+>ycQ?N47wM-==pBX-re4VowD)8 zFMh?n{U`o4_LZ-7Teod-uX)WY96jC(22`iKe*Lbn>cVYAs{2AlO@EbKXLL*g#!=5w zPI#Xh2Mm+0kt{JejWikLxk)APqwLj;Yv2-OTzBHRrlCr@^gbVcBG7B~U*(nz@4G$7 zHOodlr}$`imM_PxD)M}`nh){#MmK@9Ui+_t9^tWYMS3S-HvT7n0)Jnt+n|#b*~T@M z8I^rD64oeJ8eN$-zVlT`Mh-Hr2x=1Mw}rsK7Kj$Fzw>XLd)H5g@lfHy<5gi+=a4_y z653Ly_S4Fv(rSBB>Zs7_oATnvWd!tabH%s`RC$j1mg}2fES@NgW=qh`8VFx#gdt0i z!VOQY|4U)WU@Y`p^KCikBX3;C{1~84#d`vur7&g0hM4@n@*Ert;YFTn(BeyKy~myo zbV=J9*Gx$|8c6DlYv2=PT+?$+!&A%rww%O^ka?~->t>!b(8RCDff%P{W8LY467&e9 z3RA=zT=`;Z9!vmvAlQMFo=k;@*xIw@Ro>Et?75b=L*?LAWwQgIwua~!Cbm13IR{UE z$&2v#*Zm;ctI3*_dWfz4tB$lSiEYL(?yr_S30v~Q_?Nsc3`R%bA^+xA0!BI@D4o3` zX|^pyGORKZ&kmF}9q)xNqd&@Cj|{04U$itZ*?dymEBJihecx~|x#LB-9AEzOJKR0@ zd@UDt_Ut)#-KLH1;DJN#Lm&Ex`^=|5=l1N`<+g20yjrO91?@ieAD&a|GAnDrCyP&iYA}cuO%Cdqt21;m8|$QHT;@!9huP!jXZTn z9}$zcQTe4;?YX8w&JTVNzWS+fZS`OECw{%N1bS8P;AqQJT#f9}8TK;HTFr-e(jlxL z)*D}HzYdde8G>!mLR|{j4t$Igmz;hUPAk`R)k<|UTUv{!@y3QVSef-?Xeq_y3-AzwljH$@#UQzu9(@B{?}{W7wBwnO2Uj)N{=- zm0L2og@+z?^7_kv;M~9aZ()mwJlDIPW4ZD76VUxLZ~Mi%@WmQ{npuMG)&P|kN$^RUc{HeS4XPF*6x(2I)~EK zFnm5drq2&V^ky($d2=$_&~EfL(y65mDjMvxOp|R3xxQvV5vPH1ot0S%-tGZoI*cMu1lf_E8G6&U0-O@ZKhhaZ>zT-5?#+ zdzy5iJhQ#TqpswQ{Bt0ta@Ax>FA{R_qynb*nsInRsv^&|U2(8w6m80%6g>xa8tOU2 z1c|s+gupbnz7%^yZ+Grb-&ON=(|e9_q$x11mS3}WH0`B+mJ-SJCm8;O*iS3+=7rGO zKkJZ?84o6(5OY=j>9xN3=9_YP^N2Qh6f-GPIL+z4=@Qscmk}HpmT!T0jlQc0XONt7T170 z(v+cd%J=}w)DQd{yp#&vhNh|VY;^Ek!GvwyXWs7KPN5%8qg;L1s#QYY zoj7v}=i8%iqD;)U&+D$T8(#)puX2g8S+1o?7>%rJ(5m*1yUK0f?$1k}_HD_MfEC=g zBrh;neY^QYG)BX5uPnnfUhbSK;Eg9G@x-e}^s{Z9lx?(C6@2>hotZ~7KwC~uO!`yF z*j_=741X|$t35DI3!rof+csGzsCT9-A>UwFqYdb#228*5``pCy-{_`vG8n?9&|cFl z<8;2y`Zm&wt=KX*f%dJ!0o03ca6mh|DaZ=bmB3FQbs@Ghk!SzI@xDDc@x$Y-*%;`P z0RrR}XXAnZ-u0r?k!R{@_A|6=1t+!my2lZ_)mMJM>v`XQ_I+mRCw|pEG(G5U!an;R z^4n$5iCbMJJ126!f6s}+QsP_<^l)}a#Y3;2X6QML79M115S3^D(u>TWKLut?@u#p{Vmzqs?Di4zl>_svIHen*-=7 zFSVoEl?iPfSVF{+F)n$^JlBxzA`mTkq-;J#_*W>5hO~^+W?4>S;GP(taIbn5j3B_) zt()DO-}1fg)1SJ_eb?(>TbJ~aN6-cahKGk8KU}nOCHiNVfgkQ696Nf{T`bSI!Q;<3 zw7!SwqffhMo_x|xtDR%GE@c>#%MU*3c7Fb&?&7Hv?#v5b=g+Ahy99#>a=+%pakn)h zkA3CS4qDEQRr=i1plP8D!?^#<8Mg<4C9GaN8RPrTpLXkx9(Gt~b7yBt?syERK3hF< z&TT{Z6b!(F2q%r@haPh~Asdr0#Gicn88W+Qu zQGApOlLE;8GJKa{C&ETypgj$ocA*Db6M8WbogBH~jl2PTn5xpd+FgF;8CQ)HP=3U0 z#R}GVb6{N#oIdQ4NRPT|{iRX20U>8k9(N~_wjv^BhDV?Bqi=P9`X3xR%u&@(hWQPJ zu~B^gL4CI_K`nr*5G6PhhK7b*8IzLn^LWNS>j9pQ1(-T>)=lb#|5`GC*NczFQAbX$ zyyT`%pMyN%qg~T5Tmh6T!sR0e+~Ey}@eP1s|A8%r4Ieq^PNR&~%pPDUY;?%R4(Q3g z=;%9q=n(4rqT7bU1lS8tj!n66X--Yd~Y}X?XKj22DOZW!Dl)LWuVgGTg6Zja{ zxq>=m9(<3c@4^}0dYFdouo@rE+k~g{Cy%*{gX`T6+>fnT>z?ji>yDtWQRk!+@{oFN z|EKYhvKwx=VQxgaoO;Q)?sA3p>FwSI3caQ5WSnqN=Kek!vh>>0xYoc-V>lW;mHY0h zk!83A;-|)djn}>M)G-0rd60*l52Vw`xF#=JI@c$#iLd3)2F7(}=qR3E zQ48W-FL^SK%7)=uu5I?_xGk5ZYwuO>4AZ@gGKR^h=Y0k;$iWBUQ}NwgZl%&?~COA?%lQh)m+LZ2#-tDpZ`byk%1m9d+G(6fLZ%j z9(J{d#w)|U2fG4ZkS{^E;0t?FKFr*>tEbZJ;rh{K5#LEBT} z9^A%zu91fMS$+eBtNOoV=XMxJ2i!)U=^f$RIeaN|S$q*VhHcuk!EN5mA+rWMk@gE; z_;M{E-qB0?f)}9O4cvF%eaK^@yXBT!OlWs*IZ!|J&_nL}ojcuHo+Wy-B{A*ir}$tX0%oudc@xk;@tJRa!|I9q0Zu@ 
z`IZ|o$W#Ath3>I0`gH4d?Q`o?e)g%!V3aJw{xeUHyqrDcaeKG!3Sk@I4EX@a;)6aeDm$MW3M?3>Dyn1uj75jJ?Fv^x9sL0_vgKP!p;7Uz=WQL=lAh1 zd8{q_Z*p76?>flFsY7npdW=JS=Bm=!gC3`Zm8n9$n&rbd>Tx<==wP8PKZLqszuLJ! z>$~(Sw?vy`Bs99g!~d^ZwW{$M_|!3n!4Hjmdal{Fd^bp2 z;0&F*TY7(2gpYIW*|!&~px48*3kS;a!GOx~o5$4^0Y4VR7f1b3rfBz*_)#y7#Fw;@ zW+h~A@7}#`#}IT$giXr^+@|M1UgHb03iowFofy;aU4{1Dhx+|w7{j;Se7oEB9JD9p zmMhJ$5%zD{eOqz`>*kwp#>d&ZZ+0HX*LW8iB$n7hWAEDIr%wiejMPqwAmfCdUKy%dDwhGk zff}>v5e)DFZz7Bm#$Yw2WW3Ed;!% z9M;H!2gHLs)8Q%m&pg-KPr0UnaV<}#PZ<@EJ}A)a_#d@dmLqOd&o)f=tW$zD*r^E9 zG=!hQ0aHBDq!Y;c z0ca7FxB7JmZSCKt@%P|4oiw*bDPT$0aXXs$WIeY6c0#T)>B{uYUT74a$|FztVd!;1 z9B)gKFNS;hD&h}e(o@7cb^wzn$kD;a_yj#{(EhCW~E`CKW`vfi(?Wg}|P{infE1$uD zpgrEqSO#z4A}B6-=ByE{cwMDPG$}~w5HB@2WPr=aAP(thLJ*ZOgFUc3GC=H$ZSs7} zGc9~wm9LEJ_%%jF`z!O6VqyRuJue#YR0ke8`I7-;gQe^gW}kV_!H#^1K8<(rsWjOY z%<#oN9n{aygL9H`=lh-ek8gD=zwnX#S>;)}7H=pM^g7W?(haa>W!>nRULoFp>@Pwt zx4a~un;mQ=Pc(J~9H`^6uY$6x@FjnY;AuI&T~U8Fop9ApH2A6hv-pg9@eSpL9!92P z9jctf)*zdvB#vZBAG3)kmh=?uxfUJPo%pi(r1Y=gGri2Of5YqCd;jEp?#??u>;C4i zKj^k>-I95-87_F^8^6mRzxUbC-0eQ_fe*WrIP?0s&%I62SO2OymClieu;~XY(3Ub( zut(Ixu>;|VG14oVkV{wPN(}iVNat&%kTf!V2QO^~z>j((Gp>m%-s!z0P+p?plQ|_q zO!#LSIV)sbQ_gnmgX?gUpU{?{e88q?at~SrZ3pC7!Te=hD<8^poWM-2y)L9__g{LN z{7d(e?`{Bjo{Mo!o(P^t&>|=-JT?B8{;_Fn*lb7fF&+6Q$hbc8M0npDyleIW5AuiQhf zcckQOtEGHa+;!hS|E#FC#)Z^pn>#)!grwEM*g>#g$%B*|ze4xre+fSv;J)FZFNgT= z{$Jn;{mZNahCuw2hB~7ZWfo~}l;;4Ji3l>B!&CT=hJyCR)t1Mj=b2cfqvO#u`xOoH zlD@6XI4$!xFs^6yT$2X#rMDpi-s2~arwpPj-rUFlGC`=g+*3}0u^bwB{e%Vi(Aefc zZR?nL1oFoH$%HrlxN~p*S?9ijHsohxpM21*{`B9v>2KWS+<*R{M``=dD)6=w7bFnh zjPk}c6f%M3puF*P5Kf@p`McpoLV2<`7*7@zEnmuc{wtjO%!hpPDpn{OzFbAx1Q{s2 zr_yIQqjE3KMO>vJzx=5ktszc2jezOcXU{J8j(7Zo8yPu^z0)h^Wob9d ztdkqbtUTBJy5ML24l6pwI7#WUd#WvIT=N|W9v#V*jE#sa@)k?CSz2mWt-6RN+g^Ix zuGo0FgI5|*o`BvU5Ld?a#;qZYVon*OUZkNSqub+?c|fL5<%xux@onB0zZP2n{?=8n zJtX_mqEFD^PxYs5dwvoD2BgoF5Br#mYi$E-K(b*l`vQTqvOXKbLd@L${iwf`j@Psvr0=*QDbSd#@tonBya|U_=E&ZOV(MWA8bwswETFRWtl^sT3p(AQEtgG~(z52}|u@rkG<lLl20ID*;wd`c+QYp!cf;WzHK>nqq?NX7vK@GNrWCqF;4+e6gZ;7+cH zMJw-#r3Y3Uf>&H|P~=QqOJPNIMj})`U%xBxgg3rDZ#?1Rhkwzzp9}^}h3!hZoq!ZG z({@V3=4)w?ww_tP|NPeh^Sgc+3cBLln|>Pm$M=LX*}-3J!Sibc^xR0c6_#D&fFt>1 zxF#;Nu0@aG5jCSl{?vvVl!!MQke7jAmJ0gZG9v@C|0zKM#gy#=v9P7uy)y*p_YC)-FqGZQbgZV=(q?i*gi$g{-(9AVN_>zybTysTQ zCoz#k9LMY2z05^o002M$Nklxvq$Ne1D|3XQJ$~~dC@hi^#gNd@o=e zmU+dxsLJVj0^Kw2 zul=3d^TkiOHMia2dK4}^UDf8A+%eoM7HmiSf6tG@h2|+tMmFJv3aBg6(29{&!bQ8; zm2@=_lRs2W;@di~@fn_;7D77YVSYW769{YZ5uI)ZYn$%1`_?tUw!?8|9=NfV%E|Jz z!Oer48uf7%ONDVH!`OaWff|MU#4BxRtTOyRd_2U=^+An%DA_5R`lK{Jkdgyj?I?lm zynLpd;pCi(tz;wlO4f8K(V`<>mC`F?h58p{*71ApcMEo5GW>UWGaB9HTLP_I-Sr}UHEz=695zz zo|^yK4ULlhXu!5>IF-YXVK^jED`(6J$4sTAoRQY~Xi%fF zOC?z=I@>`@{@ld(vI}E5Z$Zm%XjDJ4a?E4o{Ad#934n)LZ){5%ljL4v_gCeRn5{u( zG`zA!eD1jy+&}#Nr`3m{riL8|2WPkkF#2nwFe$}&@IK+!ZvN%;Qu#nTyNf9 z`P=>~ItM|@M|kS|R|04OItPKmjlxLEH3tq@K1Ww^oeD<^_W~qpTqD zRsAIjJa!Cr5H3kEJquDoIS|n5DnTv*OkCw#Ux2cfJa&2sX*)=fo~-KHw5W8I2hFCl zB~u~9tI|~-eR1$3%Vaad33RGz zLs@GSS9SNLzj5xT-|GA)i)}i>yLP>_8epADDWESfWz&icf#w5$pHG`{qzF@nNxAl? 
z$KYM`xz5^_R6IW>SZDJSVPdf2hB2vzmhsd5X^_@%U)ZE6%At<@j?Zs&gK`@=D& zjJM^^AQw?zJ>b_m%QanR&K!I@lg6t zajxWZpA;QgF}lr+1hxd7S8UK}G9SLR31u*~1_urB6kY@kAW~pMX#KC!V<*o9I%mZO zBzV-Jbj*|lFi}NE%5$n*OM%pT#Sx8kQ={sWRzZS}sR~c#7v(2NNXfb>!K(o;VAXzQ zb_mLY+sPjd51g4SYS=vCxWAmzZkD|BnW&ZT@8ir5S( z)&XC${zX%KWtaJ^#Yt_?cv7?(S8Zb76DI|}m$J)uo+l8N#?!N|OhbIqiRGF+qV#-w znsO`8%mwlT z6L+-C{^*zd>)H?K6QuMmj0#Mm;7{KF0|D=EKjz&3{2u3iGkZj<1BDWB`nYdHBR36vCz{L7j`)I{iJW-UU>Vk4Vll`%-+tFZ~ z3v{om!W+h$_(FL4dhy7xV!(}KJ9rc041gbQsZ*{Qs0kMD38W?En*2lKn}Btdl?#Ea zuc#Mf8yd5*78dU!z`Ex@`w8jU{=znCXH+SY##M+jBb4h4_`K+X`EhWHmBtz>LOT#PIv5#qhwaa|u1@0K<#DNwgK~`^wo6$piv;?*nEKUN} zEu)B*!ZmSHIO)rB-HCEd+8KW&mg_9fz#)(DgcPoxFkHMOo&}wnat&WY3+gj=Vx=&P zckTX?YQUd!tyM2tuF01??O?-gL1upGZyV{ef7(si>W8wp#dfX^#{NkPl9P?pG9|_B z-T{UH-_ue|$WyMtGn32Lh4;Ik1MX0`w|=hVZ+_*JyhEi+iWf1-hy3RW55!xzB*fRa zKo=9dpWOD^#*gR>?G3p4Wjzp77ox#1Dn{(znqbBOjp)ywk7u;usqyMpUOv;QOY&-2F6uC>S}cY^sMgVT}Bc1G6dB4m51ZUPq-y; z^++N8OlB1g|BbId>^8x&{oeO|x7&WlZSMDe?+?>xSM)ivTub5VHa8ODvLaLIJhCrP z%>)VJXpm6{6=oD3Rpl*Dxt8K1zdYPxrHF6lqb1y1ZuTkDP;;#ow4d(dIn#r!l3L=9Ef#EJmQF6o#h%9 zuR0(eCPRc4KKK=_C?sVM5exjPj)yGnm8-ghfk1e(#0_G z2;(ZgHe2z19*_@odI#Too}^$-gmdE20MMa!(@V*CMcJ$#q;`4Vt@7$ZdH_&O2GoVF~aX{8`hWR|8ZV0rH zX3BLzt7eE<|0_AcI+C(ERn|?_gC+=_8Z6Xby{wAzg>_v7QanB@Op}LZZXu%2Nj!l^ zNpf$?>!1)5qQR?|yeYRdZdN3;v|K|mr?34Iv=@%SQgl!_AAo@zGz+FjlNs?(W2g3|@F)C%!9> z*;xXQJn~3g2tU8c4Us2LLP$&l96of&-53SkfjGZ{=ZhZs!!JJXjxAW@N+|E#BfH&3bnpr~ z%QMfyLed0E==6hF#;oGRhZWFgX{*`2<0*IcOwI3o0M?b$$A;a7Qy1Lu>9cO@sS!t8 z<&M1v+~g4`|A4-;BmUzB1E z>~KdHz~U5P0{&lT_wRSh5AAcXce`DuFSzq2uJHO#9mQZW>dVP`8=rs7O=7UT;rMa4 z5=du`4!b=$@p@qi=#$b9ybB}e-LAd+T>1E1xAo6{!;_!Dz`OJNKLVP;x-zm8*Xkq zd(Pd6cfYid&Rl>F`Avr!Qpv>FK9EF3Y;$?0LtbZe-u? zx;9ijtT?paA(tNZJ@bS+HCA@pwrz_c*DjY_15{#UrIh8GM0pe>6;@7)J`W|6M@VLX zIMx*+Gb+G3NWh9Jp~~>lxFXR^QDFFr^| zh7@z*F`npY@{p(JC?3<4?|r)35sh^{P<%_8_%FdYd1!Bq4{W+>d)5Q%KB70|NuD=Lx8hFJKjN?toj&_fRs+ z)i6+$z~qXPn9qYjJZtdblu42l-_eTf%+$270y=>KLukf>D-gL(+ zLI(xm5`{3W1!x!dS;*z_(JbAb15f&-=PX;5g)yLX^g37G4?*$#LvHDuK6j%@a}-~d zIy!#d4L%(Fzt_Iv-rBK)o?7@R9YZJ9j2mUCa=?K_r+jj+N0{GRs!LN*OI+$d4FMFP ziUD#0(qO})ntCxl;M(&70?C1^at5C=KJ3 zmLtzQ_bupO^X9l4dnYo~m3>vWYzsDnaTyimD2+hDK)sgY(tHUM4%I^m3dhA`P>>!2 z4}dM|cz_m3tnl=FF(_(GVAcG&ZiV5KEYWK>UDbeh31Q5WkUIpgTt_8>%FWw9=-elN z-MPQ~9p@hZvU5D=_f78!@`4Hw6$`5rP%){3pJ>HHc$Je?PP7Qb%L9f}IpX>C&pzVZ z*FGJr*OWt2tkQUpE2C>}ar0h+Ts#2&A*7aThH!vaA?ufNo%a>wK%vbq@?}qa^6h}Imm$mu<+>NXLsX>cdrjq; z@YthIxLdd0?C+^muU)$umZ(1W#Vae#7Nerbo#b`Lt9XtVBE9$%&?P|C3Am3`U}4m{@+ zP{O;GEy_DjN3jx(vx0^o3=Y_(H`3Jtg@|kp_A9@LjSC$VG z&41OA@dUVM8GDUy8dpph1_EKg$PLz1 z9<*7SiUSpm*S-TM_B@A4)Yrmsjy%$ln;oM((vrKI7nMkcN7tQ-+=kj_%;zF(KBIE| z;HN?it1Md@QK>nJyoR*so`|(tP>MV|h z*=j~n`l<_`zu2Y5=M_V{4e^{4GoXWFPsNb z<`*Nz%zzJk;Jxk>pZIgHM4vl%-ZKdOstF(e!#{EV>6bs^PY2;?BA@)^U%>jj%I)2| z-)&gGuHm!2SFS`=mu+Y)ER>t%R!q;%sZON9K)zOK)aaL$ZK7jb>rxBOVU7<*Opw%=g+Sw9wcthlx1(?FH2~4UE)6JYs z@Ed})tjHJ2UjtGH0VxeGW$Ju#S-;wmv-~7_YNMucC4^0Vs~v3|(b1Lk)sEsni{JDf z>B+~NRy(_4?ASrnW#lC(0pxr22_2D1+8~qp<)#B2+lKLF6(qj;0qQi$(&(% z;Zsp`pBCu*6(|yf)w)7{;6EG2w;iq@{4nl+&Xs@Rdrte>`dLA$U`GhX+Kbf{ZTkFtrY@fWcoGT>Vxko4_|bU2}B z2v&yEyp`bM$nx}YMUa5;wTcP7FC)f{x-ufambyXKF zuBRCyxBo@GmAJweF{_@JkVq-?A=Q0*LY-ldZkh6V2EmL`uDMM8p%49t_lce}2R_DT zhDF6%@V2+T*)twpL0Kuktj#O7L-jmvSxSX4Vi{UbNAW0zxxb zDR82ZUbFNNCsVF5leR#51mf~beS~yJu^5~knD3~tj2sQiTv93)-Vowuzn~`{J}rys z9Rx)${J1>`NAK*_=AYr}-9?mZ=JBil@PePqW^!XhL9ot7IKbw<1J z)7||FpC#9Hugme{$K9DTaGB#u!4w01>d6g9cfaWOKwQNBlO5lJpATS=`&QtuKMKG7 zDlCP)XuGvHtZ>&sxjyjZLkR0}>wtH*QgS;Hryp`=E2&|#^6qE88ng=*RD*s;Sc zyAWj1*zkz==_h>)gWUH3Hkh~Kr1%#xVLdR}=hNZ4D&)u0 
z&phKQ@k^)NM=^ME$W8VRxH0T$xedxxANt|5-}owgwfo(ggZuqHm4j!ohaxA9gV=v@ zVRRSjJq+`c*t@eO?%#R#n_qL|bAw@|4CBDysbRO~#8HQJAGZ_xL(Y{#9dhqOPv1g! z5qnOqhx^L1$G_k%3@&ibKKm?m#EWiZB+6!6xB~Zs*zZuD2&<}N*Pn8e>W4AzOE0~I z#iGvD6YklVbK(&8XqY54C?<+882nzWT;Ga`7o?TjfmMIqTOrd=DwKzvnSlri<6$0`l8FwfpZh?xS{DI7x?Kx-&u0%jKj$9;Sly_9eesI z$3qZy?b?NUnXJ?83Xm*>LK(Al>()ZJmkX(L?w#hGQORxxoS`gPvbY`E<$sScK07U> zg)&trW7hI#mx$=Ys-4Fvp(rdf1%OJ8lt9cNTY^5Tm1in5pZR3Kx$pbJ zRlig~2*hVzskrjP1@s}4RquL)TVPo!T&ZMoVn9o^tlKCD9!KGE*XZ&cNW&+jmk8Zt(iI^Nt%5w&2dfO576lpD zhZTz3Z@=9wI*oS(uAb$|(!+bh3fA&V|sTX{M+B^`XMuKTd>r-ady3b ziI#nhO{+H6%FHwYv_y8rFO&rbG1jq6pGCKXGMm!bj}7v4Kh52bF|;qp_T_7XOC?Wy zp>d0T{Ow@>d^d>`T{i7{#tp8+xX2C5G?@7MLmRWa+&s^5(Hbc@mJFetho18(EV^a8 zTa>ov8PwN30g0e<$}~ObroNZMoI0Vp$;jskc!9?q<@|E*p6hrFL+K(~+}p!}eGDTK4Z83VQL4D1|wm(rI~EJcU|xR#?^D0dkd(T#qn^~!ZrD$z;i zeCw-0=&oEJjtj~N;D*XKf!4m&o5Q`V(Q+l>22Be}SMDyxHN?-lH8>P;3lUw>q;DIQ zI-aIMMJEL=$-&}~0qcw)h0WhX0hje!@1swqA86+U3s81WEaMQACI`z_iY4;O|$5s&zycn+P)7-!C=|2f!@?Ygs%=aW$F_?GpIp4ATTCoxT@_8!S ziS(~QIMhL|S=Oi_S;V?#eIo#%^sH;d_nR?)@%O`-#vl1uO@0#3(&V%9PU8dltU#ZvauP^oj`hO+EWYJxqCovk zGR;ozl?URGmkD+}3L%~UiY{YnGSNAe#u%c@a`T>jQRQpIZVXYs-1RiRkoc*f7#EF? zxIGVk+6^Ar>)az>2+~ub zm!e!}iVf;3y3LSxT7up)edU2+3Rh%3i=1%NfkXIBuON^tM|&%$`0(JK5R(%a^2+c3 zQWzMQuXFAlKUkw0^HY&xCq5?z`Gg;TBFIum}$>?1lM zLz6?E*GwxLLEi^pJJNDZi03(1Bs=U^=mJLluXvKYxZ&2?JNw1OT4?%#`b})SoHc^;Bz5r0yZe!7BW}|FH8P z7<&AHdP>4+^+it^UlfxQd5-twJC19%J~io*o2Yn`anrWbJLwL@;h3gL7@s==wg|t$xHB+TB;}t-}l|q?z&b&e^ z&CKW8D40>O4!-c1>p6jCB<^bG*_kOwsiKg&yMD~q8%mcw_T z(<@~~mOqB4AQ}p{&r+bY3@IF1t+^a(!(|0fy$M$Wi|rw$S(6^7r8={g=crWbL<*5= zxMD!9D1N8@xEk<8RDxR1y6$}*%i;K^#jY)&jD$;3u2uh3V7L@YP*`}%`9(c?K>8{N z@&OsfVSFi1qQRs8WNjSU9YWEs-Ctv%r8SF+%Fq2K+B;e}6%SW`-Zr1b zL%e4{5dTs^W16fJDTBnh2A5O=s8P67VO4>k2@db2NYH;xYPT`7|cjbv3 zJXJ_aQ!JlHW!f~t@JczPyzymrk9V@L)=dEN5-Sj9mB9k&g;B^11eQfEY@HY^)DD3P z`tIpk+bAsW%6K-eueTV+4;cO4|5A(f$iLR^c_)QF`cu2*>+0Y0DILEle>m34XeJp; zT*)6DshG7P-f4U$9zpLceSTe8;Q_1oFXO4-F_yhAPGweLUQ@28uo-)zfUjBs-z|MH zH#@A9IMQmRTvO=D3PWX(aiut_6Sm5a($vgTmJG&;f_2`*{}L$BYAt+AQ=N%QAJbgE zHn>MT_T`WthNbcJOy5I-aSNf~>C>`$voBLJj%e_nkUgWbOMy~d1dK4-r^0DLijXZk z-^vS~4kcFRa3t62-YZx))mz|@C1p@IGdF|RQq^NSO1nH{M4p>HlRjySpMFw)l zwF5B9=M~R7ix`m-P@5_J>^17Q4&t-$^{!LPHRJOPV+&NS$+LvR`@(g{!#F?wiE#h3 zAFX|AI)r4r)}UNV8PUMEa7mb0vV8RMz6MiL12kxq^4vl!e{gdgH-g`=G^EK%ruwuL z3*yUiJp#)%pusI9#LoGRJju4DVP)f1xW&Q6$mYSuvf)y$Y#4E~=hz=<-6qg~ma>d} zpFCf5ykVbI|7H0F%(GVN;xVjxmF0mr%4e#eWyl@e(6BKrpmqRpYw0hVminylxsf{! 
z(>r@5jtnpi*Z9kF!d1irzZS~6;~pegsriC+%LxD{fE?@d+LL2Q#t(?&#~zebyNo>) zwRW^hSRIKSuR*zP(rk<)MUJyqc6hmk*v`{5-PoZ-Ljsrh6s$NOwMWaVuY7Y;D%tFq zac;UZ;63`qaJ6O0D8zJi3W`y#OZ-3!%fWap&qGaxL44*#mTM`yN|)9X3OWl>i7chc zHT~rXY!6wkWo>2}@-vg)B7HA2%k^9zwdoJqEwfyAVr3!0bdX~HT0zS?L`20WFWEs< z<&x#ART_N9ft-T`CxdG?yNmqfJ$)P;(CrvnMKW>vzzd$Vc^JOG`UIQGr#O7 zI10`USL&L74XV4yHHK%i3*4`?Eya>$ZI-&d{Lq?5V^n%4pf*8q4nvs_Dd zF*5mqJVXoT%8h{^X?);@aLPqler35PZf3cLWgC#Wh4`lyKjKyPdqUQan#k&a74W3I z+5X1&qA6NZj430eOiAGuy;fJRTrUb3BcXi$f%G*_$l577HV@+GG@ruleW)mLUHLi|g$Tr&@{h~;mKFJ3%g-#&da1I?Kas8)Ob_w1^J`lj!^eb0Lz&2) zIbdS9Jpntp>wt>so@YbQ@^#^=&7j)sqzLDtiwp&!P5{XS64H>hL3IYS%yO-FqLp5e z*P7)TLb(%=J^P?!Iem!5Q+e{VE*51=pFEiKo`10hw= zLTG2doaA%TGZls?o3g5vbtcPo(NCbO9jp=fjeir%*ZZKRKN7waF8pk-g*?l((xr05Pr5Q5{m-ae z>uX|Cd@L<{FB&$C_Y^Gr+%e&iFV>W4KJye44l38dD!x7nzi4GFT{ugO8q{qPg`!DN zT{CS7zRcPW;VvPq0XxKM_~jX(6%Fv#0?~k(6CmYUXToMe&EU!(l|dTx&Fi{b#b=q+ zjV+P*HDRy#;;D}QDsRY46`#pD!ze4DBZY7jzI_CC&?scuE7y^& zsQ9t-(=|YlXK7J+Sia?J118f#nwfIlF8>k|N=Gy~ify^exi`ND^pKz5{YY&f+4WSw zrX^m3ik9?c$q_wSq`kWb6mo);Yu;zd^%*{)t?)04huXxy$oWBDyLJna;LI%7Xr~mc zaxET8arCVBtWQ}vvJc2C64vxw)n*dRnFf+bXyIk=V|F^VT&on@UmNZ;5#*#o{HB$S zd*3STrq}Gl5aJ=$C6yMo5qX7^ZVmYMM4YA+ue~Tz10_^*<`&YfuaT#(Px#m>*OGfN z;lj$bcv1aFS6YknAo4Kto}EP0RE7bEt8AK>jliWtik!mo(#Q*=oM0I#3q^x@mAq43 zWO1V`Cyx4Mn$HY(hTUWv8llJ*0m)KLx)g^isqKLEqtyiA(uGnh1llA`C053sZ~RNZ zWWAmME`k2ac|d;2R!V`udxpLK-OjxUV;k@P<`10ve||Y^!Xyo~rFb!qu4KH*P8{wT zi2RusrDd4n>2xjii)cDv;472g3fS}ummreWQOY#^WC@vD2n1p37Q!@A<=QLZ z7eZXsWq4}*Mf|SaLgaIuDc2D4DOk4<@lcARXYm`$B+Ip2WuzDicgnAkud_VF1k{E~ zPtUq4yi?^`@jv+U7?i$RJKe@B*Ku<#X>${tj?q(E_8RGAZXx0!^1l@AMbTn7?R97~ zHZ$(|VHy5U9~hr&^}}X~Q4Y#-EgV^dnMbYK#dn2QA+z$u{wLYjRPL%ifUiE;Oc7^) z;1RFnOZtR!uLwWxLdcV+;Z6Q8l_~Kct=9z7m%Nj;GrYmLp&RA_{7jHS7M^C_ui8S# z4i@#fRy$~-p_1{mnLOYJrF04t>##w&K2|%ONtSCWuB$f&YWsJFE8}LC>os{P1!dQc zBi^us#$j8$_5IHM?Ef9&J^Xp+9{W~WU;9UQ;3t0FxvlpA zFKhV5IITid_o_=hw~8}M-=Sr$!Bo{ik4|Ek3bzn3B|Dx#_j;#rT^TNGD&0c%#ii%9 zSc)nFqRWnYkr6Hw^3OPZW8hK9_j1;nTQC6#5V|lK`WB&*% z*85A-AJNDV65&AQ^QV3_%=kHw{OX5l%W}880)tfaU6a3>EcQ!Q2a1|xunn|qXe|k{ zm}yDQ;uRS1CuKP=IuNLws^eD5byt>a3T_PM1X-@N1Q|l7{#R|NL0r!bDaE!;-z&cj z_)P*7UtLW7i*)Q{?p1HC)kPkuNT;@;@l?PfJ*^(erDnS8FfV^?!VVrhw#46f>B$EJdk`f

  • align_corners (bool, optional) – Geometrically, we consider the pixels of the input and output as squares rather than points.
-If set to False, the input and output tensors are aligned by the
-center points of their corner pixels. If set to True, the input and
-output tensors are aligned by the corner points of their corner
-pixels, and the interpolation uses edge value padding for out-of-boundary values.
-This only has effect when mode is 'linear',
-'bilinear', 'bicubic', or 'trilinear'.
+If set to True, the input and output tensors are aligned by the
+center points of their corner pixels, preserving the values at the corner pixels.
+If set to False, the input and output tensors are aligned by the corner
+points of their corner pixels, and the interpolation uses edge value padding
+for out-of-boundary values, making this operation independent of input size
+when scale_factor is kept the same. This only has an effect when mode
+is 'linear', 'bilinear', 'bicubic' or 'trilinear'.
 Default: False

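(Reviewer aside: the corrected semantics above can be spot-checked with a short Python sketch. This snippet is not part of the patch; it assumes torch.nn.functional.interpolate with the align_corners keyword, as available in the PyTorch versions these docs cover.)

    import torch
    import torch.nn.functional as F

    x = torch.tensor([[[1.0, 2.0]]])  # shape (N=1, C=1, W=2)

    # align_corners=True: output samples are evenly spaced between the corner
    # pixel centers, so the corner values 1.0 and 2.0 are reproduced exactly.
    print(F.interpolate(x, size=4, mode='linear', align_corners=True))
    # tensor([[[1.0000, 1.3333, 1.6667, 2.0000]]])

    # align_corners=False (the default): the sampling grid is shifted by half
    # a pixel and out-of-range positions use edge value padding, so interior
    # values differ from the align_corners=True result.
    print(F.interpolate(x, size=4, mode='linear', align_corners=False))
    # tensor([[[1.0000, 1.2500, 1.7500, 2.0000]]])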
  •
@@ -2371,12 +2372,13 @@

    mode (str) – algorithm used for upsampling: 'nearest' | 'linear' | 'bilinear' | 'bicubic' | 'trilinear'. Default: 'nearest'

  • align_corners (bool, optional) – Geometrically, we consider the pixels of the input and output as squares rather than points.
-If set to False, the input and output tensors are aligned by the
-center points of their corner pixels. If set to True, the input and
-output tensors are aligned by the corner points of their corner
-pixels, and the interpolation uses edge value padding for out-of-boundary values.
-This only has effect when mode is 'linear',
-'bilinear', 'bicubic' or 'trilinear'.
+If set to True, the input and output tensors are aligned by the
+center points of their corner pixels, preserving the values at the corner pixels.
+If set to False, the input and output tensors are aligned by the corner
+points of their corner pixels, and the interpolation uses edge value padding
+for out-of-boundary values, making this operation independent of input size
+when scale_factor is kept the same. This only has an effect when mode
+is 'linear', 'bilinear', 'bicubic' or 'trilinear'.
 Default: False

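(A similar sketch, also not part of the patch, for the "independent of input size" claim: with align_corners=False the resampling weights depend only on scale_factor, whereas with align_corners=True the grid spacing (W_in - 1)/(W_out - 1) changes with the input length.)

    import torch
    import torch.nn.functional as F

    a = torch.arange(1.0, 5.0).reshape(1, 1, 4)  # length-4 input
    b = torch.arange(1.0, 9.0).reshape(1, 1, 8)  # length-8 input

    # Fixed scale_factor with align_corners=False: identical weights away from
    # the padded right edge, so the leading output values agree for both sizes.
    ua = F.interpolate(a, scale_factor=2, mode='linear', align_corners=False)
    ub = F.interpolate(b, scale_factor=2, mode='linear', align_corners=False)
    print(torch.allclose(ua[0, 0, :6], ub[0, 0, :6]))  # True

    # align_corners=True: the grid rescales with input length, so they differ.
    va = F.interpolate(a, scale_factor=2, mode='linear', align_corners=True)
    vb = F.interpolate(b, scale_factor=2, mode='linear', align_corners=True)
    print(torch.allclose(va[0, 0, :6], vb[0, 0, :6]))  # False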
  • diff --git a/docs/stable/objects.inv b/docs/stable/objects.inv index 79583e4e9175dad27d590ace0bdf02a5ad0c0ec5..7e989d607dc2f4ee61a8ce472378652917e36f48 100644 GIT binary patch delta 17 YcmcZ>c`b5+H(Q!PqM2#x#=uq`079n*x&QzG delta 17 YcmcZ>c`b5+H=9YCsga@K#=uq`06+l-RsaA1 diff --git a/docs/stable/searchindex.js b/docs/stable/searchindex.js index d150e5032aa9..da53fee741fe 100644 --- a/docs/stable/searchindex.js +++ b/docs/stable/searchindex.js @@ -1 +1 @@ -Search.setIndex({docnames:["__config__","autograd","bottleneck","checkpoint","community/contribution_guide","community/governance","community/persons_of_interest","cpp_extension","cuda","cuda_deterministic","cuda_deterministic_backward","cudnn_deterministic","cudnn_persistent_rnn","data","distributed","distributions","dlpack","hub","index","jit","model_zoo","multiprocessing","nn","nn.functional","nn.init","notes/autograd","notes/broadcasting","notes/cpu_threading_torchscript_inference","notes/cuda","notes/extending","notes/faq","notes/large_scale_deployments","notes/multiprocessing","notes/randomness","notes/serialization","notes/windows","onnx","optim","sparse","storage","tensor_attributes","tensorboard","tensors","torch","torchvision/datasets","torchvision/index","torchvision/models","torchvision/transforms","torchvision/utils","type_info"],envversion:{"sphinx.domains.c":1,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":1,"sphinx.domains.javascript":1,"sphinx.domains.math":2,"sphinx.domains.python":1,"sphinx.domains.rst":1,"sphinx.domains.std":1,"sphinx.ext.intersphinx":1,"sphinx.ext.todo":1,"sphinx.ext.viewcode":1,sphinx:56},filenames:["__config__.rst","autograd.rst","bottleneck.rst","checkpoint.rst","community/contribution_guide.rst","community/governance.rst","community/persons_of_interest.rst","cpp_extension.rst","cuda.rst","cuda_deterministic.rst","cuda_deterministic_backward.rst","cudnn_deterministic.rst","cudnn_persistent_rnn.rst","data.rst","distributed.rst","distributions.rst","dlpack.rst","hub.rst","index.rst","jit.rst","model_zoo.rst","multiprocessing.rst","nn.rst","nn.functional.rst","nn.init.rst","notes/autograd.rst","notes/broadcasting.rst","notes/cpu_threading_torchscript_inference.rst","notes/cuda.rst","notes/extending.rst","notes/faq.rst","notes/large_scale_deployments.rst","notes/multiprocessing.rst","notes/randomness.rst","notes/serialization.rst","notes/windows.rst","onnx.rst","optim.rst","sparse.rst","storage.rst","tensor_attributes.rst","tensorboard.rst","tensors.rst","torch.rst","torchvision/datasets.rst","torchvision/index.rst","torchvision/models.rst","torchvision/transforms.rst","torchvision/utils.rst","type_info.rst"],objects:{"":{"PYTORCH_JIT=1":[19,5,1,"-"],torch:[43,0,0,"-"],torchvision:[45,0,0,"-"]},"torch.ByteTensor":{all:[42,2,1,""],any:[42,2,1,""]},"torch.FloatStorage":{"byte":[39,2,1,""],"char":[39,2,1,""],"double":[39,2,1,""],"float":[39,2,1,""],"int":[39,2,1,""],"long":[39,2,1,""],"new":[39,2,1,""],"short":[39,2,1,""],bfloat16:[39,2,1,""],bool:[39,2,1,""],clone:[39,2,1,""],copy_:[39,2,1,""],cpu:[39,2,1,""],cuda:[39,2,1,""],data_ptr:[39,2,1,""],device:[39,3,1,""],dtype:[39,3,1,""],element_size:[39,2,1,""],fill_:[39,2,1,""],from_buffer:[39,2,1,""],from_file:[39,2,1,""],half:[39,2,1,""],is_cuda:[39,3,1,""],is_pinned:[39,2,1,""],is_shared:[39,2,1,""],is_sparse:[39,3,1,""],pin_memory:[39,2,1,""],resize_:[39,2,1,""],share_memory_:[39,2,1,""],size:[39,2,1,""],tolist:[39,2,1,""],type:[39,2,1,""]},"torch.Tensor":{"byte":[42,2,1,""],"char":[42,2,1,""],"double":[42,2,1,""],"float":[
42,2,1,""],"int":[42,2,1,""],"long":[42,2,1,""],"short":[42,2,1,""],"var":[42,2,1,""],T:[42,3,1,""],abs:[42,2,1,""],abs_:[42,2,1,""],acos:[42,2,1,""],acos_:[42,2,1,""],add:[42,2,1,""],add_:[42,2,1,""],addbmm:[42,2,1,""],addbmm_:[42,2,1,""],addcdiv:[42,2,1,""],addcdiv_:[42,2,1,""],addcmul:[42,2,1,""],addcmul_:[42,2,1,""],addmm:[42,2,1,""],addmm_:[42,2,1,""],addmv:[42,2,1,""],addmv_:[42,2,1,""],addr:[42,2,1,""],addr_:[42,2,1,""],allclose:[42,2,1,""],apply_:[42,2,1,""],argmax:[42,2,1,""],argmin:[42,2,1,""],argsort:[42,2,1,""],as_strided:[42,2,1,""],asin:[42,2,1,""],asin_:[42,2,1,""],atan2:[42,2,1,""],atan2_:[42,2,1,""],atan:[42,2,1,""],atan_:[42,2,1,""],backward:[42,2,1,""],baddbmm:[42,2,1,""],baddbmm_:[42,2,1,""],bernoulli:[42,2,1,""],bernoulli_:[42,2,1,""],bfloat16:[42,2,1,""],bincount:[42,2,1,""],bitwise_not:[42,2,1,""],bitwise_not_:[42,2,1,""],bmm:[42,2,1,""],bool:[42,2,1,""],cauchy_:[42,2,1,""],ceil:[42,2,1,""],ceil_:[42,2,1,""],cholesky:[42,2,1,""],cholesky_inverse:[42,2,1,""],cholesky_solve:[42,2,1,""],chunk:[42,2,1,""],clamp:[42,2,1,""],clamp_:[42,2,1,""],clone:[42,2,1,""],contiguous:[42,2,1,""],copy_:[42,2,1,""],cos:[42,2,1,""],cos_:[42,2,1,""],cosh:[42,2,1,""],cosh_:[42,2,1,""],cpu:[42,2,1,""],cross:[42,2,1,""],cuda:[42,2,1,""],cumprod:[42,2,1,""],cumsum:[42,2,1,""],data_ptr:[42,2,1,""],dense_dim:[42,2,1,""],dequantize:[42,2,1,""],det:[42,2,1,""],detach:[42,2,1,""],detach_:[42,2,1,""],device:[42,3,1,""],diag:[42,2,1,""],diag_embed:[42,2,1,""],diagflat:[42,2,1,""],diagonal:[42,2,1,""],digamma:[42,2,1,""],digamma_:[42,2,1,""],dim:[42,2,1,""],dist:[42,2,1,""],div:[42,2,1,""],div_:[42,2,1,""],dot:[42,2,1,""],eig:[42,2,1,""],element_size:[42,2,1,""],eq:[42,2,1,""],eq_:[42,2,1,""],equal:[42,2,1,""],erf:[42,2,1,""],erf_:[42,2,1,""],erfc:[42,2,1,""],erfc_:[42,2,1,""],erfinv:[42,2,1,""],erfinv_:[42,2,1,""],exp:[42,2,1,""],exp_:[42,2,1,""],expand:[42,2,1,""],expand_as:[42,2,1,""],expm1:[42,2,1,""],expm1_:[42,2,1,""],exponential_:[42,2,1,""],fft:[42,2,1,""],fill_:[42,2,1,""],fill_diagonal_:[42,2,1,""],flatten:[42,2,1,""],flip:[42,2,1,""],floor:[42,2,1,""],floor_:[42,2,1,""],fmod:[42,2,1,""],fmod_:[42,2,1,""],frac:[42,2,1,""],frac_:[42,2,1,""],gather:[42,2,1,""],ge:[42,2,1,""],ge_:[42,2,1,""],gels:[42,2,1,""],geometric_:[42,2,1,""],geqrf:[42,2,1,""],ger:[42,2,1,""],get_device:[42,2,1,""],grad:[42,3,1,""],gt:[42,2,1,""],gt_:[42,2,1,""],half:[42,2,1,""],hardshrink:[42,2,1,""],histc:[42,2,1,""],ifft:[42,2,1,""],index_add:[42,2,1,""],index_add_:[42,2,1,""],index_copy:[42,2,1,""],index_copy_:[42,2,1,""],index_fill:[42,2,1,""],index_fill_:[42,2,1,""],index_put:[42,2,1,""],index_put_:[42,2,1,""],index_select:[42,2,1,""],indices:[42,2,1,""],int_repr:[42,2,1,""],inverse:[42,2,1,""],irfft:[42,2,1,""],is_contiguous:[42,2,1,""],is_cuda:[42,3,1,""],is_floating_point:[42,2,1,""],is_leaf:[42,2,1,""],is_pinned:[42,2,1,""],is_set_to:[42,2,1,""],is_shared:[42,2,1,""],is_signed:[42,2,1,""],is_sparse:[42,2,1,""],item:[42,2,1,""],kthvalue:[42,2,1,""],le:[42,2,1,""],le_:[42,2,1,""],lerp:[42,2,1,""],lerp_:[42,2,1,""],log10:[42,2,1,""],log10_:[42,2,1,""],log1p:[42,2,1,""],log1p_:[42,2,1,""],log2:[42,2,1,""],log2_:[42,2,1,""],log:[42,2,1,""],log_:[42,2,1,""],log_normal_:[42,2,1,""],logdet:[42,2,1,""],logsumexp:[42,2,1,""],lstsq:[42,2,1,""],lt:[42,2,1,""],lt_:[42,2,1,""],lu:[42,2,1,""],lu_solve:[42,2,1,""],map_:[42,2,1,""],masked_fill:[42,2,1,""],masked_fill_:[42,2,1,""],masked_scatter:[42,2,1,""],masked_scatter_:[42,2,1,""],masked_select:[42,2,1,""],matmul:[42,2,1,""],matrix_power:[42,2,1,""],max:[42,2,1,""],mean:[42,2,
1,""],median:[42,2,1,""],min:[42,2,1,""],mm:[42,2,1,""],mode:[42,2,1,""],mul:[42,2,1,""],mul_:[42,2,1,""],multinomial:[42,2,1,""],mv:[42,2,1,""],mvlgamma:[42,2,1,""],mvlgamma_:[42,2,1,""],narrow:[42,2,1,""],narrow_copy:[42,2,1,""],ndim:[42,3,1,""],ndimension:[42,2,1,""],ne:[42,2,1,""],ne_:[42,2,1,""],neg:[42,2,1,""],neg_:[42,2,1,""],nelement:[42,2,1,""],new_empty:[42,2,1,""],new_full:[42,2,1,""],new_ones:[42,2,1,""],new_tensor:[42,2,1,""],new_zeros:[42,2,1,""],nonzero:[42,2,1,""],norm:[42,2,1,""],normal_:[42,2,1,""],numel:[42,2,1,""],numpy:[42,2,1,""],orgqr:[42,2,1,""],ormqr:[42,2,1,""],permute:[42,2,1,""],pin_memory:[42,2,1,""],pinverse:[42,2,1,""],pow:[42,2,1,""],pow_:[42,2,1,""],prod:[42,2,1,""],put_:[42,2,1,""],q_scale:[42,2,1,""],q_zero_point:[42,2,1,""],qr:[42,2,1,""],qscheme:[42,2,1,""],random_:[42,2,1,""],reciprocal:[42,2,1,""],reciprocal_:[42,2,1,""],register_hook:[42,2,1,""],remainder:[42,2,1,""],remainder_:[42,2,1,""],renorm:[42,2,1,""],renorm_:[42,2,1,""],repeat:[42,2,1,""],repeat_interleave:[42,2,1,""],requires_grad:[42,2,1,""],requires_grad_:[42,2,1,""],reshape:[42,2,1,""],reshape_as:[42,2,1,""],resize_:[42,2,1,""],resize_as_:[42,2,1,""],retain_grad:[42,2,1,""],rfft:[42,2,1,""],roll:[42,2,1,""],rot90:[42,2,1,""],round:[42,2,1,""],round_:[42,2,1,""],rsqrt:[42,2,1,""],rsqrt_:[42,2,1,""],scatter:[42,2,1,""],scatter_:[42,2,1,""],scatter_add:[42,2,1,""],scatter_add_:[42,2,1,""],select:[42,2,1,""],set_:[42,2,1,""],share_memory_:[42,2,1,""],sigmoid:[42,2,1,""],sigmoid_:[42,2,1,""],sign:[42,2,1,""],sign_:[42,2,1,""],sin:[42,2,1,""],sin_:[42,2,1,""],sinh:[42,2,1,""],sinh_:[42,2,1,""],size:[42,2,1,""],slogdet:[42,2,1,""],solve:[42,2,1,""],sort:[42,2,1,""],sparse_dim:[42,2,1,""],sparse_mask:[42,2,1,""],split:[42,2,1,""],sqrt:[42,2,1,""],sqrt_:[42,2,1,""],squeeze:[42,2,1,""],squeeze_:[42,2,1,""],std:[42,2,1,""],stft:[42,2,1,""],storage:[42,2,1,""],storage_offset:[42,2,1,""],storage_type:[42,2,1,""],stride:[42,2,1,""],sub:[42,2,1,""],sub_:[42,2,1,""],sum:[42,2,1,""],sum_to_size:[42,2,1,""],svd:[42,2,1,""],symeig:[42,2,1,""],t:[42,2,1,""],t_:[42,2,1,""],take:[42,2,1,""],tan:[42,2,1,""],tan_:[42,2,1,""],tanh:[42,2,1,""],tanh_:[42,2,1,""],to:[42,2,1,""],to_mkldnn:[42,2,1,""],to_sparse:[42,2,1,""],tolist:[42,2,1,""],topk:[42,2,1,""],trace:[42,2,1,""],transpose:[42,2,1,""],transpose_:[42,2,1,""],triangular_solve:[42,2,1,""],tril:[42,2,1,""],tril_:[42,2,1,""],triu:[42,2,1,""],triu_:[42,2,1,""],trunc:[42,2,1,""],trunc_:[42,2,1,""],type:[42,2,1,""],type_as:[42,2,1,""],unbind:[42,2,1,""],unfold:[42,2,1,""],uniform_:[42,2,1,""],unique:[42,2,1,""],unique_consecutive:[42,2,1,""],unsqueeze:[42,2,1,""],unsqueeze_:[42,2,1,""],values:[42,2,1,""],view:[42,2,1,""],view_as:[42,2,1,""],where:[42,2,1,""],zero_:[42,2,1,""]},"torch._C":{Generator:[43,1,1,""]},"torch._C.Generator":{device:[43,3,1,""],get_state:[43,2,1,""],initial_seed:[43,2,1,""],manual_seed:[43,2,1,""],seed:[43,2,1,""],set_state:[43,2,1,""]},"torch.__config__":{parallel_info:[0,4,1,""],show:[0,4,1,""]},"torch.autograd":{Function:[1,1,1,""],backward:[1,4,1,""],detect_anomaly:[1,1,1,""],enable_grad:[1,1,1,""],grad:[1,4,1,""],gradcheck:[1,4,1,""],gradgradcheck:[1,4,1,""],no_grad:[1,1,1,""],set_detect_anomaly:[1,1,1,""],set_grad_enabled:[1,1,1,""]},"torch.autograd.Function":{backward:[1,2,1,""],forward:[1,2,1,""]},"torch.autograd.profiler":{emit_nvtx:[1,1,1,""],load_nvprof:[1,4,1,""],profile:[1,1,1,""]},"torch.autograd.profiler.profile":{export_chrome_trace:[1,2,1,""],key_averages:[1,2,1,""],self_cpu_time_total:[1,2,1,""],table:[1,2,1,""],total_aver
age:[1,2,1,""]},"torch.cuda":{Event:[8,1,1,""],Stream:[8,1,1,""],current_blas_handle:[8,4,1,""],current_device:[8,4,1,""],current_stream:[8,4,1,""],default_stream:[8,4,1,""],device:[8,1,1,""],device_count:[8,4,1,""],device_of:[8,1,1,""],empty_cache:[8,4,1,""],get_device_capability:[8,4,1,""],get_device_name:[8,4,1,""],get_rng_state:[8,4,1,""],get_rng_state_all:[8,4,1,""],init:[8,4,1,""],initial_seed:[8,4,1,""],ipc_collect:[8,4,1,""],is_available:[8,4,1,""],manual_seed:[8,4,1,""],manual_seed_all:[8,4,1,""],max_memory_allocated:[8,4,1,""],max_memory_cached:[8,4,1,""],memory_allocated:[8,4,1,""],memory_cached:[8,4,1,""],reset_max_memory_allocated:[8,4,1,""],reset_max_memory_cached:[8,4,1,""],seed:[8,4,1,""],seed_all:[8,4,1,""],set_device:[8,4,1,""],set_rng_state:[8,4,1,""],set_rng_state_all:[8,4,1,""],stream:[8,4,1,""],synchronize:[8,4,1,""]},"torch.cuda.Event":{elapsed_time:[8,2,1,""],from_ipc_handle:[8,2,1,""],ipc_handle:[8,2,1,""],query:[8,2,1,""],record:[8,2,1,""],synchronize:[8,2,1,""],wait:[8,2,1,""]},"torch.cuda.Stream":{query:[8,2,1,""],record_event:[8,2,1,""],synchronize:[8,2,1,""],wait_event:[8,2,1,""],wait_stream:[8,2,1,""]},"torch.cuda.comm":{broadcast:[8,4,1,""],broadcast_coalesced:[8,4,1,""],gather:[8,4,1,""],reduce_add:[8,4,1,""],scatter:[8,4,1,""]},"torch.cuda.nvtx":{mark:[8,4,1,""],range_pop:[8,4,1,""],range_push:[8,4,1,""]},"torch.distributed":{Backend:[14,1,1,""],ReduceOp:[14,1,1,""],all_gather:[14,4,1,""],all_gather_multigpu:[14,4,1,""],all_reduce:[14,4,1,""],all_reduce_multigpu:[14,4,1,""],barrier:[14,4,1,""],broadcast:[14,4,1,""],broadcast_multigpu:[14,4,1,""],gather:[14,4,1,""],get_backend:[14,4,1,""],get_rank:[14,4,1,""],get_world_size:[14,4,1,""],init_process_group:[14,4,1,""],irecv:[14,4,1,""],is_initialized:[14,4,1,""],is_mpi_available:[14,4,1,""],is_nccl_available:[14,4,1,""],isend:[14,4,1,""],launch:[14,0,0,"-"],new_group:[14,4,1,""],recv:[14,4,1,""],reduce:[14,4,1,""],reduce_multigpu:[14,4,1,""],reduce_op:[14,1,1,""],scatter:[14,4,1,""],send:[14,4,1,""]},"torch.distributions":{constraint_registry:[15,0,0,"-"],constraints:[15,0,0,"-"],kl:[15,0,0,"-"],transforms:[15,0,0,"-"]},"torch.distributions.bernoulli":{Bernoulli:[15,1,1,""]},"torch.distributions.bernoulli.Bernoulli":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,3,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.beta":{Beta:[15,1,1,""]},"torch.distributions.beta.Beta":{arg_constraints:[15,3,1,""],concentration0:[15,2,1,""],concentration1:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.binomial":{Binomial:[15,1,1,""]},"torch.distributions.binomial.Binomial":{arg_constraints:[15,3,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,3,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.categorical":{Categorical:[15,1,1,""]},"torch.distributions.categorical.Categorical":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,3,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],
sample:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.cauchy":{Cauchy:[15,1,1,""]},"torch.distributions.cauchy.Cauchy":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.chi2":{Chi2:[15,1,1,""]},"torch.distributions.chi2.Chi2":{arg_constraints:[15,3,1,""],df:[15,2,1,""],expand:[15,2,1,""]},"torch.distributions.constraint_registry":{ConstraintRegistry:[15,1,1,""]},"torch.distributions.constraint_registry.ConstraintRegistry":{register:[15,2,1,""]},"torch.distributions.constraints":{Constraint:[15,1,1,""],cat:[15,3,1,""],dependent_property:[15,3,1,""],greater_than:[15,3,1,""],greater_than_eq:[15,3,1,""],half_open_interval:[15,3,1,""],integer_interval:[15,3,1,""],interval:[15,3,1,""],less_than:[15,3,1,""],stack:[15,3,1,""]},"torch.distributions.constraints.Constraint":{check:[15,2,1,""]},"torch.distributions.dirichlet":{Dirichlet:[15,1,1,""]},"torch.distributions.dirichlet.Dirichlet":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.distribution":{Distribution:[15,1,1,""]},"torch.distributions.distribution.Distribution":{arg_constraints:[15,2,1,""],batch_shape:[15,2,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],event_shape:[15,2,1,""],expand:[15,2,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],perplexity:[15,2,1,""],rsample:[15,2,1,""],sample:[15,2,1,""],sample_n:[15,2,1,""],stddev:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.exp_family":{ExponentialFamily:[15,1,1,""]},"torch.distributions.exp_family.ExponentialFamily":{entropy:[15,2,1,""]},"torch.distributions.exponential":{Exponential:[15,1,1,""]},"torch.distributions.exponential.Exponential":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],stddev:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.fishersnedecor":{FisherSnedecor:[15,1,1,""]},"torch.distributions.fishersnedecor.FisherSnedecor":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.gamma":{Gamma:[15,1,1,""]},"torch.distributions.gamma.Gamma":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.geometric":{Geometric:[15,1,1,""]},"torch.distributions.geometric.Geometric":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.gumbel":{Gumbel:[15,1,1,""]},"torch.distributions.gumbel.Gumbel":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],stddev:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.half_cauchy":{HalfCauchy:[15,1,1,""]},"torch.distributions.half_cauchy.HalfCauchy":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],
expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],scale:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.half_normal":{HalfNormal:[15,1,1,""]},"torch.distributions.half_normal.HalfNormal":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],scale:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.independent":{Independent:[15,1,1,""]},"torch.distributions.independent.Independent":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,2,1,""],has_rsample:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],sample:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.kl":{kl_divergence:[15,4,1,""],register_kl:[15,4,1,""]},"torch.distributions.laplace":{Laplace:[15,1,1,""]},"torch.distributions.laplace.Laplace":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],stddev:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.log_normal":{LogNormal:[15,1,1,""]},"torch.distributions.log_normal.LogNormal":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],loc:[15,2,1,""],mean:[15,2,1,""],scale:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.lowrank_multivariate_normal":{LowRankMultivariateNormal:[15,1,1,""]},"torch.distributions.lowrank_multivariate_normal.LowRankMultivariateNormal":{arg_constraints:[15,3,1,""],covariance_matrix:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],precision_matrix:[15,3,1,""],rsample:[15,2,1,""],scale_tril:[15,3,1,""],support:[15,3,1,""],variance:[15,3,1,""]},"torch.distributions.multinomial":{Multinomial:[15,1,1,""]},"torch.distributions.multinomial.Multinomial":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],logits:[15,2,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,2,1,""],sample:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.multivariate_normal":{MultivariateNormal:[15,1,1,""]},"torch.distributions.multivariate_normal.MultivariateNormal":{arg_constraints:[15,3,1,""],covariance_matrix:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],precision_matrix:[15,3,1,""],rsample:[15,2,1,""],scale_tril:[15,3,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.negative_binomial":{NegativeBinomial:[15,1,1,""]},"torch.distributions.negative_binomial.NegativeBinomial":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.normal":{Normal:[15,1,1,""]},"torch.distributions.normal.Normal":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],sample:[15,2,1,""],stddev:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.one_hot_categorical":{OneHotCategorical:[15,1,1,""]},"torch.distributions.one_hot_categorical.OneHotCategorical":{arg_const
raints:[15,3,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,3,1,""],log_prob:[15,2,1,""],logits:[15,2,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,2,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.pareto":{Pareto:[15,1,1,""]},"torch.distributions.pareto.Pareto":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],mean:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.poisson":{Poisson:[15,1,1,""]},"torch.distributions.poisson.Poisson":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.relaxed_bernoulli":{LogitRelaxedBernoulli:[15,1,1,""],RelaxedBernoulli:[15,1,1,""]},"torch.distributions.relaxed_bernoulli.LogitRelaxedBernoulli":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],rsample:[15,2,1,""],support:[15,3,1,""]},"torch.distributions.relaxed_bernoulli.RelaxedBernoulli":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],logits:[15,2,1,""],probs:[15,2,1,""],support:[15,3,1,""],temperature:[15,2,1,""]},"torch.distributions.relaxed_categorical":{RelaxedOneHotCategorical:[15,1,1,""]},"torch.distributions.relaxed_categorical.RelaxedOneHotCategorical":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],logits:[15,2,1,""],probs:[15,2,1,""],support:[15,3,1,""],temperature:[15,2,1,""]},"torch.distributions.studentT":{StudentT:[15,1,1,""]},"torch.distributions.studentT.StudentT":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.transformed_distribution":{TransformedDistribution:[15,1,1,""]},"torch.distributions.transformed_distribution.TransformedDistribution":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,2,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],rsample:[15,2,1,""],sample:[15,2,1,""],support:[15,2,1,""]},"torch.distributions.transforms":{AbsTransform:[15,1,1,""],AffineTransform:[15,1,1,""],CatTransform:[15,1,1,""],ComposeTransform:[15,1,1,""],ExpTransform:[15,1,1,""],LowerCholeskyTransform:[15,1,1,""],PowerTransform:[15,1,1,""],SigmoidTransform:[15,1,1,""],SoftmaxTransform:[15,1,1,""],StackTransform:[15,1,1,""],StickBreakingTransform:[15,1,1,""],Transform:[15,1,1,""]},"torch.distributions.transforms.Transform":{inv:[15,2,1,""],log_abs_det_jacobian:[15,2,1,""],sign:[15,2,1,""]},"torch.distributions.uniform":{Uniform:[15,1,1,""]},"torch.distributions.uniform.Uniform":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],stddev:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.weibull":{Weibull:[15,1,1,""]},"torch.distributions.weibull.Weibull":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],mean:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.hub":{help:[17,4,1,""],list:[17,4,1,""],load:[17,4,1,""],set_dir:[17,4,1,""]},"torch.jit":{ScriptModule:[19,1,1,""],load:[19,4,1,""],save:[19,4,1,""],script:[19,4,1,""],trace:[19,4,1,""]},"torch.multiprocessing":{SpawnContext:[21,1,1,""],get_all_sharing_strategies:[21,4,1,""],get_sharing_strategy:[21,4,
1,""],set_sharing_strategy:[21,4,1,""],spawn:[21,4,1,""]},"torch.multiprocessing.SpawnContext":{join:[21,2,1,""]},"torch.nn":{AdaptiveAvgPool1d:[22,1,1,""],AdaptiveAvgPool2d:[22,1,1,""],AdaptiveAvgPool3d:[22,1,1,""],AdaptiveLogSoftmaxWithLoss:[22,1,1,""],AdaptiveMaxPool1d:[22,1,1,""],AdaptiveMaxPool2d:[22,1,1,""],AdaptiveMaxPool3d:[22,1,1,""],AlphaDropout:[22,1,1,""],AvgPool1d:[22,1,1,""],AvgPool2d:[22,1,1,""],AvgPool3d:[22,1,1,""],BCELoss:[22,1,1,""],BCEWithLogitsLoss:[22,1,1,""],BatchNorm1d:[22,1,1,""],BatchNorm2d:[22,1,1,""],BatchNorm3d:[22,1,1,""],Bilinear:[22,1,1,""],CELU:[22,1,1,""],CTCLoss:[22,1,1,""],ConstantPad1d:[22,1,1,""],ConstantPad2d:[22,1,1,""],ConstantPad3d:[22,1,1,""],Conv1d:[22,1,1,""],Conv2d:[22,1,1,""],Conv3d:[22,1,1,""],ConvTranspose1d:[22,1,1,""],ConvTranspose2d:[22,1,1,""],ConvTranspose3d:[22,1,1,""],CosineEmbeddingLoss:[22,1,1,""],CosineSimilarity:[22,1,1,""],CrossEntropyLoss:[22,1,1,""],DataParallel:[22,1,1,""],Dropout2d:[22,1,1,""],Dropout3d:[22,1,1,""],Dropout:[22,1,1,""],ELU:[22,1,1,""],Embedding:[22,1,1,""],EmbeddingBag:[22,1,1,""],Fold:[22,1,1,""],FractionalMaxPool2d:[22,1,1,""],GRU:[22,1,1,""],GRUCell:[22,1,1,""],GroupNorm:[22,1,1,""],Hardshrink:[22,1,1,""],Hardtanh:[22,1,1,""],HingeEmbeddingLoss:[22,1,1,""],Identity:[22,1,1,""],InstanceNorm1d:[22,1,1,""],InstanceNorm2d:[22,1,1,""],InstanceNorm3d:[22,1,1,""],KLDivLoss:[22,1,1,""],L1Loss:[22,1,1,""],LPPool1d:[22,1,1,""],LPPool2d:[22,1,1,""],LSTM:[22,1,1,""],LSTMCell:[22,1,1,""],LayerNorm:[22,1,1,""],LeakyReLU:[22,1,1,""],Linear:[22,1,1,""],LocalResponseNorm:[22,1,1,""],LogSigmoid:[22,1,1,""],LogSoftmax:[22,1,1,""],MSELoss:[22,1,1,""],MarginRankingLoss:[22,1,1,""],MaxPool1d:[22,1,1,""],MaxPool2d:[22,1,1,""],MaxPool3d:[22,1,1,""],MaxUnpool1d:[22,1,1,""],MaxUnpool2d:[22,1,1,""],MaxUnpool3d:[22,1,1,""],Module:[22,1,1,""],ModuleDict:[22,1,1,""],ModuleList:[22,1,1,""],MultiLabelMarginLoss:[22,1,1,""],MultiLabelSoftMarginLoss:[22,1,1,""],MultiMarginLoss:[22,1,1,""],MultiheadAttention:[22,1,1,""],NLLLoss:[22,1,1,""],PReLU:[22,1,1,""],PairwiseDistance:[22,1,1,""],Parameter:[22,1,1,""],ParameterDict:[22,1,1,""],ParameterList:[22,1,1,""],PixelShuffle:[22,1,1,""],PoissonNLLLoss:[22,1,1,""],RNN:[22,1,1,""],RNNCell:[22,1,1,""],RReLU:[22,1,1,""],ReLU6:[22,1,1,""],ReLU:[22,1,1,""],ReflectionPad1d:[22,1,1,""],ReflectionPad2d:[22,1,1,""],ReplicationPad1d:[22,1,1,""],ReplicationPad2d:[22,1,1,""],ReplicationPad3d:[22,1,1,""],SELU:[22,1,1,""],Sequential:[22,1,1,""],Sigmoid:[22,1,1,""],SmoothL1Loss:[22,1,1,""],SoftMarginLoss:[22,1,1,""],Softmax2d:[22,1,1,""],Softmax:[22,1,1,""],Softmin:[22,1,1,""],Softplus:[22,1,1,""],Softshrink:[22,1,1,""],Softsign:[22,1,1,""],SyncBatchNorm:[22,1,1,""],Tanh:[22,1,1,""],Tanhshrink:[22,1,1,""],Threshold:[22,1,1,""],Transformer:[22,1,1,""],TransformerDecoder:[22,1,1,""],TransformerDecoderLayer:[22,1,1,""],TransformerEncoder:[22,1,1,""],TransformerEncoderLayer:[22,1,1,""],TripletMarginLoss:[22,1,1,""],Unfold:[22,1,1,""],Upsample:[22,1,1,""],UpsamplingBilinear2d:[22,1,1,""],UpsamplingNearest2d:[22,1,1,""],ZeroPad2d:[22,1,1,""]},"torch.nn.AdaptiveLogSoftmaxWithLoss":{log_prob:[22,2,1,""],predict:[22,2,1,""]},"torch.nn.Embedding":{from_pretrained:[22,2,1,""]},"torch.nn.EmbeddingBag":{from_pretrained:[22,2,1,""]},"torch.nn.Module":{"double":[22,2,1,""],"float":[22,2,1,""],add_module:[22,2,1,""],apply:[22,2,1,""],buffers:[22,2,1,""],children:[22,2,1,""],cpu:[22,2,1,""],cuda:[22,2,1,""],dump_patches:[22,3,1,""],eval:[22,2,1,""],extra_repr:[22,2,1,""],forward:[22,2,1,""],half:[22,2,1,""],load_state_dict:[22,2
,1,""],modules:[22,2,1,""],named_buffers:[22,2,1,""],named_children:[22,2,1,""],named_modules:[22,2,1,""],named_parameters:[22,2,1,""],parameters:[22,2,1,""],register_backward_hook:[22,2,1,""],register_buffer:[22,2,1,""],register_forward_hook:[22,2,1,""],register_forward_pre_hook:[22,2,1,""],register_parameter:[22,2,1,""],requires_grad_:[22,2,1,""],state_dict:[22,2,1,""],to:[22,2,1,""],train:[22,2,1,""],type:[22,2,1,""],zero_grad:[22,2,1,""]},"torch.nn.ModuleDict":{clear:[22,2,1,""],items:[22,2,1,""],keys:[22,2,1,""],pop:[22,2,1,""],update:[22,2,1,""],values:[22,2,1,""]},"torch.nn.ModuleList":{append:[22,2,1,""],extend:[22,2,1,""],insert:[22,2,1,""]},"torch.nn.MultiheadAttention":{forward:[22,2,1,""]},"torch.nn.ParameterDict":{clear:[22,2,1,""],items:[22,2,1,""],keys:[22,2,1,""],pop:[22,2,1,""],update:[22,2,1,""],values:[22,2,1,""]},"torch.nn.ParameterList":{append:[22,2,1,""],extend:[22,2,1,""]},"torch.nn.SyncBatchNorm":{convert_sync_batchnorm:[22,2,1,""]},"torch.nn.Transformer":{forward:[22,2,1,""],generate_square_subsequent_mask:[22,2,1,""]},"torch.nn.TransformerDecoder":{forward:[22,2,1,""]},"torch.nn.TransformerDecoderLayer":{forward:[22,2,1,""]},"torch.nn.TransformerEncoder":{forward:[22,2,1,""]},"torch.nn.TransformerEncoderLayer":{forward:[22,2,1,""]},"torch.nn.functional":{adaptive_avg_pool1d:[23,4,1,""],adaptive_avg_pool2d:[23,4,1,""],adaptive_avg_pool3d:[23,4,1,""],adaptive_max_pool1d:[23,4,1,""],adaptive_max_pool2d:[23,4,1,""],adaptive_max_pool3d:[23,4,1,""],affine_grid:[23,4,1,""],alpha_dropout:[23,4,1,""],avg_pool1d:[23,4,1,""],avg_pool2d:[23,4,1,""],avg_pool3d:[23,4,1,""],batch_norm:[23,4,1,""],bilinear:[23,4,1,""],binary_cross_entropy:[23,4,1,""],binary_cross_entropy_with_logits:[23,4,1,""],celu:[23,4,1,""],conv1d:[23,4,1,""],conv2d:[23,4,1,""],conv3d:[23,4,1,""],conv_transpose1d:[23,4,1,""],conv_transpose2d:[23,4,1,""],conv_transpose3d:[23,4,1,""],cosine_embedding_loss:[23,4,1,""],cosine_similarity:[23,4,1,""],cross_entropy:[23,4,1,""],ctc_loss:[23,4,1,""],dropout2d:[23,4,1,""],dropout3d:[23,4,1,""],dropout:[23,4,1,""],elu:[23,4,1,""],elu_:[23,4,1,""],embedding:[23,4,1,""],embedding_bag:[23,4,1,""],fold:[23,4,1,""],gelu:[23,4,1,""],glu:[23,4,1,""],grid_sample:[23,4,1,""],gumbel_softmax:[23,4,1,""],hardshrink:[23,4,1,""],hardtanh:[23,4,1,""],hardtanh_:[23,4,1,""],hinge_embedding_loss:[23,4,1,""],instance_norm:[23,4,1,""],interpolate:[23,4,1,""],kl_div:[23,4,1,""],l1_loss:[23,4,1,""],layer_norm:[23,4,1,""],leaky_relu:[23,4,1,""],leaky_relu_:[23,4,1,""],linear:[23,4,1,""],local_response_norm:[23,4,1,""],log_softmax:[23,4,1,""],logsigmoid:[23,4,1,""],lp_pool1d:[23,4,1,""],lp_pool2d:[23,4,1,""],margin_ranking_loss:[23,4,1,""],max_pool1d:[23,4,1,""],max_pool2d:[23,4,1,""],max_pool3d:[23,4,1,""],max_unpool1d:[23,4,1,""],max_unpool2d:[23,4,1,""],max_unpool3d:[23,4,1,""],mse_loss:[23,4,1,""],multi_margin_loss:[23,4,1,""],multilabel_margin_loss:[23,4,1,""],multilabel_soft_margin_loss:[23,4,1,""],nll_loss:[23,4,1,""],normalize:[23,4,1,""],one_hot:[23,4,1,""],pad:[23,4,1,""],pairwise_distance:[23,4,1,""],pdist:[23,4,1,""],pixel_shuffle:[23,4,1,""],poisson_nll_loss:[23,4,1,""],prelu:[23,4,1,""],relu6:[23,4,1,""],relu:[23,4,1,""],relu_:[23,4,1,""],rrelu:[23,4,1,""],rrelu_:[23,4,1,""],selu:[23,4,1,""],sigmoid:[23,4,1,""],smooth_l1_loss:[23,4,1,""],soft_margin_loss:[23,4,1,""],softmax:[23,4,1,""],softmin:[23,4,1,""],softplus:[23,4,1,""],softshrink:[23,4,1,""],softsign:[23,4,1,""],tanh:[23,4,1,""],tanhshrink:[23,4,1,""],threshold:[23,4,1,""],threshold_:[23,4,1,""],triplet_margin_loss:[23,4,1
,""],unfold:[23,4,1,""],upsample:[23,4,1,""],upsample_bilinear:[23,4,1,""],upsample_nearest:[23,4,1,""]},"torch.nn.init":{calculate_gain:[24,4,1,""],constant_:[24,4,1,""],dirac_:[24,4,1,""],eye_:[24,4,1,""],kaiming_normal_:[24,4,1,""],kaiming_uniform_:[24,4,1,""],normal_:[24,4,1,""],ones_:[24,4,1,""],orthogonal_:[24,4,1,""],sparse_:[24,4,1,""],uniform_:[24,4,1,""],xavier_normal_:[24,4,1,""],xavier_uniform_:[24,4,1,""],zeros_:[24,4,1,""]},"torch.nn.parallel":{DistributedDataParallel:[22,1,1,""],data_parallel:[23,4,1,""]},"torch.nn.parallel.DistributedDataParallel":{no_sync:[22,2,1,""]},"torch.nn.utils":{clip_grad_norm_:[22,4,1,""],clip_grad_value_:[22,4,1,""],parameters_to_vector:[22,4,1,""],remove_spectral_norm:[22,4,1,""],remove_weight_norm:[22,4,1,""],spectral_norm:[22,4,1,""],vector_to_parameters:[22,4,1,""],weight_norm:[22,4,1,""]},"torch.nn.utils.rnn":{PackedSequence:[22,4,1,""],pack_padded_sequence:[22,4,1,""],pack_sequence:[22,4,1,""],pad_packed_sequence:[22,4,1,""],pad_sequence:[22,4,1,""]},"torch.onnx":{"export":[36,4,1,""],is_in_onnx_export:[36,4,1,""],register_custom_op_symbolic:[36,4,1,""],set_training:[36,4,1,""]},"torch.onnx.operators":{shape_as_tensor:[36,4,1,""]},"torch.optim":{ASGD:[37,1,1,""],Adadelta:[37,1,1,""],Adagrad:[37,1,1,""],Adam:[37,1,1,""],AdamW:[37,1,1,""],Adamax:[37,1,1,""],LBFGS:[37,1,1,""],Optimizer:[37,1,1,""],RMSprop:[37,1,1,""],Rprop:[37,1,1,""],SGD:[37,1,1,""],SparseAdam:[37,1,1,""]},"torch.optim.ASGD":{step:[37,2,1,""]},"torch.optim.Adadelta":{step:[37,2,1,""]},"torch.optim.Adagrad":{step:[37,2,1,""]},"torch.optim.Adam":{step:[37,2,1,""]},"torch.optim.AdamW":{step:[37,2,1,""]},"torch.optim.Adamax":{step:[37,2,1,""]},"torch.optim.LBFGS":{step:[37,2,1,""]},"torch.optim.Optimizer":{add_param_group:[37,2,1,""],load_state_dict:[37,2,1,""],state_dict:[37,2,1,""],step:[37,2,1,""],zero_grad:[37,2,1,""]},"torch.optim.RMSprop":{step:[37,2,1,""]},"torch.optim.Rprop":{step:[37,2,1,""]},"torch.optim.SGD":{step:[37,2,1,""]},"torch.optim.SparseAdam":{step:[37,2,1,""]},"torch.optim.lr_scheduler":{CosineAnnealingLR:[37,1,1,""],CyclicLR:[37,1,1,""],ExponentialLR:[37,1,1,""],LambdaLR:[37,1,1,""],MultiStepLR:[37,1,1,""],ReduceLROnPlateau:[37,1,1,""],StepLR:[37,1,1,""]},"torch.optim.lr_scheduler.CyclicLR":{get_lr:[37,2,1,""]},"torch.optim.lr_scheduler.LambdaLR":{load_state_dict:[37,2,1,""],state_dict:[37,2,1,""]},"torch.quasirandom":{SobolEngine:[43,1,1,""]},"torch.quasirandom.SobolEngine":{draw:[43,2,1,""],fast_forward:[43,2,1,""],reset:[43,2,1,""]},"torch.sparse":{FloatTensor:[38,1,1,""],addmm:[38,4,1,""],mm:[38,4,1,""],sum:[38,4,1,""]},"torch.sparse.FloatTensor":{_indices:[38,2,1,""],_nnz:[38,2,1,""],_values:[38,2,1,""],add:[38,2,1,""],add_:[38,2,1,""],clone:[38,2,1,""],coalesce:[38,2,1,""],dim:[38,2,1,""],div:[38,2,1,""],div_:[38,2,1,""],get_device:[38,2,1,""],hspmm:[38,2,1,""],is_coalesced:[38,2,1,""],mm:[38,2,1,""],mul:[38,2,1,""],mul_:[38,2,1,""],narrow_copy:[38,2,1,""],resizeAs_:[38,2,1,""],size:[38,2,1,""],spadd:[38,2,1,""],spmm:[38,2,1,""],sspaddmm:[38,2,1,""],sspmm:[38,2,1,""],sub:[38,2,1,""],sub_:[38,2,1,""],t_:[38,2,1,""],toDense:[38,2,1,""],transpose:[38,2,1,""],transpose_:[38,2,1,""],zero_:[38,2,1,""]},"torch.torch":{default_generator:[43,3,1,""],device:[40,1,1,""],dtype:[40,1,1,""],finfo:[49,1,1,""],iinfo:[49,1,1,""],layout:[40,1,1,""]},"torch.utils":{data:[13,0,0,"-"],model_zoo:[20,0,0,"-"]},"torch.utils.checkpoint":{checkpoint:[3,4,1,""],checkpoint_sequential:[3,4,1,""]},"torch.utils.cpp_extension":{BuildExtension:[7,4,1,""],CUDAExtension:[7,4,1,""],CppExte
nsion:[7,4,1,""],check_compiler_abi_compatibility:[7,4,1,""],include_paths:[7,4,1,""],load:[7,4,1,""],load_inline:[7,4,1,""],verify_ninja_availability:[7,4,1,""]},"torch.utils.data":{BatchSampler:[13,1,1,""],ChainDataset:[13,1,1,""],ConcatDataset:[13,1,1,""],DataLoader:[13,1,1,""],Dataset:[13,1,1,""],IterableDataset:[13,1,1,""],RandomSampler:[13,1,1,""],Sampler:[13,1,1,""],SequentialSampler:[13,1,1,""],Subset:[13,1,1,""],SubsetRandomSampler:[13,1,1,""],TensorDataset:[13,1,1,""],WeightedRandomSampler:[13,1,1,""],get_worker_info:[13,4,1,""],random_split:[13,4,1,""]},"torch.utils.data.distributed":{DistributedSampler:[13,1,1,""]},"torch.utils.dlpack":{from_dlpack:[16,4,1,""],to_dlpack:[16,4,1,""]},"torch.utils.model_zoo":{load_url:[20,4,1,""]},"torch.utils.tensorboard.writer":{SummaryWriter:[41,1,1,""]},"torch.utils.tensorboard.writer.SummaryWriter":{__init__:[41,2,1,""],add_audio:[41,2,1,""],add_custom_scalars:[41,2,1,""],add_embedding:[41,2,1,""],add_figure:[41,2,1,""],add_graph:[41,2,1,""],add_histogram:[41,2,1,""],add_image:[41,2,1,""],add_images:[41,2,1,""],add_mesh:[41,2,1,""],add_pr_curve:[41,2,1,""],add_scalar:[41,2,1,""],add_scalars:[41,2,1,""],add_text:[41,2,1,""],add_video:[41,2,1,""],close:[41,2,1,""],flush:[41,2,1,""]},"torchvision.datasets":{CIFAR100:[44,1,1,""],CIFAR10:[44,1,1,""],Cityscapes:[44,1,1,""],CocoCaptions:[44,1,1,""],CocoDetection:[44,1,1,""],DatasetFolder:[44,1,1,""],EMNIST:[44,1,1,""],FakeData:[44,1,1,""],FashionMNIST:[44,1,1,""],Flickr30k:[44,1,1,""],Flickr8k:[44,1,1,""],HMDB51:[44,1,1,""],ImageFolder:[44,1,1,""],ImageNet:[44,1,1,""],KMNIST:[44,1,1,""],Kinetics400:[44,1,1,""],LSUN:[44,1,1,""],MNIST:[44,1,1,""],PhotoTour:[44,1,1,""],QMNIST:[44,1,1,""],SBDataset:[44,1,1,""],SBU:[44,1,1,""],STL10:[44,1,1,""],SVHN:[44,1,1,""],UCF101:[44,1,1,""],USPS:[44,1,1,""],VOCDetection:[44,1,1,""],VOCSegmentation:[44,1,1,""]},"torchvision.datasets.CIFAR10":{__getitem__:[44,2,1,""]},"torchvision.datasets.Cityscapes":{__getitem__:[44,2,1,""]},"torchvision.datasets.CocoCaptions":{__getitem__:[44,2,1,""]},"torchvision.datasets.CocoDetection":{__getitem__:[44,2,1,""]},"torchvision.datasets.DatasetFolder":{__getitem__:[44,2,1,""]},"torchvision.datasets.Flickr30k":{__getitem__:[44,2,1,""]},"torchvision.datasets.Flickr8k":{__getitem__:[44,2,1,""]},"torchvision.datasets.ImageFolder":{__getitem__:[44,2,1,""]},"torchvision.datasets.LSUN":{__getitem__:[44,2,1,""]},"torchvision.datasets.PhotoTour":{__getitem__:[44,2,1,""]},"torchvision.datasets.SBU":{__getitem__:[44,2,1,""]},"torchvision.datasets.STL10":{__getitem__:[44,2,1,""]},"torchvision.datasets.SVHN":{__getitem__:[44,2,1,""]},"torchvision.datasets.USPS":{__getitem__:[44,2,1,""]},"torchvision.datasets.VOCDetection":{__getitem__:[44,2,1,""]},"torchvision.datasets.VOCSegmentation":{__getitem__:[44,2,1,""]},"torchvision.models":{alexnet:[46,4,1,""],densenet121:[46,4,1,""],densenet161:[46,4,1,""],densenet169:[46,4,1,""],densenet201:[46,4,1,""],googlenet:[46,4,1,""],inception_v3:[46,4,1,""],mnasnet0_5:[46,4,1,""],mnasnet0_75:[46,4,1,""],mnasnet1_0:[46,4,1,""],mnasnet1_3:[46,4,1,""],mobilenet_v2:[46,4,1,""],resnet101:[46,4,1,""],resnet152:[46,4,1,""],resnet18:[46,4,1,""],resnet34:[46,4,1,""],resnet50:[46,4,1,""],resnext101_32x8d:[46,4,1,""],resnext50_32x4d:[46,4,1,""],shufflenet_v2_x0_5:[46,4,1,""],shufflenet_v2_x1_0:[46,4,1,""],shufflenet_v2_x1_5:[46,4,1,""],shufflenet_v2_x2_0:[46,4,1,""],squeezenet1_0:[46,4,1,""],squeezenet1_1:[46,4,1,""],vgg11:[46,4,1,""],vgg11_bn:[46,4,1,""],vgg13:[46,4,1,""],vgg13_bn:[46,4,1,""],vgg16:[46,4,1,""],vgg16_bn:
[46,4,1,""],vgg19:[46,4,1,""],vgg19_bn:[46,4,1,""],wide_resnet101_2:[46,4,1,""],wide_resnet50_2:[46,4,1,""]},"torchvision.models.detection":{fasterrcnn_resnet50_fpn:[46,4,1,""],keypointrcnn_resnet50_fpn:[46,4,1,""],maskrcnn_resnet50_fpn:[46,4,1,""]},"torchvision.models.segmentation":{deeplabv3_resnet101:[46,4,1,""],deeplabv3_resnet50:[46,4,1,""],fcn_resnet101:[46,4,1,""],fcn_resnet50:[46,4,1,""]},"torchvision.transforms":{CenterCrop:[47,1,1,""],ColorJitter:[47,1,1,""],Compose:[47,1,1,""],FiveCrop:[47,1,1,""],Grayscale:[47,1,1,""],Lambda:[47,1,1,""],LinearTransformation:[47,1,1,""],Normalize:[47,1,1,""],Pad:[47,1,1,""],RandomAffine:[47,1,1,""],RandomApply:[47,1,1,""],RandomChoice:[47,1,1,""],RandomCrop:[47,1,1,""],RandomErasing:[47,1,1,""],RandomGrayscale:[47,1,1,""],RandomHorizontalFlip:[47,1,1,""],RandomOrder:[47,1,1,""],RandomPerspective:[47,1,1,""],RandomResizedCrop:[47,1,1,""],RandomRotation:[47,1,1,""],RandomSizedCrop:[47,1,1,""],RandomVerticalFlip:[47,1,1,""],Resize:[47,1,1,""],Scale:[47,1,1,""],TenCrop:[47,1,1,""],ToPILImage:[47,1,1,""],ToTensor:[47,1,1,""],functional:[47,0,0,"-"]},"torchvision.transforms.Normalize":{__call__:[47,2,1,""]},"torchvision.transforms.ToPILImage":{__call__:[47,2,1,""]},"torchvision.transforms.ToTensor":{__call__:[47,2,1,""]},"torchvision.transforms.functional":{adjust_brightness:[47,4,1,""],adjust_contrast:[47,4,1,""],adjust_gamma:[47,4,1,""],adjust_hue:[47,4,1,""],adjust_saturation:[47,4,1,""],affine:[47,4,1,""],crop:[47,4,1,""],erase:[47,4,1,""],five_crop:[47,4,1,""],hflip:[47,4,1,""],normalize:[47,4,1,""],pad:[47,4,1,""],perspective:[47,4,1,""],resize:[47,4,1,""],resized_crop:[47,4,1,""],rotate:[47,4,1,""],ten_crop:[47,4,1,""],to_grayscale:[47,4,1,""],to_pil_image:[47,4,1,""],to_tensor:[47,4,1,""],vflip:[47,4,1,""]},"torchvision.utils":{make_grid:[48,4,1,""],save_image:[48,4,1,""]},torch:{"var":[43,4,1,""],ByteTensor:[42,1,1,""],FloatStorage:[39,1,1,""],Tensor:[42,1,1,""],__config__:[0,0,0,"-"],abs:[43,4,1,""],acos:[43,4,1,""],add:[43,4,1,""],addbmm:[43,4,1,""],addcdiv:[43,4,1,""],addcmul:[43,4,1,""],addmm:[43,4,1,""],addmv:[43,4,1,""],addr:[43,4,1,""],allclose:[43,4,1,""],arange:[43,4,1,""],argmax:[43,4,1,""],argmin:[43,4,1,""],argsort:[43,4,1,""],as_strided:[43,4,1,""],as_tensor:[43,4,1,""],asin:[43,4,1,""],atan2:[43,4,1,""],atan:[43,4,1,""],autograd:[1,0,0,"-"],baddbmm:[43,4,1,""],bartlett_window:[43,4,1,""],bernoulli:[43,4,1,""],bincount:[43,4,1,""],bitwise_not:[43,4,1,""],blackman_window:[43,4,1,""],bmm:[43,4,1,""],broadcast_tensors:[43,4,1,""],cartesian_prod:[43,4,1,""],cat:[43,4,1,""],ceil:[43,4,1,""],chain_matmul:[43,4,1,""],cholesky:[43,4,1,""],cholesky_inverse:[43,4,1,""],cholesky_solve:[43,4,1,""],chunk:[43,4,1,""],clamp:[43,4,1,""],combinations:[43,4,1,""],compiled_with_cxx11_abi:[43,4,1,""],cos:[43,4,1,""],cosh:[43,4,1,""],cross:[43,4,1,""],cuda:[8,0,0,"-"],cumprod:[43,4,1,""],cumsum:[43,4,1,""],det:[43,4,1,""],diag:[43,4,1,""],diag_embed:[43,4,1,""],diagflat:[43,4,1,""],diagonal:[43,4,1,""],digamma:[43,4,1,""],dist:[43,4,1,""],distributed:[14,0,0,"-"],distributions:[15,0,0,"-"],div:[43,4,1,""],dot:[43,4,1,""],eig:[43,4,1,""],einsum:[43,4,1,""],empty:[43,4,1,""],empty_like:[43,4,1,""],eq:[43,4,1,""],equal:[43,4,1,""],erf:[43,4,1,""],erfc:[43,4,1,""],erfinv:[43,4,1,""],exp:[43,4,1,""],expm1:[43,4,1,""],eye:[43,4,1,""],fft:[43,4,1,""],flatten:[43,4,1,""],flip:[43,4,1,""],floor:[43,4,1,""],fmod:[43,4,1,""],frac:[43,4,1,""],from_numpy:[43,4,1,""],full:[43,4,1,""],full_like:[43,4,1,""],gather:[43,4,1,""],ge:[43,4,1,""],gels:[43,4,1,""],geqrf:[43
,4,1,""],ger:[43,4,1,""],get_default_dtype:[43,4,1,""],get_num_interop_threads:[43,4,1,""],get_num_threads:[43,4,1,""],get_rng_state:[43,4,1,""],gt:[43,4,1,""],hamming_window:[43,4,1,""],hann_window:[43,4,1,""],histc:[43,4,1,""],hub:[17,0,0,"-"],ifft:[43,4,1,""],index_select:[43,4,1,""],initial_seed:[43,4,1,""],inverse:[43,4,1,""],irfft:[43,4,1,""],is_floating_point:[43,4,1,""],is_storage:[43,4,1,""],is_tensor:[43,4,1,""],isfinite:[43,4,1,""],isinf:[43,4,1,""],isnan:[43,4,1,""],jit:[19,0,0,"-"],kthvalue:[43,4,1,""],le:[43,4,1,""],lerp:[43,4,1,""],linspace:[43,4,1,""],load:[43,4,1,""],log10:[43,4,1,""],log1p:[43,4,1,""],log2:[43,4,1,""],log:[43,4,1,""],logdet:[43,4,1,""],logspace:[43,4,1,""],logsumexp:[43,4,1,""],lstsq:[43,4,1,""],lt:[43,4,1,""],lu:[43,4,1,""],lu_solve:[43,4,1,""],lu_unpack:[43,4,1,""],manual_seed:[43,4,1,""],masked_select:[43,4,1,""],matmul:[43,4,1,""],matrix_power:[43,4,1,""],matrix_rank:[43,4,1,""],max:[43,4,1,""],mean:[43,4,1,""],median:[43,4,1,""],meshgrid:[43,4,1,""],min:[43,4,1,""],mm:[43,4,1,""],mode:[43,4,1,""],mul:[43,4,1,""],multinomial:[43,4,1,""],multiprocessing:[21,0,0,"-"],mv:[43,4,1,""],mvlgamma:[43,4,1,""],narrow:[43,4,1,""],ne:[43,4,1,""],neg:[43,4,1,""],nn:[22,0,0,"-"],nonzero:[43,4,1,""],norm:[43,4,1,""],normal:[43,4,1,""],numel:[43,4,1,""],ones:[43,4,1,""],ones_like:[43,4,1,""],onnx:[36,0,0,"-"],optim:[37,0,0,"-"],orgqr:[43,4,1,""],ormqr:[43,4,1,""],pinverse:[43,4,1,""],pow:[43,4,1,""],prod:[43,4,1,""],qr:[43,4,1,""],rand:[43,4,1,""],rand_like:[43,4,1,""],randint:[43,4,1,""],randint_like:[43,4,1,""],randn:[43,4,1,""],randn_like:[43,4,1,""],randperm:[43,4,1,""],range:[43,4,1,""],reciprocal:[43,4,1,""],remainder:[43,4,1,""],renorm:[43,4,1,""],repeat_interleave:[43,4,1,""],reshape:[43,4,1,""],rfft:[43,4,1,""],roll:[43,4,1,""],rot90:[43,4,1,""],round:[43,4,1,""],rsqrt:[43,4,1,""],save:[43,4,1,""],seed:[43,4,1,""],set_default_dtype:[43,4,1,""],set_default_tensor_type:[43,4,1,""],set_flush_denormal:[43,4,1,""],set_num_interop_threads:[43,4,1,""],set_num_threads:[43,4,1,""],set_printoptions:[43,4,1,""],set_rng_state:[43,4,1,""],sigmoid:[43,4,1,""],sign:[43,4,1,""],sin:[43,4,1,""],sinh:[43,4,1,""],slogdet:[43,4,1,""],solve:[43,4,1,""],sort:[43,4,1,""],sparse_coo_tensor:[43,4,1,""],split:[43,4,1,""],sqrt:[43,4,1,""],squeeze:[43,4,1,""],stack:[43,4,1,""],std:[43,4,1,""],std_mean:[43,4,1,""],stft:[43,4,1,""],sum:[43,4,1,""],svd:[43,4,1,""],symeig:[43,4,1,""],t:[43,4,1,""],take:[43,4,1,""],tan:[43,4,1,""],tanh:[43,4,1,""],tensor:[43,4,1,""],tensordot:[43,4,1,""],topk:[43,4,1,""],trace:[43,4,1,""],transpose:[43,4,1,""],trapz:[43,4,1,""],triangular_solve:[43,4,1,""],tril:[43,4,1,""],tril_indices:[43,4,1,""],triu:[43,4,1,""],triu_indices:[43,4,1,""],trunc:[43,4,1,""],unbind:[43,4,1,""],unique:[43,4,1,""],unique_consecutive:[43,4,1,""],unsqueeze:[43,4,1,""],var_mean:[43,4,1,""],where:[43,4,1,""],zeros:[43,4,1,""],zeros_like:[43,4,1,""]},torchvision:{get_image_backend:[45,4,1,""],set_image_backend:[45,4,1,""]}},objnames:{"0":["py","module","Python module"],"1":["py","class","Python class"],"2":["py","method","Python method"],"3":["py","attribute","Python attribute"],"4":["py","function","Python function"],"5":["std","envvar","environment 
variable"]},objtypes:{"0":"py:module","1":"py:class","2":"py:method","3":"py:attribute","4":"py:function","5":"std:envvar"},terms:{"00000e":43,"0000e":[42,43],"041m":1,"048m":1,"0545e":42,"0949e":42,"10k":44,"10x7":22,"13x12":22,"1428e":43,"148m":1,"1921e":43,"1_batch_16":41,"1e6":37,"1hr":4,"1st":[15,26],"1x1":46,"20l":22,"224x224":46,"2gb":17,"2nd":[15,22,23,26,42],"2x3":[22,38],"32x4d":46,"32x8d":46,"3493e":43,"3842e":43,"3rd":[26,37,42],"3x4":22,"3xhxw":41,"4064e":43,"427l":44,"483m":1,"4842e":42,"4th":[26,44],"4us":1,"50k":44,"50x":46,"54_":41,"5751e":43,"5765e":42,"5955e":43,"5c106cde":[17,20],"5mb":46,"5x2":38,"5x7":22,"5x7x9":22,"60k":44,"640l":44,"6503e":43,"6531e":43,"727m":1,"7x7":22,"7x7x7":22,"7x9x8":22,"8000e":43,"816u":1,"8182e":42,"88131e":43,"9073e":[22,43],"abstract":[13,15],"boolean":[1,8,15,19,22,23,29,37,40,42,43,44,47],"break":[4,15,19,34,37,43],"byte":[8,15,19,37,39,42,43],"case":[1,2,8,13,14,17,21,22,23,25,26,27,28,30,32,34,36,37,38,41,42,43,44,47,49],"catch":19,"char":[39,42],"ciss\u00e9":22,"class":[1,8,13,14,15,19,21,22,23,29,30,32,33,34,36,37,38,39,40,41,42,43,44,46,47,49],"const":[31,36],"default":[1,3,7,8,14,17,20,21,22,23,24,25,27,28,29,30,35,36,37,38,39,41,42,43,44,46,47,48,49],"enum":[14,36],"export":[1,8,14,16,19,22,29,36],"final":[14,15,22,43,46,47,48],"float":[1,13,15,19,21,22,23,30,33,36,37,39,40,41,42,43,47,48,49],"function":[3,4,7,8,13,17,18,20,21,24,25,26,27,28,29,30,31,33,37,40,41,42,43,44,45],"herv\u00e9":22,"import":[1,4,5,7,8,13,14,19,21,22,25,28,29,30,31,32,33,36,37,41,42,43,44,46,47],"int":[8,13,14,15,19,21,22,23,35,36,37,38,39,40,41,42,43,44,47,48,49],"j\u00e9gou":22,"long":[4,5,13,21,22,23,26,29,30,32,36,39,40,41,42,43],"new":[1,3,5,8,13,14,15,17,19,21,22,25,28,29,31,32,35,37,39,41,42,43],"return":[0,1,3,7,8,13,14,15,16,17,19,20,21,22,23,24,27,28,29,31,35,36,37,38,39,40,42,43,44,46,47,49],"short":[19,22,23,26,39,40,42,43,47],"static":[1,19,31,36,39],"super":[13,19,22,29,36],"switch":[9,10,13,21,23,25,42,43,46],"throw":[22,42,43],"true":[1,3,7,8,11,13,14,15,17,19,20,21,22,23,25,26,28,29,30,31,33,36,37,38,39,40,41,42,43,44,46,47,48],"try":[2,4,11,14,17,19,22,23,30,32,36,37],"var":[1,22,42,43],"void":[31,43],"while":[5,13,14,15,19,22,23,25,30,32,37,41,42,43,47],Abs:36,And:[22,35,43,47],But:[1,4,19],For:[1,2,3,4,5,7,8,13,14,15,17,19,22,23,25,26,27,28,30,32,36,37,38,39,40,41,42,43,44,46,47],Going:46,Has:[22,23,43],Its:[22,37],NFS:14,NOT:[19,36,38,43],Not:[19,29],One:[14,19,22,23,26,27,31,33,36,37,41,43,44,46],Ops:[2,28,42],PRs:[4,5],RHS:43,Such:[7,13,43],That:[43,47],The:[1,3,5,7,8,13,14,15,16,17,19,20,21,22,23,24,26,27,28,30,31,32,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49],Then:[1,26,34,36,37,47],There:[1,4,5,14,17,19,22,25,28,29,30,31,32,33,34,35,36,42,43],These:[7,13,14,15,19,22,29,36,38,40,43,44,46],Use:[8,13,14,22,23,32,41,42,43,47],Used:[13,43],Useful:[8,22],Uses:8,Using:[13,15,19,22,32],WITH:36,Will:[6,14,19,43,47],With:[13,15,22,23,28,36,37,41],__background__:46,__call__:47,__config__:[18,27],__constants__:19,__dict__:37,__file__:[17,35],__getitem__:[13,44],__init__:[1,13,15,19,22,29,30,36,41],__iter__:13,__len__:[13,44],__main__:[13,26,32,35],__name__:[13,32,35],__new__:19,_back:[22,23],_bottom:[22,23],_call:15,_cat:15,_channel:[22,23],_class:22,_compilation_unit:19,_cpp_modul:19,_dependentproperti:15,_dim:22,_direct:22,_ext:35,_extra_fil:[19,31],_factor:22,_featur:[22,23],_fft:43,_force_outplac:19,_fork:27,_formatt:43,_forward_cl:1,_frames_up:19,_front:[22,23],_glibcxx_use_cxx11_abi:43,_greaterthan:15,_greaterthaneq:15,_halfopeninterv:
15,_if_scalar_type_a:36,_in:24,_index:22,_indic:[38,43],_instanc:15,_integerinterv:15,_interv:15,_invers:15,_key_padding_mask:22,_layer:22,_left:[22,23],_length:[22,43],_lessthan:15,_like:42,_load_from_state_dict:22,_log_api_usage_onc:31,_mask:22,_metadata:22,_module_class:19,_nnz:38,_onnx_master_opset:36,_onnx_stable_opset:36,_out:24,_pad:22,_qualified_nam:19,_random_sampl:22,_rcb:19,_resnet18:17,_retain_param_nam:36,_right:[22,23],_sampl:43,_scalar:36,_shape:22,_size:22,_slope:[22,23,24],_stack:15,_stacklevel:23,_tensor:42,_top:[22,23],_valu:[22,38,43],_wait:27,_weight:22,a3c:32,a_big:43,a_dict:19,a_i:22,a_l:43,a_lu:43,a_tupl:19,a_u:43,aaa:41,abc:22,abi:7,abil:[5,31],abl:[4,19,22,36,43],abnorm:[21,32],about:[1,5,8,13,19,22,27,29,30,31,32,36,42,47],abov:[1,15,17,19,22,26,27,28,29,36,37,43,44,47],abridg:30,abruptli:21,abs:[15,22,23,36,37,42,43],abs_:42,absolut:[1,5,7,22,23,42,43,47],abstransform:15,acc:46,acceler:[4,22,37],accept:[1,4,5,14,19,22,29,36,37,40,41,42,43],access:[5,13,14,21,22,25,28,30,31,40,42,49],accident:4,accimag:45,accommod:22,accompani:4,accomplish:4,accord:[22,24,35,36,37,43,44,46],accordingli:[42,44,46],accoridng:22,account:[2,22],accumul:[1,19,22,30,42,43],accumulategrad:1,accur:[8,36,43],accuraci:[41,46],achiev:[13,15,22,23,31,36],aco:[36,42,43],acos_:42,acquaint:4,across:[1,8,13,14,19,22,23,28,30,31,32,33,39,41,42,43,44],act:[15,22,47],action:[5,15,28,44],activ:[1,3,4,8,21,28,29],actual:[1,13,17,19,22,25,27,28,29,32,35,36,37],actual_input_1:36,acycl:25,adadelta:37,adagrad:[22,37],adam:[5,6,15,37],adamax:37,adamw:37,adapt:[19,22,23,37,43],adaptive_avg_pool1d:36,adaptive_avg_pool2d:36,adaptive_avg_pool3d:36,adaptive_max_pool1d:36,adaptive_max_pool2d:36,adaptive_max_pool3d:36,adaptiveavgpool1d:23,adaptiveavgpool2d:23,adaptiveavgpool3d:23,adaptivelogsoftmaxwithloss:19,adaptivemaxpool1d:23,adaptivemaxpool2d:23,adaptivemaxpool3d:23,add:[1,4,8,17,19,22,23,26,29,31,36,37,38,41,42,43,46],add_:[1,26,38,42],add_argu:28,add_audio:41,add_bias_kv:22,add_custom_scalar:41,add_embed:41,add_figur:41,add_graph:41,add_histogram:41,add_imag:41,add_mesh:41,add_modul:22,add_param_group:37,add_pr_curv:41,add_scalar:41,add_text:41,add_video:41,add_zero_attn:22,addbmm:[42,43],addbmm_:42,addcdiv:[42,43],addcdiv_:42,addcmul:[42,43],addcmul_:42,added:[4,7,22,23,31,36,37,38,41,42,43],adding:[13,14,17,19,22,29,36,42,43],addit:[1,4,5,7,15,19,22,23,27,28,29,31,32,33,35,37,38,42,43],addition:[1,13,14,15,22,30,42,43,47],additionali:22,addmm:[36,38,42,43],addmm_:42,addmv:[42,43],addmv_:42,addr:[42,43],addr_:42,address:[1,13,14,21,42,46],adher:5,adjac:[22,43],adjust:[22,47],adjust_bright:47,adjust_contrast:47,adjust_gamma:47,adjust_hu:47,adjust_satur:47,admit:28,adopt:5,advanc:[3,22,25,32,36,41],advantag:[14,22,30],adventur:36,adversari:22,advic:4,advis:[32,43],advisori:4,aeroplan:46,affect:[1,4,8,22,23,39,43],affin:[15,22,23,25,42,47],affinetransform:15,aforement:32,after:[4,7,8,13,14,17,19,21,22,24,28,30,31,32,34,37,40,41,42,43,47],afterward:[1,22],again:[3,13,14,43,44],against:[1,2,14,19,43,47],aggreg:[22,23,46],aggress:[1,25],ahead:4,aid:[4,25],aidan:22,ail:6,ailzhang:6,aim:4,airplan:46,aka:1,akin:31,alban:6,alband:6,alex:6,alfredo:6,algorithm:[4,11,12,15,22,23,43],alia:[15,42],alias:29,alican:6,alicanb:6,align:[22,23],align_corn:[22,23],aliv:[30,32],all:[1,3,4,5,7,8,13,14,15,17,19,21,22,23,25,27,28,29,30,32,33,35,36,37,38,39,40,41,42,43,44,46,47,48],all_gath:14,all_gather_multigpu:14,all_reduc:14,all_reduce_multigpu:14,allclos:[1,42,43],alloc:[1,2,8,15,21,25,28,30,40,42,43],allow:[1,4,5,7,13,14,15,1
7,19,22,25,26,27,28,31,32,36,37,40,41,42,43,44],allow_unreach:1,allow_unus:1,almost:[35,43,44],alon:19,along:[7,8,13,14,15,17,19,22,23,26,30,31,37,42,43],alpha:[15,22,23,36,37,38,42,43],alpha_f:36,alphabet:[23,43,44],alphadropout:23,alreadi:[8,13,14,17,19,20,22,29,32,36,37,39,42,43,44],also:[1,3,4,5,7,8,13,14,15,17,19,21,22,23,24,25,27,28,29,30,31,32,33,35,36,37,38,41,42,43,44,47],altern:[13,17,19,22,23,35,43],although:[4,15,22],alwai:[1,8,13,14,19,21,22,26,27,28,29,31,36,38,40,42,43],amazonaw:[20,35],ambigu:[15,22],among:[8,13,14,15,22,36,43],amount:[1,2,4,8,22,25,28,30,47,48],amplitud:37,amsgrad:37,an_error:19,anaconda:35,analog:[37,43],analogu:19,analyt:[1,15],anchor:[22,23],angl:[22,47],ani:[1,2,3,4,5,8,13,14,15,19,21,22,23,25,27,28,29,31,32,33,36,37,41,42,43,47],anm:43,ann_fil:44,anneal:37,annfil:44,annot:[1,19,44],annotation_path:44,anoth:[4,8,13,14,19,22,27,28,29,32,35,36,42,43],another_input:22,anothermodel:36,answer:[4,5,22],anticip:3,anymor:[1,14,22,42],anyon:5,anyth:[3,4,14,19],aoa:35,apaszk:[5,6],api:[1,5,8,17,19,21,28,36,38,41,42,44],aplli:47,appear:[2,14,15,22,29,37,43],append:[1,14,19,22,32,35,41,42,43],appl:46,appli:[1,3,14,15,19,21,22,23,25,29,36,37,42,43,47],applic:[8,14,15,22,25,27,28,42,47],apply_:42,apprear:43,appreci:4,approach:[14,19,21,22,43],appropri:[4,14,15,19,22,43,46],approv:5,approxim:[1,22,23,29,37],arang:[13,22,23,36,41,42,43],arbitrari:[1,14,19,22,23,25,31,42,43],arccosin:43,architechtur:22,architectur:[22,43,45,46],archiv:[19,31],arcsin:43,arctang:43,area:[4,5,23,47],arg0:1,arg1:1,arg:[1,2,3,7,14,15,17,21,22,23,28,32,34,36,39,42,43,44,47],arg_constraint:15,argmax:[22,36,42,43],argmin:[36,42,43],argpars:28,argsort:[42,43],argument:[1,2,3,7,8,13,14,15,17,19,21,22,23,26,28,29,30,31,36,37,39,40,41,42,43,44,48,49],argumentpars:28,ari:36,aris:15,arithmet:43,armand:22,around:[1,4,5,8,14,19,21,28,42,47],arrai:[13,22,23,36,39,41,42,43,44],arrang:44,array_lik:[42,43],art:43,articul:5,artifact:31,artifici:1,arxiv:[22,46,47],as_strid:[42,43],as_tensor:[41,42,43],as_tupl:43,asap:21,ascend:43,ascent:15,ascii:[8,43],asd932_:44,asgd:37,ashish:22,asin:[36,42,43],asin_:42,ask:[4,14,18],aspect:[4,47],assembl:13,assert:[13,15,36],assert_allclos:19,assign:[4,13,14,19,22,29,30,36,41,44],assign_x:19,associ:[1,8,19,22,23,40,42,43],assum:[13,14,15,19,22,23,29,31,36,37,43,47],assumpt:[22,47],ast_1:[22,23],ast_2:[22,23],astyp:36,asuhan:6,async:[14,39,42],async_op:14,asynchron:[2,22,27,39,41,42],atan2:[42,43],atan2_:42,atan:[36,42,43],atan_:42,aten:[19,27,35,43],aten_thread:27,atol:[1,19,29,42,43],atom:33,atomicadd:33,attach:17,attempt:[19,28,35,43],attend:22,attent:[4,22,35],attn:22,attn_mask:22,attn_output:22,attn_output_weight:22,attr1:36,attr1_f:36,attr2:36,attr2_i:36,attr:[15,22,23,36,43],attribut:[1,13,14,18,22,25,28,29,36,42,49],audio:[41,44],aug_add_x:19,augment:47,auto:[14,22,41],autoencod:15,autograd:[2,3,4,15,18,22,23,30,31,36,42,43],autograd_tensor:1,autom:[19,36],automat:[7,8,14,22,25,26,28,29,32,36,41,42,43],aux_logit:46,aux_loss:46,auxiliari:[17,31,46],avaialbl:17,avail:[7,8,13,14,17,19,21,22,23,28,29,35,36,43,44],averag:[1,14,22,23,37],avg:[1,47],avg_pool1d:36,avg_pool2d:36,avg_pool3d:36,avgpool1d:23,avgpool2d:23,avgpool3d:23,avmgithub:6,avoid:[5,13,15,22,23,30,33,41,42,43,47],awai:23,awar:[4,46],axbc:22,axes:36,axi:[36,42,43,47],b_hf:22,b_hg:22,b_hh:22,b_hi:22,b_hn:22,b_ho:22,b_hr:22,b_hz:22,b_if:22,b_ig:22,b_ih:22,b_ii:22,b_in:22,b_io:22,b_ir:22,b_iz:22,back:[17,32,36,38,43,47],backbon:46,backcompat:26,backend:[1,9,10,11,19,22,23,27,28,33,36,42,43,45],backend_str:14,
background:[9,10,11,22,23,32,42,43,44],backpack:46,backprop:43,backpropag:[1,15,30,37],backward:[1,3,5,10,15,22,23,24,29,30,32,33,37,38,42,43],bad:21,baddbmm:[42,43],baddbmm_:42,bag:[22,23],bai:6,balanc:[43,44],ball:46,balnta:22,banana:46,bar:[4,19,20,46],bare:7,barrier:14,bartlett:43,bartlett_window:43,base:[1,4,5,7,8,13,15,19,22,25,27,36,37,41,42,43,44,47],base_distribut:15,base_lr:37,base_momentum:37,base_se:13,basebal:46,basedistribut:15,basep:15,baseq:15,basi:[15,31,37],basic:[4,22,37,41],bat:46,batch1:[42,43],batch2:[42,43],batch:[15,22,23,28,30,32,33,36,37,41,43,44,46,47,48],batch_first:[22,30],batch_ndx:13,batch_sampl:13,batch_shap:15,batch_siz:[13,15,22,41,44],batchmean:[22,23],batchnorm1d:23,batchnorm2d:23,batchnorm3d:23,batchnorm:[22,36],batchnormnd:22,batchsampl:13,batchsiz:[22,23],batchwis:22,bbb:41,bceloss:23,bcewithlogitsloss:23,bckenstler:37,bddppq:6,beam:19,bear:46,becaus:[1,2,4,13,15,19,21,22,26,28,30,31,35,36,41,42,43],becom:[1,4,5,13,15,22,23,36,42,43],bed:46,bedroom_train:44,been:[1,8,14,15,21,22,27,32,35,37,41,43,46],befor:[1,4,8,13,14,15,19,21,22,23,25,27,28,29,31,35,36,37,38,41,42,43],beforehand:4,begin:[4,8,22,31,36,37,42,43],behav:[7,19,42],behavior:[4,7,14,17,19,22,23,26,28,36,37,42,43,46],behaviour:[1,9,10,23,42,43],behind:44,being:[1,5,13,15,19,22,23,29,32,36,42,43,47],belong:[3,8,14,15,28,37,47],below:[1,7,13,14,15,19,21,22,23,28,29,32,35,36,43,47],ben:22,bench:46,benchmark:33,benefit:[4,14,21,37],bengio:24,bernoulli:[22,23,42,43],bernoulli_:[42,43],besid:41,bessel:43,best:[1,4,13,14,18,19,21,30,37,43],beta:[22,23,36,37,38,42,43],better:[4,5,8,13,19,22,23,27,35,41,43],between:[1,4,8,14,15,19,21,22,23,28,32,33,37,39,41,42,43,44,46,47],bewar:4,beyond:[5,30,37,43],bfg:37,bfloat16:[39,42],bia:[5,22,23,29,41],bias:[22,43],bias_hh:22,bias_hh_l:22,bias_ih:22,bias_ih_l:22,bicub:[22,23,47],bicycl:46,bidirect:[22,36],big:[4,43],bij:43,biject:15,biject_to:15,bik:43,bilinear:[43,47],bin:[41,42,43],binari:[15,19,22,23,31,35,36,41,42,43,44,46],bincount:[33,42,43],bind:[7,8,36],bird:46,bit:[4,35,40,42,43,49],bitwis:[14,43],bitwise_not:[42,43],bitwise_not_:42,bjk:43,bl_flip:47,bla:27,black:47,blackman:43,blackman_window:43,blank:[22,23],blob:[31,36,41],blobnam:41,block0:[19,36],block1:19,block:[4,8,13,14,19,21,22,23,36,46],blog:4,blow:30,blue:44,bmm:[42,43],board:5,boat:46,bodi:19,boil:4,book:46,bool:[1,3,8,13,14,15,17,19,20,21,22,23,36,37,39,40,41,42,43,44,46,47,48],booltensor:[40,42],bootcamp:4,bootstrap:35,border:[23,47],both:[1,8,13,14,15,19,22,23,26,29,33,36,38,42,43,44,46,47],bottl:46,bottleneck:[18,46],bottom:[1,23,47],bound:[2,22,23,24,34,37,42,43],boundari:[22,23,37,44],bowl:46,box:46,bozkurt:6,bptt:30,br_flip:47,branch:[4,17,19,46],brand:14,bregman:15,breviti:[1,36],brief:21,bright:[44,47],brightness_factor:47,broadcast:[8,14,15,18,22,36,42,43],broadcast_buff:22,broadcast_coalesc:8,broadcast_multigpu:14,broadcast_tensor:43,broadcast_warn:26,broader:[5,31,43],broccoli:46,broken:4,brokenpipeerror:35,brown:44,bucket:22,bucket_cap_mb:22,buf:22,buffer:[1,2,8,13,19,22,25,29,43],buffer_s:8,bug:[5,32],bugfix:4,build:[7,14,15,19,23,25,41,47],build_directori:7,build_ext:7,buildextens:7,built:[4,14,19,27,32,37,43],builtin:43,bulk:13,bump:22,bundl:31,bus:46,byclass:44,bymerg:44,bypass:28,byte_arrai:43,bytecod:13,bytesio:[19,43],bytetensor:[8,22,40,42,43],bz2:44,c10:31,c10_log_api_usage_onc:31,c99:35,c_0:22,c_1:22,c_j:22,c_n:22,c_t:22,cach:[8,15,20,21,22,30,43,46],cache_s:15,caffe2:[36,41],cake:46,calcul:[1,3,13,22,23,26,35,37,43],calculate_gain:24,call:[1,7,8,13,14,15,17,21,22,2
3,27,28,29,30,31,32,35,36,37,40,41,42,43,46,48,49],callabl:[13,15,17,19,37,42,43,44],callback:31,caller:28,camera:[31,41],can:[1,2,3,4,5,7,8,11,12,13,14,15,16,17,19,21,22,23,25,26,27,28,29,30,31,32,33,34,35,36,37,38,40,41,42,43,44,46,47,49],candid:1,cannot:[1,13,15,17,19,22,23,35,38,39,42,43],cap:44,capabl:[8,14,31,43],capac:28,capacit:15,captur:[8,19,36],car:46,card:35,cardin:15,care:[4,7,15,21,22,28,30,32,38,43],carlo:15,carri:26,carrier:15,carrot:46,cartesian:[15,43],cartesian_prod:43,cast:[1,22,23,36,39,42,43],cat:[15,19,22,36,38,42,43,44,46],categor:[4,23],categori:[15,43,44,46],categorynam:41,cattransform:15,cauchi:[42,43],cauchy_:[42,43],caught:21,caus:[1,3,13,14,19,21,23,26,30,32,35,36,42,43],caveat:[21,28],ccc:41,cdf:15,cdot:[22,23,43],ceil:[13,22,23,36,42,43],ceil_:42,ceil_mod:[22,23],cell:[22,46],center:[23,37,41,42,43,47],center_flip:47,centercrop:47,central:[31,47],cerr:31,certain:[13,14,19,22,23,26,31,38,43],certainli:43,chain:[1,13,15,22,25,42,43,47],chain_matmul:43,chaindataset:13,chair:46,challeng:4,chanan:[5,6],chanc:[4,15],chang:[1,4,8,15,19,21,22,23,25,26,28,35,36,37,38,39,41,42,43,46,47],channel:[5,13,22,23,24,36,41,44,46,47],charact:[23,43],chart:41,chartensor:[40,42],chartnam:41,cheap:[15,22],cheaper:13,check:[2,7,8,13,14,15,17,22,29,30,31,36,37,41,42,43,44],check_compiler_abi_compat:7,check_input:19,check_model:36,check_reduct:22,check_sparse_nnz:1,check_toler:19,check_trac:19,checker:[19,36],checkout:36,checkpoint:[1,17,18,20,22,43],checkpoint_sequenti:3,child:[13,21,22,35],children:[21,22],chintala:[5,6],choic:[19,22,27,36],choleski:[15,42,43],cholesky_invers:[42,43],cholesky_solv:[42,43],choos:[1,24,41],chosen:[43,47],christian:6,chrome:1,chunk:[3,8,13,19,22,42,43],chunk_siz:8,church_train:44,chw:41,cifar100:44,cifar10:44,cifar:45,circleci:4,circular:23,circumst:[11,19,22,23],cityscap:45,claim:4,clamp:[23,36,42,43],clamp_:42,clamp_max:36,clamp_min:36,class_i:44,class_index:[13,44],class_x:44,classif:[22,23,24,44,45],classifi:[25,36,37,41],classmethod:[8,22],clean:[8,14,17,21],cleaner:25,clear:[17,22,28,37],click:43,clip:[22,44],clip_valu:22,clock:46,clockwis:47,clone:[1,13,21,23,38,39,42,43],cloned_coeffici:43,close:[8,29,41],closest:[23,43],cloud:41,clp:44,clr:[37,43],cluster:[22,41],clutter:41,cmake:35,cmake_gener:35,cmake_include_path:35,cmdclass:7,cmyk:47,cnn:[22,25],coalesc:[8,38,42],coars:44,coco:[45,46],coco_instance_category_nam:46,coco_person_keypoint_nam:46,coco_util:46,cococapt:44,cocodetect:44,code:[1,2,5,7,13,14,15,17,22,26,29,30,31,32,33,35,36,37,38,40,42,43],codebas:5,codec:43,codomain:15,coeffici:[37,43],cohes:5,col2im:22,col:[43,44],colesburi:[5,6],collat:13,collate_wrapp:13,collect:[1,4,13,37,41,43,44],color:[22,41,44,47],colorjitt:47,colors_tensor:41,column:[1,22,23,24,42,43,47],com:[4,5,20,35,36],combin:[13,19,22,23,28,36,43],combinations_with_replac:43,come:[4,13,22,31,44],comm:8,comma:[14,43],command:[1,2,35],comment:[4,19,29,41],commit:[4,5,17,33],committ:5,common:[13,22,28,30,32,43,44,45,47],commonli:[14,15,37,40],commun:[4,5,18],compani:5,compar:[1,3,13,19,22,29,35,41,43],comparison:29,compat:[7,13,15,19,21,39,42,43,44],compil:[7,19,27,31,35],compilationunit:19,compiled_with_cxx11_abi:43,complementari:[43,47],complet:[4,8,14,21,25,33,43,47],complex:[4,22,32,43,47],complic:[2,26],compon:[4,14,15,22,31,43],compos:[15,19,22,23,36,41,43,47],composetransform:15,composit:[15,19],compris:3,comput:[3,4,8,13,14,15,19,22,23,25,27,28,29,30,33,36,37,38,42,45,46,47,48],compute_uv:[42,43],compute_z:27,concat:[22,36],concatdataset:13,concaten:[7,8,13,22,
23,43],concentr:15,concentrarion:15,concentration0:15,concentration1:15,concept:[4,36,40],conceptu:[1,25],concern:[13,21],concret:[15,19,22,23,32],concurr:[27,28],cond:36,conda:[35,36,43],condit:[1,12,19,22,29,36,41,42,43],condition:1,conduct:[5,22],confer:5,confid:[4,41],config:35,config_dict:41,configur:[0,4,13,14,22,28,35,41,43,46],confirm:[4,19,36],conform:22,conjug:[37,43],conjunct:[13,23],connect:[14,21,22,25,46],connectionist:[22,23],conquer:43,consecut:[14,42,43],consensu:4,consid:[17,19,22,23,26,29,30,37,42,43,44],consider:[4,22],consist:[13,19,36,37,43,45],consol:41,constant:[13,22,23,29,36,37,43,47],constant_:24,constantpad2d:23,constantpadnd:36,constrain:[15,22],constraint:22,constraint_registri:15,constraintregistri:15,construct:[1,13,15,22,25,32,38,40,41,42,43,46],construct_transform:15,constructor:[7,13,22,28,38,42,46,49],consum:[13,16,21,32,36,41],consumpt:[1,41],contain:[1,3,7,8,13,14,15,19,23,25,29,30,31,36,37,39,40,41,42,43,44,46,47],content:[4,19,20,21,37,41,42,43],contenti:5,context:[1,8,21,22,28,29,31,32,36,43],contigu:[22,23,39,42,43],continu:[13,15,19,22,36,42,43],continuum:35,contract:43,contrail:44,contrain:43,contrari:[4,27],contrast:[15,37,46,47],contrast_factor:47,contribut:[1,5,18,22,23,43],contributor:[4,5],control:[13,19,22,25,27,28,32,36,43,47],conv1:[19,22,41],conv2:[19,22],conv2d:[19,36,41],conv4:22,conv5:22,conv:[19,22,24,36],conveni:[4,7,17,19,28,29,31],convent:[1,20,22,36,42,43],converg:37,convers:[4,25,36,42,45],convert:[1,13,19,22,29,36,41,42,43,47],convert_sync_batchnorm:22,convolut:[24,27],convolv:[22,23],convtranspos:22,convtranspose1d:23,convtranspose2d:23,convtranspose3d:23,coo:[38,40,42,43],cooldown:37,coordin:[4,15,38,41,42,43,47],cope:32,copi:[4,8,13,14,19,21,22,26,28,32,39,42,43],copy_:[1,19,22,28,39,42],core:[4,19,27,36],corner:[22,23,47],corpor:[4,5],correct:[2,4,14,15,19,22,39,42,43,47],correctli:[3,14,19,22,23,32,36],correl:[1,15,22],correspond:[1,4,8,13,15,17,19,22,23,29,31,36,37,39,41,42,43,47],corrupt:[22,32,44],cos:[22,36,37,41,42,43],cos_:42,cosh:[42,43],cosh_:42,cosin:[22,23,37,43],cosineannealinglr:37,cosineembeddingloss:23,cost:[1,2,11,22,23,43],couch:46,could:[2,4,8,13,15,21,35,43],couldn:[35,36],count:[1,8,15,43],count_include_pad:[22,23],counter:[1,8,21,22,25,47],counterpart:43,coupl:[31,33],cours:[2,17,37],courtesi:15,cov_diag:15,cov_factor:15,covari:[15,22,47],covariance_matrix:15,cover:[29,31,44],coverag:4,cow:46,cpp:[4,5,7,43],cpp_extens:[18,29],cpp_sourc:7,cppdoc:4,cppextens:7,cprofil:2,cpu:[1,2,8,14,18,19,21,22,23,28,32,33,35,36,39,40,42,43],cpu_model:19,cpu_tim:1,cpu_time_tot:1,cpuhrsch:6,crack:4,crash:[21,41],crcv:44,creat:[1,3,4,7,8,13,14,15,21,22,25,28,32,36,39,41,42,43,44,49],create_extens:35,create_graph:[1,42],creation:[1,8,13,19,21,22,28,42,44],creator:25,crelu:22,criterion:[22,23,30],critic:22,crop:[46,47],cross:[4,15,22,23,28,35,42,43],crossentropyloss:23,crossmaplrn2d:19,csrc:[35,36],ctc_loss:[22,33],ctcloss:23,ctx:[1,29],cube:[22,43],cubla:8,cublashandle_t:8,cuda0:[28,42],cuda100:35,cuda101:35,cuda1:40,cuda2:28,cuda80:35,cuda90:35,cuda92:35,cuda:[1,2,3,7,9,10,11,13,14,18,19,22,23,29,33,36,37,39,40,42,43,46],cuda_extens:7,cuda_hom:7,cuda_launch_block:28,cuda_prefix:35,cuda_runtim:7,cuda_sourc:7,cuda_tim:1,cuda_time_tot:1,cuda_visible_devic:[8,28],cudaev:1,cudaeventsynchron:8,cudaextens:7,cudart:[7,35],cudastreamsynchron:8,cudastreamwaitev:8,cudnn:[11,12,22,23,46],cufft:43,cufft_plan_cach:28,cuh:7,cultur:5,cumprod:[42,43],cumsum:[42,43],cumul:[15,22,23,43],cup:46,cur:37,curl:35,current:[1,3,5,7,8,13,14,19,21,22,
23,28,31,33,35,36,37,38,39,40,41,42,43,44],current_blas_handl:8,current_datetime_hostnam:41,current_devic:[8,40],current_stream:8,curv:41,custom:[7,13,14,21,22,31,35,37,42],custom_decod:22,custom_encod:22,custom_loop:36,custom_op:36,cut:4,cutoff:[22,43],cxx:7,cycl:37,cycle_momentum:37,cyclic:[37,47],cycliclr:37,d_1:[22,23],d_2:[22,23],d_k:[22,23],d_model:22,daemon:21,dag:1,dai:4,dampen:37,dark:47,darker:47,dart:44,data1:44,data2:44,data:[1,4,12,14,15,18,19,21,22,23,25,26,28,29,31,32,34,35,36,37,38,39,40,41,42,43,44,47],data_load:[32,37,44],data_parallel:30,data_ptr:[39,42],data_sourc:13,databas:[13,44],dataformat:41,dataload:[13,22,28,30,35,37,41,42,44],dataparallel:[14,30,32],dataset:[18,22,30,31,35,37,41,45,46,47],dataset_it:13,datasetfold:45,datatyp:[22,36,43],datetim:14,datset:44,david:[6,22],dcgan:36,ddp:22,ddp_sync_bn_network:22,deactiv:47,deadlock:[14,22],deal:[4,21,30,43,47],dealloc:[21,28,30],debug:[1,2,13,14,25,27,35,36],decai:[22,37],decemb:43,decent:13,decid:[2,4,44],decis:19,declar:[1,7,13,19,36],decod:[16,22,43],decoder_lay:22,decomposit:[15,43],deconvolut:[22,23],decor:[1,15,19],decoupl:[22,37],decreas:[15,22,37,43],decreasingli:22,deep:[4,5,18,22,24,37,46],deeper:46,deeplabv3_resnet101:46,deeplabv3_resnet50:46,def:[1,13,15,17,19,22,27,29,30,32,35,36,37,42,47],default_gener:43,default_load:44,default_stream:8,defin:[1,7,13,15,17,21,22,23,35,36,37,38,42,43,44,47],define_macro:35,definit:[4,13,15,17,19,22,23,36,41,43,46],degre:[15,22,43,47],del:[21,30],delet:[14,17,21],deliv:5,delta:[15,22,24,37],delv:24,demand:[8,31],demonstr:22,denomin:[22,37,43],denorm:43,denot:[1,15,19,22,37],dens:[22,38,40,42,43,46],dense_dim:[38,42,43],densenet121:46,densenet161:46,densenet169:46,densenet201:46,densenet:36,densiti:15,depend:[1,2,3,13,14,15,17,19,21,22,23,27,28,33,36,37,38,41,42,43,46],dependent_properti:15,deploi:[4,31],deploy:18,deprec:[14,22,23,26,36,39,42,43,47],depth:[8,22,23,46,47],depthwis:22,dequant:42,deriv:[1,5,19,22,29,42,43],derivedp:15,derivedq:15,descend:[22,42,43],descent:[15,37],describ:[3,4,8,13,19,22,23,24,30,31,36,42,43,46],descript:[0,4,7,19,28,29,31,36,49],descriptor:[13,22,36,44],deseri:[20,43],design:[1,4,5,13,15,17,20,46],desir:[8,13,14,15,22,23,28,36,38,39,42,43,47],desmaison:6,despit:19,destin:[8,14,22,39,42,43],destroi:22,destructor:21,det:[15,42,43],detach:[1,19,22,23,30,42,43],detach_:[1,42],detail:[0,1,4,8,13,15,19,22,23,29,30,31,37,38,41,42,43,46,47],detect:[3,7,14,21,36,45],detect_anomali:1,detector:22,determin:[1,5,7,8,13,15,22,23,28,33,41,43,47],determinist:[3,11,15,19,22,23,33,37,43],dev:5,dev_idx:14,develop:[28,31,36],deviat:[15,22,24,42,43,47],devic:[1,3,8,14,19,22,23,30,33,36,37,39,42,43],device_count:[8,14],device_id:[22,23,43],device_of:8,devito:6,df1:15,df2:15,dfrac:[22,23,42],diag:[15,42,43],diag_emb:[42,43],diagflat:[42,43],diagn:15,diagnost:19,diagon:[15,23,42,43],dict:[15,20,22,29,36,37,41,43,46],dictat:22,dictionari:[7,13,15,22,23,36,37,41,44,46],did:[1,4,19],didn:[25,29,35,37],dies:21,dieterich:6,diff:[4,19],differ:[1,3,7,8,13,14,15,17,19,21,22,23,26,27,28,29,32,33,35,36,37,38,40,41,42,43,44,46],differenti:[15,22,23,25,29,30,42],difficult:[1,4],difficulti:[4,24],digamma:[42,43],digamma_:42,digit:[20,31,43,44],dilat:[22,23,36],dim0:[42,43],dim1:[42,43],dim2:[42,43],dim:[8,15,19,22,23,30,36,38,42,43],dim_arang:36,dim_feedforward:22,dimems:42,dimens:[1,8,13,15,19,22,23,24,26,30,38,40,41,42,43],dimension:[1,15,22,23,24,26,39,40,42,43],dims_i:36,dine:46,diningt:46,dir:[17,36,44],dirac:24,dirac_:24,direct:[4,5,22,25,29,43,47],directli:[4,5,7,13,14,1
5,19,22,23,28,31,32,36,38,41,43],directori:[7,14,20,31,34,41,44,46],dirnam:17,dirti:25,disabl:[22,28],disable_cuda:28,disable_jit_exampl:19,disadvantag:19,discard:[17,19],discourag:[1,8,25],discov:14,discrep:43,discret:[15,22,23,42,43],discrimin:22,discuss:[5,15],disjoint:19,disk:[1,13,41,43],dispatch:[14,36],displai:[20,23,36,46,48],displaystyl:43,dissimilar:22,dist:[14,15,42,43],distanc:[37,43,44],distinct:43,distort:47,distortion_scal:47,distribut:[13,18,24,38,41,42,43],distributed_test:14,distributeddataparallel:[13,14],distributedsampl:13,div:[22,36,38,42,43],div_:[38,42],div_valu:22,diverg:[19,22,23,36],divid:[3,8,22,23,43],dividend:43,divis:[13,22,23,43],divisor:[22,23,42,43],divisor_overrid:[22,23],dlibenzi:6,dll:35,dlpack:18,dltensor:16,dmytro:[5,6],dnn:27,do_constant_fold:36,doc:[2,21,22,29,36,41],doc_str:36,docstr:[7,17],document:[8,13,14,17,19,21,22,29,30,36,43,48],doe:[1,2,3,4,5,8,14,15,17,19,21,22,23,26,27,28,32,36,38,42,43,45,47],doesn:[1,3,4,8,13,14,19,22,23,26,29,31,32,35,36,37,43],dog:[44,46],doing:[4,13,19,23,35],domain:[5,15],don:[1,2,4,14,17,21,22,23,25,29,30,32,35,36,37,42,43,47],done:[13,15,19,21,22,30,33,36,42,43,47],donut:46,dot:[22,42,43,47],doubl:[1,22,23,29,39,40,42,43],doubler:1,doubletensor:[40,42,43],dow:41,down:[1,4,13,15,23,32,41],download:[20,35,41,44,46],downsampl:22,doxygen:4,dp_m:30,dpotri:43,draw:[13,41,42,43],drawn:[13,24,42,43],drier:46,drive:[5,14],driven:5,drop:[13,22,32,44],drop_last:13,dropout:[3,36],dset:44,dst1:8,dst2:8,dst:14,dst_tensor:14,dst_type:22,dtype:[12,13,19,22,23,28,29,36,38,39,41,42,43,47,49],due:[2,3,4,15,19,22,28,33,43],dummi:13,dummy_input1:36,dummy_input2:36,dummy_input:36,dump:35,dump_patch:22,duplic:[13,22,30,38,42,43],dure:[1,3,7,14,19,22,23,27,28,31,36,38,42,43,46],dynam:[7,13,19,24,36,37,43],dynamic_ax:36,dynamic_threshold:37,dzhulgakov:[5,6],each:[1,3,7,8,13,14,15,16,17,19,22,23,24,25,26,27,28,29,30,31,32,36,37,38,40,41,42,43,44,46,47,48],eager:43,eagerli:8,earli:[19,22],earlier:[1,30,36],eas:[1,27],easi:[13,19,30,31,32,36],easier:[4,13,19,22,26,29,43],easili:[4,9,10,14,22,23,37,41,42,43],ecosystem:31,edg:[1,23,47],edgeitem:43,edouard:22,edu:[22,44],edward:[5,6],effect:[1,4,7,13,17,19,22,23,28,37,39,41,42],effici:[1,13,15,22,25,29,38,40,42,43,46],eig:[42,43],eigenvalu:43,eigenvector:[42,43],eight:20,einstein:43,einsum:43,either:[1,7,13,14,15,17,19,22,23,24,26,28,29,31,32,36,37,42,43,49],elaps:8,elapsed_tim:8,eleg:32,element:[1,8,13,14,15,22,23,24,26,27,38,39,40,41,42,43,44,47],element_s:[39,42],elementari:43,elementwis:[8,22,23,43],elementwise_affin:22,eleph:46,elf:30,elif:19,elimin:[14,42,43],ell:22,ell_c:22,ellips:43,ellipsi:43,elman:22,els:[4,7,13,15,19,21,22,23,28,29,39,42,43,44,47],elsewher:[17,43],elu:36,elu_:23,embed:[27,36,41],embed_dim:22,embedding_bag:33,embedding_dim:[22,23],embedding_matrix:23,embedding_sum:22,embeddingbag:23,emerg:35,emit:[1,7,19,44],emit_nvtx:[1,2],emnist:45,empir:22,emploi:37,employe:5,empti:[14,19,22,23,24,26,28,36,38,42,43],empty_cach:[8,28],empty_lik:43,emptydatastructur:19,enabl:[1,12,13,14,22,26,27,28,31,35,37,42,43],enable_grad:[1,43],enable_tim:8,encod:[14,19,22,29,43],encoder_lay:22,encount:[14,22,23,43],encourag:5,end:[4,5,8,13,14,19,21,22,30,35,42,43],end_dim:[42,43],end_ev:8,endl:31,endocd:22,endpoint:47,enforc:22,enforce_sort:22,engin:[1,42,43],enhanc:47,enough:[19,21,25,29,35,37,43,47],enqueu:[8,28],ensur:[1,2,4,5,13,14,19,20,21,22,25,28,32,33,42,43,46],enter:14,entir:[3,4,7,13,19,22,23,30,31,34,47],entiti:44,entranc:4,entri:[1,14,15,25,37,38,41,43,44],entropi:[15,22,23],entrypoi
nt:21,entrypoint_nam:17,enumer:[13,15,22,28,35,41],enumerate_support:15,env:[14,15],enviro:4,environ:[4,7,15,17,19,20,27,28,35,46],environment:8,epoch:[13,37,41],eps:[1,22,23,29,37,43,49],epsilon:[22,23,43],eq_:42,equal:[8,14,15,22,23,26,41,42,43],equal_nan:[42,43],equat:[43,47],equival:[3,13,15,19,22,23,36,40,42,43],eras:47,erf:[36,42,43],erf_:42,erfc:[42,43],erfc_:42,erfinv:[42,43],erfinv_:42,errno:35,error:[1,4,13,15,19,21,22,23,25,28,29,36,42,43,46],especi:[5,13,14,23,25,36,42,43],essenti:[13,35],estim:[15,22,37,43],eta:37,eta_:37,eta_min:37,eta_t:37,etaminu:37,etapli:37,etc:[13,14,15,19,22,29,30,32,36,37,41,43],eth0:14,eth1:14,eth2:14,eth3:14,ethernet:14,euclidean:23,eval:[19,22,36,46],evalu:[2,15,22,23,25,29,37,43,44,46],even:[1,13,14,19,23,28,29,30,32,33,40,42,43],event:[1,15,19,21,41],event_dim:15,event_file_writ:41,event_nam:31,event_shap:15,eventfilewrit:41,eventlist:1,eventu:[1,4],everi:[1,13,14,15,22,23,25,28,29,36,37,39,41,42,43,44,46],everyon:4,everyth:[4,13,15,19,21],everytim:14,everywher:[23,43],evid:4,evolv:36,exact:[1,22,24,32,34,38,43],exactli:[1,7,14,15,22,23,25,28,36,43,44],examin:36,exampl:[1,3,4,7,8,13,14,15,17,19,20,22,23,24,25,26,27,28,29,30,31,32,35,37,38,40,41,42,43,44,46,47,48],example_forward_input:19,example_input:19,example_output:36,example_weight:19,except:[1,3,4,14,15,19,21,22,23,28,35,36,38,42,43,46,47],exchang:[14,43],exclud:[19,22,23,43,44],exclus:[13,14,15,25,43],execut:[1,2,3,4,7,8,13,14,19,22,25,26,27,30,32,33,35,36,43],exist:[1,4,5,7,13,14,15,17,19,21,22,26,31,36,42,43,44],exit:[1,2,21,22,32,36],exp1:41,exp2:41,exp:[1,15,22,23,36,42,43],exp_:42,exp_famili:15,exp_rang:37,expand:[1,5,15,17,26,36,42,43,47],expand_a:[29,36,42,43],expans:47,expect:[1,4,13,14,19,22,23,30,37,41,43,44,46,47],expens:[2,13,15,31],experi:[22,41],experiment:[35,36,38,40],expert:5,explain:[17,28],explan:29,explicit:[28,36,43],explicitli:[8,14,19,23,28,31,36,38,43],explod:[22,41],explor:17,expm1:[42,43],expm1_:42,expon:[15,22,23,42,43],exponenti:[22,42,43],exponential_:[42,43],exponentiallr:37,export_chrome_trac:1,export_param:36,export_raw_ir:36,expos:[1,28,31],express:[25,42,43],exptransform:15,ext:[20,44],ext_modul:7,extend:[1,15,18,22,31,32,36],extens:[7,15,43,44],extension_kernel:7,extent:27,extern:[19,27,35],extra:[4,14,19,22,29,30,31,43,44],extra_cflag:7,extra_compile_arg:[7,35],extra_cuda_cflag:7,extra_fil:19,extra_include_path:7,extra_ldflag:7,extra_repr:[22,29],extract:[22,23],extrafilesmap:[19,31],extrem:[2,41],extrud:30,eye:[15,43],eye_:24,eyes:4,ezyang:[5,6],f_t:22,face:41,facebook:5,faces_tensor:41,facil:[21,43],facilit:[17,19,43],fact:[1,19,29,42,43],factor:[15,22,23,24,37,43,47],factori:[1,15,28],fail:[1,4,15,19,21,32,35,36,43],failur:[1,5,14,15,19,21,22],fake:44,fakedata:45,fall:[4,22,23,36,43],fallback:14,fals:[1,3,7,8,13,14,15,17,19,21,22,23,25,30,33,36,37,39,41,42,43,44,46,47,48],famili:15,familiar:[19,25],fan:24,fan_in:24,fan_out:24,fang:6,faq:[13,18,22],far:23,fashion:[4,14,19,42,45],fashionmnist:44,fast:[4,13,22,28,40,43],fast_forward:43,faster:[13,22,23,28,45],fasterrcnn_resnet50_fpn:46,fastest:[22,43],fatal:[21,32],favor:[8,22,23,43,47],favour:43,fcn:46,fcn_resnet101:46,fcn_resnet50:46,fcntl:14,featur:[5,18,19,22,23,29,30,36,41],featuredropout:36,fed:41,fedyunin:6,feed:[19,30],feedback:4,feedforward:[22,24],feel:4,feng:6,fetch:13,few:[1,4,17,25,30,35,42,43],fewer:[15,26,38,42,43,46],ffi:35,fft:[28,42,43],field:[4,14,22,23,32,36,46],figur:[4,27,36,41],file:[1,4,5,7,8,13,17,19,20,31,35,36,39,41,43,44,48],filenam:[7,19,20,39,41,48],filename_suffix:41,fileno:3
6,filesytem:[17,20],fill:[14,22,23,24,28,42,43,47],fill_:[22,39,42,43],fill_diagonal_:42,fill_row_zero:19,fill_valu:[22,28,42,43],fillcolor:47,filter:[22,23,42,43,47],financi:5,find:[1,4,5,7,14,17,21,22,27,28,29,30,32,36,40,41,43],find_unused_paramet:22,fine:[7,14,17,19,21,22,25,37,44,47],finetun:[22,25],finish:[14,28,35],finit:[1,2,15,29,43],fire:[31,46],first:[1,2,3,4,5,7,8,13,14,15,19,20,21,22,23,28,30,32,34,35,36,37,38,41,42,43,44,47],fisher:15,fit:[1,37,42,43],five_crop:47,fivecrop:47,fix:[19,22,23,30,32,33,35,36,43,44],flag:[1,7,19,21,22,25,28,41,42,43,47],flat:[36,43],flatten:[24,36,42,43,47],flickr30k:44,flickr8k:44,flickr:45,flip:[42,43,47],float16:[12,22,40,42,43,49],float32:[13,22,23,36,40,42,43,49],float64:[22,40,42,43,49],floatstorag:39,floattensor:[1,14,22,38,40,42,43,46,47],floor:[22,23,36,42,43],floor_:42,flow:[19,23,25,36],flush:[1,19,41,43],flush_sec:41,fly:[13,44],fmod:[42,43],fmod_:42,focu:37,focus:19,fold:[19,36,44],folder:[4,7,13,17,41,44],folk:4,follow:[1,4,5,12,13,14,15,17,19,20,21,22,23,24,26,27,28,30,32,35,36,38,40,42,43,44,46,47,49],foo:[17,19,36],foo_forward:36,foomodel:36,foomodul:36,foral:42,forc:[1,7,8,17,19,28,41],force_reload:17,forev:21,forg:[35,36],forget:[19,22],forgotten:35,fork:[13,21,22,27,30,32,35,46],forkingpickl:35,forkserv:[21,22,32],form:[1,4,5,13,15,19,22,23,29,33,36,37,41,43],format:[1,12,17,19,22,29,36,38,40,41,42,43,44,46],former:22,formul:[22,23],formula:[1,15,22,23,29,37,43],forth:[17,37],fortun:30,forum:[4,5,30,32],forward:[1,3,7,19,21,22,23,24,25,27,28,29,30,33,36,38,43],found:[19,22,32,41,43,46],four:47,fourier:43,fp16:22,fp32:22,fpn:46,fps:41,frac:[15,22,23,24,37,42,43,47],frac_:42,fraction:[13,22,24,43,47],frame:[41,43,44],frames_per_clip:44,framework:[4,5,15,36,37],frank:15,free:[1,4,14,15,24,25,30,32,35],freed:[1,21,28,42],freedom:15,freez:[22,25],freeze_support:35,frequenc:[22,23,37,43],frequent:[4,18,22],fresh:17,frisbe:46,fritz:6,fritzo:6,fro:[42,43],frobeniu:43,from:[1,4,5,7,8,13,14,15,19,21,22,23,24,28,29,30,31,32,37,38,40,41,42,43,44,46,47,48],from_buff:39,from_dlpack:16,from_fil:39,from_ipc_handl:8,from_numpi:[42,43],from_pretrain:22,front:[22,42],frozen:[25,35,37],full:[13,14,15,17,19,22,23,36,43,44],full_lik:[36,43],fulli:[13,14,22,25,28,29],func:[1,19,42],functioneventavg:1,functor:15,fundament:4,further:[1,5,7,14,22,32,33,41,43],furthermor:[7,22,33],fuse:47,fusion:19,fut:27,futur:[1,8,19,23,27,36,37,38,42],g_cpu:43,g_cpu_oth:43,g_cuda:43,g_t:22,gain:[5,24,47],galleri:4,gamma:[22,37,43,47],gamma_:43,gan:22,gap:43,garbag:13,gate:[22,23],gather:[8,14,30,31,36,42,43],gather_list:14,gaussian:[15,23],gchanan:[5,6],ge_:42,gel:[42,43],gemm:[27,36],gen_non_contig_grad_output:1,gener:[1,4,7,13,14,15,19,22,23,24,28,30,31,33,35,36,37,38,40,41,42,44,45,46],generate_square_subsequent_mask:22,geometr:[23,42,43],geometri:[28,43],geometric_:[42,43],geq:[22,23,24,43],geqrf:[42,43],ger:[42,43],gesdd:43,gesvd:43,get:[1,7,8,13,14,19,21,22,25,29,30,31,36,37,41,42,43,44,45],get_all_sharing_strategi:21,get_backend:14,get_context:32,get_default_dtyp:[43,49],get_devic:[38,40,42],get_device_cap:8,get_device_nam:8,get_image_backend:45,get_info:[42,43],get_input:36,get_lr:37,get_num_interop_thread:[27,43],get_num_thread:[27,43],get_rank:14,get_rng_stat:[8,43],get_rng_state_al:8,get_sharing_strategi:21,get_stat:43,get_worker_info:13,get_world_s:14,getenv:31,getsourc:31,gil:[13,14,28],gimelshein:6,giraff:46,girshick:22,github:[4,5,17,29,36,37],give:[1,2,4,13,17,19,21,22,25,28,29,36,37,43,44,47],given:[1,4,5,7,8,13,14,15,19,20,21,22,23,24,27,29,31,36,37,3
8,41,42,43,44,47,48],glass:46,global:[3,13,14,15,19,31,32,41,43,46],global_step:41,globalcontext:35,gloo:[14,22],gloo_socket_ifnam:14,glorot:24,glove:46,glu:36,goe:[22,30],going:[4,14,21,25,27,31,35,41],goldsborough:6,gomez:22,good:[4,7,17,21,22,27,31,43],govern:[4,18],gpu1:22,gpu:[1,2,4,8,12,13,18,19,28,33,35,37,39,42,43,46],gpu_model:19,gpudirect:14,grad:[1,3,15,19,22,32,38,42],grad_bia:29,grad_fn:[1,25,38,42],grad_input:[22,29,35],grad_output:[1,22,29,35],grad_tensor:1,grad_vari:1,grad_weight:29,gradcheck:[1,29,43],gradgradcheck:1,gradient:[3,13,14,15,19,22,23,25,29,30,37,38,42],graham:22,grai:47,grain:[14,25,47],grangier:22,grant:5,graph:[1,3,15,22,25,29,36,41,42,43],graphic:35,graphroot:1,grave:22,grayscal:[41,47],great:4,greater:[2,22,23,25,36,43],greater_than:15,greater_than_eq:15,greaterthan:15,greaterthaneq:15,greg:[5,6],gregori:6,grep:30,grid:[23,41,43,48],grid_i:43,grid_x:43,gross:[5,6],ground:[4,41,46],group:[1,5,17,20,21,22,23,36,37,41,42,43],group_by_input_shap:1,group_nam:14,grow:[4,38],gru:19,gt_:42,gtcoars:44,gtfine:44,guarante:[3,13,14,15,22,27,33],guard:32,guid:[1,13,18],guidanc:4,guidelin:[5,46],gumbel:23,h_0:22,h_1:22,h_i:22,h_n:22,h_t:22,hack:4,had:[4,19],hadamard:22,hair:46,half:[15,22,23,37,39,40,42,43],half_cauchi:15,half_norm:15,half_open_interv:15,halfopeninterv:15,halftensor:[40,42],ham:[23,43],hamiltonian:15,hamming_window:43,hand:[1,2,19,22,36,43],handbag:46,handi:28,handl:[1,3,8,13,14,17,21,22,23,28,30,32,36,42,43,44],handler:31,hang:22,hann:43,hann_window:43,happen:[1,4,5,14,15,21,22,29,30,32,35,42],happi:4,hard:[1,4,19,22,23,25,36],harder:22,hardshrink:42,hardtanh:36,hardtanh_:23,has:[1,3,4,5,8,12,13,14,15,19,21,22,23,25,26,27,29,31,32,35,36,37,38,39,40,41,42,43,44,46,47],has_enumerate_support:15,has_rsampl:15,hash:20,hasn:37,hat:22,have:[1,3,4,5,8,13,14,15,17,19,21,22,23,24,25,26,28,29,30,31,32,33,35,36,37,38,40,41,42,43,44,46,47],head:22,head_1:22,head_bia:22,head_h:22,head_i:22,header:[1,7,35],health:5,healthi:[4,5],heart:13,heavi:[1,14,25,35],heavili:[2,29,37],hei:4,height:[22,23,36,47],held:8,hellemn:6,hello:19,help:[1,2,4,5,13,17,22,25,26,28,36,43],helper:[3,14,17,19,22,28,36],henc:[22,28,43,44],here:[1,4,5,13,14,15,17,19,22,23,29,30,31,35,36,42,43,44,46,48],hessian:24,heurist:[7,13],hflip:47,hidden:[3,22,28,41],hidden_s:22,hierarch:41,high:[2,4,15,21,22,41,42,43],higher:[1,4,8,14,22,29,42],highest:[22,43],highli:[5,17,22,36],hing:22,hingeembeddingloss:23,hinton:37,his:37,histc:[42,43],histogram:[41,43],histor:27,histori:[1,29,30,37],history_s:37,hit:3,hmc:15,hmdb51:45,hmdb:44,hold:[1,22,26,29,30,32,37,40,42,43,47],holist:4,hood:[1,21,32],hook:[1,22,31,42],hop:43,hop_length:[42,43],horizont:47,horizontal:47,hors:46,host:[13,14,22,28,39,42],hot:[15,23,46],houseroad:6,how:[3,4,5,13,14,19,20,21,22,23,29,30,32,36,41,43,46,47],howev:[2,3,4,5,8,13,14,15,19,22,28,32,33,34,35,36,38,42,43,44],hspmm:38,hsv:47,html:[2,4,37,41],http:[2,4,17,20,22,35,36,37,41,44,46,47],hub:[18,20],hub_dir:17,hubconf:17,huber:22,hue:47,hue_factor:47,human:[0,24,36,44],hundr:31,hwc:41,hybrid:38,hydrant:46,hyper:19,hyperbol:43,i_0:43,i_d:43,i_n:43,i_t:22,icdf:15,icml_2006:22,idea:[4,22,31],ident:[1,7,13,14,15,23,24,33,38,42,43],identifi:[4,14,21,22,26,31,41,43],identity_transform:15,idiom:35,ids:[22,23],idx:[13,22],iff:15,ifft:[42,43],ignor:[4,8,19,22,23,29,37,42,43],ignore_index:[22,23],ignored_cod:19,illia:22,im2col:22,imag:[13,22,23,41,44,45,46,48],image_s:44,image_set:44,imagefold:45,imagenet:[14,24,45,46],imagenet_data:44,imagenet_root:44,imaginari:43,imbalanc:22,img:[41,44,47],i
mg_batch:41,img_height:47,img_hwc:41,img_tensor:41,img_width:47,immedi:[4,5,22],impact:33,imper:14,implement:[1,3,8,13,14,15,19,21,22,23,25,27,29,30,31,32,35,36,37,38,42,43,44,46],impli:21,implic:19,implicit:[22,23,36],implicit_cast:36,implicitcasttyp:36,implicitli:[4,19,22,43],importerror:35,impos:21,improb:13,improv:[5,12,14,22,29,36,37,46],in1:22,in1_featur:22,in2:22,in2_featur:22,in_channel:22,in_featur:[22,29],inaccuraci:1,inc:19,incas:42,incept:[36,47],inception_v3:46,includ:[1,2,3,4,5,7,13,14,17,19,21,22,23,27,28,30,31,33,42,43,44,46],include_path:7,inclus:[15,42,43],incom:[21,22,23],incompat:[5,7,26],incomplet:13,inconsist:43,incorrect:[2,19,23,28,42,43],increas:[1,4,8,11,15,22,23,25,28,37,47],increment:[1,19,22,25],incur:[3,32,43],inde:19,independ:[5,8,13,14,19,22,23,42],index:[8,13,15,18,19,21,22,23,25,28,36,37,38,40,41,42,44],index_add:42,index_add_:[33,42],index_copi:[36,42],index_copy_:42,index_fil:[36,42],index_fill_:42,index_put:42,index_put_:42,index_select:[36,42,43],indic:[1,8,13,14,15,19,22,23,29,36,37,38,41,42,43],individu:[4,5,13,19,22,31,33,42,43],induc:[9,10,23,42,43],inf:[15,22,43],infer:[1,18,19,23,36,38,42,43,46],inferencesess:36,infin:[22,37],infiniband:[14,22],infinit:[13,22,23],influenc:5,info:[8,14,18,43],inform:[1,2,4,13,14,19,22,27,29,31,36,40,41,42,43,44,46,47],infrastructur:5,infti:[22,23],ingredi:22,inherit:[19,29,32],init:[8,14,18,22,31],init_method:[14,22],init_process_group:[14,22],init_weight:22,initi:[2,8,13,19,22,23,24,28,29,31,35,37,42,43,46],initial_accumulator_valu:37,initial_se:[8,13,43],inject:31,inlin:[7,19,27],inline_extens:7,inner:[36,43],innermost:15,inp:[1,13,22],inp_unf:22,inplac:[22,23,36,47],inplace_assign:36,inplaceindexedassign:36,inplaceindexedassignmentonnx:36,input1:[22,23,35,36],input2:[22,23,35,36,42,43],input3:[42,43],input:[1,3,5,8,12,13,14,15,19,22,23,24,25,27,28,29,30,31,36,37,38,42,43,44,46,47],input_1:36,input_2:36,input_3x3:22,input_data:36,input_featur:29,input_length:[22,23,30],input_nam:36,input_s:22,input_tensor_list:14,input_to_model:41,input_tupl:19,input_var:[3,22],insensit:22,insert:[15,19,22,43],insid:[1,4,19,28,31],insight:4,inspect:[1,31],inspir:37,inst:44,instal:[1,7,14,17,36,41,43,44],instanc:[13,15,19,21,22,23,30,43,44,45],instance_norm:36,instancenorm1d:23,instancenorm2d:23,instancenorm3d:23,instancenorm:22,instantan:8,instanti:[17,19,22,23,29],instead:[1,3,5,13,14,15,19,22,23,30,32,35,36,37,38,42,43,47],instruct:[2,19,36],instrument:31,insuffici:8,int16:[40,42,43,49],int32:[22,40,42,43,49],int64:[22,23,28,36,40,42,43,49],int64tensor:46,int8:[40,42,43,49],int_:43,int_a:43,int_b:43,int_repr:42,integ:[8,13,14,15,19,22,23,36,37,40,42,43,47,49],integer_interv:15,integergreaterthan:15,integr:[13,22,31,36,37,42,43],intel:[35,45],intel_openmp:35,intens:[37,47],intent:[4,19],inter:[27,43],interact:[1,5,8,36,41],interchang:[15,19],interconnect:14,interest:[4,5,18,44],interfac:[29,36,37,41],intermedi:[3,19,22,23,25,30,36],intermediari:15,intern:[5,13,15,22,25,27,28,36,38,43,44,46],internet:[4,44],interop:43,interoper:27,interpol:[22,43,47],interpret:[13,14,21,23,27,38,43],interprocess:8,interrupt:21,interv:[15,43,47],intra:27,introduc:[15,17,22,26,42,43],introduct:[26,41],inttensor:[40,42,43],intuit:36,inv:[15,43],invalid:43,invari:[15,22,38,43,47],invers:[15,22,23,37,42,43],inverse_indic:43,invert:[15,22,43,46],invest:5,investig:4,invis:28,invoc:[3,19,29,31,36],invok:[19,22,27,31,37],involv:[4,5,13,28,30,33],iotamudelta:6,iou:46,ipc:8,ipc_collect:8,ipc_handl:8,ipp:45,irecv:14,irfft:[42,43],irrelev:1,irrespect:[28,43],
is_avail:[8,28,43],is_coalesc:38,is_complet:14,is_contigu:42,is_cuda:[39,42],is_floating_point:[40,42,43],is_in_onnx_export:36,is_initi:14,is_leaf:[1,42],is_mpi_avail:14,is_nccl_avail:14,is_pin:[13,39,42],is_python_modul:7,is_set_to:42,is_shar:[39,42],is_sign:42,is_spars:[39,42],is_storag:43,is_tensor:43,is_train:[1,43],is_valid_fil:44,isend:14,isfinit:43,isinf:43,isinst:15,isn:[4,13,28],isnan:[36,43],isol:21,issu:[5,21,22,28,32,35,36],itch:4,item:[13,17,22,33,41,42,43,44],iter:[4,8,14,15,21,22,25,26,37,41],iter_end:13,iter_start:13,iterabledataset:[13,31],itertool:[15,43],its:[1,2,4,5,7,8,13,14,15,19,21,22,25,26,28,29,30,35,36,37,38,40,42,43,44,46,47],itself:[3,4,19,21,22,23],ivalu:31,jacobian:[1,15,43],jakob:22,jang:15,jess:6,jit:[7,18,27,31,36,43],jitter:47,job:[14,22,31,37,41],johann:6,johnson:6,join:[4,14,17,21,32],jointli:[15,22],jone:22,joulin:22,journal:43,jpeg:31,json:[19,31,44],juggl:3,jump:[40,42],junji:6,just:[1,4,7,15,17,19,21,22,28,31,36,42,43],k_0:43,kaiming_normal_:24,kaiming_uniform_:24,kaiser:22,kdim:22,keep:[1,4,13,17,19,21,22,23,25,28,30,32,37,41,43,47],keep_var:22,keepdim:[22,23,42,43],kei:[1,13,14,19,22,31,36,37,39,41,42,43],kept:[21,22],kernel:[2,7,8,22,23,29,36],kernel_s:[22,23,41],kernel_shap:36,key_averag:1,key_padding_mask:22,keyboard:46,keypoint:45,keypointrcnn_resnet50_fpn:46,keyword:[1,17,19,22,36,37,41,43],kill:[21,30],kind:[14,22,29,32,36],kinet:45,kinetics400:44,kite:46,kl_diverg:15,kl_normal_norm:15,kl_version1:15,kl_version2:15,kldivloss:23,kmnist:45,knife:46,know:[3,4,19,25,36],known:[4,14,19,21,22,24,27,28,36,43,47],knuth:4,kth:43,kthvalu:[42,43],kullback:[15,22,23],kuzushiji:44,kw_i:22,kwarg:[1,3,7,17,22,23,34,36,39,42,43,44,46,47,48],kwlist:41,l1loss:23,l_1:22,l_c:22,l_n:22,l_p:23,lab:44,label:[4,13,22,23,32,41,44,46],label_img:41,lambd:[22,23,37,42,47],lambda1:37,lambda2:37,lambda:[1,13,15,22,23,37,42,43,47],lambdalr:37,langl:15,languag:[7,22,23,30,36],laptop:46,larg:[4,13,18,21,22,23,27,28,30,38,42,43,44,46,47],larger:[1,5,22,23,30,31,41,42,43,46,47],largest:[19,23,42,43,49],last:[1,3,13,19,22,23,25,37,43,46,47],last_epoch:37,later:[1,4,19,22,27,28,34,43],latest:[4,14,15,17,36],latin1:43,latter:[22,32],launch:[2,13,22,25,27,28],law:[22,47],layer:[14,23,24,25,29,30,37,46],layer_count:36,layer_norm:36,layernorm:23,layout:[17,19,20,38,41,42,43],lazi:37,lazili:8,lbfg:37,lbrace:43,lceil:43,ldot:[15,22,43],le_:42,lead:[1,4,35,42,43],leadership:5,leaf:[1,42,43],leak:21,leaki:[22,23,24],leaky_relu:[24,36],leaky_relu_:23,leakyrelu:23,learn:[4,15,18,22,24,44,46],learnabl:[22,23],learned_0:36,learned_14:36,learned_15:36,learned_1:36,learned_2:36,learned_3:36,learned_:36,least:[15,17,22,24,26,30,39,42,43,46],leav:[1,19,25,42,43,44],left:[19,22,23,36,42,43,47],left_ankl:46,left_ear:46,left_elbow:46,left_ey:46,left_hip:46,left_kne:46,left_should:46,left_wrist:46,leftimg8bit:44,legaci:[23,40],legitim:22,leibler:[15,22,23],lemma:15,len:[13,14,19,22,23,38,41,43,44],length:[1,8,13,14,15,19,22,23,26,30,36,42,43,47],leq:[22,23,43],lerp:[42,43],lerp_:42,less:[1,4,8,13,15,17,22,29,32,43,46],less_than:15,lesser:27,let:[1,4,13,15,28,29,32,35,41,42],letter:[43,44],level:[1,4,13,19,21,22,24,27,41,42,43,46],lexicograph:43,lfloor:[22,23,43],lib64:7,lib:[35,43],libari:35,libenzi:6,librari:[2,5,7,13,18,27,29,30,31,32,33,35,36,43,45],lie:[22,23,41],lies:44,lifetim:4,light:[41,46],lighter:47,like:[1,2,3,4,5,7,8,13,14,15,17,19,21,22,23,27,28,29,30,31,32,35,36,42,43,47],likelihood:[15,22,23],limit:[13,21,22,25],line:[1,2,22,26,35,36,43],line_search_fn:37,linear:[8,19,24,25,28,29,30,3
6,42,43,46],linearfunct:29,linearli:[22,23,30],lineartransform:47,liner:22,linewidth:43,link:[7,15,22,23,31],linker:7,linspac:43,linux:[14,17,20],list:[1,3,4,5,7,13,14,15,17,22,23,29,35,36,37,38,39,40,41,42,43,44,46,47,48],listconstruct:19,listofproperti:41,literatur:22,littl:29,live:[19,22,30,37],llion:22,load:[1,7,19,20,22,31,34,35,36,37,43,44,45,46],load_inlin:7,load_nvprof:1,load_state_dict:[17,22,34,37,43],load_state_dict_from_url:[17,20],load_url:[20,46],loadabl:17,loadann:44,loaded_weight:42,loader:[13,44],loc:[15,43],local:[14,17,19,21,22,23,30,41,44],local_rank:22,locallr_0:41,localresponsenorm:23,locat:[1,7,8,15,17,19,20,22,23,37,38,41,42,43,44,46,47],lock:[4,13,14,15,28,32],log10:[42,43],log10_:42,log1p:[42,43],log1p_:42,log2:[36,42,43],log2_:42,log:[7,13,15,22,23,36,41,42,43],log_:[42,43],log_abs_det_jacobian:15,log_dir:41,log_input:[22,23],log_norm:15,log_normal_:[42,43],log_pob:22,log_prob:[15,22,23],log_sigmoid:36,log_softmax:[22,36],logabsdet:43,logarithm:[22,23,43],logdet:[42,43],logdir:41,logic:[3,13,29],logist:[15,22],logit:[15,22,23],logsoftmax:23,logspac:43,logsumexp:[36,42,43],longer:1,longest:[22,30],longtensor:[22,23,38,40,42,43],look:[2,4,5,15,19,22,23,31,32,35,36,43],lookup:[15,22,23,27],loop:[8,19,27,30,36,41,47],loop_count:36,loop_in_traced_fn:19,loop_rang:36,loopmodel2:36,loopmodel:36,loos:31,lorentz:15,loss:[15,30,37,41,44,46],loss_fn:[32,37],lost:[22,43],lot:[4,21,32,41],low:[4,15,21,22,42,43],lower:[1,8,14,15,19,22,23,24,25,33,37,43],lower_bound:15,lower_choleski:15,lower_triangular:15,lowercas:14,lowercholeski:15,lowercholeskytransform:15,lowest:43,lowrank_multivariate_norm:15,lppool1d:23,lppool2d:23,lr_0:41,lr_decai:37,lr_lambda:37,lr_schedul:37,lrelu:22,lrn:22,lru:[28,43],lstm:[3,36,41],lstsq:[42,43],lsun:45,lt_:42,lu_data:[42,43],lu_pivot:[42,43],lu_solv:[42,43],lu_unpack:43,lukasz:22,lvert:[22,23,43],macbook:41,machin:[14,22,31],maco:21,maddison:15,made:[1,5,19,22,35,37,41,47],mae:22,magma:[35,43],magma_2:35,magma_hom:35,magnitud:[22,24,43],mai:[1,2,4,7,8,9,10,11,13,14,15,19,22,23,26,28,30,35,36,37,38,39,42,43,47],main:[13,14,15,21,23,25,34,35,41,42,43],main_tag:41,mainli:[15,22,23],mainta:47,maintain:[4,14,15,22],major:[4,8,22,23,36,38],make:[1,2,3,7,8,11,13,14,15,17,19,21,22,23,25,26,28,29,30,32,33,35,36,37,40,41,42,43,44,47,48],make_grid:[41,48],manag:[1,4,22,30,31,36,43],mandatorili:13,mani:[1,4,5,13,14,19,22,24,25,26,27,29,31,33,40,41,42,43,45],manipul:30,manner:[3,26,42],mantissa:42,manual:[13,14,19,21,22,23,28,30,33,35,36,41],manual_se:[8,33,43],manual_seed_al:8,map:[7,15,19,22,23,35,36,39,43],map_:42,map_loc:[19,20,22,43],margin:[22,23,41],marginrankingloss:23,mark:[8,19,22,25,42],marker:8,market:[4,5],marten:24,mask:[22,42,43],masked_fil:[36,42],masked_fill_:42,masked_scatt:42,masked_scatter_:42,masked_select:[42,43],maskrcnn_resnet50_fpn:46,mass:15,master:[4,17,36],master_addr:14,master_port:14,mat1:[38,42,43],mat2:[38,42,43],mat:[38,41,42,43,44],match:[1,8,14,15,19,22,23,26,36,37,40,42,43,44,47],math:[13,19,22,23],mathbb:22,mathbf:[15,22],mathbin:43,mathcal:[22,24,43],mathemat:[22,23,43],mathrm:[15,22,43],matmul:[22,42,43],matplotlib:41,matric:[15,23,38,42,43],matrix:[15,22,23,24,38,41,42,43,47],matrix_pow:[42,43],matrix_rank:43,matter:[1,2,5,19,25,43],max:[13,14,19,22,23,26,30,36,37,42,43,47,48,49],max_:22,max_bin:41,max_ev:37,max_indic:43,max_it:37,max_lr:37,max_memory_alloc:[8,28],max_memory_cach:[8,28],max_momentum:37,max_norm:[22,23],max_queu:41,max_siz:28,max_val:[22,23],max_valu:22,maxim:[22,37,43],maximum:[8,15,22,23,28,37,43,47],maxno
rm:[42,43],maxpool1d:[23,36],maxpool2d:[23,36],maxpool3d:[23,36],maxpool:[22,36],maxunpool1d:23,maxunpool2d:23,maxunpool3d:23,may04_22:41,mayb:4,mean:[1,3,4,8,13,14,15,19,21,22,23,24,30,33,35,36,37,42,43,46,47],mean_vector:47,meant:[1,22],meantim:[22,23],measur:[8,15,22,23,31,37],mechan:[18,21,31],median:[15,42,43],medium:4,meet:28,megabyt:22,member:[4,5,13,14,19,22,30],membership:5,memo:22,memoiz:15,memori:[1,3,15,16,19,21,22,23,25,32,37,39,40,41,42,43,46],memory_alloc:[8,28],memory_cach:[8,28],memory_effici:46,memory_key_padding_mask:22,memory_mask:22,mendoza:6,mention:[17,19,28],mere:4,merg:[4,5,13,22],mesh:41,meshgrid:43,messag:[4,8,17,19,30,36,37],messmer:6,meta:41,metadata:[19,41,43],metadata_head:41,meter:46,meth:43,method:[1,4,7,8,13,14,15,17,21,22,23,24,28,29,30,32,36,37,38,40,41,42,43,44,46],metric:[8,37,41],michael:6,microwav:46,middl:36,might:[1,2,5,17,19,22,25,27,28,31,42,43,44],mileston:37,millisecond:8,min:[13,14,22,23,36,37,42,43,47,48,49],min_indic:43,min_lr:37,min_siz:46,min_val:[22,23],min_valu:22,min_x:43,mind:22,minfunc:37,mini:[13,22,23,46,48],minibatch:[13,22,23,43],minim:[1,4,17,32,37,43],minimum:[7,22,37,43,46],minkowski:23,minlength:[42,43],minor:[5,8],minu:43,minut:[4,14,41],mismatch:[19,30,43,47],miss:[22,35,36],missing_kei:22,mistak:30,mix:[7,15,22,27,36],mkl:[27,35,43],mkl_2018:35,mkl_fft:35,mkl_num_thread:27,mkl_thread:27,mkldnn:42,mkldnn_thread:27,mmap:21,mnasnet0_5:46,mnasnet0_75:46,mnasnet1_0:46,mnasnet1_3:46,mnist:[41,45],mnist_train:41,mnt:14,mobil:46,mobilenet_v2:46,mobilenetv2:46,mod:19,mode:[1,2,13,14,15,19,22,23,24,30,33,36,37,42,43,44,46,47],model:[1,2,3,8,14,18,19,20,22,23,25,27,28,32,33,36,37,41,43,45,47],model_dir:20,model_zoo:[18,46],moder:3,modif:[1,42,43],modifi:[1,13,19,22,23,25,28,36,37,42],modul:[1,3,7,17,18,21,23,25,27,28,30,31,32,35,36,41,42,43,46,47],module_kwarg:23,modulelist:19,modulu:43,momemtum:22,moment:[1,21,36,37],momentum:[22,23,25,37],monitor:[8,28,37,43],monoton:15,mont:15,moor:43,more:[1,2,5,7,8,13,14,15,19,20,21,22,23,24,25,27,28,29,30,31,36,37,38,40,41,42,43,44,46,47],moreov:[42,43],most:[1,2,4,8,13,14,15,17,19,21,22,23,25,28,32,37,38,40,42,43],mostli:[4,15],motion:44,motiv:4,motorbik:46,motorcycl:46,mountain:44,mous:46,moustapha:22,move:[3,19,20,21,22,23,28,32,37,39,41,42,43],moviepi:41,mpi:14,mrshenli:6,mseloss:23,msg:8,msys2:35,much:[1,2,4,5,13,22,28,42,47],mul:[1,19,36,38,42,43],mul_:[38,42],mulconst:29,mult:13,multi:[2,8,19,36,40,42,43],multicast:14,multidimension:22,multihead:22,multihead_attn:22,multilabelmarginloss:23,multilabelsoftmarginloss:23,multilay:22,multilin:41,multilinear:43,multimarginloss:23,multinomi:[42,43],multipl:[8,13,14,15,17,19,21,22,23,27,28,29,31,32,35,37,38,43,44,47],multipli:[22,23,38,43,46,47],multiplicand:43,multiprocess:[13,14,18,22,44],multiprocessing_context:13,multisteplr:37,multivari:[15,43],multivariate_norm:15,must:[1,7,8,13,14,15,19,21,22,23,24,26,29,32,33,36,37,39,42,43,47],mutabl:19,mutat:[19,42,47],mutual:[13,14],mvlgamma:[42,43],mvlgamma_:42,mvn:15,my_api:31,my_constraint:15,my_dict:19,my_experi:41,my_factori:15,my_imag:41,my_image_batch:41,my_image_hwc:41,my_lib:35,my_lib_add_backward_cuda:35,my_lib_add_forward_cuda:35,my_list:19,my_lstm:30,my_mesh:41,my_modul:19,my_module_inst:19,my_paramet:19,my_registri:15,my_script_modul:19,my_segmentation_transform:47,my_submodul:19,my_transform:15,my_variable_nam:19,myconstraint:15,myconstraintclass:15,myfunc:1,myiterabledataset:13,mymodel:32,mymodul:[19,22,30],mypi:19,myscriptmodul:19,mytransform:15,n_0:23,n_1:43,n_2:43,n_class:22,n_d:43,n_f
ft:[42,43],n_i:[22,43],n_iter:41,n_k:[23,43],n_power_iter:22,n_t:22,naiv:13,name:[1,7,8,14,15,17,19,20,21,22,24,31,36,39,41,43,44,45,49],named_buff:22,named_children:22,named_modul:22,named_paramet:22,namedtupl:[13,22,43],namespac:19,nan:[1,43],narrow:[36,42,43],narrow_copi:[38,42],nasdaq:41,natalia:6,nativ:[19,21],natur:[1,2,4,15,22,33,43],nbatch:22,nccl2:22,nccl:22,nccl_debug:14,nccl_debug_subsi:14,nccl_socket_ifnam:14,nchannel:22,nchw:41,ncrop:47,ndarrai:[36,42,43,47],ndim:42,ndimens:42,ne_:42,nearest:[22,23,47],nearli:[1,32,42],necessari:[1,7,13,19,22,25,26,28,35,40,42,43],necessarili:[14,15,22,28,36,43],need:[1,4,5,8,13,14,15,19,21,22,23,25,28,29,30,31,32,33,35,36,37,38,39,42,43,44],need_weight:22,needs_input:31,needs_input_grad:[1,29],neeraj:6,neerajprad:6,neg:[8,13,15,19,22,23,24,36,42,43,47],neg_:42,negative_binomi:15,negative_slop:[22,23,24],neglig:[1,36],negoti:5,neighbor:[22,43],neighborhood:22,neighbour:[22,23],neither:[13,14],nelement:[22,42],neq:[22,43],nest:[1,8,19,22,42],nesterov:37,net:[19,22,28,41],netlifi:4,network:[4,15,19,22,23,24,25,28,36,37,47],neural:[4,19,22,24,28,37,46],neuron:22,never:[1,3,4,14,22,25,42],new_:[28,42],new_data:36,new_empti:42,new_ful:[28,42],new_group:[14,22],new_lr:37,new_on:42,new_stat:[8,43],new_strategi:21,new_tensor:[28,42],new_zero:42,newer:[27,28],newli:[4,25],next:[1,13,14,15,21,22,23,27,32,36,40,41,42,43,44],next_stat:15,nfs:14,ngimel:6,nhead:22,nhwc:41,nice:[1,22],niederreit:43,nielsen:15,nightli:41,niki:22,nine:[40,42],ninja:[7,35],nist:44,nll:22,nllloss:23,nlp:22,nnz:[1,38,42,43],no_grad:[1,3,36,43],no_sync:22,noam:22,noarch:35,nock:15,node:[14,22,36],non:[1,3,7,14,15,19,21,24,26,28,29,30,33,41,42,43,47],non_block:[22,28,39,42],noncontigu:1,nondet_tol:1,nondetermin:1,nondeterminist:[9,10,11,22,23,42,43],none:[1,7,8,13,14,15,19,20,21,22,23,24,28,29,32,36,37,38,39,41,42,43,44,46,47,48],nonexist:19,nonlinear:24,nonlinearli:4,nonneg:15,nonnegative_integ:15,nonzero:[1,36,42,43],noordhui:6,noplot:4,nor:[13,14,22],norm:[22,23,36,37,42,43],norm_typ:[22,23],normal:[1,17,19,24,28,37,41,42,43,46,47,48],normal_:[24,28,42,43],normalized_shap:[22,23],nose:46,notabl:47,notat:[42,43],note:[1,7,8,9,10,11,13,14,15,16,17,18,19,21,22,23,25,26,27,29,31,32,36,37,38,41,42,43,44,47],notebook:[4,48],noth:[4,7,8],notic:[19,22,43],notifi:5,notimplementederror:15,notion:[13,22],now:[1,3,22,26,28,29,36,37,42,43],nproc:21,nrow:48,nsdf3:44,nthread:44,nuanc:4,nuc:43,nuclear:43,num:[22,43],num_channel:22,num_class:[23,44,46],num_decoder_lay:22,num_direct:22,num_embed:[22,23],num_encoder_lay:22,num_featur:[22,23],num_group:22,num_head:22,num_keypoint:46,num_lay:[22,36],num_lin:44,num_output_channel:47,num_paramet:22,num_process:32,num_replica:13,num_sampl:[13,42,43],num_threshold:41,num_work:[13,35,44],number:[1,2,3,4,13,14,15,19,21,22,23,26,27,28,29,33,36,37,38,39,41,42,43,44,46,47,48,49],number_of_vertic:41,numel:[42,43],numer:[13,15,19,22,23,29,36,37,42,43,49],numpi:[13,26,30,35,36,41,42,43,44,47,49],nvcc:7,nvidia:[1,14,28,30,35,43],nvprof:[1,2],nvtx:[1,2],nvvp:1,o_t:22,obermey:6,obj:[8,19,35,43],object:[1,8,13,14,15,16,19,20,21,22,25,27,28,29,30,32,35,36,37,39,40,41,42,43,44,45,47,49],observ:[22,23],obtain:[1,13,14,15,21,22,23,27,42,43,46],obviou:[30,38],obvious:4,occas:[1,4,25],occasion:38,occupi:[8,22,23,28,49],occur:[8,13,19,22,23,28,30,36,42],occurr:43,odd:15,off:[1,4,8,9,10,22,23,27,31,42,43],offici:[5,14,22,35,46],offlin:[19,47],offset:[22,23,42,43,44,47],often:[1,2,4,7,13,14,15,19,22,23,30,31,36,37,41,42,43],old:[25,35,37,43],older:28,omagma:35,omega:43,omega
_1:43,omega_d:43,omega_i:43,omit:[3,7,14,22,35,36,43,47],omkl:35,omp:27,omp_num_thread:27,onc:[1,4,13,14,16,21,22,25,27,28,29,31,36,37,41,43],one:[1,2,3,4,7,8,13,14,15,19,21,22,23,26,27,28,29,31,32,33,35,36,37,39,40,41,42,43,44,45,46,47],one_hot_categor:15,ones:[1,14,15,19,22,23,26,28,29,36,37,42,43],ones_:24,ones_lik:[28,36,43],onesid:[42,43],onfunctionent:31,onfunctionexit:31,onli:[1,2,3,4,5,8,13,14,15,16,19,21,22,23,24,25,27,29,30,31,32,34,35,36,37,38,41,42,43,46,47],onlin:37,only_input:1,onnx:[18,22],onnx_aten:36,onnx_aten_fallback:36,onnx_model:36,onnxruntim:36,onto:[8,19,21,30,43],opaqu:14,open:[1,5,15,19,21,35,43],openbla:35,openmp:[27,35],oper:[2,3,4,5,8,9,10,11,13,15,22,23,26,27,28,29,30,32,33,37,38,40,42,45,47],operand:43,operator_export_typ:36,operatorexporttyp:36,operatornam:[22,43],opinion:4,opnam:36,oppos:47,ops:[1,14,19,27,28,29,36,42,43],opset:36,opset_vers:36,opt:43,optim:[1,4,7,14,15,18,19,22,24,25,27,30,32,36],optimiz:[19,36],optimum:37,option:[1,3,7,8,13,14,15,17,20,22,23,24,29,30,33,36,38,40,41,42,43,44,46,47,48],optional_unwrap:19,orang:46,ord:43,order:[1,3,4,14,15,17,21,22,26,28,29,33,36,37,42,43,46,47],ordereddict:22,ordin:[40,42],ordinari:8,org:[2,4,17,22,35,41,46,47],organ:[4,5,31],orgqr:[42,43],orient:36,origin:[1,13,19,21,22,28,31,32,36,37,39,42,43,47],orign:47,orion:6,orionr:6,ormqr:[42,43],ort:36,ort_sess:36,orthogon:[24,43],orthogonal_:24,orthonorm:43,ossci:35,other:[1,2,4,5,7,8,13,15,19,21,23,25,26,27,28,29,30,32,33,34,36,37,41,42,46,47,48],otherwis:[1,4,5,7,14,22,23,32,39,42,43,44,46],otim:[23,43],our:[4,19,29,32,36,38],out:[1,4,5,17,19,21,22,23,25,26,32,36,38,40,41,42,43,47],out_caffe2:36,out_channel:22,out_featur:[22,29],out_j:22,out_ort:36,out_padh:23,out_padt:23,out_padw:23,out_unf:22,outer:[43,46],outlier:22,output1:[22,36],output2:22,output:[1,2,3,4,8,13,14,15,19,22,23,25,27,29,30,35,36,37,38,41,42,43,44,46,47],output_2d:22,output_4d:22,output_devic:[22,23],output_featur:29,output_nam:36,output_pad:[22,23],output_ratio:22,output_s:[22,23],output_tensor_list:14,outsid:[5,13,19,23,28,47],oven:46,over:[1,5,13,14,15,19,21,22,23,26,27,32,36,37,38,42,43,44,47,48],overal:[5,14,25,32,47],overall_end:13,overall_start:13,overflow:[23,43],overhead:[1,2,14,31,42],overheard:44,overlap:[1,13,22,28],overparameter:15,overrid:[7,14,15,22,23,36,37,41,43],overridden:[1,7,22],overrit:13,overshoot:23,overview:[21,25],overwhelm:4,overwrit:[13,22,25],owen:43,own:[4,5,14,15,22,28,36,43],owner:17,ownership:[4,5],p1d:23,p2d:23,p3d:23,p_c:22,p_i:22,p_tensor:42,pace:4,pack:[22,30,35,43],pack_padded_sequ:30,packag:[4,8,15,17,18,22,37,41,43,45],packagesnotfounderror:35,packed_input:30,packed_output:30,packedsequ:12,pad:[13,30,33,36,41,43,47,48],pad_if_need:47,pad_mod:[42,43],pad_packed_sequ:30,pad_valu:48,padded_input:30,padding_idx:[22,23],padding_input:30,padding_mod:[22,23,47],padding_valu:22,padh:23,padt:23,padw:23,page:[4,13,22,28],pai:35,pair:[19,22,23,37,38,41,43],pairwis:[15,22],pairwisedist:23,paper:[4,22,37,46],parallel:[0,13,14,22,23,27,28,33,35,47],parallel_info:[0,27],parallelli:44,param1:15,param2:15,param:[1,15,22,24,25,36,37,46],param_byt:37,param_group:37,param_shap:15,paramet:[1,3,7,8,13,14,15,16,17,20,21,23,24,25,29,31,32,34,36,38,39,41,42,43,44,45,46,47,48],parameter:[15,42],parameteriz:15,parametr:[15,29],parent:[21,35,41],park:46,parmar:22,pars:[1,14],parse_arg:[28,36],parser:28,part:[2,3,4,5,7,14,15,19,20,22,25,30,36,37,38,43,44],parti:[5,17],partial:[15,22,23,36,43],particip:[13,14,22],particular:[4,13,19,22,27,28,30,31,33,36,42,43,44],particularli:[13,19,22
],partit:22,partli:5,partner:4,pascal:[44,46],pass:[1,3,4,7,13,14,15,17,19,21,22,24,25,27,28,31,36,37,38,41,42,43,44,46,47],past:[14,30,46],paszk:[5,6],patch:[4,22],path:[1,2,7,14,17,19,25,34,44],path_importer_cach:17,path_to_hub_dir:17,pathwai:19,patienc:37,pattern:[14,19,22,28,29,30],pdb:19,pdf:[22,46,47],pdist:22,peak:[8,37],peer:[4,14,28],penalti:37,pend:41,penros:43,peopl:4,per:[7,8,13,14,22,23,27,31,33,41,43,46],per_index_weight:22,per_sample_weight:[22,23],per_work:13,perform:[1,3,11,12,13,14,15,19,21,22,23,24,25,28,29,33,37,38,39,40,41,42,43,47],period:[32,37,43],permit:38,permut:[13,36,42,43],perplex:15,persist:[3,4,12,22,35],person:[4,5,18,45],perspect:47,perturb:[1,43],peter:6,peterjc123:[6,35],phase:35,phenomenon:30,phi:23,phone:46,photo:44,phototour:45,php:44,phy:43,pic:47,pick:47,pickl:[13,21,22,43],pickle_load_arg:43,pickle_modul:43,pickle_protocol:43,pid:30,piec:4,pieter:6,pietern:6,pil:[44,45],pillow:[41,47],pin:[22,39,42,43],pin_memori:[13,28,39,42,43],pinvers:[42,43],pip:[35,41],pipelin:47,pivot:[42,43],pixel:[22,23,44,47,48],pixel_shuffl:[22,36],pixelshuffl:23,pixelwis:46,pizza:46,pjh5:6,pkg:35,place:[4,8,13,14,19,22,23,28,31,36,39,42,47],placehold:22,plai:14,plain:[7,22],plan:[4,14,22,43],plane:[22,23,43,44],plant:46,platform:[7,33,43,46],pleas:[1,2,4,5,9,10,11,14,15,19,22,23,29,35,36,37,41,42,43,44],plenti:30,plot:41,plu:47,plume:44,pmf:15,png:44,point:[1,4,5,8,13,19,22,23,25,33,37,40,41,42,43,44,49],pointer:8,pointwis:[15,26],poisson:[22,23],poissonnllloss:23,poli:44,polici:[15,37],policy_network:15,polosukhin:22,polygon:44,polymorph:19,pool:[27,29,32,33],pop:[8,22],popul:[1,15,42],popular:45,popularli:47,port:14,portion:[22,23,37,43],pos_weight:[22,23],posit:[1,13,15,17,22,23,36,42,43,47,49],positive_definit:15,positive_integ:15,positivedefinit:15,possess:5,possibl:[5,7,13,15,19,21,22,23,24,25,29,32,35,40,42,43,44],post:[4,30,35,46,47],postprocess:46,pot:46,potenti:[11,14,21,22,23,25,36],potential_energi:15,pottedpl:46,pow:[1,36,42,43],pow_:42,powbackward0:1,power:[22,23,37,43,47],powertransform:15,pr_curv:41,practic:[13,15,18,19,21,46],pradhan:6,pre:[1,17,22,36,37,42,44,46],preced:27,precis:[1,7,15,22,36,41,43,46],precision_matrix:15,precompil:31,predict:[22,41,46],predict_net:36,preemptiv:22,prefer:[5,13,22],preferr:43,prefix:[17,22,38],prelu:36,prepar:36,prepend:[7,13,22,26,43],preprocess:[42,46],presenc:5,present:[5,14,20,21,22,25,40,43,44,46],preserv:[13,19,22,24,28,42,47],preserve_rng_st:3,pressur:[1,25],pretrain:[17,22,25,36,46],pretrained_backbon:46,pretti:[19,43],prevent:[4,8,13,14,21,22,23,38,43],previou:[14,22,35,42,43],previous:[1,19,26,28,42],prim:19,primari:5,primarili:[15,42],primit:[14,19,27],print:[1,13,17,19,22,23,27,28,29,36,37,41,42,43,44],printable_graph:36,printer:19,prior:[4,5,26,37],prioriti:[4,8],pro:[35,41],prob:15,probabl:[13,21,22,23,29,35,36,41,42,43,47],problem:[4,14,21,22,30,32,33,35,43],problemat:[4,19],proce:28,procedur:[19,44],proceed:8,process:[1,7,8,14,15,17,19,21,22,23,27,28,31,32,33,35,38,39,44,46],process_group:22,process_id:22,processgroup:14,prod:[22,36,42,43],prod_:[22,43],prod_d:22,produc:[4,7,8,13,19,21,22,23,26,28,31,35,36,38,43],producer_info:31,product:[1,14,15,19,22,23,31,42,43,47],prof:1,profil:[2,43],program:[1,2,8,13,14,19,25,28,30,31,32,35,41],programm:19,progress:[8,17,20,37,46],project:[4,17,34],projector:41,promot:22,prompt:35,prone:[21,32],propag:[15,21,36,38,42],proper:[22,28,35],properli:[4,22,32,40,43],properti:[1,13,15,19,22,23,28,37,40,49],proport:[22,47],proportion:[22,23],propos:[5,6,37],protobuf:36,proto
col:[13,35,43],prototyp:40,prove:21,proven:[4,22],provid:[1,4,5,7,13,14,15,17,19,21,22,23,28,36,37,38,39,40,41,42,43,46,47,49],pseudo:43,pseudoinvers:15,pseudorandom:33,psi:43,pth:[17,19,20],publish:4,puhrsch:6,pull:5,purchas:5,pure:19,purg:41,purge_step:41,purpos:[14,22,42,43],push:[4,5,8],pushcallback:31,put:[4,13,17,21,22,28,32,37,42,43,44],put_:42,pybind11:[7,19],pyc:31,pycapsul:16,pyplot:41,python2:[14,43],python3:[14,43],python:[1,2,5,7,8,13,14,17,21,22,23,25,26,27,28,29,30,31,32,36,38,42,43,47],pytorch:[0,1,2,7,8,13,15,17,19,20,22,26,27,28,30,31,32,35,37,40,41,42,43,44,46,49],pytorch_jit:19,q_scale:42,q_zero_point:42,qmnist:45,qscheme:42,qtensor:42,quad:22,quadrat:30,qualiti:[4,44],quantiti:37,quantiz:[1,42],quasirandom:43,queri:[8,13,22,28],question:[4,18],queu:[8,28],queue:[4,21,41],queue_2:21,quick:[1,4],quickli:4,quit:[4,30],qw_i:22,r_t:22,racket:46,rais:[1,4,15,19,21,25,28,42,43,47],raise_except:1,ram:[41,43],rand:[1,19,23,36,41,42,43,46],rand_lik:43,randint:[22,23,38,41,42,43,47],randint_lik:43,randn:[1,19,22,23,25,26,28,29,36,38,40,41,42,43],randn_lik:[36,43],random:[15,17,22,23,33,36,41,42,44,46,47],random_:[22,23,42,43],random_devic:43,random_offset:44,random_split:13,randomaffin:47,randomappli:47,randomchoic:47,randomcrop:[44,47],randomeras:47,randomgrayscal:47,randomhorizontalflip:47,randomli:[1,13,22,23,31,44,47],randomord:47,randomperspect:47,randomresizedcrop:47,randomrot:47,randomsampl:13,randomsizedcrop:47,randomverticalflip:47,randperm:43,rang:[1,8,13,14,15,19,22,23,30,32,36,37,41,42,43,44,46,47,48],range_pop:8,range_push:8,rangl:15,rank:[13,14,15,22,32,43],rapidli:30,rare:4,rate:[15,22,31,41,46],rather:[1,3,7,19,23,26,36,41,42,43,48],ratio:[15,22,47],raw:[19,22,36],rbrace:43,rceil:43,rcond:43,rdinat:[38,43],reach:[4,5,13,32,37],reachabl:14,read:[13,14,19,26,28,31,36,37,42,43],readabl:[0,13,36],readi:[4,7,22,43],readlin:[19,43],real:[13,15,17,22,43,47],real_vector:15,realiti:2,realli:[1,4,25,43],realloc:43,realvector:15,rearrang:[22,23],reason:[13,14,19,23,25,36,40],rebas:4,reblitz:6,rebuild:4,rebuilt:4,recal:[22,29,41,46],receiv:[1,4,13,14,15,21,22,32],recent:[1,4],recip:22,reciproc:[36,42,43],reciprocal_:42,recogn:13,recognit:[44,46],recommend:[1,13,14,17,19,21,22,24,25,27,28,29,32,36,42,43],recomput:[3,22,37],reconstruct:[8,22],record:[1,8,19,22,25,36,41,42,43],record_ev:8,record_shap:1,recordfunct:31,recov:[22,43],recreat:25,rectangl:47,rectifi:[22,23,24],recurr:[14,19,28,37],recurs:[15,19,22],recv:14,redistribut:35,reduc:[1,8,14,21,22,23,35,37,38,42,43],reduce_add:8,reduce_multigpu:14,reduce_op:14,reducelronplateau:37,reduceop:14,reduct:[14,22,23],redund:[14,43],reevalu:37,refactor:[4,34,35],refcount:[21,32],refer:[8,13,14,15,17,18,21,22,23,25,29,30,32,41,42,43,45,46],referenc:[19,25,43],reflect:[19,22,23,30,42,43,47],reflection_pad:36,reflectionpad2d:23,reflector:43,refriger:46,regard:[19,22,23,43],region:[15,19,21,22,23,28,43,47],regist:[1,15,21,22,29,31,36,42,43],register_backward_hook:22,register_buff:[19,22,29],register_custom_op_symbol:36,register_forward_hook:22,register_forward_pre_hook:22,register_hook:[1,22,42],register_kl:15,register_packag:43,register_paramet:[22,29],registr:22,regress:[4,22,46],regular:[1,2,22,31,36,37],regularli:4,reimplement:22,reinforc:15,reiniti:17,reinterpret:[15,42],reinterpreted_batch_ndim:15,rel:[1,5,7,15,22,27,28,31,37,43],relat:[4,5,13],relationship:1,relative_path_to_checkpoint:17,relative_to:35,relax:[15,19],relaxed_bernoulli:15,relaxed_categor:15,releas:[4,8,14,17,21,22,23,28,33,35,36,41,43],relev:[5,42],reli:[5,13,22,3
3],reload:17,relu1:22,relu2:22,relu:[19,24,36],relu_:23,rem:35,remain:[1,15,21,30,43,44],remaind:[42,43],remainder_:42,remap:[19,20,43],rememb:[30,32],remot:[13,14,46],remov:[1,5,14,19,22,23,42,43],removablehandl:22,render:[4,41],renorm:[22,23,42,43],renorm_:42,rep:36,repackag:30,reparameter:[15,22],reparametr:[15,23],reparametriz:15,repeat:[15,23,36,42,43,47],repeat_interleav:[42,43],repeatedli:[28,38,43],repetit:43,repl:1,replac:[7,13,19,22,25,31,32,35,36,42,43],replic:[13,22,23],replica:[13,14,22],replication_pad:36,replicationpad2d:23,repo:[4,17,35,37,46],repo_nam:17,repo_own:17,report:[1,2,5,28,46],repositori:[5,17,29,32],repr:43,repres:[1,8,13,15,16,19,22,25,29,31,36,37,38,40,43,49],represent:[19,22,36,38,42,49],reproduc:[4,9,10,11,17,18,22,23,37,42,43],request:[5,14,25,28],requir:[1,3,5,7,13,14,15,17,19,21,22,23,25,27,28,29,30,31,32,36,37,41,42,43,44,46],require_grad:1,require_grad_:42,requires_grad:[1,15,22,23,29,38,42,43],requires_grad_:[1,22,23,38,42,43],rerun:3,res:43,resampl:47,rescal:[22,23,47],research:[4,17],reset:[8,22,36,43],reset_max_memory_alloc:8,reset_max_memory_cach:8,reshap:[22,36,38,41,42,43,47],reshape_a:[36,42],reshuffl:13,resid:[14,22,28,42,43],residu:[43,46],resili:37,resiz:[22,23,39,42,43,46,47],resize_:[1,19,39,42,43],resize_as_:[1,42],resizeas_:38,resized_crop:47,resnet101:46,resnet152:46,resnet18:[17,19,20,25,46],resnet34:46,resnet50:[17,41,46],resnet:[17,19,36,41],resnext101_32x8d:46,resnext50_32x4d:46,resolut:[22,23],resolv:[4,5,15,19,22,35,36],resourc:[13,21,44],respect:[1,14,15,22,37,39,42,43,44,47],respond:4,respons:[2,4,5,14,15,22,23,28],rest:[4,13,38],restart:[21,37,41],restor:[3,34,43],restrict:[13,19,22],restructur:4,result:[1,2,4,7,8,13,14,15,19,22,23,24,25,26,27,28,29,30,33,36,37,38,40,41,42,43,46,47],result_avg:47,resum:[37,41],retain:[1,21,32,42,43],retain_grad:[1,42],retain_graph:[1,42],rethink:46,retreiv:3,retriev:[1,13,22,23,31],return_count:[42,43],return_indic:[22,23],return_invers:[42,43],return_typ:43,reus:[1,14,25,47],reveal:38,revers:[15,19,22,25,42,43,47],revert:[5,22],review:5,reward:15,rewrit:25,rfft:[42,43],rfloor:[22,23,43],rgb:[22,41,46,47],rgba:47,rho:37,riba:22,richard:[6,15],richardson:6,right:[4,14,17,21,22,23,36,37,43,47],right_ankl:46,right_ear:46,right_elbow:46,right_ey:46,right_hip:46,right_kne:46,right_should:46,right_wrist:46,risk:4,riski:4,rmsprop:37,rng:[3,8,13,30,33,43],rnn:[19,30,36,41],robin:14,robust:21,roll:[42,43],roof:1,root:[25,38,43,44],ross:22,rot90:[42,43],rotat:[15,43,47],rough:4,roughli:[13,43],round:[14,36,42,43],round_:42,roundtrip:4,routin:43,row:[13,23,38,41,42,43,48],row_limit:1,rpn:46,rprop:37,rrelu:36,rrelu_:23,rsampl:15,rsqrt:[42,43],rsqrt_:42,rst:4,rsub:36,rtol:[1,19,42,43],rule:[1,14,15,19,22,25,26,42,43],run:[1,2,3,4,13,14,19,21,22,25,27,28,30,31,32,33,35,36,37,41,43],run_14h:41,run_fn:[1,3],runnabl:41,running_mean:[22,23],running_var:[22,23],runtim:[1,3,7,14,21,32],runtimeerror:[1,19,26,35,36,42,43],runtimewarn:15,rv0:19,rv1:19,rvert:43,rvert_p:[22,23],s_min:22,s_n:22,sacrif:46,safe:[8,19,22,31],safest:[7,38],sai:[4,19,30,36,42,44],sam:[5,6],same:[1,4,7,8,13,14,15,17,19,21,22,23,25,26,27,28,30,31,32,33,36,38,39,41,42,43,46,47,48],sampl:[13,15,22,23,24,31,33,41,42,44,47],sample_input_cpu:19,sample_input_gpu:19,sample_n:15,sample_r:41,sample_shap:15,sandwich:46,sane:43,satisfi:[1,12,15,22,37,42,43],satur:[28,47],saturation_factor:47,save:[1,3,4,14,19,20,22,25,32,36,37,41,42,43,44,48],save_for_backward:[1,29],save_imag:48,saved_tensor:[1,25,29],saved_weight:42,sax:24,sbd:45,sbdataset:44,sbu:45,sbuc
aptionedphotodataset:44,scalar:[1,19,22,23,24,36,37,38,41,42,43],scalar_valu:41,scale:[4,13,15,18,22,23,24,30,37,42,43,46,47,48],scale_each:48,scale_factor:[22,23],scale_fn:37,scale_grad_by_freq:[22,23],scale_mod:37,scale_tril:15,scatter:[8,14,22,30,36,42],scatter_:[36,42],scatter_add:[36,42],scatter_add_:[33,42],scatter_list:14,scenario:[13,28,36],scene:41,schedul:[31,37],schema:19,scheme:42,schmidtm:37,sci_mod:43,scientif:43,scipi:[23,41,44],scissor:46,scope:[4,19,22,30,36],score:[22,46],scrambl:43,scratch:[4,25],script:[2,13,14,17,22,27,31],script_method:[19,36],scripted_fn:19,scripted_modul:19,scriptmodul:[19,36],scrutini:4,search:[4,19,46],seat:5,sebastian:6,second:[1,3,7,19,22,23,30,33,34,35,38,41,43,47],section:[4,13,15,19,21,22,29,32,41,42],see:[1,2,3,4,5,7,8,9,10,11,13,15,17,19,20,21,22,23,25,28,29,30,31,32,35,36,38,40,42,43,46,47,48],seed:[8,13,30,33,43,44],seed_al:8,seek:[19,43],seem:[4,36,47],seen:[1,15,22,37,42,43],segfault:21,segment:[3,44,45,47],select:[8,11,12,13,19,21,22,23,27,28,36,42,43,44,46,47],self:[1,13,19,22,25,26,27,29,30,36,37,39,42,43],self_cpu_time_tot:1,selu:36,semant:[5,8,18,19,36,43,44,45,47],semi:[22,24],semidefinit:43,send:[4,8,13,14,21,32,35,43],sender:14,sens:[2,15,43],sensit:[22,36],sent:[8,14,21,32,43],separ:[1,7,13,14,17,19,22,23,27,37,41,43,48],seq:[1,22,42,43],seq_len:22,sequenc:[1,8,13,15,19,22,23,28,30,36,37,42,43,47],sequenti:[3,13,21,36],sequentialsampl:13,sequnc:22,seri:22,serial:[13,18,19,20,28,31,32],serializ:[19,36],seriou:[21,34],serr:44,serv:[4,14],server:[4,13],sess:36,set:[0,1,3,5,7,8,11,13,14,15,17,19,20,21,22,23,24,26,27,28,29,30,31,32,33,35,36,37,41,42,43,44,46,47],set_:[1,42],set_default_dtyp:43,set_default_tensor_typ:43,set_detect_anomali:1,set_devic:[8,22,40],set_dir:17,set_flush_denorm:43,set_grad_en:[1,43],set_image_backend:45,set_num_interop_thread:[27,43],set_num_thread:[27,43],set_printopt:43,set_rng_stat:[8,43],set_rng_state_al:8,set_sharing_strategi:21,set_start_method:32,set_stat:43,set_trac:19,set_train:36,setapiusagehandl:31,setapiusagelogg:31,setexportmoduleextrafileshook:31,setsamplingprob:31,setup:7,setuptool:7,sever:[14,19,22,23,27,28,31,37,43,47],sgd:[13,22,25,37],sgdr:37,sgn:43,sha256:20,shadow:47,shall:22,shallow:22,shamelessli:43,shape:[1,8,15,19,22,23,25,26,30,36,38,41,42,43,44,46,47,48],shape_as_tensor:36,shard:[4,13],share:[1,4,7,8,13,15,16,22,27,32,35,36,39,42,43],share_memori:32,share_memory_:[21,39,42],shared_memori:21,sharedfil:14,shazeer:22,shear:47,sheep:46,shell:7,shen:6,shi:22,shift:[22,42,43,47,48],ship:27,shippabl:4,shm_open:21,shorter:36,shortest:22,shorttensor:[40,42],should:[1,2,3,4,5,7,8,13,14,15,17,19,20,21,22,23,28,29,30,32,33,35,36,37,38,41,42,43,44,46,47],shouldn:38,shout:36,show:[0,2,4,13,14,17,27,28,37,41],showcas:[5,22,28,32],shown:[8,19,29,30],shrinkag:[22,23],shuffl:[13,41,44],shufflenet_v2_x0_5:46,shufflenet_v2_x1_0:46,shufflenet_v2_x1_5:46,shufflenet_v2_x2_0:46,shufflenetv2:46,shut:13,side:[1,7,17,19,22,23,36,37,43,47],sigma:[15,22,23,42],sigmoid:[15,24,36,42,43],sigmoid_:42,sigmoidtransform:15,sign:[4,15,36,40,42,43,46],sign_:42,signal:[21,22,23,32,43],signal_2d:22,signal_4d:22,signal_ndim:[42,43],signal_s:[42,43],signatur:[1,13,22,42,43],signific:[1,25,28,37],significantli:22,silent:[8,19,22,43],sim:[22,23,43],similar:[4,13,19,21,22,23,27,29,38,42,43,44,49],similarli:[4,19,22,30,36,43],simon:6,simpl:[17,19,22,23,29,30,31,33,36],simplecustombatch:13,simplequeu:32,simpler:29,simplest:22,simplex:15,simpli:[1,7,13,15,19,22,25,38],simplifi:[19,22,37],simultan:25,sin:[7,36,41,42,43],sin_:42,s
in_add:7,sinc:[4,8,13,14,15,19,22,23,29,30,31,35,36,37,38,43,47],sine:43,singl:[7,14,15,17,19,21,22,23,25,27,28,29,32,36,37,39,40,42,43,47],singleton:[15,22,26,42,43],singular:43,sinh:[42,43],sinh_:42,sink:46,site:4,situat:[15,21,32],size:[1,4,8,13,14,15,19,22,23,25,26,28,29,30,31,36,37,38,39,41,42,43,44,46,47,48],size_averag:[22,23],sizedim:42,sizeof:39,skateboard:46,skew:[1,2],ski:46,skip:[29,37],sky:44,slack:4,slice:[19,22,23,36,42],slide:[22,23,43],slightli:[5,13,17,46],slogdet:[42,43],slope:[22,24],slow:[32,41],slower:[2,14,23,46],small:[1,4,5,8,13,14,15,19,22,23,28,29,30,43],smaller:[13,37,42,43,47],smallest:[38,43,49],smart:29,smessmer:6,smi:[8,28,30],smnt:44,smoke:44,smooth:[22,36,37],smoother:17,smoothl1loss:23,snd_tensor:41,snedecor:15,snippet:17,snow:44,snowboard:46,snowi:44,sobol:43,soboleng:43,sobolengin:43,socket:21,sofa:46,soft:[22,23,46],softmarginloss:23,softmax:[15,36],softmaxtransform:15,softplu:36,softshrinkag:22,softwar:37,solid:47,solut:[4,24,32,43],solv:[4,35,42,43],solver:43,some:[1,3,4,5,8,11,14,15,17,19,21,22,23,25,28,29,30,31,32,33,34,35,36,37,38,41,42,43,44,46,47],some_dict:19,someon:4,someth:[4,19,21,35,43],sometim:[4,19,21,22,23,30,32,43],somewher:31,sophist:37,sort:[1,22,30,42,43],sort_bi:1,sorted_indic:22,soumith:[5,6],sound:41,sourc:[0,1,2,3,7,8,13,14,15,17,19,21,22,23,24,30,31,33,36,37,38,39,41,42,43,44,45,46,47,48],space:[13,15,19,22,23,43,47],spadd:38,span:[8,22,42],spars:[1,18,24,37,40,42,43],sparse_:24,sparse_coo:[38,40,42,43],sparse_coo_tensor:[38,42,43],sparse_dim:[38,42,43],sparse_grad:43,sparse_mask:[38,42],sparseadam:[22,37],sparseaddmmbackward:38,sparsedim:42,sparsefloattensor:38,sparsetensor:[1,38,42,43],sparsiti:24,spatia:23,spatial:[22,23],spatio:22,spawn:[13,22,32,35],spawncontext:21,speak:[38,43],special:[4,22,29,31,41,43],specif:[1,3,4,5,7,8,14,15,17,19,20,22,28,32,33,34,36,37,41,42,43],specifi:[1,7,8,13,14,15,17,19,20,22,23,28,29,31,35,36,37,38,39,41,42,43,44,45,47,48],specifii:36,spectral:22,speed:[4,22,27,28,30,33,43],spend:[2,4],spent:[1,2,14],sphinx:4,split:[4,13,19,22,23,36,42,43,44],split_siz:[42,43],split_size_or_sect:43,spmm:38,sponsorship:5,spoon:46,sport:46,spotri:43,spread:[8,28],sqrt:[22,24,36,38,42,43],sqrt_:42,squar:[22,23,37,38,43,47],squeez:[29,36,38,42,43],squeeze_:42,squeezenet1_0:46,squeezenet1_1:46,src:[8,14,22,42,43],src_key_padding_mask:22,src_mask:22,src_tensor:14,src_vocab:22,srinivasan:6,ssa:19,sse3:43,ssnl:6,sspaddmm:38,sspmm:38,stabil:[22,37,43],stabl:[15,22,35,36,43],stack:[8,13,15,22,28,36,43,47],stacktransform:15,stage:4,stagnat:37,stai:[4,22,32],stand:19,standalon:19,standard:[4,15,19,22,24,27,32,36,42,43,44,47],star:22,start:[1,2,5,8,13,14,21,22,23,26,28,30,32,35,36,37,42,43],start_dim:[42,43],startpoint:47,startup:[2,27],stash:[1,3,29],stat:22,state:[1,3,8,13,15,19,22,28,32,35,37,43],state_dict:[17,20,22,32,34,36,37],statement:[25,29,32,36],staticmethod:[1,29],statist:[8,15,22,30],statu:[21,43],std:[7,24,31,35,42,43,46,47],std_mean:43,stddev:15,stderr:[20,46],stdin:1,stdout:37,step:[2,5,7,13,14,15,19,22,23,28,30,32,33,35,41,42,43,44],step_between_clip:44,step_siz:37,step_size_down:37,step_size_up:37,steplr:37,stft:[42,43],stick:15,stickbreakingtransform:15,still:[1,14,15,19,21,22,28,30,35,37,43],stirl:[22,23],stl10:45,stl10_binari:44,stochast:[13,15,22,37],stop:[8,15,22,37,43,46],storag:[1,8,18,19,20,21,22,25,28,32,40,42,43],storage_offset:[42,43],storage_typ:42,storageshar:35,store:[1,3,7,14,17,19,22,30,31,38,41,42,43,44],store_tru:28,str:[1,7,14,19,21,22,23,37,39,41,42,44,47],straight:23,strategi:[4,13,14,
19,22],stream:[13,44],strict:[19,22],strictli:[5,13,22,25],stride:[1,22,23,36,40,41,42,43],strike:4,string:[0,1,7,8,14,17,19,20,22,23,31,36,39,40,41,42,43,44,45],stringio:[19,43],strip:[23,36],strip_doc_str:36,strive:4,strong:5,strong_wolf:37,strongli:[5,17,22,27],struct:31,structur:[4,5,13,19,22,28,29,32,34,35,36,41,42,43],student:15,studio:35,style:[19,43],styliz:22,sub:[19,22,36,38,42,43],sub_:[38,42],subclass:[1,7,13,15,19,22,29,42,44],subdir:44,subfold:7,subgradi:37,subgraph:22,subject:43,submatrix:15,submit:8,submodul:[19,22,36],subpackag:46,subprocess:[13,30,32],subsequ:[4,7,19,22],subset:[13,14,19,22,36,46],subsetrandomsampl:13,subspac:[22,42,43],substanti:5,substitut:40,subsystem:4,subtl:[4,22],subtleti:[13,22,30],subtli:37,subtract:[23,42,47],subtyp:19,succe:[14,35],succeed:43,success:[5,15,43],successfulli:[21,22,43],succinct:17,suffici:[7,15,17,36,43],suffix:[41,42],sugar:19,suggest:[5,17,19,22,30],suhan:6,suit:[19,36],suitabl:[13,15,37],suitcas:46,suitibl:41,sum:[1,8,13,14,15,23,28,29,36,37,38,42,43],sum_:[22,43],sum_i:22,sum_j:[22,23,43],sum_pair:19,sum_to_s:42,summar:[2,43],summari:[1,41,43,46],summarywrit:41,summat:43,sunset:[5,6],suo:6,superresolut:36,supervis:22,suppli:[3,4,7],support:[1,4,5,7,8,13,14,15,17,19,21,22,23,25,26,27,32,35,37,38,40,41,42,43,44,45],suppos:[13,38,43,47],sure:[1,4,13,14,19,22,25,30,35,36,37,41,43],surfboard:46,surg:43,surpass:24,surpris:17,surrog:15,surround:19,sutskev:37,svd:[42,43,47],svhn:45,svi:15,swap:[22,23,42,43],symbol:[35,36],symbolic_fn:36,symbolic_foo_forward:36,symbolic_help:36,symbolic_nam:36,symbolic_opset10:36,symbolic_opset9:36,symbolic_opset:36,symeig:[42,43],symmetr:[43,47],symmetri:43,sync:22,sync_bn_modul:22,sync_bn_network:22,synchron:[2,8,22,27,28,32],syntact:5,syntax:19,sys:17,system:[4,7,8,19,22,25,28,31,35,43],t4d:23,t_max:37,tabl:[1,14,19,22,23,46],tag:[1,4,14,17,31,41,43],tag_nam:17,tag_scalar_dict:41,taiwan:41,take:[1,2,4,5,7,8,13,15,17,19,21,22,23,27,29,30,33,35,36,40,41,42,43,44],taken:[15,22,23,28,30,31,36,43,44],talk:31,tall:42,tan:[36,41,42,43],tan_:42,tangent:43,tanh:[24,36,42,43],tanh_:42,tanx:41,tape:4,tar:44,tarbal:44,target:[22,23,32,37,41,42,44,46,47],target_length:[22,23],target_n:22,target_transform:44,target_typ:44,task:[1,4,22,27,35,46,47],tau:[23,43],tbb:27,tdr:35,team:[4,5],technic:[4,5,30],techniqu:22,teddi:46,tell:[1,4,19,42,43],temperatur:[15,23],tempor:[22,23],temporari:[7,22,30],temporarili:36,ten:[19,41],ten_crop:47,tencrop:47,tend:4,teng:6,tenni:46,tensor1:[42,43],tensor2:[42,43],tensor:[3,4,7,8,13,14,15,16,18,19,22,23,24,25,26,27,28,29,30,32,33,35,36,37,38,39,41,44,45,46,48],tensor_a:43,tensor_b:43,tensor_list:14,tensorboard:18,tensordataset:13,tensordot:43,tensorflow:[15,41],term:[5,15,22,23,30,37,42,43],termin:[21,37],terminolog:22,test10k:44,test50k:44,test:[7,19,21,28,29,41,43,44,46,47],text:[4,15,22,23,24,41,42,43,47],text_str:41,texttt:[42,43],tgt:[13,22],tgt_key_padding_mask:22,tgt_mask:22,tgt_vocab:22,thalloc:35,than:[1,2,3,5,7,8,13,14,17,19,22,23,24,25,26,28,29,30,32,33,36,37,38,41,42,43,44,45,46,47,48],thank:[15,29],thc:35,thc_state:35,thcstate:35,thcudacheck:35,thcudatensor:35,thcudatensor_cadd:35,thcudatensor_fil:35,thcudatensor_issamesizea:35,thcudatensor_resizea:35,the_model:34,thei:[1,3,4,5,8,13,14,15,19,21,22,23,28,29,32,35,36,37,38,42,43,44,46,47],them:[1,3,4,13,14,17,19,21,22,23,25,26,29,30,31,35,37,38,41,42,43,44],themodelclass:34,themselv:[1,43],therebi:13,therefor:[1,3,13,14,15,19,22,23,30,31,37,38,42,43,47],theta:[15,23],thi:[1,2,3,4,5,7,8,9,10,11,13,14,15,17,19,21,22,23,24,
25,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49],thin:43,thing:[1,4,23,25,30,32,38],think:4,third:[15,22,43],thoma:6,those:[1,2,8,13,19,22,23,28,37,43],though:[14,19,32],thrash:14,thread:[1,8,13,14,18,19,22,31,32,36,43],three:[14,19,22,36,37,41,44],threej:41,threshold:[36,37,41,43,46],threshold_:23,threshold_mod:37,through:[1,3,4,5,13,15,17,19,21,22,23,30,35,42,43,49],throughout:22,thrown:[42,43],thtensor:42,thtensorrandom:43,thu:[1,13,14,15,19,22,23,30,31,36,42,43],thumb:14,tie:[15,46],tile:42,time:[1,2,4,7,8,13,14,15,19,21,22,23,24,25,27,30,31,32,35,36,37,38,41,42,43,46,47],timedelta:14,timelin:[1,2],timeout:[13,14,21],tini:[42,49],tip:4,tl_flip:47,tmp:[1,7],to_dens:38,to_dlpack:16,to_grayscal:47,to_mkldnn:42,to_pil_imag:47,to_spars:[38,42],to_tensor:47,toaster:46,todens:38,togeth:[13,14,15,22,30,31,41,43,47],toilet:46,token:17,tol:43,toler:[1,19,37,43],tolerance_chang:37,tolerance_grad:37,tolist:[39,42],too:[4,22,23,30,32,35,38],tool:[1,2,5,17,19,35],toothbrush:46,top:[1,13,15,21,22,23,29,43,44,46,47],topic:[5,31],topilimag:47,topk:[36,42,43],topolog:1,torch:[5,11,12,18,19,25,26,27,28,30,31,32,33,34,35,44,45,46],torch_14808_1591070686:35,torch_extens:7,torch_extensions_dir:7,torch_hom:[17,20],torch_model_zoo:46,torch_shm_manag:21,torchscript:[18,36],torchvis:[17,18,19,25,36,41],toronto:22,total:[1,2,4,13,17,22,23,37,43],total_averag:1,total_count:15,total_length:[22,30],total_loss:30,totensor:[41,44,47],touch:[4,36],toward:[5,36,43],tr_flip:47,trace:[1,4,13,25,28,42,43],trace_modul:19,trace_nam:1,traceabl:19,traceback:[1,21],traced_bar:19,traced_cpu:19,traced_fn:19,traced_foo:19,traced_gpu:19,tracer:36,tracerwarn:19,track:[1,3,8,21,22,28,29,30,31,42],track_running_stat:22,tracker:[4,5],trade:[3,22],traffic:46,trail:[22,24,26,29],train2017:46,train:[8,13,14,17,19,22,23,24,25,30,36,37,41,44,46,47],train_batch:37,train_extra:44,train_load:28,train_nov:44,trainabl:37,trainload:41,trainset:41,trainval:44,tranform:15,transb:36,transfer:[13,14,21,28],transform:[13,18,23,25,41,43,44,45,46],transform_input:46,transform_to:15,transformation_matrix:47,transformed_distribut:15,transformer_decod:22,transformer_encod:22,transformer_model:22,transit:19,translat:47,transpos:[22,23,25,36,38,42,43],transpose_:[1,38,42],transposed_data:13,trapezoid:43,trapz:43,travers:[22,29],treat:[15,19,22,23,37,40,42,43],tree:[4,19,22,44],tri:[4,19,21,22,42],triag:5,trial:15,triangl:41,triangular2:37,triangular:[15,23,37,43],triangular_solv:[42,43],trick:[15,22,23,31,46],tricki:25,trigger:[1,4,5,31,42,43],tril:[42,43],tril_:42,tril_indic:43,trilinear:[22,23],trim:43,tripl:23,triplet:22,triplet_loss:22,tripletmarginloss:23,triu:[42,43],triu_:42,triu_indic:43,trivial:43,trou:22,troubleshoot:4,truck:46,truli:36,trunc:[42,43],trunc_:42,truncat:[30,43],truth:[41,46],tseq:15,tune:[14,37],tup:19,tupl:[1,3,8,13,21,22,23,29,36,37,38,41,42,43,44,47,48],tuple_or_list:19,turn:[7,13,19,22,36],tutori:[5,29,31,36],tvmonitor:46,twice:[30,46],two:[1,2,7,8,13,14,15,17,19,22,23,25,26,27,28,29,33,34,35,37,38,41,42,43,44,46],twse:41,txhxwxc:44,txt:19,type:[1,7,8,14,15,17,18,22,23,27,28,31,35,36,38,39,40,41,42,43,44,46,47],type_a:[36,42],type_p:15,type_q:15,typic:[7,13,15,19,22,27,49],typo:4,ubc:37,ucf101:45,ucf:44,uint8:[40,41,42,43,47,49],uint8_t:42,uint8tensor:46,ultim:[5,7],umbrella:46,unabl:[4,37],unbalanc:22,unbatch:43,unbias:[42,43],unbind:[36,42,43],unchang:[22,42,43,47],uncoalesc:[38,43],uncondition:17,unconstrain:15,undefin:[14,19,28,42,43],under:[1,2,13,21,22,23,25,28,32,35,41,43],underli:[8,15,19,23,30,42,43]
,underscor:[17,38,42],understand:[4,5,22,24,25,41],understood:43,undertak:4,underwai:1,undesir:[11,22,23],undetermin:33,unequ:22,unexpect:[14,19,22],unexpected_kei:22,unexpectedli:42,unfold:[19,36,42],unfortun:[1,3,5,22],unicodedecodeerror:43,uniform:[22,24,42,43],uniform_:[24,29,42,43],uniformli:[15,43,47],uniniti:[42,43],uniqu:[14,19,20,42,43],unique_consecut:[42,43],unit:[22,23,43],unit_interv:15,unitari:43,unitriangular:[42,43],univari:15,univers:19,unix:[13,21],unlabel:44,unless:[1,2,4,13,14,22,25,28,42,43],unlik:[4,19,21,22,32,42],unmask:22,unnecessari:28,unnorm:[22,23],unnot:21,unoccupi:8,unord:22,unpack:[22,29,30,43],unpack_data:43,unpack_pivot:43,unpickl:[13,43],unpool:22,unpooled_output:22,unreduc:22,unrel:4,unresolv:35,unrol:[19,36],unseg:22,unsign:[40,42],unsort:22,unsorted_indic:22,unspecifi:[14,42,43],unsqueez:[22,29,36,41,42,43],unsqueeze_:42,unstabl:[15,23,43],unsuccess:14,unsupport:19,until:[4,8,14,21,23,25,28,30,41],untouch:13,untrack:19,untrain:36,unus:[8,22,28],unused_argument1:22,unused_argument2:22,unusu:4,upcal:19,upcom:14,updat:[1,5,17,19,22,32,35,36,37,41,42],upgrad:37,upon:[13,21],upper:[15,22,23,24,37,42,43,47],upper_bound:15,uppercas:14,ups:4,upsample_nearest1d:36,upsample_nearest2d:36,upsample_nearest3d:36,upsample_trilinear:23,upscal:22,upscale_factor:[22,23],upstream:35,url:[1,14,17,20],usa:41,usag:[1,2,4,8,13,15,25,30,36,41,42,43],use:[1,3,4,5,7,8,13,15,17,19,21,22,23,24,25,27,28,29,30,31,32,33,35,36,41,42,43,44,46],use_cuda:1,use_gpu:19,use_input_stat:23,use_mkldnn:27,use_openmp:27,use_tbb:27,used:[1,2,4,7,8,12,13,14,15,17,19,20,22,23,24,27,28,29,31,32,34,35,36,37,38,40,41,42,43,44,45,46,47,48],useful:[1,4,13,15,17,19,22,23,25,27,29,31,37,43,47],user:[1,3,8,13,14,15,17,21,22,28,31,32,36,41,42,43,47],userwarn:[26,36],uses:[1,2,8,13,14,17,19,22,23,27,28,29,30,31,35,37,43,44,45],using:[1,3,4,5,7,9,10,11,13,14,15,17,18,19,21,22,23,24,25,27,28,29,30,31,32,33,35,36,37,41,42,43,44,46,47],usp:45,usual:[1,4,7,13,19,22,27,30,31,36,41,42,43],uszkoreit:22,util:[4,8,18,26,27,28,29,30,31,36,37,44,45,46],v100:[12,22,46],v_1:22,v_2:22,val2017:46,val:[24,42,44],val_loss:37,valid:[1,14,15,19,22,36,37,43,44],valid_fil:44,validate_arg:15,valu:[1,3,4,5,7,13,14,15,17,20,21,22,23,24,25,27,28,29,30,33,36,37,38,41,42,43,44,46,47,48],valueerror:22,var1:37,var2:37,var_mean:43,vari:[22,37],variabl:[3,7,8,15,17,20,22,27,28,30,33,35,36,37,41,42,43,46],variabletyp:36,varianc:[15,22,24,33,37,43],variant:[31,37,43],variat:15,variou:[3,5,7,13,21,32,34,37],vase:46,vaswani:22,vc2017:35,vdim:22,vec1:[42,43],vec2:[42,43],vec:[22,42,43],vector:[1,15,22,23,41,42,43,44,47],vehicl:5,vein:19,veloc:37,verbos:[7,36,37,41],veri:[1,2,4,19,21,22,25,29,30,32,35,36,37,46],verifi:[7,19,20,29,36],verify_ninja_avail:7,versa:[22,39,42,43],version:[3,15,17,19,22,23,25,26,28,29,31,35,36,37,42,43,44,47],versu:[4,22],vert:[22,23],vertex:41,vertic:[41,47],vertical_flip:47,vertices_tensor:41,vflip:47,vgg11:46,vgg11_bn:46,vgg13:46,vgg13_bn:46,vgg16:46,vgg16_bn:46,vgg19:46,vgg19_bn:46,vgg:36,via:[1,4,7,8,13,14,15,19,21,22,24,30,32,36,37,40,43],vice:[22,39,42,43],vid_tensor:41,video:[22,41,44],videoclip:44,viehmann:6,view:[1,4,5,13,19,21,22,23,26,36,40,42,43,44,47],view_a:42,violat:5,virtual:36,vishwak:6,vishwakftw:6,visibl:[8,14,22,46],vision:[5,17,45,46],visual:[1,22,35,41],vitali:6,vitalyfedyunin:6,voc2012:44,voc:[45,46],vocdetect:44,vocsegment:44,volumetr:[22,23],vs2017:35,vs2017_runtim:35,vw_i:22,vychisl:43,w_hf:22,w_hg:22,w_hi:22,w_hn:22,w_ho:22,w_hr:22,w_hz:22,w_if:22,w_ig:22,w_ii:22,w_in:22,w_io:22,w_ir:22,w_iz:
22,w_j:22,w_n:22,w_y:27,w_z:27,wai:[1,3,4,5,7,13,14,15,19,21,22,23,29,30,32,33,34,35,36,37,38,42,43,44,46],wait:[1,8,14,21,22,27,37],wait_ev:8,wait_stream:[8,28],walltim:41,wang:6,want:[4,8,13,14,17,19,22,23,25,28,29,36,37,38,42,43],warm:37,warmup:1,warn:[26,36,43],wasn:43,wast:4,weaker:15,weight:[13,17,19,23,24,25,29,30,36,37,41,42,43,46],weight_decai:37,weight_g:22,weight_hh:22,weight_hh_l:22,weight_ih:22,weight_ih_l:22,weight_u:22,weight_v:22,weighted_kernel_sum:19,weightedrandomsampl:13,weird:[22,46],welcom:4,well:[1,4,7,13,14,19,22,23,25,32,33,36,38,41,43,46],were:[1,14,19,22,29,36,38,42,43],what:[1,3,4,5,15,17,19,22,25,29,36,37,38,44],whatev:[42,43],wheel:35,when:[1,2,4,5,7,8,9,10,11,13,14,15,17,19,21,22,23,25,26,28,29,30,31,32,33,34,35,36,37,38,41,42,43,44,46,47],whenev:[5,21,22,23],where:[1,2,4,5,7,13,14,15,19,20,21,22,23,24,25,26,28,33,36,37,38,40,41,42,43,44,46,48],wherev:5,whether:[1,4,5,7,13,14,15,17,20,22,23,28,29,36,37,38,39,41,42,43,44],which:[1,2,4,5,7,8,13,15,17,19,20,21,22,23,25,26,27,28,30,31,33,35,36,37,38,39,40,41,42,43,44,46,47,49],whilst:[15,28],white:47,whiten:47,who:4,whole:[13,14,22,32],whose:[13,15,22,25,36,41,43,44],why:[4,36],wide:27,wide_resnet101_2:46,wide_resnet50_2:46,width:[15,22,23,36,43,47],wikipedia:23,willing:5,win:43,win_length:[42,43],window:[13,18,22,23,42,43],window_length:43,wine:46,wip:4,wise:[14,15,22,23,27,43],wish:19,wit:19,with_cuda:[7,35],with_replac:43,within:[1,3,5,8,13,14,15,19,22,27,28,31,36,41,43],without:[1,3,4,5,8,13,15,17,19,21,22,23,26,28,32,36,41,42,43,46,47,49],won:[3,17,22,23,25,29,36,43],woodburi:15,word:[1,14,19,22,23,30],word_language_model:36,work:[1,3,4,5,7,8,14,15,17,19,21,22,23,25,27,28,29,32,35,38,42,43],worker:[4,13,14,22,44],worker_id:13,worker_info:13,worker_init_fn:[13,30],workflow:17,workground:35,workload:[13,14,31],workspac:[7,36],world:[14,22],world_siz:[14,22],worth:17,would:[1,3,5,13,14,15,19,22,26,27,28,36,38,42,43],wrap:[1,13,19,22,35,37,42],wrapper:[8,14,19,21,22,29],write:[4,5,19,23,25,28,30,36,37,38,41,42,43],writer:41,written:[1,19,22,36,37,39,41,43],wrong:[32,35,37],wrote:4,www:[22,37,41,44],x86:43,x86_x64:35,x_0:43,x_1:[22,23,43],x_2:[22,23,43],x_3:43,x_clone:21,x_cpu:28,x_cpu_long:28,x_gpu:28,x_i:[22,23,43],x_j:[22,23],x_n:22,x_t:22,xavier_normal_:24,xavier_uniform_:24,xcosx:41,xdg_cache_hom:[17,20],xing:43,xml:44,xsinx:41,xxx:44,xxy:44,xxz:44,y_cpu:28,y_cpu_long:28,y_gpu:28,y_hard:23,y_i:[22,43],y_n:22,y_soft:23,yang:[5,6],ycbcr:47,year:44,yes:4,yet:[8,43],yf225:6,yield:[13,22,43],yinghai:6,you:[1,2,3,4,5,7,8,11,13,14,15,17,19,21,22,23,25,26,28,29,30,31,32,33,35,36,37,38,40,41,42,43,44,46,47],your:[1,2,4,7,8,13,14,15,19,21,22,25,26,28,29,30,31,33,35,36,37,38,41,42,43,46,47],yourself:[32,35],z_t:22,zach:6,zdevito:6,zebra:46,zero:[1,8,15,19,21,22,23,24,28,35,36,38,41,42,43,47],zero_:[1,23,38,42],zero_grad:[22,30,32,37],zero_infin:[22,23],zero_point:42,zeros_:24,zeros_lik:[28,36,43],zhang:6,zhong:47,zip:[13,31],zipf:22,zou3519:6,zou:6},titles:["torch.__config__","Automatic differentiation package - torch.autograd","torch.utils.bottleneck","torch.utils.checkpoint","PyTorch Contribution Guide","PyTorch Governance","PyTorch Governance | Persons of Interest","torch.utils.cpp_extension","torch.cuda","<no title>","<no title>","<no title>","<no title>","torch.utils.data","Distributed communication package - torch.distributed","Probability distributions - torch.distributions","torch.utils.dlpack","torch.hub","PyTorch documentation","TorchScript","torch.utils.model_zoo","Multiprocessing package - 
torch.multiprocessing","torch.nn","torch.nn.functional","torch.nn.init","Autograd mechanics","Broadcasting semantics","CPU threading and TorchScript inference","CUDA semantics","Extending PyTorch","Frequently Asked Questions","Features for large-scale deployments","Multiprocessing best practices","Reproducibility","Serialization semantics","Windows FAQ","torch.onnx","torch.optim","torch.sparse","torch.Storage","Tensor Attributes","torch.utils.tensorboard","torch.Tensor","torch","torchvision.datasets","torchvision","torchvision.models","torchvision.transforms","torchvision.utils","Type Info"],titleterms:{"case":[4,19],"default":[13,19],"function":[1,5,14,15,19,22,23,36,38,47],"import":[17,35],"new":4,"return":30,Adding:[4,29,36],One:35,Ops:43,The:4,Use:[19,28],__config__:0,about:4,access:19,activ:[22,23],adaptive_avg_pool1d:23,adaptive_avg_pool2d:23,adaptive_avg_pool3d:23,adaptive_max_pool1d:23,adaptive_max_pool2d:23,adaptive_max_pool3d:23,adaptiveavgpool1d:22,adaptiveavgpool2d:22,adaptiveavgpool3d:22,adaptivelogsoftmaxwithloss:22,adaptivemaxpool1d:22,adaptivemaxpool2d:22,adaptivemaxpool3d:22,adjust:37,affine_grid:23,agnost:28,alexnet:[36,46],algorithm:37,alpha_dropout:23,alphadropout:22,amd:6,anomali:1,api:[27,31],approach:34,arithmet:19,ask:[19,30,36],asynchron:[14,28,32],aten:36,attach:31,attribut:[19,40],autograd:[1,6,25,29],automat:[1,13,19],avg_pool1d:23,avg_pool2d:23,avg_pool3d:23,avgpool1d:22,avgpool2d:22,avgpool3d:22,avoid:[4,32],backend:14,backward:[25,26],basic:14,batch:13,batch_norm:23,batchnorm1d:22,batchnorm2d:22,batchnorm3d:22,bceloss:22,bcewithlogitsloss:22,behavior:13,bernoulli:15,best:[28,32,34],beta:15,bilinear:[22,23],binary_cross_entropi:23,binary_cross_entropy_with_logit:23,binomi:15,bla:43,bottleneck:2,broadcast:26,broken:35,buffer:[28,32],bug:4,build:[4,6,27,31,35],builtin:19,c10:6,cach:[17,28],call:19,caption:44,cast:19,categor:15,cauchi:15,celu:[22,23],cffi:35,chang:5,channel:35,characterist:46,check:[1,19,25],checkpoint:3,chi2:15,choos:14,cifar:44,cityscap:44,classif:46,claus:35,clip_grad_norm_:22,clip_grad_value_:22,closur:37,cnn:46,coco:44,code:[4,19,28],codebas:4,collate_fn:13,collect:[8,14],come:14,common:[4,14,31],commun:[8,14],comparison:[19,43],compat:26,compon:35,comput:[1,43],consider:31,constant:19,constantpad1d:22,constantpad2d:22,constantpad3d:22,constraint:15,construct:[19,37],contain:22,contribut:4,controversi:5,conv1d:[22,23],conv2d:[22,23],conv3d:[22,23],conv_transpose1d:23,conv_transpose2d:23,conv_transpose3d:23,convers:47,convolut:[22,23,46],convtranspose1d:22,convtranspose2d:22,convtranspose3d:22,core:5,correct:[1,25],cosine_embedding_loss:23,cosine_similar:23,cosineembeddingloss:22,cosinesimilar:22,cpp:35,cpp_extens:7,cpu:[6,27],creat:19,creation:43,cross_entropi:23,crossentropyloss:22,ctc_loss:23,ctcloss:22,cuda:[6,8,21,28,30,32,35],cudnn:33,cufft:28,custom:[29,36],data:[13,30],data_parallel:23,dataload:6,dataparallel:[22,23,28],dataset:[13,44],datasetfold:44,deadlock:32,debug:19,decis:5,deeplabv3:46,defin:19,densenet:46,deploy:31,deprec:1,deriv:15,descriptor:21,detect:[1,44,46],develop:[4,5],devic:[28,40],dict:19,differenti:1,dirichlet:15,disabl:[1,13,19,43],discuss:4,distanc:[22,23],distribut:[6,14,15,22,23],distributeddataparallel:22,diverg:15,dlpack:16,doc:4,document:[4,18],doesn:30,down:35,download:17,driver:35,dropout2d:[22,23],dropout3d:[22,23],dropout:[22,23],dtype:40,edg:19,elu:[22,23],embed:[22,23],embedding_bag:23,embeddingbag:22,emnist:44,encod:25,end:36,engin:6,entrypoint:17,environ:[14,31],error:[30,35],event:8,exampl:36,exclud:25
,execut:28,exponenti:15,exponentialfamili:15,express:19,extend:29,extens:[8,29,31,35],fakedata:44,faq:[5,35],fashion:44,faster:46,featur:[4,31],fight:32,file:[14,21],file_descriptor:21,file_system:21,finfo:49,fishersnedecor:15,fix:4,flatten:22,fleet:31,flickr:44,fold:[22,23],found:35,fractionalmaxpool2d:22,freed:30,frequent:[19,30,36],from:[17,25,35,36],fulli:46,gamma:15,gelu:23,gener:[6,8,26,43,47],geometr:15,get:4,glu:23,googlenet:46,govern:[5,6],gpu:[14,22,23,30],gradient:[1,43],graph:19,grid_sampl:23,group:14,groupnorm:22,gru:22,grucel:22,guid:[4,5],gumbel:15,gumbel_softmax:23,halfcauchi:15,halfnorm:15,hardshrink:[22,23],hardtanh:[22,23],hinge_embedding_loss:23,hingeembeddingloss:22,hip:6,histori:25,hmdb51:44,hogwild:32,how:[17,25,37],hub:17,ident:[22,30],iinfo:49,imag:47,imagefold:44,imagenet:44,implement:[4,17],improv:4,incept:46,includ:35,independ:15,index:43,indic:18,infer:27,info:49,init:24,initi:14,inspect:19,instal:35,instanc:46,instance_norm:23,instancenorm1d:22,instancenorm2d:22,instancenorm3d:22,instead:28,interest:6,interfac:14,interpol:23,interpret:19,ipc:35,isn:30,issu:4,iter:13,jit:[6,19],join:43,kei:[5,35],keypoint:46,kinet:44,kl_div:23,kldivloss:22,kmnist:44,known:17,l1_loss:23,l1loss:22,languag:19,lapack:43,laplac:15,larg:31,launch:14,layer:22,layer_norm:23,layernorm:22,layout:40,leaky_relu:23,leakyrelu:22,learn:37,level:6,limit:[17,36],linear:[22,23],list:19,liter:19,load:[13,17],loader:30,local:[1,43],local_response_norm:23,localresponsenorm:22,log:31,log_softmax:23,logic:[17,19],logitrelaxedbernoulli:15,lognorm:15,logsigmoid:[22,23],logsoftmax:22,lookup:19,loss:[22,23],lowrankmultivariatenorm:15,lp_pool1d:23,lp_pool2d:23,lppool1d:22,lppool2d:22,lstm:22,lstmcell:22,lsun:44,maintain:[5,6],make:[4,5],manag:[8,21,28],map:13,margin_ranking_loss:23,marginrankingloss:22,mask:46,math:43,max_pool1d:23,max_pool2d:23,max_pool3d:23,max_unpool1d:23,max_unpool2d:23,max_unpool3d:23,maxpool1d:22,maxpool2d:22,maxpool3d:22,maxunpool1d:22,maxunpool2d:22,maxunpool3d:22,mechan:25,memori:[8,13,28,30],metadata:31,method:19,mistak:4,mix:19,mkldnn:6,mnasnet:46,mnist:44,mobilenet:46,model:[17,30,31,34,46],model_zoo:20,moder:5,modul:[6,19,22,29],moduledict:22,modulelist:22,more:4,mse_loss:23,mseloss:22,multi:[13,14,22,23],multi_margin_loss:23,multiheadattent:22,multilabel_margin_loss:23,multilabel_soft_margin_loss:23,multilabelmarginloss:22,multilabelsoftmarginloss:22,multimarginloss:22,multinomi:15,multiprocess:[6,21,28,32,35],multivariatenorm:15,mutat:43,nccl:14,negativebinomi:15,network:[14,30,46],nll_loss:23,nllloss:22,non:[13,22,23,36],nonlinear:22,normal:[15,22,23],notic:17,number:[8,30],numer:1,numpi:33,nvidia:8,nvtx:8,object:46,one_hot:23,onehotcategor:15,onlin:4,onnx:[6,36],open:4,oper:[1,14,19,25,31,35,36,43],optim:37,option:[19,27,35,37],order:13,other:[14,22,43],out:30,overview:4,pack_padded_sequ:22,pack_sequ:22,packag:[1,14,21,35],packedsequ:22,pad:[22,23],pad_packed_sequ:22,pad_sequ:22,pairwise_dist:23,pairwisedist:22,parallel:[30,43],paramet:[19,22,37],parameterdict:22,parameterlist:22,parameters_to_vector:22,pareto:15,particip:4,pass:32,pathwis:15,pdist:23,peopl:5,per:37,perform:6,person:[6,46],philosophi:5,phototour:44,pil:47,pin:[13,28],pipe:35,pixel_shuffl:23,pixelshuffl:22,place:[1,25,26,43],plan:28,platform:13,point:[14,31],pointwis:43,poisson:15,poisson_nll_loss:23,poissonnllloss:22,pool:[22,23],ppc:6,practic:[28,32,34],prelu:[22,23],probabl:15,process:[4,5,13],profil:[1,31],project:5,promot:4,properli:30,propos:4,protect:35,publish:17,pull:4,python:[4,19,35],pytorch:[4,5
,6,14,18,29,33,36],qmnist:44,quasi:43,question:[19,30,36],queue:32,random:[8,13,30,43],rate:37,readabl:4,recommend:34,recurr:[22,30],reduct:43,refer:19,refin:19,reflectionpad1d:22,reflectionpad2d:22,registri:15,relaxedbernoulli:15,relaxedonehotcategor:15,relu6:[22,23],relu:[22,23],remove_spectral_norm:22,remove_weight_norm:22,replicationpad1d:22,replicationpad2d:22,replicationpad3d:22,report:[4,30],reproduc:33,request:4,requires_grad:25,resnet:46,resnext:46,resolut:19,reus:32,review:4,rng:6,rnn:22,rnncell:22,robust:4,rocm:6,rrelu:[22,23],run:17,runtim:[27,30,46],sampl:43,sampler:13,save:[17,31,34],sbd:44,sbu:44,scale:31,score:15,script:[19,35,36],segment:46,selu:[22,23],semant:[26,28,34,46],sequenti:22,serial:[34,43],share:[14,21],shufflenet:46,shut:35,sigmoid:[22,23],simd:6,singl:13,slice:43,smooth_l1_loss:23,smoothl1loss:22,soft_margin_loss:23,softmarginloss:22,softmax2d:22,softmax:[22,23],softmin:[22,23],softplu:[22,23],softshrink:[22,23],softsign:[22,23],sourc:[4,35],spars:[22,23,38],spawn:[14,21],specif:13,spectral:43,spectral_norm:22,speed:35,squeezenet:46,start:4,statement:19,step:37,stl10:44,storag:39,strategi:21,stream:[8,28],studentt:15,style:13,subgraph:25,submit:4,subprocess:21,subscript:19,sum:22,support:36,svhn:44,syncbatchnorm:22,synchron:14,system:[14,21],tabl:18,take:37,tanh:[22,23],tanhshrink:[22,23],tcp:14,tenet:5,tensor:[1,21,40,42,43,47],tensorboard:41,ternari:19,test:4,thread:27,threshold:[22,23],through:32,tip:32,tool:8,torch:[0,1,2,3,6,7,8,13,14,15,16,17,20,21,22,23,24,29,36,37,38,39,40,41,42,43,47,49],torchscript:[19,27,31],torchvis:[44,45,46,47,48],trace:[19,36],tracer:19,train:32,transform:[15,22,47],transformeddistribut:15,transformerdecod:22,transformerdecoderlay:22,transformerencod:22,transformerencoderlay:22,triag:4,triplet_margin_loss:23,tripletmarginloss:22,tupl:19,tutori:4,type:[13,19,49],ucf101:44,uncontroversi:5,unfold:[22,23],uniform:15,upsampl:[22,23],upsample_bilinear:23,upsample_nearest:23,upsamplingbilinear2d:22,upsamplingnearest2d:22,usag:[31,35],use:[14,37],user:19,usp:44,util:[2,3,7,13,14,16,20,22,41,43,48],valu:19,variabl:[1,14,19],vector_to_paramet:22,vgg:46,vision:[22,23],voc:44,warn:19,weibul:15,weight:22,weight_norm:22,where:17,which:14,why:35,wide:[31,46],win:35,window:[6,35],without:35,work:[13,30],worker:30,write:29,xla:6,zeropad2d:22}}) \ No newline at end of file 
+Search.setIndex({docnames:["__config__","autograd","bottleneck","checkpoint","community/contribution_guide","community/governance","community/persons_of_interest","cpp_extension","cuda","cuda_deterministic","cuda_deterministic_backward","cudnn_deterministic","cudnn_persistent_rnn","data","distributed","distributions","dlpack","hub","index","jit","model_zoo","multiprocessing","nn","nn.functional","nn.init","notes/autograd","notes/broadcasting","notes/cpu_threading_torchscript_inference","notes/cuda","notes/extending","notes/faq","notes/large_scale_deployments","notes/multiprocessing","notes/randomness","notes/serialization","notes/windows","onnx","optim","sparse","storage","tensor_attributes","tensorboard","tensors","torch","torchvision/datasets","torchvision/index","torchvision/models","torchvision/transforms","torchvision/utils","type_info"],envversion:{"sphinx.domains.c":1,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":1,"sphinx.domains.javascript":1,"sphinx.domains.math":2,"sphinx.domains.python":1,"sphinx.domains.rst":1,"sphinx.domains.std":1,"sphinx.ext.intersphinx":1,"sphinx.ext.todo":1,"sphinx.ext.viewcode":1,sphinx:56},filenames:["__config__.rst","autograd.rst","bottleneck.rst","checkpoint.rst","community/contribution_guide.rst","community/governance.rst","community/persons_of_interest.rst","cpp_extension.rst","cuda.rst","cuda_deterministic.rst","cuda_deterministic_backward.rst","cudnn_deterministic.rst","cudnn_persistent_rnn.rst","data.rst","distributed.rst","distributions.rst","dlpack.rst","hub.rst","index.rst","jit.rst","model_zoo.rst","multiprocessing.rst","nn.rst","nn.functional.rst","nn.init.rst","notes/autograd.rst","notes/broadcasting.rst","notes/cpu_threading_torchscript_inference.rst","notes/cuda.rst","notes/extending.rst","notes/faq.rst","notes/large_scale_deployments.rst","notes/multiprocessing.rst","notes/randomness.rst","notes/serialization.rst","notes/windows.rst","onnx.rst","optim.rst","sparse.rst","storage.rst","tensor_attributes.rst","tensorboard.rst","tensors.rst","torch.rst","torchvision/datasets.rst","torchvision/index.rst","torchvision/models.rst","torchvision/transforms.rst","torchvision/utils.rst","type_info.rst"],objects:{"":{"PYTORCH_JIT=1":[19,5,1,"-"],torch:[43,0,0,"-"],torchvision:[45,0,0,"-"]},"torch.ByteTensor":{all:[42,2,1,""],any:[42,2,1,""]},"torch.FloatStorage":{"byte":[39,2,1,""],"char":[39,2,1,""],"double":[39,2,1,""],"float":[39,2,1,""],"int":[39,2,1,""],"long":[39,2,1,""],"new":[39,2,1,""],"short":[39,2,1,""],bfloat16:[39,2,1,""],bool:[39,2,1,""],clone:[39,2,1,""],copy_:[39,2,1,""],cpu:[39,2,1,""],cuda:[39,2,1,""],data_ptr:[39,2,1,""],device:[39,3,1,""],dtype:[39,3,1,""],element_size:[39,2,1,""],fill_:[39,2,1,""],from_buffer:[39,2,1,""],from_file:[39,2,1,""],half:[39,2,1,""],is_cuda:[39,3,1,""],is_pinned:[39,2,1,""],is_shared:[39,2,1,""],is_sparse:[39,3,1,""],pin_memory:[39,2,1,""],resize_:[39,2,1,""],share_memory_:[39,2,1,""],size:[39,2,1,""],tolist:[39,2,1,""],type:[39,2,1,""]},"torch.Tensor":{"byte":[42,2,1,""],"char":[42,2,1,""],"double":[42,2,1,""],"float":[42,2,1,""],"int":[42,2,1,""],"long":[42,2,1,""],"short":[42,2,1,""],"var":[42,2,1,""],T:[42,3,1,""],abs:[42,2,1,""],abs_:[42,2,1,""],acos:[42,2,1,""],acos_:[42,2,1,""],add:[42,2,1,""],add_:[42,2,1,""],addbmm:[42,2,1,""],addbmm_:[42,2,1,""],addcdiv:[42,2,1,""],addcdiv_:[42,2,1,""],addcmul:[42,2,1,""],addcmul_:[42,2,1,""],addmm:[42,2,1,""],addmm_:[42,2,1,""],addmv:[42,2,1,""],addmv_:[42,2,1,""],addr:[42,2,1,""],addr_:[42,2,1,""],allclose:[42,2,1,""],apply_:
[42,2,1,""],argmax:[42,2,1,""],argmin:[42,2,1,""],argsort:[42,2,1,""],as_strided:[42,2,1,""],asin:[42,2,1,""],asin_:[42,2,1,""],atan2:[42,2,1,""],atan2_:[42,2,1,""],atan:[42,2,1,""],atan_:[42,2,1,""],backward:[42,2,1,""],baddbmm:[42,2,1,""],baddbmm_:[42,2,1,""],bernoulli:[42,2,1,""],bernoulli_:[42,2,1,""],bfloat16:[42,2,1,""],bincount:[42,2,1,""],bitwise_not:[42,2,1,""],bitwise_not_:[42,2,1,""],bmm:[42,2,1,""],bool:[42,2,1,""],cauchy_:[42,2,1,""],ceil:[42,2,1,""],ceil_:[42,2,1,""],cholesky:[42,2,1,""],cholesky_inverse:[42,2,1,""],cholesky_solve:[42,2,1,""],chunk:[42,2,1,""],clamp:[42,2,1,""],clamp_:[42,2,1,""],clone:[42,2,1,""],contiguous:[42,2,1,""],copy_:[42,2,1,""],cos:[42,2,1,""],cos_:[42,2,1,""],cosh:[42,2,1,""],cosh_:[42,2,1,""],cpu:[42,2,1,""],cross:[42,2,1,""],cuda:[42,2,1,""],cumprod:[42,2,1,""],cumsum:[42,2,1,""],data_ptr:[42,2,1,""],dense_dim:[42,2,1,""],dequantize:[42,2,1,""],det:[42,2,1,""],detach:[42,2,1,""],detach_:[42,2,1,""],device:[42,3,1,""],diag:[42,2,1,""],diag_embed:[42,2,1,""],diagflat:[42,2,1,""],diagonal:[42,2,1,""],digamma:[42,2,1,""],digamma_:[42,2,1,""],dim:[42,2,1,""],dist:[42,2,1,""],div:[42,2,1,""],div_:[42,2,1,""],dot:[42,2,1,""],eig:[42,2,1,""],element_size:[42,2,1,""],eq:[42,2,1,""],eq_:[42,2,1,""],equal:[42,2,1,""],erf:[42,2,1,""],erf_:[42,2,1,""],erfc:[42,2,1,""],erfc_:[42,2,1,""],erfinv:[42,2,1,""],erfinv_:[42,2,1,""],exp:[42,2,1,""],exp_:[42,2,1,""],expand:[42,2,1,""],expand_as:[42,2,1,""],expm1:[42,2,1,""],expm1_:[42,2,1,""],exponential_:[42,2,1,""],fft:[42,2,1,""],fill_:[42,2,1,""],fill_diagonal_:[42,2,1,""],flatten:[42,2,1,""],flip:[42,2,1,""],floor:[42,2,1,""],floor_:[42,2,1,""],fmod:[42,2,1,""],fmod_:[42,2,1,""],frac:[42,2,1,""],frac_:[42,2,1,""],gather:[42,2,1,""],ge:[42,2,1,""],ge_:[42,2,1,""],gels:[42,2,1,""],geometric_:[42,2,1,""],geqrf:[42,2,1,""],ger:[42,2,1,""],get_device:[42,2,1,""],grad:[42,3,1,""],gt:[42,2,1,""],gt_:[42,2,1,""],half:[42,2,1,""],hardshrink:[42,2,1,""],histc:[42,2,1,""],ifft:[42,2,1,""],index_add:[42,2,1,""],index_add_:[42,2,1,""],index_copy:[42,2,1,""],index_copy_:[42,2,1,""],index_fill:[42,2,1,""],index_fill_:[42,2,1,""],index_put:[42,2,1,""],index_put_:[42,2,1,""],index_select:[42,2,1,""],indices:[42,2,1,""],int_repr:[42,2,1,""],inverse:[42,2,1,""],irfft:[42,2,1,""],is_contiguous:[42,2,1,""],is_cuda:[42,3,1,""],is_floating_point:[42,2,1,""],is_leaf:[42,2,1,""],is_pinned:[42,2,1,""],is_set_to:[42,2,1,""],is_shared:[42,2,1,""],is_signed:[42,2,1,""],is_sparse:[42,2,1,""],item:[42,2,1,""],kthvalue:[42,2,1,""],le:[42,2,1,""],le_:[42,2,1,""],lerp:[42,2,1,""],lerp_:[42,2,1,""],log10:[42,2,1,""],log10_:[42,2,1,""],log1p:[42,2,1,""],log1p_:[42,2,1,""],log2:[42,2,1,""],log2_:[42,2,1,""],log:[42,2,1,""],log_:[42,2,1,""],log_normal_:[42,2,1,""],logdet:[42,2,1,""],logsumexp:[42,2,1,""],lstsq:[42,2,1,""],lt:[42,2,1,""],lt_:[42,2,1,""],lu:[42,2,1,""],lu_solve:[42,2,1,""],map_:[42,2,1,""],masked_fill:[42,2,1,""],masked_fill_:[42,2,1,""],masked_scatter:[42,2,1,""],masked_scatter_:[42,2,1,""],masked_select:[42,2,1,""],matmul:[42,2,1,""],matrix_power:[42,2,1,""],max:[42,2,1,""],mean:[42,2,1,""],median:[42,2,1,""],min:[42,2,1,""],mm:[42,2,1,""],mode:[42,2,1,""],mul:[42,2,1,""],mul_:[42,2,1,""],multinomial:[42,2,1,""],mv:[42,2,1,""],mvlgamma:[42,2,1,""],mvlgamma_:[42,2,1,""],narrow:[42,2,1,""],narrow_copy:[42,2,1,""],ndim:[42,3,1,""],ndimension:[42,2,1,""],ne:[42,2,1,""],ne_:[42,2,1,""],neg:[42,2,1,""],neg_:[42,2,1,""],nelement:[42,2,1,""],new_empty:[42,2,1,""],new_full:[42,2,1,""],new_ones:[42,2,1,""],new_tensor:[42,2,1,""],new_zeros:[42,2,
1,""],nonzero:[42,2,1,""],norm:[42,2,1,""],normal_:[42,2,1,""],numel:[42,2,1,""],numpy:[42,2,1,""],orgqr:[42,2,1,""],ormqr:[42,2,1,""],permute:[42,2,1,""],pin_memory:[42,2,1,""],pinverse:[42,2,1,""],pow:[42,2,1,""],pow_:[42,2,1,""],prod:[42,2,1,""],put_:[42,2,1,""],q_scale:[42,2,1,""],q_zero_point:[42,2,1,""],qr:[42,2,1,""],qscheme:[42,2,1,""],random_:[42,2,1,""],reciprocal:[42,2,1,""],reciprocal_:[42,2,1,""],register_hook:[42,2,1,""],remainder:[42,2,1,""],remainder_:[42,2,1,""],renorm:[42,2,1,""],renorm_:[42,2,1,""],repeat:[42,2,1,""],repeat_interleave:[42,2,1,""],requires_grad:[42,2,1,""],requires_grad_:[42,2,1,""],reshape:[42,2,1,""],reshape_as:[42,2,1,""],resize_:[42,2,1,""],resize_as_:[42,2,1,""],retain_grad:[42,2,1,""],rfft:[42,2,1,""],roll:[42,2,1,""],rot90:[42,2,1,""],round:[42,2,1,""],round_:[42,2,1,""],rsqrt:[42,2,1,""],rsqrt_:[42,2,1,""],scatter:[42,2,1,""],scatter_:[42,2,1,""],scatter_add:[42,2,1,""],scatter_add_:[42,2,1,""],select:[42,2,1,""],set_:[42,2,1,""],share_memory_:[42,2,1,""],sigmoid:[42,2,1,""],sigmoid_:[42,2,1,""],sign:[42,2,1,""],sign_:[42,2,1,""],sin:[42,2,1,""],sin_:[42,2,1,""],sinh:[42,2,1,""],sinh_:[42,2,1,""],size:[42,2,1,""],slogdet:[42,2,1,""],solve:[42,2,1,""],sort:[42,2,1,""],sparse_dim:[42,2,1,""],sparse_mask:[42,2,1,""],split:[42,2,1,""],sqrt:[42,2,1,""],sqrt_:[42,2,1,""],squeeze:[42,2,1,""],squeeze_:[42,2,1,""],std:[42,2,1,""],stft:[42,2,1,""],storage:[42,2,1,""],storage_offset:[42,2,1,""],storage_type:[42,2,1,""],stride:[42,2,1,""],sub:[42,2,1,""],sub_:[42,2,1,""],sum:[42,2,1,""],sum_to_size:[42,2,1,""],svd:[42,2,1,""],symeig:[42,2,1,""],t:[42,2,1,""],t_:[42,2,1,""],take:[42,2,1,""],tan:[42,2,1,""],tan_:[42,2,1,""],tanh:[42,2,1,""],tanh_:[42,2,1,""],to:[42,2,1,""],to_mkldnn:[42,2,1,""],to_sparse:[42,2,1,""],tolist:[42,2,1,""],topk:[42,2,1,""],trace:[42,2,1,""],transpose:[42,2,1,""],transpose_:[42,2,1,""],triangular_solve:[42,2,1,""],tril:[42,2,1,""],tril_:[42,2,1,""],triu:[42,2,1,""],triu_:[42,2,1,""],trunc:[42,2,1,""],trunc_:[42,2,1,""],type:[42,2,1,""],type_as:[42,2,1,""],unbind:[42,2,1,""],unfold:[42,2,1,""],uniform_:[42,2,1,""],unique:[42,2,1,""],unique_consecutive:[42,2,1,""],unsqueeze:[42,2,1,""],unsqueeze_:[42,2,1,""],values:[42,2,1,""],view:[42,2,1,""],view_as:[42,2,1,""],where:[42,2,1,""],zero_:[42,2,1,""]},"torch._C":{Generator:[43,1,1,""]},"torch._C.Generator":{device:[43,3,1,""],get_state:[43,2,1,""],initial_seed:[43,2,1,""],manual_seed:[43,2,1,""],seed:[43,2,1,""],set_state:[43,2,1,""]},"torch.__config__":{parallel_info:[0,4,1,""],show:[0,4,1,""]},"torch.autograd":{Function:[1,1,1,""],backward:[1,4,1,""],detect_anomaly:[1,1,1,""],enable_grad:[1,1,1,""],grad:[1,4,1,""],gradcheck:[1,4,1,""],gradgradcheck:[1,4,1,""],no_grad:[1,1,1,""],set_detect_anomaly:[1,1,1,""],set_grad_enabled:[1,1,1,""]},"torch.autograd.Function":{backward:[1,2,1,""],forward:[1,2,1,""]},"torch.autograd.profiler":{emit_nvtx:[1,1,1,""],load_nvprof:[1,4,1,""],profile:[1,1,1,""]},"torch.autograd.profiler.profile":{export_chrome_trace:[1,2,1,""],key_averages:[1,2,1,""],self_cpu_time_total:[1,2,1,""],table:[1,2,1,""],total_average:[1,2,1,""]},"torch.cuda":{Event:[8,1,1,""],Stream:[8,1,1,""],current_blas_handle:[8,4,1,""],current_device:[8,4,1,""],current_stream:[8,4,1,""],default_stream:[8,4,1,""],device:[8,1,1,""],device_count:[8,4,1,""],device_of:[8,1,1,""],empty_cache:[8,4,1,""],get_device_capability:[8,4,1,""],get_device_name:[8,4,1,""],get_rng_state:[8,4,1,""],get_rng_state_all:[8,4,1,""],init:[8,4,1,""],initial_seed:[8,4,1,""],ipc_collect:[8,4,1,""],is_available:[8,4,1,""
],manual_seed:[8,4,1,""],manual_seed_all:[8,4,1,""],max_memory_allocated:[8,4,1,""],max_memory_cached:[8,4,1,""],memory_allocated:[8,4,1,""],memory_cached:[8,4,1,""],reset_max_memory_allocated:[8,4,1,""],reset_max_memory_cached:[8,4,1,""],seed:[8,4,1,""],seed_all:[8,4,1,""],set_device:[8,4,1,""],set_rng_state:[8,4,1,""],set_rng_state_all:[8,4,1,""],stream:[8,4,1,""],synchronize:[8,4,1,""]},"torch.cuda.Event":{elapsed_time:[8,2,1,""],from_ipc_handle:[8,2,1,""],ipc_handle:[8,2,1,""],query:[8,2,1,""],record:[8,2,1,""],synchronize:[8,2,1,""],wait:[8,2,1,""]},"torch.cuda.Stream":{query:[8,2,1,""],record_event:[8,2,1,""],synchronize:[8,2,1,""],wait_event:[8,2,1,""],wait_stream:[8,2,1,""]},"torch.cuda.comm":{broadcast:[8,4,1,""],broadcast_coalesced:[8,4,1,""],gather:[8,4,1,""],reduce_add:[8,4,1,""],scatter:[8,4,1,""]},"torch.cuda.nvtx":{mark:[8,4,1,""],range_pop:[8,4,1,""],range_push:[8,4,1,""]},"torch.distributed":{Backend:[14,1,1,""],ReduceOp:[14,1,1,""],all_gather:[14,4,1,""],all_gather_multigpu:[14,4,1,""],all_reduce:[14,4,1,""],all_reduce_multigpu:[14,4,1,""],barrier:[14,4,1,""],broadcast:[14,4,1,""],broadcast_multigpu:[14,4,1,""],gather:[14,4,1,""],get_backend:[14,4,1,""],get_rank:[14,4,1,""],get_world_size:[14,4,1,""],init_process_group:[14,4,1,""],irecv:[14,4,1,""],is_initialized:[14,4,1,""],is_mpi_available:[14,4,1,""],is_nccl_available:[14,4,1,""],isend:[14,4,1,""],launch:[14,0,0,"-"],new_group:[14,4,1,""],recv:[14,4,1,""],reduce:[14,4,1,""],reduce_multigpu:[14,4,1,""],reduce_op:[14,1,1,""],scatter:[14,4,1,""],send:[14,4,1,""]},"torch.distributions":{constraint_registry:[15,0,0,"-"],constraints:[15,0,0,"-"],kl:[15,0,0,"-"],transforms:[15,0,0,"-"]},"torch.distributions.bernoulli":{Bernoulli:[15,1,1,""]},"torch.distributions.bernoulli.Bernoulli":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,3,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.beta":{Beta:[15,1,1,""]},"torch.distributions.beta.Beta":{arg_constraints:[15,3,1,""],concentration0:[15,2,1,""],concentration1:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.binomial":{Binomial:[15,1,1,""]},"torch.distributions.binomial.Binomial":{arg_constraints:[15,3,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,3,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.categorical":{Categorical:[15,1,1,""]},"torch.distributions.categorical.Categorical":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,3,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.cauchy":{Cauchy:[15,1,1,""]},"torch.distributions.cauchy.Cauchy":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.chi2":{Chi2:[15,1,1,""]},"torch.distributions.chi2.Chi2":{arg_cons
traints:[15,3,1,""],df:[15,2,1,""],expand:[15,2,1,""]},"torch.distributions.constraint_registry":{ConstraintRegistry:[15,1,1,""]},"torch.distributions.constraint_registry.ConstraintRegistry":{register:[15,2,1,""]},"torch.distributions.constraints":{Constraint:[15,1,1,""],cat:[15,3,1,""],dependent_property:[15,3,1,""],greater_than:[15,3,1,""],greater_than_eq:[15,3,1,""],half_open_interval:[15,3,1,""],integer_interval:[15,3,1,""],interval:[15,3,1,""],less_than:[15,3,1,""],stack:[15,3,1,""]},"torch.distributions.constraints.Constraint":{check:[15,2,1,""]},"torch.distributions.dirichlet":{Dirichlet:[15,1,1,""]},"torch.distributions.dirichlet.Dirichlet":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.distribution":{Distribution:[15,1,1,""]},"torch.distributions.distribution.Distribution":{arg_constraints:[15,2,1,""],batch_shape:[15,2,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],event_shape:[15,2,1,""],expand:[15,2,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],perplexity:[15,2,1,""],rsample:[15,2,1,""],sample:[15,2,1,""],sample_n:[15,2,1,""],stddev:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.exp_family":{ExponentialFamily:[15,1,1,""]},"torch.distributions.exp_family.ExponentialFamily":{entropy:[15,2,1,""]},"torch.distributions.exponential":{Exponential:[15,1,1,""]},"torch.distributions.exponential.Exponential":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],stddev:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.fishersnedecor":{FisherSnedecor:[15,1,1,""]},"torch.distributions.fishersnedecor.FisherSnedecor":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.gamma":{Gamma:[15,1,1,""]},"torch.distributions.gamma.Gamma":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.geometric":{Geometric:[15,1,1,""]},"torch.distributions.geometric.Geometric":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.gumbel":{Gumbel:[15,1,1,""]},"torch.distributions.gumbel.Gumbel":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],stddev:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.half_cauchy":{HalfCauchy:[15,1,1,""]},"torch.distributions.half_cauchy.HalfCauchy":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],scale:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.half_normal":{HalfNormal:[15,1,1,""]},"torch.distributions.half_normal.HalfNormal":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],scale:[15,2,1,""],support:[15,3,1,
""],variance:[15,2,1,""]},"torch.distributions.independent":{Independent:[15,1,1,""]},"torch.distributions.independent.Independent":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,2,1,""],has_rsample:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],sample:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.kl":{kl_divergence:[15,4,1,""],register_kl:[15,4,1,""]},"torch.distributions.laplace":{Laplace:[15,1,1,""]},"torch.distributions.laplace.Laplace":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],stddev:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.log_normal":{LogNormal:[15,1,1,""]},"torch.distributions.log_normal.LogNormal":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],loc:[15,2,1,""],mean:[15,2,1,""],scale:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.lowrank_multivariate_normal":{LowRankMultivariateNormal:[15,1,1,""]},"torch.distributions.lowrank_multivariate_normal.LowRankMultivariateNormal":{arg_constraints:[15,3,1,""],covariance_matrix:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],precision_matrix:[15,3,1,""],rsample:[15,2,1,""],scale_tril:[15,3,1,""],support:[15,3,1,""],variance:[15,3,1,""]},"torch.distributions.multinomial":{Multinomial:[15,1,1,""]},"torch.distributions.multinomial.Multinomial":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],logits:[15,2,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,2,1,""],sample:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.multivariate_normal":{MultivariateNormal:[15,1,1,""]},"torch.distributions.multivariate_normal.MultivariateNormal":{arg_constraints:[15,3,1,""],covariance_matrix:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],precision_matrix:[15,3,1,""],rsample:[15,2,1,""],scale_tril:[15,3,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.negative_binomial":{NegativeBinomial:[15,1,1,""]},"torch.distributions.negative_binomial.NegativeBinomial":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.normal":{Normal:[15,1,1,""]},"torch.distributions.normal.Normal":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],sample:[15,2,1,""],stddev:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.one_hot_categorical":{OneHotCategorical:[15,1,1,""]},"torch.distributions.one_hot_categorical.OneHotCategorical":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,3,1,""],log_prob:[15,2,1,""],logits:[15,2,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,2,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.pareto":{Pareto:[15,1,1,""]},"torch.distributions.pareto.Pareto":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],mean:[15,2,1,""],suppor
t:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.poisson":{Poisson:[15,1,1,""]},"torch.distributions.poisson.Poisson":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.relaxed_bernoulli":{LogitRelaxedBernoulli:[15,1,1,""],RelaxedBernoulli:[15,1,1,""]},"torch.distributions.relaxed_bernoulli.LogitRelaxedBernoulli":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],rsample:[15,2,1,""],support:[15,3,1,""]},"torch.distributions.relaxed_bernoulli.RelaxedBernoulli":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],logits:[15,2,1,""],probs:[15,2,1,""],support:[15,3,1,""],temperature:[15,2,1,""]},"torch.distributions.relaxed_categorical":{RelaxedOneHotCategorical:[15,1,1,""]},"torch.distributions.relaxed_categorical.RelaxedOneHotCategorical":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],logits:[15,2,1,""],probs:[15,2,1,""],support:[15,3,1,""],temperature:[15,2,1,""]},"torch.distributions.studentT":{StudentT:[15,1,1,""]},"torch.distributions.studentT.StudentT":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.transformed_distribution":{TransformedDistribution:[15,1,1,""]},"torch.distributions.transformed_distribution.TransformedDistribution":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,2,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],rsample:[15,2,1,""],sample:[15,2,1,""],support:[15,2,1,""]},"torch.distributions.transforms":{AbsTransform:[15,1,1,""],AffineTransform:[15,1,1,""],CatTransform:[15,1,1,""],ComposeTransform:[15,1,1,""],ExpTransform:[15,1,1,""],LowerCholeskyTransform:[15,1,1,""],PowerTransform:[15,1,1,""],SigmoidTransform:[15,1,1,""],SoftmaxTransform:[15,1,1,""],StackTransform:[15,1,1,""],StickBreakingTransform:[15,1,1,""],Transform:[15,1,1,""]},"torch.distributions.transforms.Transform":{inv:[15,2,1,""],log_abs_det_jacobian:[15,2,1,""],sign:[15,2,1,""]},"torch.distributions.uniform":{Uniform:[15,1,1,""]},"torch.distributions.uniform.Uniform":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],stddev:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.weibull":{Weibull:[15,1,1,""]},"torch.distributions.weibull.Weibull":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],mean:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.hub":{help:[17,4,1,""],list:[17,4,1,""],load:[17,4,1,""],set_dir:[17,4,1,""]},"torch.jit":{ScriptModule:[19,1,1,""],load:[19,4,1,""],save:[19,4,1,""],script:[19,4,1,""],trace:[19,4,1,""]},"torch.multiprocessing":{SpawnContext:[21,1,1,""],get_all_sharing_strategies:[21,4,1,""],get_sharing_strategy:[21,4,1,""],set_sharing_strategy:[21,4,1,""],spawn:[21,4,1,""]},"torch.multiprocessing.SpawnContext":{join:[21,2,1,""]},"torch.nn":{AdaptiveAvgPool1d:[22,1,1,""],AdaptiveAvgPool2d:[22,1,1,""],AdaptiveAvgPool3d:[22,1,1,""],AdaptiveLogSoftmaxWithLoss:[22,1,1,""],AdaptiveMaxPool1d:[22,1,1,""],AdaptiveMaxPool2d:[22,1,1,""],AdaptiveMaxPool3d:[22,1,1,""],AlphaDropout:[22,1,1,""],AvgPool1d:[22,1,1,""],AvgPool2d:[22,1,1,""],AvgPool3d:[22,1,1,""],BCELoss:[22,1,1,""],BCE
WithLogitsLoss:[22,1,1,""],BatchNorm1d:[22,1,1,""],BatchNorm2d:[22,1,1,""],BatchNorm3d:[22,1,1,""],Bilinear:[22,1,1,""],CELU:[22,1,1,""],CTCLoss:[22,1,1,""],ConstantPad1d:[22,1,1,""],ConstantPad2d:[22,1,1,""],ConstantPad3d:[22,1,1,""],Conv1d:[22,1,1,""],Conv2d:[22,1,1,""],Conv3d:[22,1,1,""],ConvTranspose1d:[22,1,1,""],ConvTranspose2d:[22,1,1,""],ConvTranspose3d:[22,1,1,""],CosineEmbeddingLoss:[22,1,1,""],CosineSimilarity:[22,1,1,""],CrossEntropyLoss:[22,1,1,""],DataParallel:[22,1,1,""],Dropout2d:[22,1,1,""],Dropout3d:[22,1,1,""],Dropout:[22,1,1,""],ELU:[22,1,1,""],Embedding:[22,1,1,""],EmbeddingBag:[22,1,1,""],Fold:[22,1,1,""],FractionalMaxPool2d:[22,1,1,""],GRU:[22,1,1,""],GRUCell:[22,1,1,""],GroupNorm:[22,1,1,""],Hardshrink:[22,1,1,""],Hardtanh:[22,1,1,""],HingeEmbeddingLoss:[22,1,1,""],Identity:[22,1,1,""],InstanceNorm1d:[22,1,1,""],InstanceNorm2d:[22,1,1,""],InstanceNorm3d:[22,1,1,""],KLDivLoss:[22,1,1,""],L1Loss:[22,1,1,""],LPPool1d:[22,1,1,""],LPPool2d:[22,1,1,""],LSTM:[22,1,1,""],LSTMCell:[22,1,1,""],LayerNorm:[22,1,1,""],LeakyReLU:[22,1,1,""],Linear:[22,1,1,""],LocalResponseNorm:[22,1,1,""],LogSigmoid:[22,1,1,""],LogSoftmax:[22,1,1,""],MSELoss:[22,1,1,""],MarginRankingLoss:[22,1,1,""],MaxPool1d:[22,1,1,""],MaxPool2d:[22,1,1,""],MaxPool3d:[22,1,1,""],MaxUnpool1d:[22,1,1,""],MaxUnpool2d:[22,1,1,""],MaxUnpool3d:[22,1,1,""],Module:[22,1,1,""],ModuleDict:[22,1,1,""],ModuleList:[22,1,1,""],MultiLabelMarginLoss:[22,1,1,""],MultiLabelSoftMarginLoss:[22,1,1,""],MultiMarginLoss:[22,1,1,""],MultiheadAttention:[22,1,1,""],NLLLoss:[22,1,1,""],PReLU:[22,1,1,""],PairwiseDistance:[22,1,1,""],Parameter:[22,1,1,""],ParameterDict:[22,1,1,""],ParameterList:[22,1,1,""],PixelShuffle:[22,1,1,""],PoissonNLLLoss:[22,1,1,""],RNN:[22,1,1,""],RNNCell:[22,1,1,""],RReLU:[22,1,1,""],ReLU6:[22,1,1,""],ReLU:[22,1,1,""],ReflectionPad1d:[22,1,1,""],ReflectionPad2d:[22,1,1,""],ReplicationPad1d:[22,1,1,""],ReplicationPad2d:[22,1,1,""],ReplicationPad3d:[22,1,1,""],SELU:[22,1,1,""],Sequential:[22,1,1,""],Sigmoid:[22,1,1,""],SmoothL1Loss:[22,1,1,""],SoftMarginLoss:[22,1,1,""],Softmax2d:[22,1,1,""],Softmax:[22,1,1,""],Softmin:[22,1,1,""],Softplus:[22,1,1,""],Softshrink:[22,1,1,""],Softsign:[22,1,1,""],SyncBatchNorm:[22,1,1,""],Tanh:[22,1,1,""],Tanhshrink:[22,1,1,""],Threshold:[22,1,1,""],Transformer:[22,1,1,""],TransformerDecoder:[22,1,1,""],TransformerDecoderLayer:[22,1,1,""],TransformerEncoder:[22,1,1,""],TransformerEncoderLayer:[22,1,1,""],TripletMarginLoss:[22,1,1,""],Unfold:[22,1,1,""],Upsample:[22,1,1,""],UpsamplingBilinear2d:[22,1,1,""],UpsamplingNearest2d:[22,1,1,""],ZeroPad2d:[22,1,1,""]},"torch.nn.AdaptiveLogSoftmaxWithLoss":{log_prob:[22,2,1,""],predict:[22,2,1,""]},"torch.nn.Embedding":{from_pretrained:[22,2,1,""]},"torch.nn.EmbeddingBag":{from_pretrained:[22,2,1,""]},"torch.nn.Module":{"double":[22,2,1,""],"float":[22,2,1,""],add_module:[22,2,1,""],apply:[22,2,1,""],buffers:[22,2,1,""],children:[22,2,1,""],cpu:[22,2,1,""],cuda:[22,2,1,""],dump_patches:[22,3,1,""],eval:[22,2,1,""],extra_repr:[22,2,1,""],forward:[22,2,1,""],half:[22,2,1,""],load_state_dict:[22,2,1,""],modules:[22,2,1,""],named_buffers:[22,2,1,""],named_children:[22,2,1,""],named_modules:[22,2,1,""],named_parameters:[22,2,1,""],parameters:[22,2,1,""],register_backward_hook:[22,2,1,""],register_buffer:[22,2,1,""],register_forward_hook:[22,2,1,""],register_forward_pre_hook:[22,2,1,""],register_parameter:[22,2,1,""],requires_grad_:[22,2,1,""],state_dict:[22,2,1,""],to:[22,2,1,""],train:[22,2,1,""],type:[22,2,1,""],zero_grad:[22,2,1,""]},"torch.nn.Mo
duleDict":{clear:[22,2,1,""],items:[22,2,1,""],keys:[22,2,1,""],pop:[22,2,1,""],update:[22,2,1,""],values:[22,2,1,""]},"torch.nn.ModuleList":{append:[22,2,1,""],extend:[22,2,1,""],insert:[22,2,1,""]},"torch.nn.MultiheadAttention":{forward:[22,2,1,""]},"torch.nn.ParameterDict":{clear:[22,2,1,""],items:[22,2,1,""],keys:[22,2,1,""],pop:[22,2,1,""],update:[22,2,1,""],values:[22,2,1,""]},"torch.nn.ParameterList":{append:[22,2,1,""],extend:[22,2,1,""]},"torch.nn.SyncBatchNorm":{convert_sync_batchnorm:[22,2,1,""]},"torch.nn.Transformer":{forward:[22,2,1,""],generate_square_subsequent_mask:[22,2,1,""]},"torch.nn.TransformerDecoder":{forward:[22,2,1,""]},"torch.nn.TransformerDecoderLayer":{forward:[22,2,1,""]},"torch.nn.TransformerEncoder":{forward:[22,2,1,""]},"torch.nn.TransformerEncoderLayer":{forward:[22,2,1,""]},"torch.nn.functional":{adaptive_avg_pool1d:[23,4,1,""],adaptive_avg_pool2d:[23,4,1,""],adaptive_avg_pool3d:[23,4,1,""],adaptive_max_pool1d:[23,4,1,""],adaptive_max_pool2d:[23,4,1,""],adaptive_max_pool3d:[23,4,1,""],affine_grid:[23,4,1,""],alpha_dropout:[23,4,1,""],avg_pool1d:[23,4,1,""],avg_pool2d:[23,4,1,""],avg_pool3d:[23,4,1,""],batch_norm:[23,4,1,""],bilinear:[23,4,1,""],binary_cross_entropy:[23,4,1,""],binary_cross_entropy_with_logits:[23,4,1,""],celu:[23,4,1,""],conv1d:[23,4,1,""],conv2d:[23,4,1,""],conv3d:[23,4,1,""],conv_transpose1d:[23,4,1,""],conv_transpose2d:[23,4,1,""],conv_transpose3d:[23,4,1,""],cosine_embedding_loss:[23,4,1,""],cosine_similarity:[23,4,1,""],cross_entropy:[23,4,1,""],ctc_loss:[23,4,1,""],dropout2d:[23,4,1,""],dropout3d:[23,4,1,""],dropout:[23,4,1,""],elu:[23,4,1,""],elu_:[23,4,1,""],embedding:[23,4,1,""],embedding_bag:[23,4,1,""],fold:[23,4,1,""],gelu:[23,4,1,""],glu:[23,4,1,""],grid_sample:[23,4,1,""],gumbel_softmax:[23,4,1,""],hardshrink:[23,4,1,""],hardtanh:[23,4,1,""],hardtanh_:[23,4,1,""],hinge_embedding_loss:[23,4,1,""],instance_norm:[23,4,1,""],interpolate:[23,4,1,""],kl_div:[23,4,1,""],l1_loss:[23,4,1,""],layer_norm:[23,4,1,""],leaky_relu:[23,4,1,""],leaky_relu_:[23,4,1,""],linear:[23,4,1,""],local_response_norm:[23,4,1,""],log_softmax:[23,4,1,""],logsigmoid:[23,4,1,""],lp_pool1d:[23,4,1,""],lp_pool2d:[23,4,1,""],margin_ranking_loss:[23,4,1,""],max_pool1d:[23,4,1,""],max_pool2d:[23,4,1,""],max_pool3d:[23,4,1,""],max_unpool1d:[23,4,1,""],max_unpool2d:[23,4,1,""],max_unpool3d:[23,4,1,""],mse_loss:[23,4,1,""],multi_margin_loss:[23,4,1,""],multilabel_margin_loss:[23,4,1,""],multilabel_soft_margin_loss:[23,4,1,""],nll_loss:[23,4,1,""],normalize:[23,4,1,""],one_hot:[23,4,1,""],pad:[23,4,1,""],pairwise_distance:[23,4,1,""],pdist:[23,4,1,""],pixel_shuffle:[23,4,1,""],poisson_nll_loss:[23,4,1,""],prelu:[23,4,1,""],relu6:[23,4,1,""],relu:[23,4,1,""],relu_:[23,4,1,""],rrelu:[23,4,1,""],rrelu_:[23,4,1,""],selu:[23,4,1,""],sigmoid:[23,4,1,""],smooth_l1_loss:[23,4,1,""],soft_margin_loss:[23,4,1,""],softmax:[23,4,1,""],softmin:[23,4,1,""],softplus:[23,4,1,""],softshrink:[23,4,1,""],softsign:[23,4,1,""],tanh:[23,4,1,""],tanhshrink:[23,4,1,""],threshold:[23,4,1,""],threshold_:[23,4,1,""],triplet_margin_loss:[23,4,1,""],unfold:[23,4,1,""],upsample:[23,4,1,""],upsample_bilinear:[23,4,1,""],upsample_nearest:[23,4,1,""]},"torch.nn.init":{calculate_gain:[24,4,1,""],constant_:[24,4,1,""],dirac_:[24,4,1,""],eye_:[24,4,1,""],kaiming_normal_:[24,4,1,""],kaiming_uniform_:[24,4,1,""],normal_:[24,4,1,""],ones_:[24,4,1,""],orthogonal_:[24,4,1,""],sparse_:[24,4,1,""],uniform_:[24,4,1,""],xavier_normal_:[24,4,1,""],xavier_uniform_:[24,4,1,""],zeros_:[24,4,1,""]},"torch.nn.paralle
l":{DistributedDataParallel:[22,1,1,""],data_parallel:[23,4,1,""]},"torch.nn.parallel.DistributedDataParallel":{no_sync:[22,2,1,""]},"torch.nn.utils":{clip_grad_norm_:[22,4,1,""],clip_grad_value_:[22,4,1,""],parameters_to_vector:[22,4,1,""],remove_spectral_norm:[22,4,1,""],remove_weight_norm:[22,4,1,""],spectral_norm:[22,4,1,""],vector_to_parameters:[22,4,1,""],weight_norm:[22,4,1,""]},"torch.nn.utils.rnn":{PackedSequence:[22,4,1,""],pack_padded_sequence:[22,4,1,""],pack_sequence:[22,4,1,""],pad_packed_sequence:[22,4,1,""],pad_sequence:[22,4,1,""]},"torch.onnx":{"export":[36,4,1,""],is_in_onnx_export:[36,4,1,""],register_custom_op_symbolic:[36,4,1,""],set_training:[36,4,1,""]},"torch.onnx.operators":{shape_as_tensor:[36,4,1,""]},"torch.optim":{ASGD:[37,1,1,""],Adadelta:[37,1,1,""],Adagrad:[37,1,1,""],Adam:[37,1,1,""],AdamW:[37,1,1,""],Adamax:[37,1,1,""],LBFGS:[37,1,1,""],Optimizer:[37,1,1,""],RMSprop:[37,1,1,""],Rprop:[37,1,1,""],SGD:[37,1,1,""],SparseAdam:[37,1,1,""]},"torch.optim.ASGD":{step:[37,2,1,""]},"torch.optim.Adadelta":{step:[37,2,1,""]},"torch.optim.Adagrad":{step:[37,2,1,""]},"torch.optim.Adam":{step:[37,2,1,""]},"torch.optim.AdamW":{step:[37,2,1,""]},"torch.optim.Adamax":{step:[37,2,1,""]},"torch.optim.LBFGS":{step:[37,2,1,""]},"torch.optim.Optimizer":{add_param_group:[37,2,1,""],load_state_dict:[37,2,1,""],state_dict:[37,2,1,""],step:[37,2,1,""],zero_grad:[37,2,1,""]},"torch.optim.RMSprop":{step:[37,2,1,""]},"torch.optim.Rprop":{step:[37,2,1,""]},"torch.optim.SGD":{step:[37,2,1,""]},"torch.optim.SparseAdam":{step:[37,2,1,""]},"torch.optim.lr_scheduler":{CosineAnnealingLR:[37,1,1,""],CyclicLR:[37,1,1,""],ExponentialLR:[37,1,1,""],LambdaLR:[37,1,1,""],MultiStepLR:[37,1,1,""],ReduceLROnPlateau:[37,1,1,""],StepLR:[37,1,1,""]},"torch.optim.lr_scheduler.CyclicLR":{get_lr:[37,2,1,""]},"torch.optim.lr_scheduler.LambdaLR":{load_state_dict:[37,2,1,""],state_dict:[37,2,1,""]},"torch.quasirandom":{SobolEngine:[43,1,1,""]},"torch.quasirandom.SobolEngine":{draw:[43,2,1,""],fast_forward:[43,2,1,""],reset:[43,2,1,""]},"torch.sparse":{FloatTensor:[38,1,1,""],addmm:[38,4,1,""],mm:[38,4,1,""],sum:[38,4,1,""]},"torch.sparse.FloatTensor":{_indices:[38,2,1,""],_nnz:[38,2,1,""],_values:[38,2,1,""],add:[38,2,1,""],add_:[38,2,1,""],clone:[38,2,1,""],coalesce:[38,2,1,""],dim:[38,2,1,""],div:[38,2,1,""],div_:[38,2,1,""],get_device:[38,2,1,""],hspmm:[38,2,1,""],is_coalesced:[38,2,1,""],mm:[38,2,1,""],mul:[38,2,1,""],mul_:[38,2,1,""],narrow_copy:[38,2,1,""],resizeAs_:[38,2,1,""],size:[38,2,1,""],spadd:[38,2,1,""],spmm:[38,2,1,""],sspaddmm:[38,2,1,""],sspmm:[38,2,1,""],sub:[38,2,1,""],sub_:[38,2,1,""],t_:[38,2,1,""],toDense:[38,2,1,""],transpose:[38,2,1,""],transpose_:[38,2,1,""],zero_:[38,2,1,""]},"torch.torch":{default_generator:[43,3,1,""],device:[40,1,1,""],dtype:[40,1,1,""],finfo:[49,1,1,""],iinfo:[49,1,1,""],layout:[40,1,1,""]},"torch.utils":{data:[13,0,0,"-"],model_zoo:[20,0,0,"-"]},"torch.utils.checkpoint":{checkpoint:[3,4,1,""],checkpoint_sequential:[3,4,1,""]},"torch.utils.cpp_extension":{BuildExtension:[7,4,1,""],CUDAExtension:[7,4,1,""],CppExtension:[7,4,1,""],check_compiler_abi_compatibility:[7,4,1,""],include_paths:[7,4,1,""],load:[7,4,1,""],load_inline:[7,4,1,""],verify_ninja_availability:[7,4,1,""]},"torch.utils.data":{BatchSampler:[13,1,1,""],ChainDataset:[13,1,1,""],ConcatDataset:[13,1,1,""],DataLoader:[13,1,1,""],Dataset:[13,1,1,""],IterableDataset:[13,1,1,""],RandomSampler:[13,1,1,""],Sampler:[13,1,1,""],SequentialSampler:[13,1,1,""],Subset:[13,1,1,""],SubsetRandomSampler:[13,1,1,""],Te
nsorDataset:[13,1,1,""],WeightedRandomSampler:[13,1,1,""],get_worker_info:[13,4,1,""],random_split:[13,4,1,""]},"torch.utils.data.distributed":{DistributedSampler:[13,1,1,""]},"torch.utils.dlpack":{from_dlpack:[16,4,1,""],to_dlpack:[16,4,1,""]},"torch.utils.model_zoo":{load_url:[20,4,1,""]},"torch.utils.tensorboard.writer":{SummaryWriter:[41,1,1,""]},"torch.utils.tensorboard.writer.SummaryWriter":{__init__:[41,2,1,""],add_audio:[41,2,1,""],add_custom_scalars:[41,2,1,""],add_embedding:[41,2,1,""],add_figure:[41,2,1,""],add_graph:[41,2,1,""],add_histogram:[41,2,1,""],add_image:[41,2,1,""],add_images:[41,2,1,""],add_mesh:[41,2,1,""],add_pr_curve:[41,2,1,""],add_scalar:[41,2,1,""],add_scalars:[41,2,1,""],add_text:[41,2,1,""],add_video:[41,2,1,""],close:[41,2,1,""],flush:[41,2,1,""]},"torchvision.datasets":{CIFAR100:[44,1,1,""],CIFAR10:[44,1,1,""],Cityscapes:[44,1,1,""],CocoCaptions:[44,1,1,""],CocoDetection:[44,1,1,""],DatasetFolder:[44,1,1,""],EMNIST:[44,1,1,""],FakeData:[44,1,1,""],FashionMNIST:[44,1,1,""],Flickr30k:[44,1,1,""],Flickr8k:[44,1,1,""],HMDB51:[44,1,1,""],ImageFolder:[44,1,1,""],ImageNet:[44,1,1,""],KMNIST:[44,1,1,""],Kinetics400:[44,1,1,""],LSUN:[44,1,1,""],MNIST:[44,1,1,""],PhotoTour:[44,1,1,""],QMNIST:[44,1,1,""],SBDataset:[44,1,1,""],SBU:[44,1,1,""],STL10:[44,1,1,""],SVHN:[44,1,1,""],UCF101:[44,1,1,""],USPS:[44,1,1,""],VOCDetection:[44,1,1,""],VOCSegmentation:[44,1,1,""]},"torchvision.datasets.CIFAR10":{__getitem__:[44,2,1,""]},"torchvision.datasets.Cityscapes":{__getitem__:[44,2,1,""]},"torchvision.datasets.CocoCaptions":{__getitem__:[44,2,1,""]},"torchvision.datasets.CocoDetection":{__getitem__:[44,2,1,""]},"torchvision.datasets.DatasetFolder":{__getitem__:[44,2,1,""]},"torchvision.datasets.Flickr30k":{__getitem__:[44,2,1,""]},"torchvision.datasets.Flickr8k":{__getitem__:[44,2,1,""]},"torchvision.datasets.ImageFolder":{__getitem__:[44,2,1,""]},"torchvision.datasets.LSUN":{__getitem__:[44,2,1,""]},"torchvision.datasets.PhotoTour":{__getitem__:[44,2,1,""]},"torchvision.datasets.SBU":{__getitem__:[44,2,1,""]},"torchvision.datasets.STL10":{__getitem__:[44,2,1,""]},"torchvision.datasets.SVHN":{__getitem__:[44,2,1,""]},"torchvision.datasets.USPS":{__getitem__:[44,2,1,""]},"torchvision.datasets.VOCDetection":{__getitem__:[44,2,1,""]},"torchvision.datasets.VOCSegmentation":{__getitem__:[44,2,1,""]},"torchvision.models":{alexnet:[46,4,1,""],densenet121:[46,4,1,""],densenet161:[46,4,1,""],densenet169:[46,4,1,""],densenet201:[46,4,1,""],googlenet:[46,4,1,""],inception_v3:[46,4,1,""],mnasnet0_5:[46,4,1,""],mnasnet0_75:[46,4,1,""],mnasnet1_0:[46,4,1,""],mnasnet1_3:[46,4,1,""],mobilenet_v2:[46,4,1,""],resnet101:[46,4,1,""],resnet152:[46,4,1,""],resnet18:[46,4,1,""],resnet34:[46,4,1,""],resnet50:[46,4,1,""],resnext101_32x8d:[46,4,1,""],resnext50_32x4d:[46,4,1,""],shufflenet_v2_x0_5:[46,4,1,""],shufflenet_v2_x1_0:[46,4,1,""],shufflenet_v2_x1_5:[46,4,1,""],shufflenet_v2_x2_0:[46,4,1,""],squeezenet1_0:[46,4,1,""],squeezenet1_1:[46,4,1,""],vgg11:[46,4,1,""],vgg11_bn:[46,4,1,""],vgg13:[46,4,1,""],vgg13_bn:[46,4,1,""],vgg16:[46,4,1,""],vgg16_bn:[46,4,1,""],vgg19:[46,4,1,""],vgg19_bn:[46,4,1,""],wide_resnet101_2:[46,4,1,""],wide_resnet50_2:[46,4,1,""]},"torchvision.models.detection":{fasterrcnn_resnet50_fpn:[46,4,1,""],keypointrcnn_resnet50_fpn:[46,4,1,""],maskrcnn_resnet50_fpn:[46,4,1,""]},"torchvision.models.segmentation":{deeplabv3_resnet101:[46,4,1,""],deeplabv3_resnet50:[46,4,1,""],fcn_resnet101:[46,4,1,""],fcn_resnet50:[46,4,1,""]},"torchvision.transforms":{CenterCrop:[47,1,1,""],ColorJitte
r:[47,1,1,""],Compose:[47,1,1,""],FiveCrop:[47,1,1,""],Grayscale:[47,1,1,""],Lambda:[47,1,1,""],LinearTransformation:[47,1,1,""],Normalize:[47,1,1,""],Pad:[47,1,1,""],RandomAffine:[47,1,1,""],RandomApply:[47,1,1,""],RandomChoice:[47,1,1,""],RandomCrop:[47,1,1,""],RandomErasing:[47,1,1,""],RandomGrayscale:[47,1,1,""],RandomHorizontalFlip:[47,1,1,""],RandomOrder:[47,1,1,""],RandomPerspective:[47,1,1,""],RandomResizedCrop:[47,1,1,""],RandomRotation:[47,1,1,""],RandomSizedCrop:[47,1,1,""],RandomVerticalFlip:[47,1,1,""],Resize:[47,1,1,""],Scale:[47,1,1,""],TenCrop:[47,1,1,""],ToPILImage:[47,1,1,""],ToTensor:[47,1,1,""],functional:[47,0,0,"-"]},"torchvision.transforms.Normalize":{__call__:[47,2,1,""]},"torchvision.transforms.ToPILImage":{__call__:[47,2,1,""]},"torchvision.transforms.ToTensor":{__call__:[47,2,1,""]},"torchvision.transforms.functional":{adjust_brightness:[47,4,1,""],adjust_contrast:[47,4,1,""],adjust_gamma:[47,4,1,""],adjust_hue:[47,4,1,""],adjust_saturation:[47,4,1,""],affine:[47,4,1,""],crop:[47,4,1,""],erase:[47,4,1,""],five_crop:[47,4,1,""],hflip:[47,4,1,""],normalize:[47,4,1,""],pad:[47,4,1,""],perspective:[47,4,1,""],resize:[47,4,1,""],resized_crop:[47,4,1,""],rotate:[47,4,1,""],ten_crop:[47,4,1,""],to_grayscale:[47,4,1,""],to_pil_image:[47,4,1,""],to_tensor:[47,4,1,""],vflip:[47,4,1,""]},"torchvision.utils":{make_grid:[48,4,1,""],save_image:[48,4,1,""]},torch:{"var":[43,4,1,""],ByteTensor:[42,1,1,""],FloatStorage:[39,1,1,""],Tensor:[42,1,1,""],__config__:[0,0,0,"-"],abs:[43,4,1,""],acos:[43,4,1,""],add:[43,4,1,""],addbmm:[43,4,1,""],addcdiv:[43,4,1,""],addcmul:[43,4,1,""],addmm:[43,4,1,""],addmv:[43,4,1,""],addr:[43,4,1,""],allclose:[43,4,1,""],arange:[43,4,1,""],argmax:[43,4,1,""],argmin:[43,4,1,""],argsort:[43,4,1,""],as_strided:[43,4,1,""],as_tensor:[43,4,1,""],asin:[43,4,1,""],atan2:[43,4,1,""],atan:[43,4,1,""],autograd:[1,0,0,"-"],baddbmm:[43,4,1,""],bartlett_window:[43,4,1,""],bernoulli:[43,4,1,""],bincount:[43,4,1,""],bitwise_not:[43,4,1,""],blackman_window:[43,4,1,""],bmm:[43,4,1,""],broadcast_tensors:[43,4,1,""],cartesian_prod:[43,4,1,""],cat:[43,4,1,""],ceil:[43,4,1,""],chain_matmul:[43,4,1,""],cholesky:[43,4,1,""],cholesky_inverse:[43,4,1,""],cholesky_solve:[43,4,1,""],chunk:[43,4,1,""],clamp:[43,4,1,""],combinations:[43,4,1,""],compiled_with_cxx11_abi:[43,4,1,""],cos:[43,4,1,""],cosh:[43,4,1,""],cross:[43,4,1,""],cuda:[8,0,0,"-"],cumprod:[43,4,1,""],cumsum:[43,4,1,""],det:[43,4,1,""],diag:[43,4,1,""],diag_embed:[43,4,1,""],diagflat:[43,4,1,""],diagonal:[43,4,1,""],digamma:[43,4,1,""],dist:[43,4,1,""],distributed:[14,0,0,"-"],distributions:[15,0,0,"-"],div:[43,4,1,""],dot:[43,4,1,""],eig:[43,4,1,""],einsum:[43,4,1,""],empty:[43,4,1,""],empty_like:[43,4,1,""],eq:[43,4,1,""],equal:[43,4,1,""],erf:[43,4,1,""],erfc:[43,4,1,""],erfinv:[43,4,1,""],exp:[43,4,1,""],expm1:[43,4,1,""],eye:[43,4,1,""],fft:[43,4,1,""],flatten:[43,4,1,""],flip:[43,4,1,""],floor:[43,4,1,""],fmod:[43,4,1,""],frac:[43,4,1,""],from_numpy:[43,4,1,""],full:[43,4,1,""],full_like:[43,4,1,""],gather:[43,4,1,""],ge:[43,4,1,""],gels:[43,4,1,""],geqrf:[43,4,1,""],ger:[43,4,1,""],get_default_dtype:[43,4,1,""],get_num_interop_threads:[43,4,1,""],get_num_threads:[43,4,1,""],get_rng_state:[43,4,1,""],gt:[43,4,1,""],hamming_window:[43,4,1,""],hann_window:[43,4,1,""],histc:[43,4,1,""],hub:[17,0,0,"-"],ifft:[43,4,1,""],index_select:[43,4,1,""],initial_seed:[43,4,1,""],inverse:[43,4,1,""],irfft:[43,4,1,""],is_floating_point:[43,4,1,""],is_storage:[43,4,1,""],is_tensor:[43,4,1,""],isfinite:[43,4,1,""],isinf:[43,4,
1,""],isnan:[43,4,1,""],jit:[19,0,0,"-"],kthvalue:[43,4,1,""],le:[43,4,1,""],lerp:[43,4,1,""],linspace:[43,4,1,""],load:[43,4,1,""],log10:[43,4,1,""],log1p:[43,4,1,""],log2:[43,4,1,""],log:[43,4,1,""],logdet:[43,4,1,""],logspace:[43,4,1,""],logsumexp:[43,4,1,""],lstsq:[43,4,1,""],lt:[43,4,1,""],lu:[43,4,1,""],lu_solve:[43,4,1,""],lu_unpack:[43,4,1,""],manual_seed:[43,4,1,""],masked_select:[43,4,1,""],matmul:[43,4,1,""],matrix_power:[43,4,1,""],matrix_rank:[43,4,1,""],max:[43,4,1,""],mean:[43,4,1,""],median:[43,4,1,""],meshgrid:[43,4,1,""],min:[43,4,1,""],mm:[43,4,1,""],mode:[43,4,1,""],mul:[43,4,1,""],multinomial:[43,4,1,""],multiprocessing:[21,0,0,"-"],mv:[43,4,1,""],mvlgamma:[43,4,1,""],narrow:[43,4,1,""],ne:[43,4,1,""],neg:[43,4,1,""],nn:[22,0,0,"-"],nonzero:[43,4,1,""],norm:[43,4,1,""],normal:[43,4,1,""],numel:[43,4,1,""],ones:[43,4,1,""],ones_like:[43,4,1,""],onnx:[36,0,0,"-"],optim:[37,0,0,"-"],orgqr:[43,4,1,""],ormqr:[43,4,1,""],pinverse:[43,4,1,""],pow:[43,4,1,""],prod:[43,4,1,""],qr:[43,4,1,""],rand:[43,4,1,""],rand_like:[43,4,1,""],randint:[43,4,1,""],randint_like:[43,4,1,""],randn:[43,4,1,""],randn_like:[43,4,1,""],randperm:[43,4,1,""],range:[43,4,1,""],reciprocal:[43,4,1,""],remainder:[43,4,1,""],renorm:[43,4,1,""],repeat_interleave:[43,4,1,""],reshape:[43,4,1,""],rfft:[43,4,1,""],roll:[43,4,1,""],rot90:[43,4,1,""],round:[43,4,1,""],rsqrt:[43,4,1,""],save:[43,4,1,""],seed:[43,4,1,""],set_default_dtype:[43,4,1,""],set_default_tensor_type:[43,4,1,""],set_flush_denormal:[43,4,1,""],set_num_interop_threads:[43,4,1,""],set_num_threads:[43,4,1,""],set_printoptions:[43,4,1,""],set_rng_state:[43,4,1,""],sigmoid:[43,4,1,""],sign:[43,4,1,""],sin:[43,4,1,""],sinh:[43,4,1,""],slogdet:[43,4,1,""],solve:[43,4,1,""],sort:[43,4,1,""],sparse_coo_tensor:[43,4,1,""],split:[43,4,1,""],sqrt:[43,4,1,""],squeeze:[43,4,1,""],stack:[43,4,1,""],std:[43,4,1,""],std_mean:[43,4,1,""],stft:[43,4,1,""],sum:[43,4,1,""],svd:[43,4,1,""],symeig:[43,4,1,""],t:[43,4,1,""],take:[43,4,1,""],tan:[43,4,1,""],tanh:[43,4,1,""],tensor:[43,4,1,""],tensordot:[43,4,1,""],topk:[43,4,1,""],trace:[43,4,1,""],transpose:[43,4,1,""],trapz:[43,4,1,""],triangular_solve:[43,4,1,""],tril:[43,4,1,""],tril_indices:[43,4,1,""],triu:[43,4,1,""],triu_indices:[43,4,1,""],trunc:[43,4,1,""],unbind:[43,4,1,""],unique:[43,4,1,""],unique_consecutive:[43,4,1,""],unsqueeze:[43,4,1,""],var_mean:[43,4,1,""],where:[43,4,1,""],zeros:[43,4,1,""],zeros_like:[43,4,1,""]},torchvision:{get_image_backend:[45,4,1,""],set_image_backend:[45,4,1,""]}},objnames:{"0":["py","module","Python module"],"1":["py","class","Python class"],"2":["py","method","Python method"],"3":["py","attribute","Python attribute"],"4":["py","function","Python function"],"5":["std","envvar","environment 
variable"]},objtypes:{"0":"py:module","1":"py:class","2":"py:method","3":"py:attribute","4":"py:function","5":"std:envvar"},terms:{"00000e":43,"0000e":[42,43],"041m":1,"048m":1,"0545e":42,"0949e":42,"10k":44,"10x7":22,"13x12":22,"1428e":43,"148m":1,"1921e":43,"1_batch_16":41,"1e6":37,"1hr":4,"1st":[15,26],"1x1":46,"20l":22,"224x224":46,"2gb":17,"2nd":[15,22,23,26,42],"2x3":[22,38],"32x4d":46,"32x8d":46,"3493e":43,"3842e":43,"3rd":[26,37,42],"3x4":22,"3xhxw":41,"4064e":43,"427l":44,"483m":1,"4842e":42,"4th":[26,44],"4us":1,"50k":44,"50x":46,"54_":41,"5751e":43,"5765e":42,"5955e":43,"5c106cde":[17,20],"5mb":46,"5x2":38,"5x7":22,"5x7x9":22,"60k":44,"640l":44,"6503e":43,"6531e":43,"727m":1,"7x7":22,"7x7x7":22,"7x9x8":22,"8000e":43,"816u":1,"8182e":42,"88131e":43,"9073e":[22,43],"abstract":[13,15],"boolean":[1,8,15,19,22,23,29,37,40,42,43,44,47],"break":[4,15,19,34,37,43],"byte":[8,15,19,37,39,42,43],"case":[1,2,8,13,14,17,21,22,23,25,26,27,28,30,32,34,36,37,38,41,42,43,44,47,49],"catch":19,"char":[39,42],"ciss\u00e9":22,"class":[1,8,13,14,15,19,21,22,23,29,30,32,33,34,36,37,38,39,40,41,42,43,44,46,47,49],"const":[31,36],"default":[1,3,7,8,14,17,20,21,22,23,24,25,27,28,29,30,35,36,37,38,39,41,42,43,44,46,47,48,49],"enum":[14,36],"export":[1,8,14,16,19,22,29,36],"final":[14,15,22,43,46,47,48],"float":[1,13,15,19,21,22,23,30,33,36,37,39,40,41,42,43,47,48,49],"function":[3,4,7,8,13,17,18,20,21,24,25,26,27,28,29,30,31,33,37,40,41,42,43,44,45],"herv\u00e9":22,"import":[1,4,5,7,8,13,14,19,21,22,25,28,29,30,31,32,33,36,37,41,42,43,44,46,47],"int":[8,13,14,15,19,21,22,23,35,36,37,38,39,40,41,42,43,44,47,48,49],"j\u00e9gou":22,"long":[4,5,13,21,22,23,26,29,30,32,36,39,40,41,42,43],"new":[1,3,5,8,13,14,15,17,19,21,22,25,28,29,31,32,35,37,39,41,42,43],"return":[0,1,3,7,8,13,14,15,16,17,19,20,21,22,23,24,27,28,29,31,35,36,37,38,39,40,42,43,44,46,47,49],"short":[19,22,23,26,39,40,42,43,47],"static":[1,19,31,36,39],"super":[13,19,22,29,36],"switch":[9,10,13,21,23,25,42,43,46],"throw":[22,42,43],"true":[1,3,7,8,11,13,14,15,17,19,20,21,22,23,25,26,28,29,30,31,33,36,37,38,39,40,41,42,43,44,46,47,48],"try":[2,4,11,14,17,19,22,23,30,32,36,37],"var":[1,22,42,43],"void":[31,43],"while":[5,13,14,15,19,22,23,25,30,32,37,41,42,43,47],Abs:36,And:[22,35,43,47],But:[1,4,19],For:[1,2,3,4,5,7,8,13,14,15,17,19,22,23,25,26,27,28,30,32,36,37,38,39,40,41,42,43,44,46,47],Going:46,Has:[22,23,43],Its:[22,37],NFS:14,NOT:[19,36,38,43],Not:[19,29],One:[14,19,22,23,26,27,31,33,36,37,41,43,44,46],Ops:[2,28,42],PRs:[4,5],RHS:43,Such:[7,13,43],That:[43,47],The:[1,3,5,7,8,13,14,15,16,17,19,20,21,22,23,24,26,27,28,30,31,32,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49],Then:[1,26,34,36,37,47],There:[1,4,5,14,17,19,22,25,28,29,30,31,32,33,34,35,36,42,43],These:[7,13,14,15,19,22,29,36,38,40,43,44,46],Use:[8,13,14,22,23,32,41,42,43,47],Used:[13,43],Useful:[8,22],Uses:8,Using:[13,15,19,22,32],WITH:36,Will:[6,14,19,43,47],With:[13,15,22,23,28,36,37,41],__background__:46,__call__:47,__config__:[18,27],__constants__:19,__dict__:37,__file__:[17,35],__getitem__:[13,44],__init__:[1,13,15,19,22,29,30,36,41],__iter__:13,__len__:[13,44],__main__:[13,26,32,35],__name__:[13,32,35],__new__:19,_back:[22,23],_bottom:[22,23],_call:15,_cat:15,_channel:[22,23],_class:22,_compilation_unit:19,_cpp_modul:19,_dependentproperti:15,_dim:22,_direct:22,_ext:35,_extra_fil:[19,31],_factor:22,_featur:[22,23],_fft:43,_force_outplac:19,_fork:27,_formatt:43,_forward_cl:1,_frames_up:19,_front:[22,23],_glibcxx_use_cxx11_abi:43,_greaterthan:15,_greaterthaneq:15,_halfopeninterv:
15,_if_scalar_type_a:36,_in:24,_index:22,_indic:[38,43],_instanc:15,_integerinterv:15,_interv:15,_invers:15,_key_padding_mask:22,_layer:22,_left:[22,23],_length:[22,43],_lessthan:15,_like:42,_load_from_state_dict:22,_log_api_usage_onc:31,_mask:22,_metadata:22,_module_class:19,_nnz:38,_onnx_master_opset:36,_onnx_stable_opset:36,_out:24,_pad:22,_qualified_nam:19,_random_sampl:22,_rcb:19,_resnet18:17,_retain_param_nam:36,_right:[22,23],_sampl:43,_scalar:36,_shape:22,_size:22,_slope:[22,23,24],_stack:15,_stacklevel:23,_tensor:42,_top:[22,23],_valu:[22,38,43],_wait:27,_weight:22,a3c:32,a_big:43,a_dict:19,a_i:22,a_l:43,a_lu:43,a_tupl:19,a_u:43,aaa:41,abc:22,abi:7,abil:[5,31],abl:[4,19,22,36,43],abnorm:[21,32],about:[1,5,8,13,19,22,27,29,30,31,32,36,42,47],abov:[1,15,17,19,22,26,27,28,29,36,37,43,44,47],abridg:30,abruptli:21,abs:[15,22,23,36,37,42,43],abs_:42,absolut:[1,5,7,22,23,42,43,47],abstransform:15,acc:46,acceler:[4,22,37],accept:[1,4,5,14,19,22,29,36,37,40,41,42,43],access:[5,13,14,21,22,25,28,30,31,40,42,49],accident:4,accimag:45,accommod:22,accompani:4,accomplish:4,accord:[22,24,35,36,37,43,44,46],accordingli:[42,44,46],accoridng:22,account:[2,22],accumul:[1,19,22,30,42,43],accumulategrad:1,accur:[8,36,43],accuraci:[41,46],achiev:[13,15,22,23,31,36],aco:[36,42,43],acos_:42,acquaint:4,across:[1,8,13,14,19,22,23,28,30,31,32,33,39,41,42,43,44],act:[15,22,47],action:[5,15,28,44],activ:[1,3,4,8,21,28,29],actual:[1,13,17,19,22,25,27,28,29,32,35,36,37],actual_input_1:36,acycl:25,adadelta:37,adagrad:[22,37],adam:[5,6,15,37],adamax:37,adamw:37,adapt:[19,22,23,37,43],adaptive_avg_pool1d:36,adaptive_avg_pool2d:36,adaptive_avg_pool3d:36,adaptive_max_pool1d:36,adaptive_max_pool2d:36,adaptive_max_pool3d:36,adaptiveavgpool1d:23,adaptiveavgpool2d:23,adaptiveavgpool3d:23,adaptivelogsoftmaxwithloss:19,adaptivemaxpool1d:23,adaptivemaxpool2d:23,adaptivemaxpool3d:23,add:[1,4,8,17,19,22,23,26,29,31,36,37,38,41,42,43,46],add_:[1,26,38,42],add_argu:28,add_audio:41,add_bias_kv:22,add_custom_scalar:41,add_embed:41,add_figur:41,add_graph:41,add_histogram:41,add_imag:41,add_mesh:41,add_modul:22,add_param_group:37,add_pr_curv:41,add_scalar:41,add_text:41,add_video:41,add_zero_attn:22,addbmm:[42,43],addbmm_:42,addcdiv:[42,43],addcdiv_:42,addcmul:[42,43],addcmul_:42,added:[4,7,22,23,31,36,37,38,41,42,43],adding:[13,14,17,19,22,29,36,42,43],addit:[1,4,5,7,15,19,22,23,27,28,29,31,32,33,35,37,38,42,43],addition:[1,13,14,15,22,30,42,43,47],additionali:22,addmm:[36,38,42,43],addmm_:42,addmv:[42,43],addmv_:42,addr:[42,43],addr_:42,address:[1,13,14,21,42,46],adher:5,adjac:[22,43],adjust:[22,47],adjust_bright:47,adjust_contrast:47,adjust_gamma:47,adjust_hu:47,adjust_satur:47,admit:28,adopt:5,advanc:[3,22,25,32,36,41],advantag:[14,22,30],adventur:36,adversari:22,advic:4,advis:[32,43],advisori:4,aeroplan:46,affect:[1,4,8,22,23,39,43],affin:[15,22,23,25,42,47],affinetransform:15,aforement:32,after:[4,7,8,13,14,17,19,21,22,24,28,30,31,32,34,37,40,41,42,43,47],afterward:[1,22],again:[3,13,14,43,44],against:[1,2,14,19,43,47],aggreg:[22,23,46],aggress:[1,25],ahead:4,aid:[4,25],aidan:22,ail:6,ailzhang:6,aim:4,airplan:46,aka:1,akin:31,alban:6,alband:6,alex:6,alfredo:6,algorithm:[4,11,12,15,22,23,43],alia:[15,42],alias:29,alican:6,alicanb:6,align:[22,23],align_corn:[22,23],aliv:[30,32],all:[1,3,4,5,7,8,13,14,15,17,19,21,22,23,25,27,28,29,30,32,33,35,36,37,38,39,40,41,42,43,44,46,47,48],all_gath:14,all_gather_multigpu:14,all_reduc:14,all_reduce_multigpu:14,allclos:[1,42,43],alloc:[1,2,8,15,21,25,28,30,40,42,43],allow:[1,4,5,7,13,14,15,1
[... minified Sphinx `searchindex.js` payload for the archived `docs/1.1.0/` tree: alphabetized, stemmer-normalized search terms (from "allow_unreach" through "sbu") mapped to lists of document indices; machine-generated content, reproduced verbatim in the patch and not hand-edited ...]
5,sbucaptionedphotodataset:44,scalar:[1,19,22,23,24,36,37,38,41,42,43],scalar_valu:41,scale:[4,13,15,18,22,23,24,30,37,42,43,46,47,48],scale_each:48,scale_factor:[22,23],scale_fn:37,scale_grad_by_freq:[22,23],scale_mod:37,scale_tril:15,scatter:[8,14,22,30,36,42],scatter_:[36,42],scatter_add:[36,42],scatter_add_:[33,42],scatter_list:14,scenario:[13,28,36],scene:41,schedul:[31,37],schema:19,scheme:42,schmidtm:37,sci_mod:43,scientif:43,scipi:[23,41,44],scissor:46,scope:[4,19,22,30,36],score:[22,46],scrambl:43,scratch:[4,25],script:[2,13,14,17,22,27,31],script_method:[19,36],scripted_fn:19,scripted_modul:19,scriptmodul:[19,36],scrutini:4,search:[4,19,46],seat:5,sebastian:6,second:[1,3,7,19,22,23,30,33,34,35,38,41,43,47],section:[4,13,15,19,21,22,29,32,41,42],see:[1,2,3,4,5,7,8,9,10,11,13,15,17,19,20,21,22,23,25,28,29,30,31,32,35,36,38,40,42,43,46,47,48],seed:[8,13,30,33,43,44],seed_al:8,seek:[19,43],seem:[4,36,47],seen:[1,15,22,37,42,43],segfault:21,segment:[3,44,45,47],select:[8,11,12,13,19,21,22,23,27,28,36,42,43,44,46,47],self:[1,13,19,22,25,26,27,29,30,36,37,39,42,43],self_cpu_time_tot:1,selu:36,semant:[5,8,18,19,36,43,44,45,47],semi:[22,24],semidefinit:43,send:[4,8,13,14,21,32,35,43],sender:14,sens:[2,15,43],sensit:[22,36],sent:[8,14,21,32,43],separ:[1,7,13,14,17,19,22,23,27,37,41,43,48],seq:[1,22,42,43],seq_len:22,sequenc:[1,8,13,15,19,22,23,28,30,36,37,42,43,47],sequenti:[3,13,21,36],sequentialsampl:13,sequnc:22,seri:22,serial:[13,18,19,20,28,31,32],serializ:[19,36],seriou:[21,34],serr:44,serv:[4,14],server:[4,13],sess:36,set:[0,1,3,5,7,8,11,13,14,15,17,19,20,21,22,23,24,26,27,28,29,30,31,32,33,35,36,37,41,42,43,44,46,47],set_:[1,42],set_default_dtyp:43,set_default_tensor_typ:43,set_detect_anomali:1,set_devic:[8,22,40],set_dir:17,set_flush_denorm:43,set_grad_en:[1,43],set_image_backend:45,set_num_interop_thread:[27,43],set_num_thread:[27,43],set_printopt:43,set_rng_stat:[8,43],set_rng_state_al:8,set_sharing_strategi:21,set_start_method:32,set_stat:43,set_trac:19,set_train:36,setapiusagehandl:31,setapiusagelogg:31,setexportmoduleextrafileshook:31,setsamplingprob:31,setup:7,setuptool:7,sever:[14,19,22,23,27,28,31,37,43,47],sgd:[13,22,25,37],sgdr:37,sgn:43,sha256:20,shadow:47,shall:22,shallow:22,shamelessli:43,shape:[1,8,15,19,22,23,25,26,30,36,38,41,42,43,44,46,47,48],shape_as_tensor:36,shard:[4,13],share:[1,4,7,8,13,15,16,22,27,32,35,36,39,42,43],share_memori:32,share_memory_:[21,39,42],shared_memori:21,sharedfil:14,shazeer:22,shear:47,sheep:46,shell:7,shen:6,shi:22,shift:[22,42,43,47,48],ship:27,shippabl:4,shm_open:21,shorter:36,shortest:22,shorttensor:[40,42],should:[1,2,3,4,5,7,8,13,14,15,17,19,20,21,22,23,28,29,30,32,33,35,36,37,38,41,42,43,44,46,47],shouldn:38,shout:36,show:[0,2,4,13,14,17,27,28,37,41],showcas:[5,22,28,32],shown:[8,19,29,30],shrinkag:[22,23],shuffl:[13,41,44],shufflenet_v2_x0_5:46,shufflenet_v2_x1_0:46,shufflenet_v2_x1_5:46,shufflenet_v2_x2_0:46,shufflenetv2:46,shut:13,side:[1,7,17,19,22,23,36,37,43,47],sigma:[15,22,23,42],sigmoid:[15,24,36,42,43],sigmoid_:42,sigmoidtransform:15,sign:[4,15,36,40,42,43,46],sign_:42,signal:[21,22,23,32,43],signal_2d:22,signal_4d:22,signal_ndim:[42,43],signal_s:[42,43],signatur:[1,13,22,42,43],signific:[1,25,28,37],significantli:22,silent:[8,19,22,43],sim:[22,23,43],similar:[4,13,19,21,22,23,27,29,38,42,43,44,49],similarli:[4,19,22,30,36,43],simon:6,simpl:[17,19,22,23,29,30,31,33,36],simplecustombatch:13,simplequeu:32,simpler:29,simplest:22,simplex:15,simpli:[1,7,13,15,19,22,25,38],simplifi:[19,22,37],simultan:25,sin:[7,36,41,42,43],sin
_:42,sin_add:7,sinc:[4,8,13,14,15,19,22,23,29,30,31,35,36,37,38,43,47],sine:43,singl:[7,14,15,17,19,21,22,23,25,27,28,29,32,36,37,39,40,42,43,47],singleton:[15,22,26,42,43],singular:43,sinh:[42,43],sinh_:42,sink:46,site:4,situat:[15,21,32],size:[1,4,8,13,14,15,19,22,23,25,26,28,29,30,31,36,37,38,39,41,42,43,44,46,47,48],size_averag:[22,23],sizedim:42,sizeof:39,skateboard:46,skew:[1,2],ski:46,skip:[29,37],sky:44,slack:4,slice:[19,22,23,36,42],slide:[22,23,43],slightli:[5,13,17,46],slogdet:[42,43],slope:[22,24],slow:[32,41],slower:[2,14,23,46],small:[1,4,5,8,13,14,15,19,22,23,28,29,30,43],smaller:[13,37,42,43,47],smallest:[38,43,49],smart:29,smessmer:6,smi:[8,28,30],smnt:44,smoke:44,smooth:[22,36,37],smoother:17,smoothl1loss:23,snd_tensor:41,snedecor:15,snippet:17,snow:44,snowboard:46,snowi:44,sobol:43,soboleng:43,sobolengin:43,socket:21,sofa:46,soft:[22,23,46],softmarginloss:23,softmax:[15,36],softmaxtransform:15,softplu:36,softshrinkag:22,softwar:37,solid:47,solut:[4,24,32,43],solv:[4,35,42,43],solver:43,some:[1,3,4,5,8,11,14,15,17,19,21,22,23,25,28,29,30,31,32,33,34,35,36,37,38,41,42,43,44,46,47],some_dict:19,someon:4,someth:[4,19,21,35,43],sometim:[4,19,21,22,23,30,32,43],somewher:31,sophist:37,sort:[1,22,30,42,43],sort_bi:1,sorted_indic:22,soumith:[5,6],sound:41,sourc:[0,1,2,3,7,8,13,14,15,17,19,21,22,23,24,30,31,33,36,37,38,39,41,42,43,44,45,46,47,48],space:[13,15,19,22,23,43,47],spadd:38,span:[8,22,42],spars:[1,18,24,37,40,42,43],sparse_:24,sparse_coo:[38,40,42,43],sparse_coo_tensor:[38,42,43],sparse_dim:[38,42,43],sparse_grad:43,sparse_mask:[38,42],sparseadam:[22,37],sparseaddmmbackward:38,sparsedim:42,sparsefloattensor:38,sparsetensor:[1,38,42,43],sparsiti:24,spatia:23,spatial:[22,23],spatio:22,spawn:[13,22,32,35],spawncontext:21,speak:[38,43],special:[4,22,29,31,41,43],specif:[1,3,4,5,7,8,14,15,17,19,20,22,28,32,33,34,36,37,41,42,43],specifi:[1,7,8,13,14,15,17,19,20,22,23,28,29,31,35,36,37,38,39,41,42,43,44,45,47,48],specifii:36,spectral:22,speed:[4,22,27,28,30,33,43],spend:[2,4],spent:[1,2,14],sphinx:4,split:[4,13,19,22,23,36,42,43,44],split_siz:[42,43],split_size_or_sect:43,spmm:38,sponsorship:5,spoon:46,sport:46,spotri:43,spread:[8,28],sqrt:[22,24,36,38,42,43],sqrt_:42,squar:[22,23,37,38,43,47],squeez:[29,36,38,42,43],squeeze_:42,squeezenet1_0:46,squeezenet1_1:46,src:[8,14,22,42,43],src_key_padding_mask:22,src_mask:22,src_tensor:14,src_vocab:22,srinivasan:6,ssa:19,sse3:43,ssnl:6,sspaddmm:38,sspmm:38,stabil:[22,37,43],stabl:[15,22,35,36,43],stack:[8,13,15,22,28,36,43,47],stacktransform:15,stage:4,stagnat:37,stai:[4,22,32],stand:19,standalon:19,standard:[4,15,19,22,24,27,32,36,42,43,44,47],star:22,start:[1,2,5,8,13,14,21,22,23,26,28,30,32,35,36,37,42,43],start_dim:[42,43],startpoint:47,startup:[2,27],stash:[1,3,29],stat:22,state:[1,3,8,13,15,19,22,28,32,35,37,43],state_dict:[17,20,22,32,34,36,37],statement:[25,29,32,36],staticmethod:[1,29],statist:[8,15,22,30],statu:[21,43],std:[7,24,31,35,42,43,46,47],std_mean:43,stddev:15,stderr:[20,46],stdin:1,stdout:37,step:[2,5,7,13,14,15,19,22,23,28,30,32,33,35,41,42,43,44],step_between_clip:44,step_siz:37,step_size_down:37,step_size_up:37,steplr:37,stft:[42,43],stick:15,stickbreakingtransform:15,still:[1,14,15,19,21,22,28,30,35,37,43],stirl:[22,23],stl10:45,stl10_binari:44,stochast:[13,15,22,37],stop:[8,15,22,37,43,46],storag:[1,8,18,19,20,21,22,25,28,32,40,42,43],storage_offset:[42,43],storage_typ:42,storageshar:35,store:[1,3,7,14,17,19,22,30,31,38,41,42,43,44],store_tru:28,str:[1,7,14,19,21,22,23,37,39,41,42,44,47],straight:23,strategi:[4,
13,14,19,22],stream:[13,44],strict:[19,22],strictli:[5,13,22,25],stride:[1,22,23,36,40,41,42,43],strike:4,string:[0,1,7,8,14,17,19,20,22,23,31,36,39,40,41,42,43,44,45],stringio:[19,43],strip:[23,36],strip_doc_str:36,strive:4,strong:5,strong_wolf:37,strongli:[5,17,22,27],struct:31,structur:[4,5,13,19,22,28,29,32,34,35,36,41,42,43],student:15,studio:35,style:[19,43],styliz:22,sub:[19,22,36,38,42,43],sub_:[38,42],subclass:[1,7,13,15,19,22,29,42,44],subdir:44,subfold:7,subgradi:37,subgraph:22,subject:43,submatrix:15,submit:8,submodul:[19,22,36],subpackag:46,subprocess:[13,30,32],subsequ:[4,7,19,22],subset:[13,14,19,22,36,46],subsetrandomsampl:13,subspac:[22,42,43],substanti:5,substitut:40,subsystem:4,subtl:[4,22],subtleti:[13,22,30],subtli:37,subtract:[23,42,47],subtyp:19,succe:[14,35],succeed:43,success:[5,15,43],successfulli:[21,22,43],succinct:17,suffici:[7,15,17,36,43],suffix:[41,42],sugar:19,suggest:[5,17,19,22,30],suhan:6,suit:[19,36],suitabl:[13,15,37],suitcas:46,suitibl:41,sum:[1,8,13,14,15,23,28,29,36,37,38,42,43],sum_:[22,43],sum_i:22,sum_j:[22,23,43],sum_pair:19,sum_to_s:42,summar:[2,43],summari:[1,41,43,46],summarywrit:41,summat:43,sunset:[5,6],suo:6,superresolut:36,supervis:22,suppli:[3,4,7],support:[1,4,5,7,8,13,14,15,17,19,21,22,23,25,26,27,32,35,37,38,40,41,42,43,44,45],suppos:[13,38,43,47],sure:[1,4,13,14,19,22,25,30,35,36,37,41,43],surfboard:46,surg:43,surpass:24,surpris:17,surrog:15,surround:19,sutskev:37,svd:[42,43,47],svhn:45,svi:15,swap:[22,23,42,43],symbol:[35,36],symbolic_fn:36,symbolic_foo_forward:36,symbolic_help:36,symbolic_nam:36,symbolic_opset10:36,symbolic_opset9:36,symbolic_opset:36,symeig:[42,43],symmetr:[43,47],symmetri:43,sync:22,sync_bn_modul:22,sync_bn_network:22,synchron:[2,8,22,27,28,32],syntact:5,syntax:19,sys:17,system:[4,7,8,19,22,25,28,31,35,43],t4d:23,t_max:37,tabl:[1,14,19,22,23,46],tag:[1,4,14,17,31,41,43],tag_nam:17,tag_scalar_dict:41,taiwan:41,take:[1,2,4,5,7,8,13,15,17,19,21,22,23,27,29,30,33,35,36,40,41,42,43,44],taken:[15,22,23,28,30,31,36,43,44],talk:31,tall:42,tan:[36,41,42,43],tan_:42,tangent:43,tanh:[24,36,42,43],tanh_:42,tanx:41,tape:4,tar:44,tarbal:44,target:[22,23,32,37,41,42,44,46,47],target_length:[22,23],target_n:22,target_transform:44,target_typ:44,task:[1,4,22,27,35,46,47],tau:[23,43],tbb:27,tdr:35,team:[4,5],technic:[4,5,30],techniqu:22,teddi:46,tell:[1,4,19,42,43],temperatur:[15,23],tempor:[22,23],temporari:[7,22,30],temporarili:36,ten:[19,41],ten_crop:47,tencrop:47,tend:4,teng:6,tenni:46,tensor1:[42,43],tensor2:[42,43],tensor:[3,4,7,8,13,14,15,16,18,19,22,23,24,25,26,27,28,29,30,32,33,35,36,37,38,39,41,44,45,46,48],tensor_a:43,tensor_b:43,tensor_list:14,tensorboard:18,tensordataset:13,tensordot:43,tensorflow:[15,41],term:[5,15,22,23,30,37,42,43],termin:[21,37],terminolog:22,test10k:44,test50k:44,test:[7,19,21,28,29,41,43,44,46,47],text:[4,15,22,23,24,41,42,43,47],text_str:41,texttt:[42,43],tgt:[13,22],tgt_key_padding_mask:22,tgt_mask:22,tgt_vocab:22,thalloc:35,than:[1,2,3,5,7,8,13,14,17,19,22,23,24,25,26,28,29,30,32,33,36,37,38,41,42,43,44,45,46,47,48],thank:[15,29],thc:35,thc_state:35,thcstate:35,thcudacheck:35,thcudatensor:35,thcudatensor_cadd:35,thcudatensor_fil:35,thcudatensor_issamesizea:35,thcudatensor_resizea:35,the_model:34,thei:[1,3,4,5,8,13,14,15,19,21,22,23,28,29,32,35,36,37,38,42,43,44,46,47],them:[1,3,4,13,14,17,19,21,22,23,25,26,29,30,31,35,37,38,41,42,43,44],themodelclass:34,themselv:[1,43],therebi:13,therefor:[1,3,13,14,15,19,22,23,30,31,37,38,42,43,47],theta:[15,23],thi:[1,2,3,4,5,7,8,9,10,11,13,14,15,17,19,21,22,
23,24,25,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49],thin:43,thing:[1,4,23,25,30,32,38],think:4,third:[15,22,43],thoma:6,those:[1,2,8,13,19,22,23,28,37,43],though:[14,19,32],thrash:14,thread:[1,8,13,14,18,19,22,31,32,36,43],three:[14,19,22,36,37,41,44],threej:41,threshold:[36,37,41,43,46],threshold_:23,threshold_mod:37,through:[1,3,4,5,13,15,17,19,21,22,23,30,35,42,43,49],throughout:22,thrown:[42,43],thtensor:42,thtensorrandom:43,thu:[1,13,14,15,19,22,23,30,31,36,42,43],thumb:14,tie:[15,46],tile:42,time:[1,2,4,7,8,13,14,15,19,21,22,23,24,25,27,30,31,32,35,36,37,38,41,42,43,46,47],timedelta:14,timelin:[1,2],timeout:[13,14,21],tini:[42,49],tip:4,tl_flip:47,tmp:[1,7],to_dens:38,to_dlpack:16,to_grayscal:47,to_mkldnn:42,to_pil_imag:47,to_spars:[38,42],to_tensor:47,toaster:46,todens:38,togeth:[13,14,15,22,30,31,41,43,47],toilet:46,token:17,tol:43,toler:[1,19,37,43],tolerance_chang:37,tolerance_grad:37,tolist:[39,42],too:[4,22,23,30,32,35,38],tool:[1,2,5,17,19,35],toothbrush:46,top:[1,13,15,21,22,23,29,43,44,46,47],topic:[5,31],topilimag:47,topk:[36,42,43],topolog:1,torch:[5,11,12,18,19,25,26,27,28,30,31,32,33,34,35,44,45,46],torch_14808_1591070686:35,torch_extens:7,torch_extensions_dir:7,torch_hom:[17,20],torch_model_zoo:46,torch_shm_manag:21,torchscript:[18,36],torchvis:[17,18,19,25,36,41],toronto:22,total:[1,2,4,13,17,22,23,37,43],total_averag:1,total_count:15,total_length:[22,30],total_loss:30,totensor:[41,44,47],touch:[4,36],toward:[5,36,43],tr_flip:47,trace:[1,4,13,25,28,42,43],trace_modul:19,trace_nam:1,traceabl:19,traceback:[1,21],traced_bar:19,traced_cpu:19,traced_fn:19,traced_foo:19,traced_gpu:19,tracer:36,tracerwarn:19,track:[1,3,8,21,22,28,29,30,31,42],track_running_stat:22,tracker:[4,5],trade:[3,22],traffic:46,trail:[22,24,26,29],train2017:46,train:[8,13,14,17,19,22,23,24,25,30,36,37,41,44,46,47],train_batch:37,train_extra:44,train_load:28,train_nov:44,trainabl:37,trainload:41,trainset:41,trainval:44,tranform:15,transb:36,transfer:[13,14,21,28],transform:[13,18,23,25,41,43,44,45,46],transform_input:46,transform_to:15,transformation_matrix:47,transformed_distribut:15,transformer_decod:22,transformer_encod:22,transformer_model:22,transit:19,translat:47,transpos:[22,23,25,36,38,42,43],transpose_:[1,38,42],transposed_data:13,trapezoid:43,trapz:43,travers:[22,29],treat:[15,19,22,23,37,40,42,43],tree:[4,19,22,44],tri:[4,19,21,22,42],triag:5,trial:15,triangl:41,triangular2:37,triangular:[15,23,37,43],triangular_solv:[42,43],trick:[15,22,23,31,46],tricki:25,trigger:[1,4,5,31,42,43],tril:[42,43],tril_:42,tril_indic:43,trilinear:[22,23],trim:43,tripl:23,triplet:22,triplet_loss:22,tripletmarginloss:23,triu:[42,43],triu_:42,triu_indic:43,trivial:43,trou:22,troubleshoot:4,truck:46,truli:36,trunc:[42,43],trunc_:42,truncat:[30,43],truth:[41,46],tseq:15,tune:[14,37],tup:19,tupl:[1,3,8,13,21,22,23,29,36,37,38,41,42,43,44,47,48],tuple_or_list:19,turn:[7,13,19,22,36],tutori:[5,29,31,36],tvmonitor:46,twice:[30,46],two:[1,2,7,8,13,14,15,17,19,22,23,25,26,27,28,29,33,34,35,37,38,41,42,43,44,46],twse:41,txhxwxc:44,txt:19,type:[1,7,8,14,15,17,18,22,23,27,28,31,35,36,38,39,40,41,42,43,44,46,47],type_a:[36,42],type_p:15,type_q:15,typic:[7,13,15,19,22,27,49],typo:4,ubc:37,ucf101:45,ucf:44,uint8:[40,41,42,43,47,49],uint8_t:42,uint8tensor:46,ultim:[5,7],umbrella:46,unabl:[4,37],unbalanc:22,unbatch:43,unbias:[42,43],unbind:[36,42,43],unchang:[22,42,43,47],uncoalesc:[38,43],uncondition:17,unconstrain:15,undefin:[14,19,28,42,43],under:[1,2,13,21,22,23,25,28,32,35,41,43],underli:[8,15,19,23,30,
42,43],underscor:[17,38,42],understand:[4,5,22,24,25,41],understood:43,undertak:4,underwai:1,undesir:[11,22,23],undetermin:33,unequ:22,unexpect:[14,19,22],unexpected_kei:22,unexpectedli:42,unfold:[19,36,42],unfortun:[1,3,5,22],unicodedecodeerror:43,uniform:[22,24,42,43],uniform_:[24,29,42,43],uniformli:[15,43,47],uniniti:[42,43],uniqu:[14,19,20,42,43],unique_consecut:[42,43],unit:[22,23,43],unit_interv:15,unitari:43,unitriangular:[42,43],univari:15,univers:19,unix:[13,21],unlabel:44,unless:[1,2,4,13,14,22,25,28,42,43],unlik:[4,19,21,22,32,42],unmask:22,unnecessari:28,unnorm:[22,23],unnot:21,unoccupi:8,unord:22,unpack:[22,29,30,43],unpack_data:43,unpack_pivot:43,unpickl:[13,43],unpool:22,unpooled_output:22,unreduc:22,unrel:4,unresolv:35,unrol:[19,36],unseg:22,unsign:[40,42],unsort:22,unsorted_indic:22,unspecifi:[14,42,43],unsqueez:[22,29,36,41,42,43],unsqueeze_:42,unstabl:[15,23,43],unsuccess:14,unsupport:19,until:[4,8,14,21,23,25,28,30,41],untouch:13,untrack:19,untrain:36,unus:[8,22,28],unused_argument1:22,unused_argument2:22,unusu:4,upcal:19,upcom:14,updat:[1,5,17,19,22,32,35,36,37,41,42],upgrad:37,upon:[13,21],upper:[15,22,23,24,37,42,43,47],upper_bound:15,uppercas:14,ups:4,upsample_nearest1d:36,upsample_nearest2d:36,upsample_nearest3d:36,upsample_trilinear:23,upscal:22,upscale_factor:[22,23],upstream:35,url:[1,14,17,20],usa:41,usag:[1,2,4,8,13,15,25,30,36,41,42,43],use:[1,3,4,5,7,8,13,15,17,19,21,22,23,24,25,27,28,29,30,31,32,33,35,36,41,42,43,44,46],use_cuda:1,use_gpu:19,use_input_stat:23,use_mkldnn:27,use_openmp:27,use_tbb:27,used:[1,2,4,7,8,12,13,14,15,17,19,20,22,23,24,27,28,29,31,32,34,35,36,37,38,40,41,42,43,44,45,46,47,48],useful:[1,4,13,15,17,19,22,23,25,27,29,31,37,43,47],user:[1,3,8,13,14,15,17,21,22,28,31,32,36,41,42,43,47],userwarn:[26,36],uses:[1,2,8,13,14,17,19,22,23,27,28,29,30,31,35,37,43,44,45],using:[1,3,4,5,7,9,10,11,13,14,15,17,18,19,21,22,23,24,25,27,28,29,30,31,32,33,35,36,37,41,42,43,44,46,47],usp:45,usual:[1,4,7,13,19,22,27,30,31,36,41,42,43],uszkoreit:22,util:[4,8,18,26,27,28,29,30,31,36,37,44,45,46],v100:[12,22,46],v_1:22,v_2:22,val2017:46,val:[24,42,44],val_loss:37,valid:[1,14,15,19,22,36,37,43,44],valid_fil:44,validate_arg:15,valu:[1,3,4,5,7,13,14,15,17,20,21,22,23,24,25,27,28,29,30,33,36,37,38,41,42,43,44,46,47,48],valueerror:22,var1:37,var2:37,var_mean:43,vari:[22,37],variabl:[3,7,8,15,17,20,22,27,28,30,33,35,36,37,41,42,43,46],variabletyp:36,varianc:[15,22,24,33,37,43],variant:[31,37,43],variat:15,variou:[3,5,7,13,21,32,34,37],vase:46,vaswani:22,vc2017:35,vdim:22,vec1:[42,43],vec2:[42,43],vec:[22,42,43],vector:[1,15,22,23,41,42,43,44,47],vehicl:5,vein:19,veloc:37,verbos:[7,36,37,41],veri:[1,2,4,19,21,22,25,29,30,32,35,36,37,46],verifi:[7,19,20,29,36],verify_ninja_avail:7,versa:[22,39,42,43],version:[3,15,17,19,22,23,25,26,28,29,31,35,36,37,42,43,44,47],versu:[4,22],vert:[22,23],vertex:41,vertic:[41,47],vertical_flip:47,vertices_tensor:41,vflip:47,vgg11:46,vgg11_bn:46,vgg13:46,vgg13_bn:46,vgg16:46,vgg16_bn:46,vgg19:46,vgg19_bn:46,vgg:36,via:[1,4,7,8,13,14,15,19,21,22,24,30,32,36,37,40,43],vice:[22,39,42,43],vid_tensor:41,video:[22,41,44],videoclip:44,viehmann:6,view:[1,4,5,13,19,21,22,23,26,36,40,42,43,44,47],view_a:42,violat:5,virtual:36,vishwak:6,vishwakftw:6,visibl:[8,14,22,46],vision:[5,17,45,46],visual:[1,22,35,41],vitali:6,vitalyfedyunin:6,voc2012:44,voc:[45,46],vocdetect:44,vocsegment:44,volumetr:[22,23],vs2017:35,vs2017_runtim:35,vw_i:22,vychisl:43,w_hf:22,w_hg:22,w_hi:22,w_hn:22,w_ho:22,w_hr:22,w_hz:22,w_if:22,w_ig:22,w_ii:22,w_in:22,w_io:22,w_ir:22
,w_iz:22,w_j:22,w_n:22,w_y:27,w_z:27,wai:[1,3,4,5,7,13,14,15,19,21,22,23,29,30,32,33,34,35,36,37,38,42,43,44,46],wait:[1,8,14,21,22,27,37],wait_ev:8,wait_stream:[8,28],walltim:41,wang:6,want:[4,8,13,14,17,19,22,23,25,28,29,36,37,38,42,43],warm:37,warmup:1,warn:[26,36,43],wasn:43,wast:4,weaker:15,weight:[13,17,19,23,24,25,29,30,36,37,41,42,43,46],weight_decai:37,weight_g:22,weight_hh:22,weight_hh_l:22,weight_ih:22,weight_ih_l:22,weight_u:22,weight_v:22,weighted_kernel_sum:19,weightedrandomsampl:13,weird:[22,46],welcom:4,well:[1,4,7,13,14,19,22,23,25,32,33,36,38,41,43,46],were:[1,14,19,22,29,36,38,42,43],what:[1,3,4,5,15,17,19,22,25,29,36,37,38,44],whatev:[42,43],wheel:35,when:[1,2,4,5,7,8,9,10,11,13,14,15,17,19,21,22,23,25,26,28,29,30,31,32,33,34,35,36,37,38,41,42,43,44,46,47],whenev:[5,21,22,23],where:[1,2,4,5,7,13,14,15,19,20,21,22,23,24,25,26,28,33,36,37,38,40,41,42,43,44,46,48],wherev:5,whether:[1,4,5,7,13,14,15,17,20,22,23,28,29,36,37,38,39,41,42,43,44],which:[1,2,4,5,7,8,13,15,17,19,20,21,22,23,25,26,27,28,30,31,33,35,36,37,38,39,40,41,42,43,44,46,47,49],whilst:[15,28],white:47,whiten:47,who:4,whole:[13,14,22,32],whose:[13,15,22,25,36,41,43,44],why:[4,36],wide:27,wide_resnet101_2:46,wide_resnet50_2:46,width:[15,22,23,36,43,47],wikipedia:23,willing:5,win:43,win_length:[42,43],window:[13,18,22,23,42,43],window_length:43,wine:46,wip:4,wise:[14,15,22,23,27,43],wish:19,wit:19,with_cuda:[7,35],with_replac:43,within:[1,3,5,8,13,14,15,19,22,27,28,31,36,41,43],without:[1,3,4,5,8,13,15,17,19,21,22,23,26,28,32,36,41,42,43,46,47,49],won:[3,17,22,23,25,29,36,43],woodburi:15,word:[1,14,19,22,23,30],word_language_model:36,work:[1,3,4,5,7,8,14,15,17,19,21,22,23,25,27,28,29,32,35,38,42,43],worker:[4,13,14,22,44],worker_id:13,worker_info:13,worker_init_fn:[13,30],workflow:17,workground:35,workload:[13,14,31],workspac:[7,36],world:[14,22],world_siz:[14,22],worth:17,would:[1,3,5,13,14,15,19,22,26,27,28,36,38,42,43],wrap:[1,13,19,22,35,37,42],wrapper:[8,14,19,21,22,29],write:[4,5,19,23,25,28,30,36,37,38,41,42,43],writer:41,written:[1,19,22,36,37,39,41,43],wrong:[32,35,37],wrote:4,www:[22,37,41,44],x86:43,x86_x64:35,x_0:43,x_1:[22,23,43],x_2:[22,23,43],x_3:43,x_clone:21,x_cpu:28,x_cpu_long:28,x_gpu:28,x_i:[22,23,43],x_j:[22,23],x_n:22,x_t:22,xavier_normal_:24,xavier_uniform_:24,xcosx:41,xdg_cache_hom:[17,20],xing:43,xml:44,xsinx:41,xxx:44,xxy:44,xxz:44,y_cpu:28,y_cpu_long:28,y_gpu:28,y_hard:23,y_i:[22,43],y_n:22,y_soft:23,yang:[5,6],ycbcr:47,year:44,yes:4,yet:[8,43],yf225:6,yield:[13,22,43],yinghai:6,you:[1,2,3,4,5,7,8,11,13,14,15,17,19,21,22,23,25,26,28,29,30,31,32,33,35,36,37,38,40,41,42,43,44,46,47],your:[1,2,4,7,8,13,14,15,19,21,22,25,26,28,29,30,31,33,35,36,37,38,41,42,43,46,47],yourself:[32,35],z_t:22,zach:6,zdevito:6,zebra:46,zero:[1,8,15,19,21,22,23,24,28,35,36,38,41,42,43,47],zero_:[1,23,38,42],zero_grad:[22,30,32,37],zero_infin:[22,23],zero_point:42,zeros_:24,zeros_lik:[28,36,43],zhang:6,zhong:47,zip:[13,31],zipf:22,zou3519:6,zou:6},titles:["torch.__config__","Automatic differentiation package - torch.autograd","torch.utils.bottleneck","torch.utils.checkpoint","PyTorch Contribution Guide","PyTorch Governance","PyTorch Governance | Persons of Interest","torch.utils.cpp_extension","torch.cuda","<no title>","<no title>","<no title>","<no title>","torch.utils.data","Distributed communication package - torch.distributed","Probability distributions - torch.distributions","torch.utils.dlpack","torch.hub","PyTorch documentation","TorchScript","torch.utils.model_zoo","Multiprocessing package - 
torch.multiprocessing","torch.nn","torch.nn.functional","torch.nn.init","Autograd mechanics","Broadcasting semantics","CPU threading and TorchScript inference","CUDA semantics","Extending PyTorch","Frequently Asked Questions","Features for large-scale deployments","Multiprocessing best practices","Reproducibility","Serialization semantics","Windows FAQ","torch.onnx","torch.optim","torch.sparse","torch.Storage","Tensor Attributes","torch.utils.tensorboard","torch.Tensor","torch","torchvision.datasets","torchvision","torchvision.models","torchvision.transforms","torchvision.utils","Type Info"],titleterms:{"case":[4,19],"default":[13,19],"function":[1,5,14,15,19,22,23,36,38,47],"import":[17,35],"new":4,"return":30,Adding:[4,29,36],One:35,Ops:43,The:4,Use:[19,28],__config__:0,about:4,access:19,activ:[22,23],adaptive_avg_pool1d:23,adaptive_avg_pool2d:23,adaptive_avg_pool3d:23,adaptive_max_pool1d:23,adaptive_max_pool2d:23,adaptive_max_pool3d:23,adaptiveavgpool1d:22,adaptiveavgpool2d:22,adaptiveavgpool3d:22,adaptivelogsoftmaxwithloss:22,adaptivemaxpool1d:22,adaptivemaxpool2d:22,adaptivemaxpool3d:22,adjust:37,affine_grid:23,agnost:28,alexnet:[36,46],algorithm:37,alpha_dropout:23,alphadropout:22,amd:6,anomali:1,api:[27,31],approach:34,arithmet:19,ask:[19,30,36],asynchron:[14,28,32],aten:36,attach:31,attribut:[19,40],autograd:[1,6,25,29],automat:[1,13,19],avg_pool1d:23,avg_pool2d:23,avg_pool3d:23,avgpool1d:22,avgpool2d:22,avgpool3d:22,avoid:[4,32],backend:14,backward:[25,26],basic:14,batch:13,batch_norm:23,batchnorm1d:22,batchnorm2d:22,batchnorm3d:22,bceloss:22,bcewithlogitsloss:22,behavior:13,bernoulli:15,best:[28,32,34],beta:15,bilinear:[22,23],binary_cross_entropi:23,binary_cross_entropy_with_logit:23,binomi:15,bla:43,bottleneck:2,broadcast:26,broken:35,buffer:[28,32],bug:4,build:[4,6,27,31,35],builtin:19,c10:6,cach:[17,28],call:19,caption:44,cast:19,categor:15,cauchi:15,celu:[22,23],cffi:35,chang:5,channel:35,characterist:46,check:[1,19,25],checkpoint:3,chi2:15,choos:14,cifar:44,cityscap:44,classif:46,claus:35,clip_grad_norm_:22,clip_grad_value_:22,closur:37,cnn:46,coco:44,code:[4,19,28],codebas:4,collate_fn:13,collect:[8,14],come:14,common:[4,14,31],commun:[8,14],comparison:[19,43],compat:26,compon:35,comput:[1,43],consider:31,constant:19,constantpad1d:22,constantpad2d:22,constantpad3d:22,constraint:15,construct:[19,37],contain:22,contribut:4,controversi:5,conv1d:[22,23],conv2d:[22,23],conv3d:[22,23],conv_transpose1d:23,conv_transpose2d:23,conv_transpose3d:23,convers:47,convolut:[22,23,46],convtranspose1d:22,convtranspose2d:22,convtranspose3d:22,core:5,correct:[1,25],cosine_embedding_loss:23,cosine_similar:23,cosineembeddingloss:22,cosinesimilar:22,cpp:35,cpp_extens:7,cpu:[6,27],creat:19,creation:43,cross_entropi:23,crossentropyloss:22,ctc_loss:23,ctcloss:22,cuda:[6,8,21,28,30,32,35],cudnn:33,cufft:28,custom:[29,36],data:[13,30],data_parallel:23,dataload:6,dataparallel:[22,23,28],dataset:[13,44],datasetfold:44,deadlock:32,debug:19,decis:5,deeplabv3:46,defin:19,densenet:46,deploy:31,deprec:1,deriv:15,descriptor:21,detect:[1,44,46],develop:[4,5],devic:[28,40],dict:19,differenti:1,dirichlet:15,disabl:[1,13,19,43],discuss:4,distanc:[22,23],distribut:[6,14,15,22,23],distributeddataparallel:22,diverg:15,dlpack:16,doc:4,document:[4,18],doesn:30,down:35,download:17,driver:35,dropout2d:[22,23],dropout3d:[22,23],dropout:[22,23],dtype:40,edg:19,elu:[22,23],embed:[22,23],embedding_bag:23,embeddingbag:22,emnist:44,encod:25,end:36,engin:6,entrypoint:17,environ:[14,31],error:[30,35],event:8,exampl:36,exclud:25
,execut:28,exponenti:15,exponentialfamili:15,express:19,extend:29,extens:[8,29,31,35],fakedata:44,faq:[5,35],fashion:44,faster:46,featur:[4,31],fight:32,file:[14,21],file_descriptor:21,file_system:21,finfo:49,fishersnedecor:15,fix:4,flatten:22,fleet:31,flickr:44,fold:[22,23],found:35,fractionalmaxpool2d:22,freed:30,frequent:[19,30,36],from:[17,25,35,36],fulli:46,gamma:15,gelu:23,gener:[6,8,26,43,47],geometr:15,get:4,glu:23,googlenet:46,govern:[5,6],gpu:[14,22,23,30],gradient:[1,43],graph:19,grid_sampl:23,group:14,groupnorm:22,gru:22,grucel:22,guid:[4,5],gumbel:15,gumbel_softmax:23,halfcauchi:15,halfnorm:15,hardshrink:[22,23],hardtanh:[22,23],hinge_embedding_loss:23,hingeembeddingloss:22,hip:6,histori:25,hmdb51:44,hogwild:32,how:[17,25,37],hub:17,ident:[22,30],iinfo:49,imag:47,imagefold:44,imagenet:44,implement:[4,17],improv:4,incept:46,includ:35,independ:15,index:43,indic:18,infer:27,info:49,init:24,initi:14,inspect:19,instal:35,instanc:46,instance_norm:23,instancenorm1d:22,instancenorm2d:22,instancenorm3d:22,instead:28,interest:6,interfac:14,interpol:23,interpret:19,ipc:35,isn:30,issu:4,iter:13,jit:[6,19],join:43,kei:[5,35],keypoint:46,kinet:44,kl_div:23,kldivloss:22,kmnist:44,known:17,l1_loss:23,l1loss:22,languag:19,lapack:43,laplac:15,larg:31,launch:14,layer:22,layer_norm:23,layernorm:22,layout:40,leaky_relu:23,leakyrelu:22,learn:37,level:6,limit:[17,36],linear:[22,23],list:19,liter:19,load:[13,17],loader:30,local:[1,43],local_response_norm:23,localresponsenorm:22,log:31,log_softmax:23,logic:[17,19],logitrelaxedbernoulli:15,lognorm:15,logsigmoid:[22,23],logsoftmax:22,lookup:19,loss:[22,23],lowrankmultivariatenorm:15,lp_pool1d:23,lp_pool2d:23,lppool1d:22,lppool2d:22,lstm:22,lstmcell:22,lsun:44,maintain:[5,6],make:[4,5],manag:[8,21,28],map:13,margin_ranking_loss:23,marginrankingloss:22,mask:46,math:43,max_pool1d:23,max_pool2d:23,max_pool3d:23,max_unpool1d:23,max_unpool2d:23,max_unpool3d:23,maxpool1d:22,maxpool2d:22,maxpool3d:22,maxunpool1d:22,maxunpool2d:22,maxunpool3d:22,mechan:25,memori:[8,13,28,30],metadata:31,method:19,mistak:4,mix:19,mkldnn:6,mnasnet:46,mnist:44,mobilenet:46,model:[17,30,31,34,46],model_zoo:20,moder:5,modul:[6,19,22,29],moduledict:22,modulelist:22,more:4,mse_loss:23,mseloss:22,multi:[13,14,22,23],multi_margin_loss:23,multiheadattent:22,multilabel_margin_loss:23,multilabel_soft_margin_loss:23,multilabelmarginloss:22,multilabelsoftmarginloss:22,multimarginloss:22,multinomi:15,multiprocess:[6,21,28,32,35],multivariatenorm:15,mutat:43,nccl:14,negativebinomi:15,network:[14,30,46],nll_loss:23,nllloss:22,non:[13,22,23,36],nonlinear:22,normal:[15,22,23],notic:17,number:[8,30],numer:1,numpi:33,nvidia:8,nvtx:8,object:46,one_hot:23,onehotcategor:15,onlin:4,onnx:[6,36],open:4,oper:[1,14,19,25,31,35,36,43],optim:37,option:[19,27,35,37],order:13,other:[14,22,43],out:30,overview:4,pack_padded_sequ:22,pack_sequ:22,packag:[1,14,21,35],packedsequ:22,pad:[22,23],pad_packed_sequ:22,pad_sequ:22,pairwise_dist:23,pairwisedist:22,parallel:[30,43],paramet:[19,22,37],parameterdict:22,parameterlist:22,parameters_to_vector:22,pareto:15,particip:4,pass:32,pathwis:15,pdist:23,peopl:5,per:37,perform:6,person:[6,46],philosophi:5,phototour:44,pil:47,pin:[13,28],pipe:35,pixel_shuffl:23,pixelshuffl:22,place:[1,25,26,43],plan:28,platform:13,point:[14,31],pointwis:43,poisson:15,poisson_nll_loss:23,poissonnllloss:22,pool:[22,23],ppc:6,practic:[28,32,34],prelu:[22,23],probabl:15,process:[4,5,13],profil:[1,31],project:5,promot:4,properli:30,propos:4,protect:35,publish:17,pull:4,python:[4,19,35],pytorch:[4,5
,6,14,18,29,33,36],qmnist:44,quasi:43,question:[19,30,36],queue:32,random:[8,13,30,43],rate:37,readabl:4,recommend:34,recurr:[22,30],reduct:43,refer:19,refin:19,reflectionpad1d:22,reflectionpad2d:22,registri:15,relaxedbernoulli:15,relaxedonehotcategor:15,relu6:[22,23],relu:[22,23],remove_spectral_norm:22,remove_weight_norm:22,replicationpad1d:22,replicationpad2d:22,replicationpad3d:22,report:[4,30],reproduc:33,request:4,requires_grad:25,resnet:46,resnext:46,resolut:19,reus:32,review:4,rng:6,rnn:22,rnncell:22,robust:4,rocm:6,rrelu:[22,23],run:17,runtim:[27,30,46],sampl:43,sampler:13,save:[17,31,34],sbd:44,sbu:44,scale:31,score:15,script:[19,35,36],segment:46,selu:[22,23],semant:[26,28,34,46],sequenti:22,serial:[34,43],share:[14,21],shufflenet:46,shut:35,sigmoid:[22,23],simd:6,singl:13,slice:43,smooth_l1_loss:23,smoothl1loss:22,soft_margin_loss:23,softmarginloss:22,softmax2d:22,softmax:[22,23],softmin:[22,23],softplu:[22,23],softshrink:[22,23],softsign:[22,23],sourc:[4,35],spars:[22,23,38],spawn:[14,21],specif:13,spectral:43,spectral_norm:22,speed:35,squeezenet:46,start:4,statement:19,step:37,stl10:44,storag:39,strategi:21,stream:[8,28],studentt:15,style:13,subgraph:25,submit:4,subprocess:21,subscript:19,sum:22,support:36,svhn:44,syncbatchnorm:22,synchron:14,system:[14,21],tabl:18,take:37,tanh:[22,23],tanhshrink:[22,23],tcp:14,tenet:5,tensor:[1,21,40,42,43,47],tensorboard:41,ternari:19,test:4,thread:27,threshold:[22,23],through:32,tip:32,tool:8,torch:[0,1,2,3,6,7,8,13,14,15,16,17,20,21,22,23,24,29,36,37,38,39,40,41,42,43,47,49],torchscript:[19,27,31],torchvis:[44,45,46,47,48],trace:[19,36],tracer:19,train:32,transform:[15,22,47],transformeddistribut:15,transformerdecod:22,transformerdecoderlay:22,transformerencod:22,transformerencoderlay:22,triag:4,triplet_margin_loss:23,tripletmarginloss:22,tupl:19,tutori:4,type:[13,19,49],ucf101:44,uncontroversi:5,unfold:[22,23],uniform:15,upsampl:[22,23],upsample_bilinear:23,upsample_nearest:23,upsamplingbilinear2d:22,upsamplingnearest2d:22,usag:[31,35],use:[14,37],user:19,usp:44,util:[2,3,7,13,14,16,20,22,41,43,48],valu:19,variabl:[1,14,19],vector_to_paramet:22,vgg:46,vision:[22,23],voc:44,warn:19,weibul:15,weight:22,weight_norm:22,where:17,which:14,why:35,wide:[31,46],win:35,window:[6,35],without:35,work:[13,30],worker:30,write:29,xla:6,zeropad2d:22}}) \ No newline at end of file diff --git a/docs/stable/torch.html b/docs/stable/torch.html index 5114ed429bea..bd7fd29104e6 100644 --- a/docs/stable/torch.html +++ b/docs/stable/torch.html @@ -8126,7 +8126,7 @@

     BLAS and LAPACK Operations
     torch.det(input) → Tensor
-
-    Calculates determinant of a 2D square tensor.
-
+
+    Calculates determinant of a square matrix or batches of square matrices.
+
     Note

     Backward through det() internally uses SVD results when input is
@@ -8136,13 +8136,27 @@
     Parameters
-
-    input (Tensor) – The input 2D square tensor
-
+
+    input (Tensor) – the input tensor of size (*, n, n) where * is zero or more
+    batch dimensions.
+
    Example:

    >>> A = torch.randn(3, 3)
     >>> torch.det(A)
     tensor(3.7641)
    +
    +>>> A = torch.randn(3, 2, 2)
    +>>> A
    +tensor([[[ 0.9254, -0.6213],
    +         [-0.5787,  1.6843]],
    +
    +        [[ 0.3242, -0.9665],
    +         [ 0.4539, -0.0887]],
    +
    +        [[ 1.1336, -0.4025],
    +         [-0.7089,  0.9032]]])
    +>>> A.det()
    +tensor([1.1990, 0.4099, 0.7386])
     

    @@ -8150,7 +8164,7 @@

     BLAS and LAPACK Operations
     torch.logdet(input) → Tensor
-
-    Calculates log determinant of a 2D square tensor.
-
+
+    Calculates log determinant of a square matrix or batches of square matrices.
+
     Note

     Result is -inf if input has zero log determinant, and is nan if
@@ -8165,7 +8179,8 @@

     Parameters
-
-    input (Tensor) – The input 2D square tensor
-
+
+    input (Tensor) – the input tensor of size (*, n, n) where * is zero or more
+    batch dimensions.
+

    Example:

    @@ -8174,6 +8189,19 @@

     >>> A = torch.randn(3, 3)
     >>> torch.det(A)
     tensor(0.2611)
     >>> torch.logdet(A)
     tensor(-1.3430)
+>>> A
+tensor([[[ 0.9254, -0.6213],
+         [-0.5787,  1.6843]],
+
+        [[ 0.3242, -0.9665],
+         [ 0.4539, -0.0887]],
+
+        [[ 1.1336, -0.4025],
+         [-0.7089,  0.9032]]])
+>>> A.det()
+tensor([1.1990, 0.4099, 0.7386])
+>>> A.det().log()
+tensor([ 0.1815, -0.8917, -0.3031])

    @@ -8181,7 +8209,7 @@

     BLAS and LAPACK Operations
     torch.slogdet(input) -> (Tensor, Tensor)
-
-    Calculates the sign and log value of a 2D square tensor’s determinant.
-
+
+    Calculates the sign and log absolute value of the determinant(s) of a square matrix or batches of square matrices.
+

    Note

    If input has zero determinant, this returns (0, -inf).

    @@ -8195,7 +8223,8 @@

     Parameters
-
-    input (Tensor) – The input 2D square tensor
-
+
+    input (Tensor) – the input tensor of size (*, n, n) where * is zero or more
+    batch dimensions.
+

    Returns

    A namedtuple (sign, logabsdet) containing the sign of the determinant, and the log
    value of the absolute determinant.

From 5b0853fae260fca7cdac4b3df59210c124235598 Mon Sep 17 00:00:00 2001
From: pytorchbot
Date: Sat, 3 Aug 2019 06:49:38 +0000
Subject: [PATCH 07/12] auto-generating sphinx docs

---
 docs/stable/_modules/index.html                |   4 +
 .../stable/_modules/torchvision/io/video.html  | 755 ++++++++++++++++++
 .../_modules/torchvision/ops/boxes.html        | 670 ++++++++++++++++
 .../_modules/torchvision/ops/roi_align.html    | 605 ++++++++++++++
 .../_modules/torchvision/ops/roi_pool.html     | 596 ++++++++++++++
 docs/stable/_sources/torch.rst.txt             |   1 +
 .../stable/_sources/torchvision/index.rst.txt  |   2 +
 docs/stable/_sources/torchvision/io.rst.txt    |  16 +
 docs/stable/_sources/torchvision/ops.rst.txt   |  17 +
 docs/stable/genindex.html                      |  30 +-
 docs/stable/index.html                         |   2 +
 docs/stable/objects.inv                        | Bin 11478 -> 11574 bytes
 docs/stable/searchindex.js                     |   2 +-
 docs/stable/torch.html                         |  47 ++
 docs/stable/torchvision/datasets.html          |   4 +-
 docs/stable/torchvision/index.html             |   5 +
 docs/stable/torchvision/io.html                | 600 ++++++++++++++
 docs/stable/torchvision/models.html            |   8 +-
 docs/stable/torchvision/ops.html               | 633 +++++++++++++++
 docs/stable/torchvision/transforms.html        |   4 +-
 20 files changed, 3986 insertions(+), 15 deletions(-)
 create mode 100644 docs/stable/_modules/torchvision/io/video.html
 create mode 100644 docs/stable/_modules/torchvision/ops/boxes.html
 create mode 100644 docs/stable/_modules/torchvision/ops/roi_align.html
 create mode 100644 docs/stable/_modules/torchvision/ops/roi_pool.html
 create mode 100644 docs/stable/_sources/torchvision/io.rst.txt
 create mode 100644 docs/stable/_sources/torchvision/ops.rst.txt
 create mode 100644 docs/stable/torchvision/io.html
 create mode 100644 docs/stable/torchvision/ops.html

diff --git a/docs/stable/_modules/index.html b/docs/stable/_modules/index.html
index b61792e352b5..c9d54de7adff 100644
--- a/docs/stable/_modules/index.html
+++ b/docs/stable/_modules/index.html
@@ -397,6 +397,7 @@

    All modules for which code is available

  • torchvision.datasets.ucf101
  • torchvision.datasets.usps
  • torchvision.datasets.voc
+  • torchvision.io.video
  • torchvision.models.alexnet
  • torchvision.models.densenet
  • torchvision.models.detection.faster_rcnn
@@ -411,6 +412,9 @@

    All modules for which code is available

  • torchvision.models.shufflenetv2
  • torchvision.models.squeezenet
  • torchvision.models.vgg
+  • torchvision.ops.boxes
+  • torchvision.ops.roi_align
+  • torchvision.ops.roi_pool
  • torchvision.transforms.functional
  • torchvision.transforms.transforms
  • torchvision.utils
diff --git a/docs/stable/_modules/torchvision/io/video.html b/docs/stable/_modules/torchvision/io/video.html
new file mode 100644
index 000000000000..19863f6ae77e
--- /dev/null
+++ b/docs/stable/_modules/torchvision/io/video.html
@@ -0,0 +1,755 @@
[Sphinx page header and navigation markup omitted]

    Source code for torchvision.io.video

    +import re
    +import gc
    +import torch
    +import numpy as np
    +
    +try:
    +    import av
    +    av.logging.set_level(av.logging.ERROR)
    +except ImportError:
    +    av = None
    +
    +
    +def _check_av_available():
    +    if av is None:
    +        raise ImportError("""\
    +PyAV is not installed, and is necessary for the video operations in torchvision.
    +See https://github.com/mikeboers/PyAV#installation for instructions on how to
    +install PyAV on your system.
    +""")
    +
    +
    +# PyAV has some reference cycles
    +_CALLED_TIMES = 0
    +_GC_COLLECTION_INTERVAL = 10
    +
    +
    +
+def write_video(filename, video_array, fps, video_codec='libx264', options=None):
+    """
+    Writes a 4d tensor in [T, H, W, C] format in a video file
+
+    Parameters
+    ----------
+    filename : str
+        path where the video will be saved
+    video_array : Tensor[T, H, W, C]
+        tensor containing the individual frames, as a uint8 tensor in [T, H, W, C] format
+    fps : Number
+        frames per second
+    video_codec : str
+        the name of the video codec, e.g. 'libx264'
+    options : Dict
+        dictionary of options to be passed on to the PyAV video stream
+    """
+    _check_av_available()
+    video_array = torch.as_tensor(video_array, dtype=torch.uint8).numpy()
+
+    container = av.open(filename, mode='w')
+
+    stream = container.add_stream(video_codec, rate=fps)
+    stream.width = video_array.shape[2]
+    stream.height = video_array.shape[1]
+    stream.pix_fmt = 'yuv420p' if video_codec != 'libx264rgb' else 'rgb24'
+    stream.options = options or {}
+
+    for img in video_array:
+        frame = av.VideoFrame.from_ndarray(img, format='rgb24')
+        frame.pict_type = 'NONE'
+        for packet in stream.encode(frame):
+            container.mux(packet)
+
+    # Flush stream
+    for packet in stream.encode():
+        container.mux(packet)
+
+    # Close the file
+    container.close()
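# A minimal usage sketch of write_video, assuming PyAV is installed and
# "out.mp4" is a writable path (values are illustrative only):
#
#   frames = (torch.rand(16, 240, 320, 3) * 255).to(torch.uint8)  # [T, H, W, C]
#   write_video("out.mp4", frames, fps=30)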
+
+
+def _read_from_stream(container, start_offset, end_offset, stream, stream_name):
+    global _CALLED_TIMES, _GC_COLLECTION_INTERVAL
+    _CALLED_TIMES += 1
+    if _CALLED_TIMES % _GC_COLLECTION_INTERVAL == _GC_COLLECTION_INTERVAL - 1:
+        gc.collect()
+
+    frames = {}
+    should_buffer = False
+    max_buffer_size = 5
+    if stream.type == "video":
+        # DivX-style packed B-frames can have out-of-order pts (2 frames in a single pkt)
+        # so need to buffer some extra frames to sort everything
+        # properly
+        extradata = stream.codec_context.extradata
+        # overly complicated way of finding if `divx_packed` is set, following
+        # https://github.com/FFmpeg/FFmpeg/commit/d5a21172283572af587b3d939eba0091484d3263
+        if extradata and b"DivX" in extradata:
+            # can't use regex directly because of some weird characters sometimes...
+            pos = extradata.find(b"DivX")
+            d = extradata[pos:]
+            o = re.search(br"DivX(\d+)Build(\d+)(\w)", d)
+            if o is None:
+                o = re.search(br"DivX(\d+)b(\d+)(\w)", d)
+            if o is not None:
+                should_buffer = o.group(3) == b"p"
+    seek_offset = start_offset
+    # some files don't seek to the right location, so better be safe here
+    seek_offset = max(seek_offset - 1, 0)
+    if should_buffer:
+        # FIXME this is kind of a hack, but we will jump to the previous keyframe
+        # so this will be safe
+        seek_offset = max(seek_offset - max_buffer_size, 0)
+    try:
+        # TODO check if stream needs to always be the video stream here or not
+        container.seek(seek_offset, any_frame=False, backward=True, stream=stream)
+    except av.AVError:
+        # TODO add some warnings in this case
+        # print("Corrupted file?", container.name)
+        return []
+    buffer_count = 0
+    for idx, frame in enumerate(container.decode(**stream_name)):
+        frames[frame.pts] = frame
+        if frame.pts >= end_offset:
+            if should_buffer and buffer_count < max_buffer_size:
+                buffer_count += 1
+                continue
+            break
+    # ensure that the results are sorted wrt the pts
+    result = [frames[i] for i in sorted(frames) if start_offset <= frames[i].pts <= end_offset]
+    if start_offset > 0 and start_offset not in frames:
+        # if there is no frame that exactly matches the pts of start_offset
+        # add the last frame smaller than start_offset, to guarantee that
+        # we will have all the necessary data. This is most useful for audio
+        first_frame_pts = max(i for i in frames if i < start_offset)
+        result.insert(0, frames[first_frame_pts])
+    return result
+
+
+def _align_audio_frames(aframes, audio_frames, ref_start, ref_end):
+    start, end = audio_frames[0].pts, audio_frames[-1].pts
+    total_aframes = aframes.shape[1]
+    step_per_aframe = (end - start + 1) / total_aframes
+    s_idx = 0
+    e_idx = total_aframes
+    if start < ref_start:
+        s_idx = int((ref_start - start) / step_per_aframe)
+    if end > ref_end:
+        e_idx = int((ref_end - end) / step_per_aframe)
+    return aframes[:, s_idx:e_idx]
+
+
+def read_video(filename, start_pts=0, end_pts=None):
+    """
+    Reads a video from a file, returning both the video frames as well as
+    the audio frames
+
+    Parameters
+    ----------
+    filename : str
+        path to the video file
+    start_pts : int, optional
+        the start presentation time of the video
+    end_pts : int, optional
+        the end presentation time
+
+    Returns
+    -------
+    vframes : Tensor[T, H, W, C]
+        the `T` video frames
+    aframes : Tensor[K, L]
+        the audio frames, where `K` is the number of channels and `L` is the
+        number of points
+    info : Dict
+        metadata for the video and audio. Can contain the fields video_fps (float)
+        and audio_fps (int)
+    """
+    _check_av_available()
+
+    if end_pts is None:
+        end_pts = float("inf")
+
+    if end_pts < start_pts:
+        raise ValueError("end_pts should be larger than start_pts, got "
+                         "start_pts={} and end_pts={}".format(start_pts, end_pts))
+
+    container = av.open(filename, metadata_errors='ignore')
+    info = {}
+
+    video_frames = []
+    if container.streams.video:
+        video_frames = _read_from_stream(container, start_pts, end_pts,
+                                         container.streams.video[0], {'video': 0})
+        info["video_fps"] = float(container.streams.video[0].average_rate)
+    audio_frames = []
+    if container.streams.audio:
+        audio_frames = _read_from_stream(container, start_pts, end_pts,
+                                         container.streams.audio[0], {'audio': 0})
+        info["audio_fps"] = container.streams.audio[0].rate
+
+    container.close()
+
+    vframes = [frame.to_rgb().to_ndarray() for frame in video_frames]
+    aframes = [frame.to_ndarray() for frame in audio_frames]
+    vframes = torch.as_tensor(np.stack(vframes))
+    if aframes:
+        aframes = np.concatenate(aframes, 1)
+        aframes = torch.as_tensor(aframes)
+        aframes = _align_audio_frames(aframes, audio_frames, start_pts, end_pts)
+    else:
+        aframes = torch.empty((1, 0), dtype=torch.float32)
+
+    return vframes, aframes, info
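# A minimal usage sketch, assuming PyAV is installed and "clip.mp4" is an
# existing video file (the shapes and fps values below are examples):
#
#   vframes, aframes, info = read_video("clip.mp4")
#   vframes.shape  # torch.Size([T, H, W, 3]), uint8 frames
#   info           # e.g. {'video_fps': 30.0, 'audio_fps': 44100}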
+
+
+def _can_read_timestamps_from_packets(container):
+    extradata = container.streams[0].codec_context.extradata
+    if extradata is None:
+        return False
+    if b"Lavc" in extradata:
+        return True
+    return False
+
+
+def read_video_timestamps(filename):
+    """
+    List the video frames timestamps.
+
+    Note that the function decodes the whole video frame-by-frame.
+
+    Parameters
+    ----------
+    filename : str
+        path to the video file
+
+    Returns
+    -------
+    pts : List[int]
+        presentation timestamps for each one of the frames in the video.
+    video_fps : int
+        the frame rate for the video
+
+    """
+    _check_av_available()
+    container = av.open(filename, metadata_errors='ignore')
+
+    video_frames = []
+    video_fps = None
+    if container.streams.video:
+        if _can_read_timestamps_from_packets(container):
+            # fast path
+            video_frames = [x for x in container.demux(video=0) if x.pts is not None]
+        else:
+            video_frames = _read_from_stream(container, 0, float("inf"),
+                                             container.streams.video[0], {'video': 0})
+        video_fps = float(container.streams.video[0].average_rate)
+    container.close()
+    return [x.pts for x in video_frames], video_fps
[Sphinx page footer markup omitted]
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/ops/boxes.html b/docs/stable/_modules/torchvision/ops/boxes.html
new file mode 100644
index 000000000000..1e047f69b311
--- /dev/null
+++ b/docs/stable/_modules/torchvision/ops/boxes.html
@@ -0,0 +1,670 @@
[Sphinx page header and navigation markup omitted]

    Source code for torchvision.ops.boxes

    +import torch
    +from torchvision.extension import _lazy_import
    +
    +
    +
+def nms(boxes, scores, iou_threshold):
+    """
+    Performs non-maximum suppression (NMS) on the boxes according
+    to their intersection-over-union (IoU).
+
+    NMS iteratively removes lower scoring boxes which have an
+    IoU greater than iou_threshold with another (higher scoring)
+    box.
+
+    Parameters
+    ----------
+    boxes : Tensor[N, 4]
+        boxes to perform NMS on. They
+        are expected to be in (x1, y1, x2, y2) format
+    scores : Tensor[N]
+        scores for each one of the boxes
+    iou_threshold : float
+        discards all overlapping
+        boxes with IoU > iou_threshold
+
+    Returns
+    -------
+    keep : Tensor
+        int64 tensor with the indices
+        of the elements that have been kept
+        by NMS, sorted in decreasing order of scores
+    """
+    _C = _lazy_import()
+    return _C.nms(boxes, scores, iou_threshold)
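# A worked example of the suppression above, with made-up boxes: boxes 0 and 1
# overlap with IoU = 81 / 119, roughly 0.68 > 0.5, so the lower-scoring box 1
# is discarded, while box 2 overlaps nothing and is kept:
#
#   boxes = torch.tensor([[ 0.,  0., 10., 10.],
#                         [ 1.,  1., 11., 11.],
#                         [50., 50., 60., 60.]])
#   scores = torch.tensor([0.9, 0.8, 0.7])
#   nms(boxes, scores, iou_threshold=0.5)  # -> tensor([0, 2])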
+
+
+def batched_nms(boxes, scores, idxs, iou_threshold):
+    """
+    Performs non-maximum suppression in a batched fashion.
+
+    Each index value corresponds to a category, and NMS
+    will not be applied between elements of different categories.
+
+    Parameters
+    ----------
+    boxes : Tensor[N, 4]
+        boxes where NMS will be performed. They
+        are expected to be in (x1, y1, x2, y2) format
+    scores : Tensor[N]
+        scores for each one of the boxes
+    idxs : Tensor[N]
+        indices of the categories for each one of the boxes.
+    iou_threshold : float
+        discards all overlapping boxes
+        with IoU > iou_threshold
+
+    Returns
+    -------
+    keep : Tensor
+        int64 tensor with the indices of
+        the elements that have been kept by NMS, sorted
+        in decreasing order of scores
+    """
+    if boxes.numel() == 0:
+        return torch.empty((0,), dtype=torch.int64, device=boxes.device)
+    # strategy: in order to perform NMS independently per class,
+    # we add an offset to all the boxes. The offset is dependent
+    # only on the class idx, and is large enough so that boxes
+    # from different classes do not overlap
+    max_coordinate = boxes.max()
+    offsets = idxs.to(boxes) * (max_coordinate + 1)
+    boxes_for_nms = boxes + offsets[:, None]
+    keep = nms(boxes_for_nms, scores, iou_threshold)
+    return keep
+
+
+def remove_small_boxes(boxes, min_size):
+    """
+    Remove boxes which contain at least one side smaller than min_size.
+
+    Arguments:
+        boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format
+        min_size (int): minimum size
+
+    Returns:
+        keep (Tensor[K]): indices of the boxes that have both sides
+            larger than min_size
+    """
+    ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]
+    keep = (ws >= min_size) & (hs >= min_size)
+    keep = keep.nonzero().squeeze(1)
+    return keep
+
+
+def clip_boxes_to_image(boxes, size):
+    """
+    Clip boxes so that they lie inside an image of size `size`.
+
+    Arguments:
+        boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format
+        size (Tuple[height, width]): size of the image
+
+    Returns:
+        clipped_boxes (Tensor[N, 4])
+    """
+    dim = boxes.dim()
+    boxes_x = boxes[..., 0::2]
+    boxes_y = boxes[..., 1::2]
+    height, width = size
+    boxes_x = boxes_x.clamp(min=0, max=width)
+    boxes_y = boxes_y.clamp(min=0, max=height)
+    clipped_boxes = torch.stack((boxes_x, boxes_y), dim=dim)
+    return clipped_boxes.reshape(boxes.shape)
+
+
+def box_area(boxes):
+    """
+    Computes the area of a set of bounding boxes, which are specified by their
+    (x1, y1, x2, y2) coordinates.
+
+    Arguments:
+        boxes (Tensor[N, 4]): boxes for which the area will be computed. They
+            are expected to be in (x1, y1, x2, y2) format
+
+    Returns:
+        area (Tensor[N]): area for each box
+    """
+    return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
+
+
+# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
+# with slight modifications
+def box_iou(boxes1, boxes2):
+    """
+    Return intersection-over-union (Jaccard index) of boxes.
+
+    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
+
+    Arguments:
+        boxes1 (Tensor[N, 4])
+        boxes2 (Tensor[M, 4])
+
+    Returns:
+        iou (Tensor[N, M]): the NxM matrix containing the pairwise
+            IoU values for every element in boxes1 and boxes2
+    """
+    area1 = box_area(boxes1)
+    area2 = box_area(boxes2)
+
+    lt = torch.max(boxes1[:, None, :2], boxes2[:, :2])  # [N,M,2]
+    rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N,M,2]
+
+    wh = (rb - lt).clamp(min=0)  # [N,M,2]
+    inter = wh[:, :, 0] * wh[:, :, 1]  # [N,M]
+
+    iou = inter / (area1[:, None] + area2 - inter)
+    return iou
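# A worked example of the class-offset trick in batched_nms, with made-up
# values: max_coordinate is 11, so the class-1 box is shifted by 12 and can no
# longer overlap the class-0 box; a single nms call then suppresses only
# within each category, and both boxes survive:
#
#   boxes = torch.tensor([[0., 0., 10., 10.], [1., 1., 11., 11.]])
#   scores = torch.tensor([0.9, 0.8])
#   idxs = torch.tensor([0, 1])  # two different categories
#   batched_nms(boxes, scores, idxs, iou_threshold=0.5)  # -> tensor([0, 1])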
[Sphinx page footer markup omitted]
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/ops/roi_align.html b/docs/stable/_modules/torchvision/ops/roi_align.html
new file mode 100644
index 000000000000..29b0ad576663
--- /dev/null
+++ b/docs/stable/_modules/torchvision/ops/roi_align.html
@@ -0,0 +1,605 @@
[Sphinx page header and navigation markup omitted]

    Source code for torchvision.ops.roi_align

    +import torch
    +from torch import nn
    +
    +from torch.autograd import Function
    +from torch.autograd.function import once_differentiable
    +
    +from torch.nn.modules.utils import _pair
    +
    +from torchvision.extension import _lazy_import
    +from ._utils import convert_boxes_to_roi_format
    +
    +
    +class _RoIAlignFunction(Function):
    +    @staticmethod
    +    def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):
    +        ctx.save_for_backward(roi)
    +        ctx.output_size = _pair(output_size)
    +        ctx.spatial_scale = spatial_scale
    +        ctx.sampling_ratio = sampling_ratio
    +        ctx.input_shape = input.size()
    +        _C = _lazy_import()
    +        output = _C.roi_align_forward(
    +            input, roi, spatial_scale,
    +            output_size[0], output_size[1], sampling_ratio)
    +        return output
    +
    +    @staticmethod
    +    @once_differentiable
    +    def backward(ctx, grad_output):
    +        rois, = ctx.saved_tensors
    +        output_size = ctx.output_size
    +        spatial_scale = ctx.spatial_scale
    +        sampling_ratio = ctx.sampling_ratio
    +        bs, ch, h, w = ctx.input_shape
    +        _C = _lazy_import()
    +        grad_input = _C.roi_align_backward(
    +            grad_output, rois, spatial_scale,
    +            output_size[0], output_size[1], bs, ch, h, w, sampling_ratio)
    +        return grad_input, None, None, None, None
    +
    +
    +
+def roi_align(input, boxes, output_size, spatial_scale=1.0, sampling_ratio=-1):
+    """
+    Performs Region of Interest (RoI) Align operator described in Mask R-CNN
+
+    Arguments:
+        input (Tensor[N, C, H, W]): input tensor
+        boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)
+            format where the regions will be taken from. If a single Tensor is passed,
+            then the first column should contain the batch index. If a list of Tensors
+            is passed, then each Tensor will correspond to the boxes for an element i
+            in a batch
+        output_size (int or Tuple[int, int]): the size of the output after the cropping
+            is performed, as (height, width)
+        spatial_scale (float): a scaling factor that maps the input coordinates to
+            the box coordinates. Default: 1.0
+        sampling_ratio (int): number of sampling points in the interpolation grid
+            used to compute the output value of each pooled output bin. If > 0,
+            then exactly sampling_ratio x sampling_ratio grid points are used. If
+            <= 0, then an adaptive number of grid points are used (computed as
+            ceil(roi_width / pooled_w), and likewise for height). Default: -1
+
+    Returns:
+        output (Tensor[K, C, output_size[0], output_size[1]])
+    """
+    rois = boxes
+    if not isinstance(rois, torch.Tensor):
+        rois = convert_boxes_to_roi_format(rois)
+    return _RoIAlignFunction.apply(input, rois, output_size, spatial_scale, sampling_ratio)
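# A note on spatial_scale, with assumed example numbers: if the boxes are
# expressed in the coordinates of a 224x224 input image while `input` is a
# 14x14 feature map produced from it, then spatial_scale = 14 / 224 = 0.0625.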
    + + +
    [docs]class RoIAlign(nn.Module): + """ + See roi_align + """ + def __init__(self, output_size, spatial_scale, sampling_ratio): + super(RoIAlign, self).__init__() + self.output_size = output_size + self.spatial_scale = spatial_scale + self.sampling_ratio = sampling_ratio + + def forward(self, input, rois): + return roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio) + + def __repr__(self): + tmpstr = self.__class__.__name__ + '(' + tmpstr += 'output_size=' + str(self.output_size) + tmpstr += ', spatial_scale=' + str(self.spatial_scale) + tmpstr += ', sampling_ratio=' + str(self.sampling_ratio) + tmpstr += ')' + return tmpstr
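
For reference, a minimal usage sketch of roi_align (shapes and box values are illustrative only; it assumes a torchvision build whose compiled C extension is importable, since _lazy_import loads it on first use):

    import torch
    from torchvision.ops import roi_align

    feats = torch.randn(1, 8, 16, 16)             # N=1 feature map with C=8 channels
    # One RoI as Tensor[K, 5]: (batch_index, x1, y1, x2, y2), already in
    # feature-map coordinates because spatial_scale stays at 1.0.
    rois = torch.tensor([[0., 2., 2., 10., 10.]])
    out = roi_align(feats, rois, output_size=(3, 3), sampling_ratio=2)
    assert out.shape == (1, 8, 3, 3)              # Tensor[K, C, out_h, out_w]

With sampling_ratio=2, each of the 3x3 output bins averages 2x2 bilinearly interpolated samples; that interpolation is what distinguishes RoI Align from the quantizing RoI Pool operator in the next file.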
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/ops/roi_pool.html b/docs/stable/_modules/torchvision/ops/roi_pool.html
new file mode 100644
index 000000000000..fc307c2d72c4
--- /dev/null
+++ b/docs/stable/_modules/torchvision/ops/roi_pool.html
@@ -0,0 +1,596 @@
+torchvision.ops.roi_pool — PyTorch master documentation
+[theme head and navigation markup elided]

+Source code for torchvision.ops.roi_pool

    +import torch
    +from torch import nn
    +
    +from torch.autograd import Function
    +from torch.autograd.function import once_differentiable
    +
    +from torch.nn.modules.utils import _pair
    +
    +from torchvision.extension import _lazy_import
    +from ._utils import convert_boxes_to_roi_format
    +
    +
    +class _RoIPoolFunction(Function):
    +    @staticmethod
    +    def forward(ctx, input, rois, output_size, spatial_scale):
    +        ctx.output_size = _pair(output_size)
    +        ctx.spatial_scale = spatial_scale
    +        ctx.input_shape = input.size()
    +        _C = _lazy_import()
+        # Index the _pair-normalized size so an int output_size also works.
+        output, argmax = _C.roi_pool_forward(
+            input, rois, spatial_scale,
+            ctx.output_size[0], ctx.output_size[1])
    +        ctx.save_for_backward(rois, argmax)
    +        return output
    +
    +    @staticmethod
    +    @once_differentiable
    +    def backward(ctx, grad_output):
    +        rois, argmax = ctx.saved_tensors
    +        output_size = ctx.output_size
    +        spatial_scale = ctx.spatial_scale
    +        bs, ch, h, w = ctx.input_shape
    +        _C = _lazy_import()
    +        grad_input = _C.roi_pool_backward(
    +            grad_output, rois, argmax, spatial_scale,
    +            output_size[0], output_size[1], bs, ch, h, w)
    +        return grad_input, None, None, None
    +
    +
    +
+def roi_pool(input, boxes, output_size, spatial_scale=1.0):
+    """
+    Performs the Region of Interest (RoI) Pool operator described in Fast R-CNN
+
+    Arguments:
+        input (Tensor[N, C, H, W]): input tensor
+        boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)
+            format where the regions will be taken from. If a single Tensor is passed,
+            then the first column should contain the batch index. If a list of Tensors
+            is passed, then each Tensor will correspond to the boxes for an element i
+            in a batch
+        output_size (int or Tuple[int, int]): the size of the output after the cropping
+            is performed, as (height, width)
+        spatial_scale (float): a scaling factor that maps the input coordinates to
+            the box coordinates. Default: 1.0
+
+    Returns:
+        output (Tensor[K, C, output_size[0], output_size[1]])
+    """
+    rois = boxes
+    if not isinstance(rois, torch.Tensor):
+        rois = convert_boxes_to_roi_format(rois)
+    return _RoIPoolFunction.apply(input, rois, output_size, spatial_scale)
+
+
+class RoIPool(nn.Module):
+    """
+    See roi_pool
+    """
+    def __init__(self, output_size, spatial_scale):
+        super(RoIPool, self).__init__()
+        self.output_size = output_size
+        self.spatial_scale = spatial_scale
+
+    def forward(self, input, rois):
+        return roi_pool(input, rois, self.output_size, self.spatial_scale)
+
+    def __repr__(self):
+        tmpstr = self.__class__.__name__ + '('
+        tmpstr += 'output_size=' + str(self.output_size)
+        tmpstr += ', spatial_scale=' + str(self.spatial_scale)
+        tmpstr += ')'
+        return tmpstr
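
A matching sketch for the list-of-boxes calling convention (again with illustrative values, under the same compiled-extension assumption). Passing a List[Tensor[L, 4]] exercises the convert_boxes_to_roi_format branch, which prepends each box's batch index:

    import torch
    from torchvision.ops import roi_pool

    feats = torch.randn(2, 4, 8, 8)                # batch of N=2 feature maps
    boxes = [torch.tensor([[0., 0., 4., 4.]]),                    # image 0: one box
             torch.tensor([[1., 1., 6., 6.], [2., 2., 7., 7.]])]  # image 1: two boxes
    out = roi_pool(feats, boxes, output_size=(2, 2))
    assert out.shape == (3, 4, 2, 2)               # K = 3 boxes in total

The _RoIPoolFunction class above follows the standard custom-autograd pattern. A toy, self-contained instance of the same pattern (a sketch, not part of torchvision) shows why backward returns one entry per forward argument, with None for the non-tensor ones:

    import torch
    from torch.autograd import Function
    from torch.autograd.function import once_differentiable

    class _Scale(Function):
        @staticmethod
        def forward(ctx, input, factor):
            ctx.factor = factor                    # stash what backward needs
            return input * factor

        @staticmethod
        @once_differentiable                       # backward itself is not differentiated
        def backward(ctx, grad_output):
            return grad_output * ctx.factor, None  # grads for (input, factor)

    x = torch.randn(3, requires_grad=True)
    _Scale.apply(x, 2.0).sum().backward()
    assert torch.allclose(x.grad, torch.full_like(x, 2.0))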
\ No newline at end of file
diff --git a/docs/stable/_sources/torch.rst.txt b/docs/stable/_sources/torch.rst.txt
index ce376788ed7f..dffcaec3f027 100644
--- a/docs/stable/_sources/torch.rst.txt
+++ b/docs/stable/_sources/torch.rst.txt
@@ -49,6 +49,7 @@ Creation Ops
 .. autofunction:: eye
 .. autofunction:: empty
 .. autofunction:: empty_like
+.. autofunction:: empty_strided
 .. autofunction:: full
 .. autofunction:: full_like
 
diff --git a/docs/stable/_sources/torchvision/index.rst.txt b/docs/stable/_sources/torchvision/index.rst.txt
index f8f89f92629b..9de82b6e7fc5 100644
--- a/docs/stable/_sources/torchvision/index.rst.txt
+++ b/docs/stable/_sources/torchvision/index.rst.txt
@@ -9,7 +9,9 @@ architectures, and common image transformations for computer vision.
    :caption: Package Reference
 
    datasets
+   io
    models
+   ops
    transforms
    utils
 
diff --git a/docs/stable/_sources/torchvision/io.rst.txt b/docs/stable/_sources/torchvision/io.rst.txt
new file mode 100644
index 000000000000..e7aeedc07162
--- /dev/null
+++ b/docs/stable/_sources/torchvision/io.rst.txt
@@ -0,0 +1,16 @@
+torchvision.io
+==============
+
+.. currentmodule:: torchvision.io
+
+The :mod:`torchvision.io` package provides functions for performing IO
+operations. They are currently specific to reading and writing video.
+
+Video
+-----
+
+.. autofunction:: read_video
+
+.. autofunction:: read_video_timestamps
+
+.. autofunction:: write_video
diff --git a/docs/stable/_sources/torchvision/ops.rst.txt b/docs/stable/_sources/torchvision/ops.rst.txt
new file mode 100644
index 000000000000..ec87d02556e6
--- /dev/null
+++ b/docs/stable/_sources/torchvision/ops.rst.txt
@@ -0,0 +1,17 @@
+torchvision.ops
+===============
+
+.. currentmodule:: torchvision.ops
+
+:mod:`torchvision.ops` implements operators that are specific to computer vision.
+
+.. note::
+    Those operators currently do not support TorchScript.
+
+
+.. autofunction:: nms
+.. autofunction:: roi_align
+.. autofunction:: roi_pool
+
+.. autoclass:: RoIAlign
+.. autoclass:: RoIPool
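
For quick reference, minimal sketches of two of the new entries documented above (tensor values and file names are illustrative only). torch.empty_strided pairs an explicit size with an explicit stride and leaves the memory uninitialized:

    import torch

    # Element (i, j) lives at storage offset i*1 + j*2: a column-major layout.
    t = torch.empty_strided((2, 3), (1, 2))
    assert t.size() == (2, 3) and t.stride() == (1, 2)

And the torchvision.io video functions, assuming the return conventions documented for this release (frames as Tensor[T, H, W, C], audio frames, and an info dict carrying the frame rates):

    import torchvision.io as io

    vframes, aframes, info = io.read_video("clip.mp4")       # "clip.mp4" is a placeholder path
    pts, video_fps = io.read_video_timestamps("clip.mp4")    # per-frame presentation timestamps
    io.write_video("copy.mp4", vframes, fps=int(info["video_fps"]))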

diff --git a/docs/stable/genindex.html b/docs/stable/genindex.html
index b744665c20fe..0f158432e587 100644
--- a/docs/stable/genindex.html
+++ b/docs/stable/genindex.html
@@ -1331,6 +1331,8 @@ E
   • empty_cache() (in module torch.cuda), [1]
   • empty_like() (in module torch)
+  • empty_strided() (in module torch)
   • enable_grad (class in torch.autograd)
@@ -1429,14 +1431,14 @@ E
   • erf_() (torch.Tensor method)
  [markup-only reordering of the surrounding index entries elided]

diff --git a/docs/stable/index.html b/docs/stable/index.html
index 5a640b76a31d..c202e4e3ff46 100644
--- a/docs/stable/index.html
+++ b/docs/stable/index.html
@@ -329,7 +329,9 @@ PyTorch documentation
   • torchvision
  [HTML markup changes elided]

diff --git a/docs/stable/objects.inv b/docs/stable/objects.inv
index 7e989d607dc2f4ee61a8ce472378652917e36f48..6c29ce27842dc3410e6be19ad596b1927305c9c5 100644
GIT binary patch
delta 11299
 [base85-encoded binary delta elided]

diff --git a/docs/stable/searchindex.js b/docs/stable/searchindex.js
index da53fee741fe..b0b1a585151a 100644
--- a/docs/stable/searchindex.js
+++ b/docs/stable/searchindex.js
@@ -1 +1 @@
 [single-line minified Search.setIndex payload elided]
[7,4,1,""],verify_ninja_availability:[7,4,1,""]},"torch.utils.data":{BatchSampler:[13,1,1,""],ChainDataset:[13,1,1,""],ConcatDataset:[13,1,1,""],DataLoader:[13,1,1,""],Dataset:[13,1,1,""],IterableDataset:[13,1,1,""],RandomSampler:[13,1,1,""],Sampler:[13,1,1,""],SequentialSampler:[13,1,1,""],Subset:[13,1,1,""],SubsetRandomSampler:[13,1,1,""],TensorDataset:[13,1,1,""],WeightedRandomSampler:[13,1,1,""],get_worker_info:[13,4,1,""],random_split:[13,4,1,""]},"torch.utils.data.distributed":{DistributedSampler:[13,1,1,""]},"torch.utils.dlpack":{from_dlpack:[16,4,1,""],to_dlpack:[16,4,1,""]},"torch.utils.model_zoo":{load_url:[20,4,1,""]},"torch.utils.tensorboard.writer":{SummaryWriter:[41,1,1,""]},"torch.utils.tensorboard.writer.SummaryWriter":{__init__:[41,2,1,""],add_audio:[41,2,1,""],add_custom_scalars:[41,2,1,""],add_embedding:[41,2,1,""],add_figure:[41,2,1,""],add_graph:[41,2,1,""],add_histogram:[41,2,1,""],add_image:[41,2,1,""],add_images:[41,2,1,""],add_mesh:[41,2,1,""],add_pr_curve:[41,2,1,""],add_scalar:[41,2,1,""],add_scalars:[41,2,1,""],add_text:[41,2,1,""],add_video:[41,2,1,""],close:[41,2,1,""],flush:[41,2,1,""]},"torchvision.datasets":{CIFAR100:[44,1,1,""],CIFAR10:[44,1,1,""],Cityscapes:[44,1,1,""],CocoCaptions:[44,1,1,""],CocoDetection:[44,1,1,""],DatasetFolder:[44,1,1,""],EMNIST:[44,1,1,""],FakeData:[44,1,1,""],FashionMNIST:[44,1,1,""],Flickr30k:[44,1,1,""],Flickr8k:[44,1,1,""],HMDB51:[44,1,1,""],ImageFolder:[44,1,1,""],ImageNet:[44,1,1,""],KMNIST:[44,1,1,""],Kinetics400:[44,1,1,""],LSUN:[44,1,1,""],MNIST:[44,1,1,""],PhotoTour:[44,1,1,""],QMNIST:[44,1,1,""],SBDataset:[44,1,1,""],SBU:[44,1,1,""],STL10:[44,1,1,""],SVHN:[44,1,1,""],UCF101:[44,1,1,""],USPS:[44,1,1,""],VOCDetection:[44,1,1,""],VOCSegmentation:[44,1,1,""]},"torchvision.datasets.CIFAR10":{__getitem__:[44,2,1,""]},"torchvision.datasets.Cityscapes":{__getitem__:[44,2,1,""]},"torchvision.datasets.CocoCaptions":{__getitem__:[44,2,1,""]},"torchvision.datasets.CocoDetection":{__getitem__:[44,2,1,""]},"torchvision.datasets.DatasetFolder":{__getitem__:[44,2,1,""]},"torchvision.datasets.Flickr30k":{__getitem__:[44,2,1,""]},"torchvision.datasets.Flickr8k":{__getitem__:[44,2,1,""]},"torchvision.datasets.ImageFolder":{__getitem__:[44,2,1,""]},"torchvision.datasets.LSUN":{__getitem__:[44,2,1,""]},"torchvision.datasets.PhotoTour":{__getitem__:[44,2,1,""]},"torchvision.datasets.SBU":{__getitem__:[44,2,1,""]},"torchvision.datasets.STL10":{__getitem__:[44,2,1,""]},"torchvision.datasets.SVHN":{__getitem__:[44,2,1,""]},"torchvision.datasets.USPS":{__getitem__:[44,2,1,""]},"torchvision.datasets.VOCDetection":{__getitem__:[44,2,1,""]},"torchvision.datasets.VOCSegmentation":{__getitem__:[44,2,1,""]},"torchvision.models":{alexnet:[46,4,1,""],densenet121:[46,4,1,""],densenet161:[46,4,1,""],densenet169:[46,4,1,""],densenet201:[46,4,1,""],googlenet:[46,4,1,""],inception_v3:[46,4,1,""],mnasnet0_5:[46,4,1,""],mnasnet0_75:[46,4,1,""],mnasnet1_0:[46,4,1,""],mnasnet1_3:[46,4,1,""],mobilenet_v2:[46,4,1,""],resnet101:[46,4,1,""],resnet152:[46,4,1,""],resnet18:[46,4,1,""],resnet34:[46,4,1,""],resnet50:[46,4,1,""],resnext101_32x8d:[46,4,1,""],resnext50_32x4d:[46,4,1,""],shufflenet_v2_x0_5:[46,4,1,""],shufflenet_v2_x1_0:[46,4,1,""],shufflenet_v2_x1_5:[46,4,1,""],shufflenet_v2_x2_0:[46,4,1,""],squeezenet1_0:[46,4,1,""],squeezenet1_1:[46,4,1,""],vgg11:[46,4,1,""],vgg11_bn:[46,4,1,""],vgg13:[46,4,1,""],vgg13_bn:[46,4,1,""],vgg16:[46,4,1,""],vgg16_bn:[46,4,1,""],vgg19:[46,4,1,""],vgg19_bn:[46,4,1,""],wide_resnet101_2:[46,4,1,""],wide_resnet50_2:[46,4,1,""]},"torc
hvision.models.detection":{fasterrcnn_resnet50_fpn:[46,4,1,""],keypointrcnn_resnet50_fpn:[46,4,1,""],maskrcnn_resnet50_fpn:[46,4,1,""]},"torchvision.models.segmentation":{deeplabv3_resnet101:[46,4,1,""],deeplabv3_resnet50:[46,4,1,""],fcn_resnet101:[46,4,1,""],fcn_resnet50:[46,4,1,""]},"torchvision.transforms":{CenterCrop:[47,1,1,""],ColorJitter:[47,1,1,""],Compose:[47,1,1,""],FiveCrop:[47,1,1,""],Grayscale:[47,1,1,""],Lambda:[47,1,1,""],LinearTransformation:[47,1,1,""],Normalize:[47,1,1,""],Pad:[47,1,1,""],RandomAffine:[47,1,1,""],RandomApply:[47,1,1,""],RandomChoice:[47,1,1,""],RandomCrop:[47,1,1,""],RandomErasing:[47,1,1,""],RandomGrayscale:[47,1,1,""],RandomHorizontalFlip:[47,1,1,""],RandomOrder:[47,1,1,""],RandomPerspective:[47,1,1,""],RandomResizedCrop:[47,1,1,""],RandomRotation:[47,1,1,""],RandomSizedCrop:[47,1,1,""],RandomVerticalFlip:[47,1,1,""],Resize:[47,1,1,""],Scale:[47,1,1,""],TenCrop:[47,1,1,""],ToPILImage:[47,1,1,""],ToTensor:[47,1,1,""],functional:[47,0,0,"-"]},"torchvision.transforms.Normalize":{__call__:[47,2,1,""]},"torchvision.transforms.ToPILImage":{__call__:[47,2,1,""]},"torchvision.transforms.ToTensor":{__call__:[47,2,1,""]},"torchvision.transforms.functional":{adjust_brightness:[47,4,1,""],adjust_contrast:[47,4,1,""],adjust_gamma:[47,4,1,""],adjust_hue:[47,4,1,""],adjust_saturation:[47,4,1,""],affine:[47,4,1,""],crop:[47,4,1,""],erase:[47,4,1,""],five_crop:[47,4,1,""],hflip:[47,4,1,""],normalize:[47,4,1,""],pad:[47,4,1,""],perspective:[47,4,1,""],resize:[47,4,1,""],resized_crop:[47,4,1,""],rotate:[47,4,1,""],ten_crop:[47,4,1,""],to_grayscale:[47,4,1,""],to_pil_image:[47,4,1,""],to_tensor:[47,4,1,""],vflip:[47,4,1,""]},"torchvision.utils":{make_grid:[48,4,1,""],save_image:[48,4,1,""]},torch:{"var":[43,4,1,""],ByteTensor:[42,1,1,""],FloatStorage:[39,1,1,""],Tensor:[42,1,1,""],__config__:[0,0,0,"-"],abs:[43,4,1,""],acos:[43,4,1,""],add:[43,4,1,""],addbmm:[43,4,1,""],addcdiv:[43,4,1,""],addcmul:[43,4,1,""],addmm:[43,4,1,""],addmv:[43,4,1,""],addr:[43,4,1,""],allclose:[43,4,1,""],arange:[43,4,1,""],argmax:[43,4,1,""],argmin:[43,4,1,""],argsort:[43,4,1,""],as_strided:[43,4,1,""],as_tensor:[43,4,1,""],asin:[43,4,1,""],atan2:[43,4,1,""],atan:[43,4,1,""],autograd:[1,0,0,"-"],baddbmm:[43,4,1,""],bartlett_window:[43,4,1,""],bernoulli:[43,4,1,""],bincount:[43,4,1,""],bitwise_not:[43,4,1,""],blackman_window:[43,4,1,""],bmm:[43,4,1,""],broadcast_tensors:[43,4,1,""],cartesian_prod:[43,4,1,""],cat:[43,4,1,""],ceil:[43,4,1,""],chain_matmul:[43,4,1,""],cholesky:[43,4,1,""],cholesky_inverse:[43,4,1,""],cholesky_solve:[43,4,1,""],chunk:[43,4,1,""],clamp:[43,4,1,""],combinations:[43,4,1,""],compiled_with_cxx11_abi:[43,4,1,""],cos:[43,4,1,""],cosh:[43,4,1,""],cross:[43,4,1,""],cuda:[8,0,0,"-"],cumprod:[43,4,1,""],cumsum:[43,4,1,""],det:[43,4,1,""],diag:[43,4,1,""],diag_embed:[43,4,1,""],diagflat:[43,4,1,""],diagonal:[43,4,1,""],digamma:[43,4,1,""],dist:[43,4,1,""],distributed:[14,0,0,"-"],distributions:[15,0,0,"-"],div:[43,4,1,""],dot:[43,4,1,""],eig:[43,4,1,""],einsum:[43,4,1,""],empty:[43,4,1,""],empty_like:[43,4,1,""],eq:[43,4,1,""],equal:[43,4,1,""],erf:[43,4,1,""],erfc:[43,4,1,""],erfinv:[43,4,1,""],exp:[43,4,1,""],expm1:[43,4,1,""],eye:[43,4,1,""],fft:[43,4,1,""],flatten:[43,4,1,""],flip:[43,4,1,""],floor:[43,4,1,""],fmod:[43,4,1,""],frac:[43,4,1,""],from_numpy:[43,4,1,""],full:[43,4,1,""],full_like:[43,4,1,""],gather:[43,4,1,""],ge:[43,4,1,""],gels:[43,4,1,""],geqrf:[43,4,1,""],ger:[43,4,1,""],get_default_dtype:[43,4,1,""],get_num_interop_threads:[43,4,1,""],get_num_threads:[43,4,1
,""],get_rng_state:[43,4,1,""],gt:[43,4,1,""],hamming_window:[43,4,1,""],hann_window:[43,4,1,""],histc:[43,4,1,""],hub:[17,0,0,"-"],ifft:[43,4,1,""],index_select:[43,4,1,""],initial_seed:[43,4,1,""],inverse:[43,4,1,""],irfft:[43,4,1,""],is_floating_point:[43,4,1,""],is_storage:[43,4,1,""],is_tensor:[43,4,1,""],isfinite:[43,4,1,""],isinf:[43,4,1,""],isnan:[43,4,1,""],jit:[19,0,0,"-"],kthvalue:[43,4,1,""],le:[43,4,1,""],lerp:[43,4,1,""],linspace:[43,4,1,""],load:[43,4,1,""],log10:[43,4,1,""],log1p:[43,4,1,""],log2:[43,4,1,""],log:[43,4,1,""],logdet:[43,4,1,""],logspace:[43,4,1,""],logsumexp:[43,4,1,""],lstsq:[43,4,1,""],lt:[43,4,1,""],lu:[43,4,1,""],lu_solve:[43,4,1,""],lu_unpack:[43,4,1,""],manual_seed:[43,4,1,""],masked_select:[43,4,1,""],matmul:[43,4,1,""],matrix_power:[43,4,1,""],matrix_rank:[43,4,1,""],max:[43,4,1,""],mean:[43,4,1,""],median:[43,4,1,""],meshgrid:[43,4,1,""],min:[43,4,1,""],mm:[43,4,1,""],mode:[43,4,1,""],mul:[43,4,1,""],multinomial:[43,4,1,""],multiprocessing:[21,0,0,"-"],mv:[43,4,1,""],mvlgamma:[43,4,1,""],narrow:[43,4,1,""],ne:[43,4,1,""],neg:[43,4,1,""],nn:[22,0,0,"-"],nonzero:[43,4,1,""],norm:[43,4,1,""],normal:[43,4,1,""],numel:[43,4,1,""],ones:[43,4,1,""],ones_like:[43,4,1,""],onnx:[36,0,0,"-"],optim:[37,0,0,"-"],orgqr:[43,4,1,""],ormqr:[43,4,1,""],pinverse:[43,4,1,""],pow:[43,4,1,""],prod:[43,4,1,""],qr:[43,4,1,""],rand:[43,4,1,""],rand_like:[43,4,1,""],randint:[43,4,1,""],randint_like:[43,4,1,""],randn:[43,4,1,""],randn_like:[43,4,1,""],randperm:[43,4,1,""],range:[43,4,1,""],reciprocal:[43,4,1,""],remainder:[43,4,1,""],renorm:[43,4,1,""],repeat_interleave:[43,4,1,""],reshape:[43,4,1,""],rfft:[43,4,1,""],roll:[43,4,1,""],rot90:[43,4,1,""],round:[43,4,1,""],rsqrt:[43,4,1,""],save:[43,4,1,""],seed:[43,4,1,""],set_default_dtype:[43,4,1,""],set_default_tensor_type:[43,4,1,""],set_flush_denormal:[43,4,1,""],set_num_interop_threads:[43,4,1,""],set_num_threads:[43,4,1,""],set_printoptions:[43,4,1,""],set_rng_state:[43,4,1,""],sigmoid:[43,4,1,""],sign:[43,4,1,""],sin:[43,4,1,""],sinh:[43,4,1,""],slogdet:[43,4,1,""],solve:[43,4,1,""],sort:[43,4,1,""],sparse_coo_tensor:[43,4,1,""],split:[43,4,1,""],sqrt:[43,4,1,""],squeeze:[43,4,1,""],stack:[43,4,1,""],std:[43,4,1,""],std_mean:[43,4,1,""],stft:[43,4,1,""],sum:[43,4,1,""],svd:[43,4,1,""],symeig:[43,4,1,""],t:[43,4,1,""],take:[43,4,1,""],tan:[43,4,1,""],tanh:[43,4,1,""],tensor:[43,4,1,""],tensordot:[43,4,1,""],topk:[43,4,1,""],trace:[43,4,1,""],transpose:[43,4,1,""],trapz:[43,4,1,""],triangular_solve:[43,4,1,""],tril:[43,4,1,""],tril_indices:[43,4,1,""],triu:[43,4,1,""],triu_indices:[43,4,1,""],trunc:[43,4,1,""],unbind:[43,4,1,""],unique:[43,4,1,""],unique_consecutive:[43,4,1,""],unsqueeze:[43,4,1,""],var_mean:[43,4,1,""],where:[43,4,1,""],zeros:[43,4,1,""],zeros_like:[43,4,1,""]},torchvision:{get_image_backend:[45,4,1,""],set_image_backend:[45,4,1,""]}},objnames:{"0":["py","module","Python module"],"1":["py","class","Python class"],"2":["py","method","Python method"],"3":["py","attribute","Python attribute"],"4":["py","function","Python function"],"5":["std","envvar","environment 
variable"]},objtypes:{"0":"py:module","1":"py:class","2":"py:method","3":"py:attribute","4":"py:function","5":"std:envvar"},terms:{"00000e":43,"0000e":[42,43],"041m":1,"048m":1,"0545e":42,"0949e":42,"10k":44,"10x7":22,"13x12":22,"1428e":43,"148m":1,"1921e":43,"1_batch_16":41,"1e6":37,"1hr":4,"1st":[15,26],"1x1":46,"20l":22,"224x224":46,"2gb":17,"2nd":[15,22,23,26,42],"2x3":[22,38],"32x4d":46,"32x8d":46,"3493e":43,"3842e":43,"3rd":[26,37,42],"3x4":22,"3xhxw":41,"4064e":43,"427l":44,"483m":1,"4842e":42,"4th":[26,44],"4us":1,"50k":44,"50x":46,"54_":41,"5751e":43,"5765e":42,"5955e":43,"5c106cde":[17,20],"5mb":46,"5x2":38,"5x7":22,"5x7x9":22,"60k":44,"640l":44,"6503e":43,"6531e":43,"727m":1,"7x7":22,"7x7x7":22,"7x9x8":22,"8000e":43,"816u":1,"8182e":42,"88131e":43,"9073e":[22,43],"abstract":[13,15],"boolean":[1,8,15,19,22,23,29,37,40,42,43,44,47],"break":[4,15,19,34,37,43],"byte":[8,15,19,37,39,42,43],"case":[1,2,8,13,14,17,21,22,23,25,26,27,28,30,32,34,36,37,38,41,42,43,44,47,49],"catch":19,"char":[39,42],"ciss\u00e9":22,"class":[1,8,13,14,15,19,21,22,23,29,30,32,33,34,36,37,38,39,40,41,42,43,44,46,47,49],"const":[31,36],"default":[1,3,7,8,14,17,20,21,22,23,24,25,27,28,29,30,35,36,37,38,39,41,42,43,44,46,47,48,49],"enum":[14,36],"export":[1,8,14,16,19,22,29,36],"final":[14,15,22,43,46,47,48],"float":[1,13,15,19,21,22,23,30,33,36,37,39,40,41,42,43,47,48,49],"function":[3,4,7,8,13,17,18,20,21,24,25,26,27,28,29,30,31,33,37,40,41,42,43,44,45],"herv\u00e9":22,"import":[1,4,5,7,8,13,14,19,21,22,25,28,29,30,31,32,33,36,37,41,42,43,44,46,47],"int":[8,13,14,15,19,21,22,23,35,36,37,38,39,40,41,42,43,44,47,48,49],"j\u00e9gou":22,"long":[4,5,13,21,22,23,26,29,30,32,36,39,40,41,42,43],"new":[1,3,5,8,13,14,15,17,19,21,22,25,28,29,31,32,35,37,39,41,42,43],"return":[0,1,3,7,8,13,14,15,16,17,19,20,21,22,23,24,27,28,29,31,35,36,37,38,39,40,42,43,44,46,47,49],"short":[19,22,23,26,39,40,42,43,47],"static":[1,19,31,36,39],"super":[13,19,22,29,36],"switch":[9,10,13,21,23,25,42,43,46],"throw":[22,42,43],"true":[1,3,7,8,11,13,14,15,17,19,20,21,22,23,25,26,28,29,30,31,33,36,37,38,39,40,41,42,43,44,46,47,48],"try":[2,4,11,14,17,19,22,23,30,32,36,37],"var":[1,22,42,43],"void":[31,43],"while":[5,13,14,15,19,22,23,25,30,32,37,41,42,43,47],Abs:36,And:[22,35,43,47],But:[1,4,19],For:[1,2,3,4,5,7,8,13,14,15,17,19,22,23,25,26,27,28,30,32,36,37,38,39,40,41,42,43,44,46,47],Going:46,Has:[22,23,43],Its:[22,37],NFS:14,NOT:[19,36,38,43],Not:[19,29],One:[14,19,22,23,26,27,31,33,36,37,41,43,44,46],Ops:[2,28,42],PRs:[4,5],RHS:43,Such:[7,13,43],That:[43,47],The:[1,3,5,7,8,13,14,15,16,17,19,20,21,22,23,24,26,27,28,30,31,32,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49],Then:[1,26,34,36,37,47],There:[1,4,5,14,17,19,22,25,28,29,30,31,32,33,34,35,36,42,43],These:[7,13,14,15,19,22,29,36,38,40,43,44,46],Use:[8,13,14,22,23,32,41,42,43,47],Used:[13,43],Useful:[8,22],Uses:8,Using:[13,15,19,22,32],WITH:36,Will:[6,14,19,43,47],With:[13,15,22,23,28,36,37,41],__background__:46,__call__:47,__config__:[18,27],__constants__:19,__dict__:37,__file__:[17,35],__getitem__:[13,44],__init__:[1,13,15,19,22,29,30,36,41],__iter__:13,__len__:[13,44],__main__:[13,26,32,35],__name__:[13,32,35],__new__:19,_back:[22,23],_bottom:[22,23],_call:15,_cat:15,_channel:[22,23],_class:22,_compilation_unit:19,_cpp_modul:19,_dependentproperti:15,_dim:22,_direct:22,_ext:35,_extra_fil:[19,31],_factor:22,_featur:[22,23],_fft:43,_force_outplac:19,_fork:27,_formatt:43,_forward_cl:1,_frames_up:19,_front:[22,23],_glibcxx_use_cxx11_abi:43,_greaterthan:15,_greaterthaneq:15,_halfopeninterv:
15,_if_scalar_type_a:36,_in:24,_index:22,_indic:[38,43],_instanc:15,_integerinterv:15,_interv:15,_invers:15,_key_padding_mask:22,_layer:22,_left:[22,23],_length:[22,43],_lessthan:15,_like:42,_load_from_state_dict:22,_log_api_usage_onc:31,_mask:22,_metadata:22,_module_class:19,_nnz:38,_onnx_master_opset:36,_onnx_stable_opset:36,_out:24,_pad:22,_qualified_nam:19,_random_sampl:22,_rcb:19,_resnet18:17,_retain_param_nam:36,_right:[22,23],_sampl:43,_scalar:36,_shape:22,_size:22,_slope:[22,23,24],_stack:15,_stacklevel:23,_tensor:42,_top:[22,23],_valu:[22,38,43],_wait:27,_weight:22,a3c:32,a_big:43,a_dict:19,a_i:22,a_l:43,a_lu:43,a_tupl:19,a_u:43,aaa:41,abc:22,abi:7,abil:[5,31],abl:[4,19,22,36,43],abnorm:[21,32],about:[1,5,8,13,19,22,27,29,30,31,32,36,42,47],abov:[1,15,17,19,22,26,27,28,29,36,37,43,44,47],abridg:30,abruptli:21,abs:[15,22,23,36,37,42,43],abs_:42,absolut:[1,5,7,22,23,42,43,47],abstransform:15,acc:46,acceler:[4,22,37],accept:[1,4,5,14,19,22,29,36,37,40,41,42,43],access:[5,13,14,21,22,25,28,30,31,40,42,49],accident:4,accimag:45,accommod:22,accompani:4,accomplish:4,accord:[22,24,35,36,37,43,44,46],accordingli:[42,44,46],accoridng:22,account:[2,22],accumul:[1,19,22,30,42,43],accumulategrad:1,accur:[8,36,43],accuraci:[41,46],achiev:[13,15,22,23,31,36],aco:[36,42,43],acos_:42,acquaint:4,across:[1,8,13,14,19,22,23,28,30,31,32,33,39,41,42,43,44],act:[15,22,47],action:[5,15,28,44],activ:[1,3,4,8,21,28,29],actual:[1,13,17,19,22,25,27,28,29,32,35,36,37],actual_input_1:36,acycl:25,adadelta:37,adagrad:[22,37],adam:[5,6,15,37],adamax:37,adamw:37,adapt:[19,22,23,37,43],adaptive_avg_pool1d:36,adaptive_avg_pool2d:36,adaptive_avg_pool3d:36,adaptive_max_pool1d:36,adaptive_max_pool2d:36,adaptive_max_pool3d:36,adaptiveavgpool1d:23,adaptiveavgpool2d:23,adaptiveavgpool3d:23,adaptivelogsoftmaxwithloss:19,adaptivemaxpool1d:23,adaptivemaxpool2d:23,adaptivemaxpool3d:23,add:[1,4,8,17,19,22,23,26,29,31,36,37,38,41,42,43,46],add_:[1,26,38,42],add_argu:28,add_audio:41,add_bias_kv:22,add_custom_scalar:41,add_embed:41,add_figur:41,add_graph:41,add_histogram:41,add_imag:41,add_mesh:41,add_modul:22,add_param_group:37,add_pr_curv:41,add_scalar:41,add_text:41,add_video:41,add_zero_attn:22,addbmm:[42,43],addbmm_:42,addcdiv:[42,43],addcdiv_:42,addcmul:[42,43],addcmul_:42,added:[4,7,22,23,31,36,37,38,41,42,43],adding:[13,14,17,19,22,29,36,42,43],addit:[1,4,5,7,15,19,22,23,27,28,29,31,32,33,35,37,38,42,43],addition:[1,13,14,15,22,30,42,43,47],additionali:22,addmm:[36,38,42,43],addmm_:42,addmv:[42,43],addmv_:42,addr:[42,43],addr_:42,address:[1,13,14,21,42,46],adher:5,adjac:[22,43],adjust:[22,47],adjust_bright:47,adjust_contrast:47,adjust_gamma:47,adjust_hu:47,adjust_satur:47,admit:28,adopt:5,advanc:[3,22,25,32,36,41],advantag:[14,22,30],adventur:36,adversari:22,advic:4,advis:[32,43],advisori:4,aeroplan:46,affect:[1,4,8,22,23,39,43],affin:[15,22,23,25,42,47],affinetransform:15,aforement:32,after:[4,7,8,13,14,17,19,21,22,24,28,30,31,32,34,37,40,41,42,43,47],afterward:[1,22],again:[3,13,14,43,44],against:[1,2,14,19,43,47],aggreg:[22,23,46],aggress:[1,25],ahead:4,aid:[4,25],aidan:22,ail:6,ailzhang:6,aim:4,airplan:46,aka:1,akin:31,alban:6,alband:6,alex:6,alfredo:6,algorithm:[4,11,12,15,22,23,43],alia:[15,42],alias:29,alican:6,alicanb:6,align:[22,23],align_corn:[22,23],aliv:[30,32],all:[1,3,4,5,7,8,13,14,15,17,19,21,22,23,25,27,28,29,30,32,33,35,36,37,38,39,40,41,42,43,44,46,47,48],all_gath:14,all_gather_multigpu:14,all_reduc:14,all_reduce_multigpu:14,allclos:[1,42,43],alloc:[1,2,8,15,21,25,28,30,40,42,43],allow:[1,4,5,7,13,14,15,1
7,19,22,25,26,27,28,31,32,36,37,40,41,42,43,44],allow_unreach:1,allow_unus:1,almost:[35,43,44],alon:19,along:[7,8,13,14,15,17,19,22,23,26,30,31,37,42,43],alpha:[15,22,23,36,37,38,42,43],alpha_f:36,alphabet:[23,43,44],alphadropout:23,alreadi:[8,13,14,17,19,20,22,29,32,36,37,39,42,43,44],also:[1,3,4,5,7,8,13,14,15,17,19,21,22,23,24,25,27,28,29,30,31,32,33,35,36,37,38,41,42,43,44,47],altern:[13,17,19,22,23,35,43],although:[4,15,22],alwai:[1,8,13,14,19,21,22,26,27,28,29,31,36,38,40,42,43],amazonaw:[20,35],ambigu:[15,22],among:[8,13,14,15,22,36,43],amount:[1,2,4,8,22,25,28,30,47,48],amplitud:37,amsgrad:37,an_error:19,anaconda:35,analog:[37,43],analogu:19,analyt:[1,15],anchor:[22,23],angl:[22,47],ani:[1,2,3,4,5,8,13,14,15,19,21,22,23,25,27,28,29,31,32,33,36,37,41,42,43,47],anm:43,ann_fil:44,anneal:37,annfil:44,annot:[1,19,44],annotation_path:44,anoth:[4,8,13,14,19,22,27,28,29,32,35,36,42,43],another_input:22,anothermodel:36,answer:[4,5,22],anticip:3,anymor:[1,14,22,42],anyon:5,anyth:[3,4,14,19],aoa:35,apaszk:[5,6],api:[1,5,8,17,19,21,28,36,38,41,42,44],aplli:47,appear:[2,14,15,22,29,37,43],append:[1,14,19,22,32,35,41,42,43],appl:46,appli:[1,3,14,15,19,21,22,23,25,29,36,37,42,43,47],applic:[8,14,15,22,25,27,28,42,47],apply_:42,apprear:43,appreci:4,approach:[14,19,21,22,43],appropri:[4,14,15,19,22,43,46],approv:5,approxim:[1,22,23,29,37],arang:[13,22,23,36,41,42,43],arbitrari:[1,14,19,22,23,25,31,42,43],arccosin:43,architechtur:22,architectur:[22,43,45,46],archiv:[19,31],arcsin:43,arctang:43,area:[4,5,23,47],arg0:1,arg1:1,arg:[1,2,3,7,14,15,17,21,22,23,28,32,34,36,39,42,43,44,47],arg_constraint:15,argmax:[22,36,42,43],argmin:[36,42,43],argpars:28,argsort:[42,43],argument:[1,2,3,7,8,13,14,15,17,19,21,22,23,26,28,29,30,31,36,37,39,40,41,42,43,44,48,49],argumentpars:28,ari:36,aris:15,arithmet:43,armand:22,around:[1,4,5,8,14,19,21,28,42,47],arrai:[13,22,23,36,39,41,42,43,44],arrang:44,array_lik:[42,43],art:43,articul:5,artifact:31,artifici:1,arxiv:[22,46,47],as_strid:[42,43],as_tensor:[41,42,43],as_tupl:43,asap:21,ascend:43,ascent:15,ascii:[8,43],asd932_:44,asgd:37,ashish:22,asin:[36,42,43],asin_:42,ask:[4,14,18],aspect:[4,47],assembl:13,assert:[13,15,36],assert_allclos:19,assign:[4,13,14,19,22,29,30,36,41,44],assign_x:19,associ:[1,8,19,22,23,40,42,43],assum:[13,14,15,19,22,23,29,31,36,37,43,47],assumpt:[22,47],ast_1:[22,23],ast_2:[22,23],astyp:36,asuhan:6,async:[14,39,42],async_op:14,asynchron:[2,22,27,39,41,42],atan2:[42,43],atan2_:42,atan:[36,42,43],atan_:42,aten:[19,27,35,43],aten_thread:27,atol:[1,19,29,42,43],atom:33,atomicadd:33,attach:17,attempt:[19,28,35,43],attend:22,attent:[4,22,35],attn:22,attn_mask:22,attn_output:22,attn_output_weight:22,attr1:36,attr1_f:36,attr2:36,attr2_i:36,attr:[15,22,23,36,43],attribut:[1,13,14,18,22,25,28,29,36,42,49],audio:[41,44],aug_add_x:19,augment:47,auto:[14,22,41],autoencod:15,autograd:[2,3,4,15,18,22,23,30,31,36,42,43],autograd_tensor:1,autom:[19,36],automat:[7,8,14,22,25,26,28,29,32,36,41,42,43],aux_logit:46,aux_loss:46,auxiliari:[17,31,46],avaialbl:17,avail:[7,8,13,14,17,19,21,22,23,28,29,35,36,43,44],averag:[1,14,22,23,37],avg:[1,47],avg_pool1d:36,avg_pool2d:36,avg_pool3d:36,avgpool1d:23,avgpool2d:23,avgpool3d:23,avmgithub:6,avoid:[5,13,15,22,23,30,33,41,42,43,47],awai:23,awar:[4,46],axbc:22,axes:36,axi:[36,42,43,47],b_hf:22,b_hg:22,b_hh:22,b_hi:22,b_hn:22,b_ho:22,b_hr:22,b_hz:22,b_if:22,b_ig:22,b_ih:22,b_ii:22,b_in:22,b_io:22,b_ir:22,b_iz:22,back:[17,32,36,38,43,47],backbon:46,backcompat:26,backend:[1,9,10,11,19,22,23,27,28,33,36,42,43,45],backend_str:14,
background:[9,10,11,22,23,32,42,43,44],backpack:46,backprop:43,backpropag:[1,15,30,37],backward:[1,3,5,10,15,22,23,24,29,30,32,33,37,38,42,43],bad:21,baddbmm:[42,43],baddbmm_:42,bag:[22,23],bai:6,balanc:[43,44],ball:46,balnta:22,banana:46,bar:[4,19,20,46],bare:7,barrier:14,bartlett:43,bartlett_window:43,base:[1,4,5,7,8,13,15,19,22,25,27,36,37,41,42,43,44,47],base_distribut:15,base_lr:37,base_momentum:37,base_se:13,basebal:46,basedistribut:15,basep:15,baseq:15,basi:[15,31,37],basic:[4,22,37,41],bat:46,batch1:[42,43],batch2:[42,43],batch:[15,22,23,28,30,32,33,36,37,41,43,44,46,47,48],batch_first:[22,30],batch_ndx:13,batch_sampl:13,batch_shap:15,batch_siz:[13,15,22,41,44],batchmean:[22,23],batchnorm1d:23,batchnorm2d:23,batchnorm3d:23,batchnorm:[22,36],batchnormnd:22,batchsampl:13,batchsiz:[22,23],batchwis:22,bbb:41,bceloss:23,bcewithlogitsloss:23,bckenstler:37,bddppq:6,beam:19,bear:46,becaus:[1,2,4,13,15,19,21,22,26,28,30,31,35,36,41,42,43],becom:[1,4,5,13,15,22,23,36,42,43],bed:46,bedroom_train:44,been:[1,8,14,15,21,22,27,32,35,37,41,43,46],befor:[1,4,8,13,14,15,19,21,22,23,25,27,28,29,31,35,36,37,38,41,42,43],beforehand:4,begin:[4,8,22,31,36,37,42,43],behav:[7,19,42],behavior:[4,7,14,17,19,22,23,26,28,36,37,42,43,46],behaviour:[1,9,10,23,42,43],behind:44,being:[1,5,13,15,19,22,23,29,32,36,42,43,47],belong:[3,8,14,15,28,37,47],below:[1,7,13,14,15,19,21,22,23,28,29,32,35,36,43,47],ben:22,bench:46,benchmark:33,benefit:[4,14,21,37],bengio:24,bernoulli:[22,23,42,43],bernoulli_:[42,43],besid:41,bessel:43,best:[1,4,13,14,18,19,21,30,37,43],beta:[22,23,36,37,38,42,43],better:[4,5,8,13,19,22,23,27,35,41,43],between:[1,4,8,14,15,19,21,22,23,28,32,33,37,39,41,42,43,44,46,47],bewar:4,beyond:[5,30,37,43],bfg:37,bfloat16:[39,42],bia:[5,22,23,29,41],bias:[22,43],bias_hh:22,bias_hh_l:22,bias_ih:22,bias_ih_l:22,bicub:[22,23,47],bicycl:46,bidirect:[22,36],big:[4,43],bij:43,biject:15,biject_to:15,bik:43,bilinear:[43,47],bin:[41,42,43],binari:[15,19,22,23,31,35,36,41,42,43,44,46],bincount:[33,42,43],bind:[7,8,36],bird:46,bit:[4,35,40,42,43,49],bitwis:[14,43],bitwise_not:[42,43],bitwise_not_:42,bjk:43,bl_flip:47,bla:27,black:47,blackman:43,blackman_window:43,blank:[22,23],blob:[31,36,41],blobnam:41,block0:[19,36],block1:19,block:[4,8,13,14,19,21,22,23,36,46],blog:4,blow:30,blue:44,bmm:[42,43],board:5,boat:46,bodi:19,boil:4,book:46,bool:[1,3,8,13,14,15,17,19,20,21,22,23,36,37,39,40,41,42,43,44,46,47,48],booltensor:[40,42],bootcamp:4,bootstrap:35,border:[23,47],both:[1,8,13,14,15,19,22,23,26,29,33,36,38,42,43,44,46,47],bottl:46,bottleneck:[18,46],bottom:[1,23,47],bound:[2,22,23,24,34,37,42,43],boundari:[22,23,37,44],bowl:46,box:46,bozkurt:6,bptt:30,br_flip:47,branch:[4,17,19,46],brand:14,bregman:15,breviti:[1,36],brief:21,bright:[44,47],brightness_factor:47,broadcast:[8,14,15,18,22,36,42,43],broadcast_buff:22,broadcast_coalesc:8,broadcast_multigpu:14,broadcast_tensor:43,broadcast_warn:26,broader:[5,31,43],broccoli:46,broken:4,brokenpipeerror:35,brown:44,bucket:22,bucket_cap_mb:22,buf:22,buffer:[1,2,8,13,19,22,25,29,43],buffer_s:8,bug:[5,32],bugfix:4,build:[7,14,15,19,23,25,41,47],build_directori:7,build_ext:7,buildextens:7,built:[4,14,19,27,32,37,43],builtin:43,bulk:13,bump:22,bundl:31,bus:46,byclass:44,bymerg:44,bypass:28,byte_arrai:43,bytecod:13,bytesio:[19,43],bytetensor:[8,22,40,42,43],bz2:44,c10:31,c10_log_api_usage_onc:31,c99:35,c_0:22,c_1:22,c_j:22,c_n:22,c_t:22,cach:[8,15,20,21,22,30,43,46],cache_s:15,caffe2:[36,41],cake:46,calcul:[1,3,13,22,23,26,35,37,43],calculate_gain:24,call:[1,7,8,13,14,15,17,21,22,2
3,27,28,29,30,31,32,35,36,37,40,41,42,43,46,48,49],callabl:[13,15,17,19,37,42,43,44],callback:31,caller:28,camera:[31,41],can:[1,2,3,4,5,7,8,11,12,13,14,15,16,17,19,21,22,23,25,26,27,28,29,30,31,32,33,34,35,36,37,38,40,41,42,43,44,46,47,49],candid:1,cannot:[1,13,15,17,19,22,23,35,38,39,42,43],cap:44,capabl:[8,14,31,43],capac:28,capacit:15,captur:[8,19,36],car:46,card:35,cardin:15,care:[4,7,15,21,22,28,30,32,38,43],carlo:15,carri:26,carrier:15,carrot:46,cartesian:[15,43],cartesian_prod:43,cast:[1,22,23,36,39,42,43],cat:[15,19,22,36,38,42,43,44,46],categor:[4,23],categori:[15,43,44,46],categorynam:41,cattransform:15,cauchi:[42,43],cauchy_:[42,43],caught:21,caus:[1,3,13,14,19,21,23,26,30,32,35,36,42,43],caveat:[21,28],ccc:41,cdf:15,cdot:[22,23,43],ceil:[13,22,23,36,42,43],ceil_:42,ceil_mod:[22,23],cell:[22,46],center:[23,37,41,42,43,47],center_flip:47,centercrop:47,central:[31,47],cerr:31,certain:[13,14,19,22,23,26,31,38,43],certainli:43,chain:[1,13,15,22,25,42,43,47],chain_matmul:43,chaindataset:13,chair:46,challeng:4,chanan:[5,6],chanc:[4,15],chang:[1,4,8,15,19,21,22,23,25,26,28,35,36,37,38,39,41,42,43,46,47],channel:[5,13,22,23,24,36,41,44,46,47],charact:[23,43],chart:41,chartensor:[40,42],chartnam:41,cheap:[15,22],cheaper:13,check:[2,7,8,13,14,15,17,22,29,30,31,36,37,41,42,43,44],check_compiler_abi_compat:7,check_input:19,check_model:36,check_reduct:22,check_sparse_nnz:1,check_toler:19,check_trac:19,checker:[19,36],checkout:36,checkpoint:[1,17,18,20,22,43],checkpoint_sequenti:3,child:[13,21,22,35],children:[21,22],chintala:[5,6],choic:[19,22,27,36],choleski:[15,42,43],cholesky_invers:[42,43],cholesky_solv:[42,43],choos:[1,24,41],chosen:[43,47],christian:6,chrome:1,chunk:[3,8,13,19,22,42,43],chunk_siz:8,church_train:44,chw:41,cifar100:44,cifar10:44,cifar:45,circleci:4,circular:23,circumst:[11,19,22,23],cityscap:45,claim:4,clamp:[23,36,42,43],clamp_:42,clamp_max:36,clamp_min:36,class_i:44,class_index:[13,44],class_x:44,classif:[22,23,24,44,45],classifi:[25,36,37,41],classmethod:[8,22],clean:[8,14,17,21],cleaner:25,clear:[17,22,28,37],click:43,clip:[22,44],clip_valu:22,clock:46,clockwis:47,clone:[1,13,21,23,38,39,42,43],cloned_coeffici:43,close:[8,29,41],closest:[23,43],cloud:41,clp:44,clr:[37,43],cluster:[22,41],clutter:41,cmake:35,cmake_gener:35,cmake_include_path:35,cmdclass:7,cmyk:47,cnn:[22,25],coalesc:[8,38,42],coars:44,coco:[45,46],coco_instance_category_nam:46,coco_person_keypoint_nam:46,coco_util:46,cococapt:44,cocodetect:44,code:[1,2,5,7,13,14,15,17,22,26,29,30,31,32,33,35,36,37,38,40,42,43],codebas:5,codec:43,codomain:15,coeffici:[37,43],cohes:5,col2im:22,col:[43,44],colesburi:[5,6],collat:13,collate_wrapp:13,collect:[1,4,13,37,41,43,44],color:[22,41,44,47],colorjitt:47,colors_tensor:41,column:[1,22,23,24,42,43,47],com:[4,5,20,35,36],combin:[13,19,22,23,28,36,43],combinations_with_replac:43,come:[4,13,22,31,44],comm:8,comma:[14,43],command:[1,2,35],comment:[4,19,29,41],commit:[4,5,17,33],committ:5,common:[13,22,28,30,32,43,44,45,47],commonli:[14,15,37,40],commun:[4,5,18],compani:5,compar:[1,3,13,19,22,29,35,41,43],comparison:29,compat:[7,13,15,19,21,39,42,43,44],compil:[7,19,27,31,35],compilationunit:19,compiled_with_cxx11_abi:43,complementari:[43,47],complet:[4,8,14,21,25,33,43,47],complex:[4,22,32,43,47],complic:[2,26],compon:[4,14,15,22,31,43],compos:[15,19,22,23,36,41,43,47],composetransform:15,composit:[15,19],compris:3,comput:[3,4,8,13,14,15,19,22,23,25,27,28,29,30,33,36,37,38,42,45,46,47,48],compute_uv:[42,43],compute_z:27,concat:[22,36],concatdataset:13,concaten:[7,8,13,22,
23,43],concentr:15,concentrarion:15,concentration0:15,concentration1:15,concept:[4,36,40],conceptu:[1,25],concern:[13,21],concret:[15,19,22,23,32],concurr:[27,28],cond:36,conda:[35,36,43],condit:[1,12,19,22,29,36,41,42,43],condition:1,conduct:[5,22],confer:5,confid:[4,41],config:35,config_dict:41,configur:[0,4,13,14,22,28,35,41,43,46],confirm:[4,19,36],conform:22,conjug:[37,43],conjunct:[13,23],connect:[14,21,22,25,46],connectionist:[22,23],conquer:43,consecut:[14,42,43],consensu:4,consid:[17,19,22,23,26,29,30,37,42,43,44],consider:[4,22],consist:[13,19,36,37,43,45],consol:41,constant:[13,22,23,29,36,37,43,47],constant_:24,constantpad2d:23,constantpadnd:36,constrain:[15,22],constraint:22,constraint_registri:15,constraintregistri:15,construct:[1,13,15,22,25,32,38,40,41,42,43,46],construct_transform:15,constructor:[7,13,22,28,38,42,46,49],consum:[13,16,21,32,36,41],consumpt:[1,41],contain:[1,3,7,8,13,14,15,19,23,25,29,30,31,36,37,39,40,41,42,43,44,46,47],content:[4,19,20,21,37,41,42,43],contenti:5,context:[1,8,21,22,28,29,31,32,36,43],contigu:[22,23,39,42,43],continu:[13,15,19,22,36,42,43],continuum:35,contract:43,contrail:44,contrain:43,contrari:[4,27],contrast:[15,37,46,47],contrast_factor:47,contribut:[1,5,18,22,23,43],contributor:[4,5],control:[13,19,22,25,27,28,32,36,43,47],conv1:[19,22,41],conv2:[19,22],conv2d:[19,36,41],conv4:22,conv5:22,conv:[19,22,24,36],conveni:[4,7,17,19,28,29,31],convent:[1,20,22,36,42,43],converg:37,convers:[4,25,36,42,45],convert:[1,13,19,22,29,36,41,42,43,47],convert_sync_batchnorm:22,convolut:[24,27],convolv:[22,23],convtranspos:22,convtranspose1d:23,convtranspose2d:23,convtranspose3d:23,coo:[38,40,42,43],cooldown:37,coordin:[4,15,38,41,42,43,47],cope:32,copi:[4,8,13,14,19,21,22,26,28,32,39,42,43],copy_:[1,19,22,28,39,42],core:[4,19,27,36],corner:[22,23,47],corpor:[4,5],correct:[2,4,14,15,19,22,39,42,43,47],correctli:[3,14,19,22,23,32,36],correl:[1,15,22],correspond:[1,4,8,13,15,17,19,22,23,29,31,36,37,39,41,42,43,47],corrupt:[22,32,44],cos:[22,36,37,41,42,43],cos_:42,cosh:[42,43],cosh_:42,cosin:[22,23,37,43],cosineannealinglr:37,cosineembeddingloss:23,cost:[1,2,11,22,23,43],couch:46,could:[2,4,8,13,15,21,35,43],couldn:[35,36],count:[1,8,15,43],count_include_pad:[22,23],counter:[1,8,21,22,25,47],counterpart:43,coupl:[31,33],cours:[2,17,37],courtesi:15,cov_diag:15,cov_factor:15,covari:[15,22,47],covariance_matrix:15,cover:[29,31,44],coverag:4,cow:46,cpp:[4,5,7,43],cpp_extens:[18,29],cpp_sourc:7,cppdoc:4,cppextens:7,cprofil:2,cpu:[1,2,8,14,18,19,21,22,23,28,32,33,35,36,39,40,42,43],cpu_model:19,cpu_tim:1,cpu_time_tot:1,cpuhrsch:6,crack:4,crash:[21,41],crcv:44,creat:[1,3,4,7,8,13,14,15,21,22,25,28,32,36,39,41,42,43,44,49],create_extens:35,create_graph:[1,42],creation:[1,8,13,19,21,22,28,42,44],creator:25,crelu:22,criterion:[22,23,30],critic:22,crop:[46,47],cross:[4,15,22,23,28,35,42,43],crossentropyloss:23,crossmaplrn2d:19,csrc:[35,36],ctc_loss:[22,33],ctcloss:23,ctx:[1,29],cube:[22,43],cubla:8,cublashandle_t:8,cuda0:[28,42],cuda100:35,cuda101:35,cuda1:40,cuda2:28,cuda80:35,cuda90:35,cuda92:35,cuda:[1,2,3,7,9,10,11,13,14,18,19,22,23,29,33,36,37,39,40,42,43,46],cuda_extens:7,cuda_hom:7,cuda_launch_block:28,cuda_prefix:35,cuda_runtim:7,cuda_sourc:7,cuda_tim:1,cuda_time_tot:1,cuda_visible_devic:[8,28],cudaev:1,cudaeventsynchron:8,cudaextens:7,cudart:[7,35],cudastreamsynchron:8,cudastreamwaitev:8,cudnn:[11,12,22,23,46],cufft:43,cufft_plan_cach:28,cuh:7,cultur:5,cumprod:[42,43],cumsum:[42,43],cumul:[15,22,23,43],cup:46,cur:37,curl:35,current:[1,3,5,7,8,13,14,19,21,22,
23,28,31,33,35,36,37,38,39,40,41,42,43,44],current_blas_handl:8,current_datetime_hostnam:41,current_devic:[8,40],current_stream:8,curv:41,custom:[7,13,14,21,22,31,35,37,42],custom_decod:22,custom_encod:22,custom_loop:36,custom_op:36,cut:4,cutoff:[22,43],cxx:7,cycl:37,cycle_momentum:37,cyclic:[37,47],cycliclr:37,d_1:[22,23],d_2:[22,23],d_k:[22,23],d_model:22,daemon:21,dag:1,dai:4,dampen:37,dark:47,darker:47,dart:44,data1:44,data2:44,data:[1,4,12,14,15,18,19,21,22,23,25,26,28,29,31,32,34,35,36,37,38,39,40,41,42,43,44,47],data_load:[32,37,44],data_parallel:30,data_ptr:[39,42],data_sourc:13,databas:[13,44],dataformat:41,dataload:[13,22,28,30,35,37,41,42,44],dataparallel:[14,30,32],dataset:[18,22,30,31,35,37,41,45,46,47],dataset_it:13,datasetfold:45,datatyp:[22,36,43],datetim:14,datset:44,david:[6,22],dcgan:36,ddp:22,ddp_sync_bn_network:22,deactiv:47,deadlock:[14,22],deal:[4,21,30,43,47],dealloc:[21,28,30],debug:[1,2,13,14,25,27,35,36],decai:[22,37],decemb:43,decent:13,decid:[2,4,44],decis:19,declar:[1,7,13,19,36],decod:[16,22,43],decoder_lay:22,decomposit:[15,43],deconvolut:[22,23],decor:[1,15,19],decoupl:[22,37],decreas:[15,22,37,43],decreasingli:22,deep:[4,5,18,22,24,37,46],deeper:46,deeplabv3_resnet101:46,deeplabv3_resnet50:46,def:[1,13,15,17,19,22,27,29,30,32,35,36,37,42,47],default_gener:43,default_load:44,default_stream:8,defin:[1,7,13,15,17,21,22,23,35,36,37,38,42,43,44,47],define_macro:35,definit:[4,13,15,17,19,22,23,36,41,43,46],degre:[15,22,43,47],del:[21,30],delet:[14,17,21],deliv:5,delta:[15,22,24,37],delv:24,demand:[8,31],demonstr:22,denomin:[22,37,43],denorm:43,denot:[1,15,19,22,37],dens:[22,38,40,42,43,46],dense_dim:[38,42,43],densenet121:46,densenet161:46,densenet169:46,densenet201:46,densenet:36,densiti:15,depend:[1,2,3,13,14,15,17,19,21,22,23,27,28,33,36,37,38,41,42,43,46],dependent_properti:15,deploi:[4,31],deploy:18,deprec:[14,22,23,26,36,39,42,43,47],depth:[8,22,23,46,47],depthwis:22,dequant:42,deriv:[1,5,19,22,29,42,43],derivedp:15,derivedq:15,descend:[22,42,43],descent:[15,37],describ:[3,4,8,13,19,22,23,24,30,31,36,42,43,46],descript:[0,4,7,19,28,29,31,36,49],descriptor:[13,22,36,44],deseri:[20,43],design:[1,4,5,13,15,17,20,46],desir:[8,13,14,15,22,23,28,36,38,39,42,43,47],desmaison:6,despit:19,destin:[8,14,22,39,42,43],destroi:22,destructor:21,det:[15,42,43],detach:[1,19,22,23,30,42,43],detach_:[1,42],detail:[0,1,4,8,13,15,19,22,23,29,30,31,37,38,41,42,43,46,47],detect:[3,7,14,21,36,45],detect_anomali:1,detector:22,determin:[1,5,7,8,13,15,22,23,28,33,41,43,47],determinist:[3,11,15,19,22,23,33,37,43],dev:5,dev_idx:14,develop:[28,31,36],deviat:[15,22,24,42,43,47],devic:[1,3,8,14,19,22,23,30,33,36,37,39,42,43],device_count:[8,14],device_id:[22,23,43],device_of:8,devito:6,df1:15,df2:15,dfrac:[22,23,42],diag:[15,42,43],diag_emb:[42,43],diagflat:[42,43],diagn:15,diagnost:19,diagon:[15,23,42,43],dict:[15,20,22,29,36,37,41,43,46],dictat:22,dictionari:[7,13,15,22,23,36,37,41,44,46],did:[1,4,19],didn:[25,29,35,37],dies:21,dieterich:6,diff:[4,19],differ:[1,3,7,8,13,14,15,17,19,21,22,23,26,27,28,29,32,33,35,36,37,38,40,41,42,43,44,46],differenti:[15,22,23,25,29,30,42],difficult:[1,4],difficulti:[4,24],digamma:[42,43],digamma_:42,digit:[20,31,43,44],dilat:[22,23,36],dim0:[42,43],dim1:[42,43],dim2:[42,43],dim:[8,15,19,22,23,30,36,38,42,43],dim_arang:36,dim_feedforward:22,dimems:42,dimens:[1,8,13,15,19,22,23,24,26,30,38,40,41,42,43],dimension:[1,15,22,23,24,26,39,40,42,43],dims_i:36,dine:46,diningt:46,dir:[17,36,44],dirac:24,dirac_:24,direct:[4,5,22,25,29,43,47],directli:[4,5,7,13,14,1
5,19,22,23,28,31,32,36,38,41,43],directori:[7,14,20,31,34,41,44,46],dirnam:17,dirti:25,disabl:[22,28],disable_cuda:28,disable_jit_exampl:19,disadvantag:19,discard:[17,19],discourag:[1,8,25],discov:14,discrep:43,discret:[15,22,23,42,43],discrimin:22,discuss:[5,15],disjoint:19,disk:[1,13,41,43],dispatch:[14,36],displai:[20,23,36,46,48],displaystyl:43,dissimilar:22,dist:[14,15,42,43],distanc:[37,43,44],distinct:43,distort:47,distortion_scal:47,distribut:[13,18,24,38,41,42,43],distributed_test:14,distributeddataparallel:[13,14],distributedsampl:13,div:[22,36,38,42,43],div_:[38,42],div_valu:22,diverg:[19,22,23,36],divid:[3,8,22,23,43],dividend:43,divis:[13,22,23,43],divisor:[22,23,42,43],divisor_overrid:[22,23],dlibenzi:6,dll:35,dlpack:18,dltensor:16,dmytro:[5,6],dnn:27,do_constant_fold:36,doc:[2,21,22,29,36,41],doc_str:36,docstr:[7,17],document:[8,13,14,17,19,21,22,29,30,36,43,48],doe:[1,2,3,4,5,8,14,15,17,19,21,22,23,26,27,28,32,36,38,42,43,45,47],doesn:[1,3,4,8,13,14,19,22,23,26,29,31,32,35,36,37,43],dog:[44,46],doing:[4,13,19,23,35],domain:[5,15],don:[1,2,4,14,17,21,22,23,25,29,30,32,35,36,37,42,43,47],done:[13,15,19,21,22,30,33,36,42,43,47],donut:46,dot:[22,42,43,47],doubl:[1,22,23,29,39,40,42,43],doubler:1,doubletensor:[40,42,43],dow:41,down:[1,4,13,15,23,32,41],download:[20,35,41,44,46],downsampl:22,doxygen:4,dp_m:30,dpotri:43,draw:[13,41,42,43],drawn:[13,24,42,43],drier:46,drive:[5,14],driven:5,drop:[13,22,32,44],drop_last:13,dropout:[3,36],dset:44,dst1:8,dst2:8,dst:14,dst_tensor:14,dst_type:22,dtype:[12,13,19,22,23,28,29,36,38,39,41,42,43,47,49],due:[2,3,4,15,19,22,28,33,43],dummi:13,dummy_input1:36,dummy_input2:36,dummy_input:36,dump:35,dump_patch:22,duplic:[13,22,30,38,42,43],dure:[1,3,7,14,19,22,23,27,28,31,36,38,42,43,46],dynam:[7,13,19,24,36,37,43],dynamic_ax:36,dynamic_threshold:37,dzhulgakov:[5,6],each:[1,3,7,8,13,14,15,16,17,19,22,23,24,25,26,27,28,29,30,31,32,36,37,38,40,41,42,43,44,46,47,48],eager:43,eagerli:8,earli:[19,22],earlier:[1,30,36],eas:[1,27],easi:[13,19,30,31,32,36],easier:[4,13,19,22,26,29,43],easili:[4,9,10,14,22,23,37,41,42,43],ecosystem:31,edg:[1,23,47],edgeitem:43,edouard:22,edu:[22,44],edward:[5,6],effect:[1,4,7,13,17,19,22,23,28,37,39,41,42],effici:[1,13,15,22,25,29,38,40,42,43,46],eig:[42,43],eigenvalu:43,eigenvector:[42,43],eight:20,einstein:43,einsum:43,either:[1,7,13,14,15,17,19,22,23,24,26,28,29,31,32,36,37,42,43,49],elaps:8,elapsed_tim:8,eleg:32,element:[1,8,13,14,15,22,23,24,26,27,38,39,40,41,42,43,44,47],element_s:[39,42],elementari:43,elementwis:[8,22,23,43],elementwise_affin:22,eleph:46,elf:30,elif:19,elimin:[14,42,43],ell:22,ell_c:22,ellips:43,ellipsi:43,elman:22,els:[4,7,13,15,19,21,22,23,28,29,39,42,43,44,47],elsewher:[17,43],elu:36,elu_:23,embed:[27,36,41],embed_dim:22,embedding_bag:33,embedding_dim:[22,23],embedding_matrix:23,embedding_sum:22,embeddingbag:23,emerg:35,emit:[1,7,19,44],emit_nvtx:[1,2],emnist:45,empir:22,emploi:37,employe:5,empti:[14,19,22,23,24,26,28,36,38,42,43],empty_cach:[8,28],empty_lik:43,emptydatastructur:19,enabl:[1,12,13,14,22,26,27,28,31,35,37,42,43],enable_grad:[1,43],enable_tim:8,encod:[14,19,22,29,43],encoder_lay:22,encount:[14,22,23,43],encourag:5,end:[4,5,8,13,14,19,21,22,30,35,42,43],end_dim:[42,43],end_ev:8,endl:31,endocd:22,endpoint:47,enforc:22,enforce_sort:22,engin:[1,42,43],enhanc:47,enough:[19,21,25,29,35,37,43,47],enqueu:[8,28],ensur:[1,2,4,5,13,14,19,20,21,22,25,28,32,33,42,43,46],enter:14,entir:[3,4,7,13,19,22,23,30,31,34,47],entiti:44,entranc:4,entri:[1,14,15,25,37,38,41,43,44],entropi:[15,22,23],entrypoi
nt:21,entrypoint_nam:17,enumer:[13,15,22,28,35,41],enumerate_support:15,env:[14,15],enviro:4,environ:[4,7,15,17,19,20,27,28,35,46],environment:8,epoch:[13,37,41],eps:[1,22,23,29,37,43,49],epsilon:[22,23,43],eq_:42,equal:[8,14,15,22,23,26,41,42,43],equal_nan:[42,43],equat:[43,47],equival:[3,13,15,19,22,23,36,40,42,43],eras:47,erf:[36,42,43],erf_:42,erfc:[42,43],erfc_:42,erfinv:[42,43],erfinv_:42,errno:35,error:[1,4,13,15,19,21,22,23,25,28,29,36,42,43,46],especi:[5,13,14,23,25,36,42,43],essenti:[13,35],estim:[15,22,37,43],eta:37,eta_:37,eta_min:37,eta_t:37,etaminu:37,etapli:37,etc:[13,14,15,19,22,29,30,32,36,37,41,43],eth0:14,eth1:14,eth2:14,eth3:14,ethernet:14,euclidean:23,eval:[19,22,36,46],evalu:[2,15,22,23,25,29,37,43,44,46],even:[1,13,14,19,23,28,29,30,32,33,40,42,43],event:[1,15,19,21,41],event_dim:15,event_file_writ:41,event_nam:31,event_shap:15,eventfilewrit:41,eventlist:1,eventu:[1,4],everi:[1,13,14,15,22,23,25,28,29,36,37,39,41,42,43,44,46],everyon:4,everyth:[4,13,15,19,21],everytim:14,everywher:[23,43],evid:4,evolv:36,exact:[1,22,24,32,34,38,43],exactli:[1,7,14,15,22,23,25,28,36,43,44],examin:36,exampl:[1,3,4,7,8,13,14,15,17,19,20,22,23,24,25,26,27,28,29,30,31,32,35,37,38,40,41,42,43,44,46,47,48],example_forward_input:19,example_input:19,example_output:36,example_weight:19,except:[1,3,4,14,15,19,21,22,23,28,35,36,38,42,43,46,47],exchang:[14,43],exclud:[19,22,23,43,44],exclus:[13,14,15,25,43],execut:[1,2,3,4,7,8,13,14,19,22,25,26,27,30,32,33,35,36,43],exist:[1,4,5,7,13,14,15,17,19,21,22,26,31,36,42,43,44],exit:[1,2,21,22,32,36],exp1:41,exp2:41,exp:[1,15,22,23,36,42,43],exp_:42,exp_famili:15,exp_rang:37,expand:[1,5,15,17,26,36,42,43,47],expand_a:[29,36,42,43],expans:47,expect:[1,4,13,14,19,22,23,30,37,41,43,44,46,47],expens:[2,13,15,31],experi:[22,41],experiment:[35,36,38,40],expert:5,explain:[17,28],explan:29,explicit:[28,36,43],explicitli:[8,14,19,23,28,31,36,38,43],explod:[22,41],explor:17,expm1:[42,43],expm1_:42,expon:[15,22,23,42,43],exponenti:[22,42,43],exponential_:[42,43],exponentiallr:37,export_chrome_trac:1,export_param:36,export_raw_ir:36,expos:[1,28,31],express:[25,42,43],exptransform:15,ext:[20,44],ext_modul:7,extend:[1,15,18,22,31,32,36],extens:[7,15,43,44],extension_kernel:7,extent:27,extern:[19,27,35],extra:[4,14,19,22,29,30,31,43,44],extra_cflag:7,extra_compile_arg:[7,35],extra_cuda_cflag:7,extra_fil:19,extra_include_path:7,extra_ldflag:7,extra_repr:[22,29],extract:[22,23],extrafilesmap:[19,31],extrem:[2,41],extrud:30,eye:[15,43],eye_:24,eyes:4,ezyang:[5,6],f_t:22,face:41,facebook:5,faces_tensor:41,facil:[21,43],facilit:[17,19,43],fact:[1,19,29,42,43],factor:[15,22,23,24,37,43,47],factori:[1,15,28],fail:[1,4,15,19,21,32,35,36,43],failur:[1,5,14,15,19,21,22],fake:44,fakedata:45,fall:[4,22,23,36,43],fallback:14,fals:[1,3,7,8,13,14,15,17,19,21,22,23,25,30,33,36,37,39,41,42,43,44,46,47,48],famili:15,familiar:[19,25],fan:24,fan_in:24,fan_out:24,fang:6,faq:[13,18,22],far:23,fashion:[4,14,19,42,45],fashionmnist:44,fast:[4,13,22,28,40,43],fast_forward:43,faster:[13,22,23,28,45],fasterrcnn_resnet50_fpn:46,fastest:[22,43],fatal:[21,32],favor:[8,22,23,43,47],favour:43,fcn:46,fcn_resnet101:46,fcn_resnet50:46,fcntl:14,featur:[5,18,19,22,23,29,30,36,41],featuredropout:36,fed:41,fedyunin:6,feed:[19,30],feedback:4,feedforward:[22,24],feel:4,feng:6,fetch:13,few:[1,4,17,25,30,35,42,43],fewer:[15,26,38,42,43,46],ffi:35,fft:[28,42,43],field:[4,14,22,23,32,36,46],figur:[4,27,36,41],file:[1,4,5,7,8,13,17,19,20,31,35,36,39,41,43,44,48],filenam:[7,19,20,39,41,48],filename_suffix:41,fileno:3
6,filesytem:[17,20],fill:[14,22,23,24,28,42,43,47],fill_:[22,39,42,43],fill_diagonal_:42,fill_row_zero:19,fill_valu:[22,28,42,43],fillcolor:47,filter:[22,23,42,43,47],financi:5,find:[1,4,5,7,14,17,21,22,27,28,29,30,32,36,40,41,43],find_unused_paramet:22,fine:[7,14,17,19,21,22,25,37,44,47],finetun:[22,25],finish:[14,28,35],finit:[1,2,15,29,43],fire:[31,46],first:[1,2,3,4,5,7,8,13,14,15,19,20,21,22,23,28,30,32,34,35,36,37,38,41,42,43,44,47],fisher:15,fit:[1,37,42,43],five_crop:47,fivecrop:47,fix:[19,22,23,30,32,33,35,36,43,44],flag:[1,7,19,21,22,25,28,41,42,43,47],flat:[36,43],flatten:[24,36,42,43,47],flickr30k:44,flickr8k:44,flickr:45,flip:[42,43,47],float16:[12,22,40,42,43,49],float32:[13,22,23,36,40,42,43,49],float64:[22,40,42,43,49],floatstorag:39,floattensor:[1,14,22,38,40,42,43,46,47],floor:[22,23,36,42,43],floor_:42,flow:[19,23,25,36],flush:[1,19,41,43],flush_sec:41,fly:[13,44],fmod:[42,43],fmod_:42,focu:37,focus:19,fold:[19,36,44],folder:[4,7,13,17,41,44],folk:4,follow:[1,4,5,12,13,14,15,17,19,20,21,22,23,24,26,27,28,30,32,35,36,38,40,42,43,44,46,47,49],foo:[17,19,36],foo_forward:36,foomodel:36,foomodul:36,foral:42,forc:[1,7,8,17,19,28,41],force_reload:17,forev:21,forg:[35,36],forget:[19,22],forgotten:35,fork:[13,21,22,27,30,32,35,46],forkingpickl:35,forkserv:[21,22,32],form:[1,4,5,13,15,19,22,23,29,33,36,37,41,43],format:[1,12,17,19,22,29,36,38,40,41,42,43,44,46],former:22,formul:[22,23],formula:[1,15,22,23,29,37,43],forth:[17,37],fortun:30,forum:[4,5,30,32],forward:[1,3,7,19,21,22,23,24,25,27,28,29,30,33,36,38,43],found:[19,22,32,41,43,46],four:47,fourier:43,fp16:22,fp32:22,fpn:46,fps:41,frac:[15,22,23,24,37,42,43,47],frac_:42,fraction:[13,22,24,43,47],frame:[41,43,44],frames_per_clip:44,framework:[4,5,15,36,37],frank:15,free:[1,4,14,15,24,25,30,32,35],freed:[1,21,28,42],freedom:15,freez:[22,25],freeze_support:35,frequenc:[22,23,37,43],frequent:[4,18,22],fresh:17,frisbe:46,fritz:6,fritzo:6,fro:[42,43],frobeniu:43,from:[1,4,5,7,8,13,14,15,19,21,22,23,24,28,29,30,31,32,37,38,40,41,42,43,44,46,47,48],from_buff:39,from_dlpack:16,from_fil:39,from_ipc_handl:8,from_numpi:[42,43],from_pretrain:22,front:[22,42],frozen:[25,35,37],full:[13,14,15,17,19,22,23,36,43,44],full_lik:[36,43],fulli:[13,14,22,25,28,29],func:[1,19,42],functioneventavg:1,functor:15,fundament:4,further:[1,5,7,14,22,32,33,41,43],furthermor:[7,22,33],fuse:47,fusion:19,fut:27,futur:[1,8,19,23,27,36,37,38,42],g_cpu:43,g_cpu_oth:43,g_cuda:43,g_t:22,gain:[5,24,47],galleri:4,gamma:[22,37,43,47],gamma_:43,gan:22,gap:43,garbag:13,gate:[22,23],gather:[8,14,30,31,36,42,43],gather_list:14,gaussian:[15,23],gchanan:[5,6],ge_:42,gel:[42,43],gemm:[27,36],gen_non_contig_grad_output:1,gener:[1,4,7,13,14,15,19,22,23,24,28,30,31,33,35,36,37,38,40,41,42,44,45,46],generate_square_subsequent_mask:22,geometr:[23,42,43],geometri:[28,43],geometric_:[42,43],geq:[22,23,24,43],geqrf:[42,43],ger:[42,43],gesdd:43,gesvd:43,get:[1,7,8,13,14,19,21,22,25,29,30,31,36,37,41,42,43,44,45],get_all_sharing_strategi:21,get_backend:14,get_context:32,get_default_dtyp:[43,49],get_devic:[38,40,42],get_device_cap:8,get_device_nam:8,get_image_backend:45,get_info:[42,43],get_input:36,get_lr:37,get_num_interop_thread:[27,43],get_num_thread:[27,43],get_rank:14,get_rng_stat:[8,43],get_rng_state_al:8,get_sharing_strategi:21,get_stat:43,get_worker_info:13,get_world_s:14,getenv:31,getsourc:31,gil:[13,14,28],gimelshein:6,giraff:46,girshick:22,github:[4,5,17,29,36,37],give:[1,2,4,13,17,19,21,22,25,28,29,36,37,43,44,47],given:[1,4,5,7,8,13,14,15,19,20,21,22,23,24,27,29,31,36,37,3
8,41,42,43,44,47,48],glass:46,global:[3,13,14,15,19,31,32,41,43,46],global_step:41,globalcontext:35,gloo:[14,22],gloo_socket_ifnam:14,glorot:24,glove:46,glu:36,goe:[22,30],going:[4,14,21,25,27,31,35,41],goldsborough:6,gomez:22,good:[4,7,17,21,22,27,31,43],govern:[4,18],gpu1:22,gpu:[1,2,4,8,12,13,18,19,28,33,35,37,39,42,43,46],gpu_model:19,gpudirect:14,grad:[1,3,15,19,22,32,38,42],grad_bia:29,grad_fn:[1,25,38,42],grad_input:[22,29,35],grad_output:[1,22,29,35],grad_tensor:1,grad_vari:1,grad_weight:29,gradcheck:[1,29,43],gradgradcheck:1,gradient:[3,13,14,15,19,22,23,25,29,30,37,38,42],graham:22,grai:47,grain:[14,25,47],grangier:22,grant:5,graph:[1,3,15,22,25,29,36,41,42,43],graphic:35,graphroot:1,grave:22,grayscal:[41,47],great:4,greater:[2,22,23,25,36,43],greater_than:15,greater_than_eq:15,greaterthan:15,greaterthaneq:15,greg:[5,6],gregori:6,grep:30,grid:[23,41,43,48],grid_i:43,grid_x:43,gross:[5,6],ground:[4,41,46],group:[1,5,17,20,21,22,23,36,37,41,42,43],group_by_input_shap:1,group_nam:14,grow:[4,38],gru:19,gt_:42,gtcoars:44,gtfine:44,guarante:[3,13,14,15,22,27,33],guard:32,guid:[1,13,18],guidanc:4,guidelin:[5,46],gumbel:23,h_0:22,h_1:22,h_i:22,h_n:22,h_t:22,hack:4,had:[4,19],hadamard:22,hair:46,half:[15,22,23,37,39,40,42,43],half_cauchi:15,half_norm:15,half_open_interv:15,halfopeninterv:15,halftensor:[40,42],ham:[23,43],hamiltonian:15,hamming_window:43,hand:[1,2,19,22,36,43],handbag:46,handi:28,handl:[1,3,8,13,14,17,21,22,23,28,30,32,36,42,43,44],handler:31,hang:22,hann:43,hann_window:43,happen:[1,4,5,14,15,21,22,29,30,32,35,42],happi:4,hard:[1,4,19,22,23,25,36],harder:22,hardshrink:42,hardtanh:36,hardtanh_:23,has:[1,3,4,5,8,12,13,14,15,19,21,22,23,25,26,27,29,31,32,35,36,37,38,39,40,41,42,43,44,46,47],has_enumerate_support:15,has_rsampl:15,hash:20,hasn:37,hat:22,have:[1,3,4,5,8,13,14,15,17,19,21,22,23,24,25,26,28,29,30,31,32,33,35,36,37,38,40,41,42,43,44,46,47],head:22,head_1:22,head_bia:22,head_h:22,head_i:22,header:[1,7,35],health:5,healthi:[4,5],heart:13,heavi:[1,14,25,35],heavili:[2,29,37],hei:4,height:[22,23,36,47],held:8,hellemn:6,hello:19,help:[1,2,4,5,13,17,22,25,26,28,36,43],helper:[3,14,17,19,22,28,36],henc:[22,28,43,44],here:[1,4,5,13,14,15,17,19,22,23,29,30,31,35,36,42,43,44,46,48],hessian:24,heurist:[7,13],hflip:47,hidden:[3,22,28,41],hidden_s:22,hierarch:41,high:[2,4,15,21,22,41,42,43],higher:[1,4,8,14,22,29,42],highest:[22,43],highli:[5,17,22,36],hing:22,hingeembeddingloss:23,hinton:37,his:37,histc:[42,43],histogram:[41,43],histor:27,histori:[1,29,30,37],history_s:37,hit:3,hmc:15,hmdb51:45,hmdb:44,hold:[1,22,26,29,30,32,37,40,42,43,47],holist:4,hood:[1,21,32],hook:[1,22,31,42],hop:43,hop_length:[42,43],horizont:47,horizontal:47,hors:46,host:[13,14,22,28,39,42],hot:[15,23,46],houseroad:6,how:[3,4,5,13,14,19,20,21,22,23,29,30,32,36,41,43,46,47],howev:[2,3,4,5,8,13,14,15,19,22,28,32,33,34,35,36,38,42,43,44],hspmm:38,hsv:47,html:[2,4,37,41],http:[2,4,17,20,22,35,36,37,41,44,46,47],hub:[18,20],hub_dir:17,hubconf:17,huber:22,hue:47,hue_factor:47,human:[0,24,36,44],hundr:31,hwc:41,hybrid:38,hydrant:46,hyper:19,hyperbol:43,i_0:43,i_d:43,i_n:43,i_t:22,icdf:15,icml_2006:22,idea:[4,22,31],ident:[1,7,13,14,15,23,24,33,38,42,43],identifi:[4,14,21,22,26,31,41,43],identity_transform:15,idiom:35,ids:[22,23],idx:[13,22],iff:15,ifft:[42,43],ignor:[4,8,19,22,23,29,37,42,43],ignore_index:[22,23],ignored_cod:19,illia:22,im2col:22,imag:[13,22,23,41,44,45,46,48],image_s:44,image_set:44,imagefold:45,imagenet:[14,24,45,46],imagenet_data:44,imagenet_root:44,imaginari:43,imbalanc:22,img:[41,44,47],i
mg_batch:41,img_height:47,img_hwc:41,img_tensor:41,img_width:47,immedi:[4,5,22],impact:33,imper:14,implement:[1,3,8,13,14,15,19,21,22,23,25,27,29,30,31,32,35,36,37,38,42,43,44,46],impli:21,implic:19,implicit:[22,23,36],implicit_cast:36,implicitcasttyp:36,implicitli:[4,19,22,43],importerror:35,impos:21,improb:13,improv:[5,12,14,22,29,36,37,46],in1:22,in1_featur:22,in2:22,in2_featur:22,in_channel:22,in_featur:[22,29],inaccuraci:1,inc:19,incas:42,incept:[36,47],inception_v3:46,includ:[1,2,3,4,5,7,13,14,17,19,21,22,23,27,28,30,31,33,42,43,44,46],include_path:7,inclus:[15,42,43],incom:[21,22,23],incompat:[5,7,26],incomplet:13,inconsist:43,incorrect:[2,19,23,28,42,43],increas:[1,4,8,11,15,22,23,25,28,37,47],increment:[1,19,22,25],incur:[3,32,43],inde:19,independ:[5,8,13,14,19,22,23,42],index:[8,13,15,18,19,21,22,23,25,28,36,37,38,40,41,42,44],index_add:42,index_add_:[33,42],index_copi:[36,42],index_copy_:42,index_fil:[36,42],index_fill_:42,index_put:42,index_put_:42,index_select:[36,42,43],indic:[1,8,13,14,15,19,22,23,29,36,37,38,41,42,43],individu:[4,5,13,19,22,31,33,42,43],induc:[9,10,23,42,43],inf:[15,22,43],infer:[1,18,19,23,36,38,42,43,46],inferencesess:36,infin:[22,37],infiniband:[14,22],infinit:[13,22,23],influenc:5,info:[8,14,18,43],inform:[1,2,4,13,14,19,22,27,29,31,36,40,41,42,43,44,46,47],infrastructur:5,infti:[22,23],ingredi:22,inherit:[19,29,32],init:[8,14,18,22,31],init_method:[14,22],init_process_group:[14,22],init_weight:22,initi:[2,8,13,19,22,23,24,28,29,31,35,37,42,43,46],initial_accumulator_valu:37,initial_se:[8,13,43],inject:31,inlin:[7,19,27],inline_extens:7,inner:[36,43],innermost:15,inp:[1,13,22],inp_unf:22,inplac:[22,23,36,47],inplace_assign:36,inplaceindexedassign:36,inplaceindexedassignmentonnx:36,input1:[22,23,35,36],input2:[22,23,35,36,42,43],input3:[42,43],input:[1,3,5,8,12,13,14,15,19,22,23,24,25,27,28,29,30,31,36,37,38,42,43,44,46,47],input_1:36,input_2:36,input_3x3:22,input_data:36,input_featur:29,input_length:[22,23,30],input_nam:36,input_s:22,input_tensor_list:14,input_to_model:41,input_tupl:19,input_var:[3,22],insensit:22,insert:[15,19,22,43],insid:[1,4,19,28,31],insight:4,inspect:[1,31],inspir:37,inst:44,instal:[1,7,14,17,36,41,43,44],instanc:[13,15,19,21,22,23,30,43,44,45],instance_norm:36,instancenorm1d:23,instancenorm2d:23,instancenorm3d:23,instancenorm:22,instantan:8,instanti:[17,19,22,23,29],instead:[1,3,5,13,14,15,19,22,23,30,32,35,36,37,38,42,43,47],instruct:[2,19,36],instrument:31,insuffici:8,int16:[40,42,43,49],int32:[22,40,42,43,49],int64:[22,23,28,36,40,42,43,49],int64tensor:46,int8:[40,42,43,49],int_:43,int_a:43,int_b:43,int_repr:42,integ:[8,13,14,15,19,22,23,36,37,40,42,43,47,49],integer_interv:15,integergreaterthan:15,integr:[13,22,31,36,37,42,43],intel:[35,45],intel_openmp:35,intens:[37,47],intent:[4,19],inter:[27,43],interact:[1,5,8,36,41],interchang:[15,19],interconnect:14,interest:[4,5,18,44],interfac:[29,36,37,41],intermedi:[3,19,22,23,25,30,36],intermediari:15,intern:[5,13,15,22,25,27,28,36,38,43,44,46],internet:[4,44],interop:43,interoper:27,interpol:[22,43,47],interpret:[13,14,21,23,27,38,43],interprocess:8,interrupt:21,interv:[15,43,47],intra:27,introduc:[15,17,22,26,42,43],introduct:[26,41],inttensor:[40,42,43],intuit:36,inv:[15,43],invalid:43,invari:[15,22,38,43,47],invers:[15,22,23,37,42,43],inverse_indic:43,invert:[15,22,43,46],invest:5,investig:4,invis:28,invoc:[3,19,29,31,36],invok:[19,22,27,31,37],involv:[4,5,13,28,30,33],iotamudelta:6,iou:46,ipc:8,ipc_collect:8,ipc_handl:8,ipp:45,irecv:14,irfft:[42,43],irrelev:1,irrespect:[28,43],
is_avail:[8,28,43],is_coalesc:38,is_complet:14,is_contigu:42,is_cuda:[39,42],is_floating_point:[40,42,43],is_in_onnx_export:36,is_initi:14,is_leaf:[1,42],is_mpi_avail:14,is_nccl_avail:14,is_pin:[13,39,42],is_python_modul:7,is_set_to:42,is_shar:[39,42],is_sign:42,is_spars:[39,42],is_storag:43,is_tensor:43,is_train:[1,43],is_valid_fil:44,isend:14,isfinit:43,isinf:43,isinst:15,isn:[4,13,28],isnan:[36,43],isol:21,issu:[5,21,22,28,32,35,36],itch:4,item:[13,17,22,33,41,42,43,44],iter:[4,8,14,15,21,22,25,26,37,41],iter_end:13,iter_start:13,iterabledataset:[13,31],itertool:[15,43],its:[1,2,4,5,7,8,13,14,15,19,21,22,25,26,28,29,30,35,36,37,38,40,42,43,44,46,47],itself:[3,4,19,21,22,23],ivalu:31,jacobian:[1,15,43],jakob:22,jang:15,jess:6,jit:[7,18,27,31,36,43],jitter:47,job:[14,22,31,37,41],johann:6,johnson:6,join:[4,14,17,21,32],jointli:[15,22],jone:22,joulin:22,journal:43,jpeg:31,json:[19,31,44],juggl:3,jump:[40,42],junji:6,just:[1,4,7,15,17,19,21,22,28,31,36,42,43],k_0:43,kaiming_normal_:24,kaiming_uniform_:24,kaiser:22,kdim:22,keep:[1,4,13,17,19,21,22,23,25,28,30,32,37,41,43,47],keep_var:22,keepdim:[22,23,42,43],kei:[1,13,14,19,22,31,36,37,39,41,42,43],kept:[21,22,23],kernel:[2,7,8,22,23,29,36],kernel_s:[22,23,41],kernel_shap:36,key_averag:1,key_padding_mask:22,keyboard:46,keypoint:45,keypointrcnn_resnet50_fpn:46,keyword:[1,17,19,22,36,37,41,43],kill:[21,30],kind:[14,22,29,32,36],kinet:45,kinetics400:44,kite:46,kl_diverg:15,kl_normal_norm:15,kl_version1:15,kl_version2:15,kldivloss:23,kmnist:45,knife:46,know:[3,4,19,25,36],known:[4,14,19,21,22,24,27,28,36,43,47],knuth:4,kth:43,kthvalu:[42,43],kullback:[15,22,23],kuzushiji:44,kw_i:22,kwarg:[1,3,7,17,22,23,34,36,39,42,43,44,46,47,48],kwlist:41,l1loss:23,l_1:22,l_c:22,l_n:22,l_p:23,lab:44,label:[4,13,22,23,32,41,44,46],label_img:41,lambd:[22,23,37,42,47],lambda1:37,lambda2:37,lambda:[1,13,15,22,23,37,42,43,47],lambdalr:37,langl:15,languag:[7,22,23,30,36],laptop:46,larg:[4,13,18,21,22,23,27,28,30,38,42,43,44,46,47],larger:[1,5,22,23,30,31,41,42,43,46,47],largest:[19,23,42,43,49],last:[1,3,13,19,22,23,25,37,43,46,47],last_epoch:37,later:[1,4,19,22,27,28,34,43],latest:[4,14,15,17,36],latin1:43,latter:[22,32],launch:[2,13,22,25,27,28],law:[22,47],layer:[14,23,24,25,29,30,37,46],layer_count:36,layer_norm:36,layernorm:23,layout:[17,19,20,38,41,42,43],lazi:37,lazili:8,lbfg:37,lbrace:43,lceil:43,ldot:[15,22,43],le_:42,lead:[1,4,35,42,43],leadership:5,leaf:[1,42,43],leak:21,leaki:[22,23,24],leaky_relu:[24,36],leaky_relu_:23,leakyrelu:23,learn:[4,15,18,22,24,44,46],learnabl:[22,23],learned_0:36,learned_14:36,learned_15:36,learned_1:36,learned_2:36,learned_3:36,learned_:36,least:[15,17,22,24,26,30,39,42,43,46],leav:[1,19,25,42,43,44],left:[19,22,23,36,42,43,47],left_ankl:46,left_ear:46,left_elbow:46,left_ey:46,left_hip:46,left_kne:46,left_should:46,left_wrist:46,leftimg8bit:44,legaci:[23,40],legitim:22,leibler:[15,22,23],lemma:15,len:[13,14,19,22,23,38,41,43,44],length:[1,8,13,14,15,19,22,23,26,30,36,42,43,47],leq:[22,23,43],lerp:[42,43],lerp_:42,less:[1,4,8,13,15,17,22,29,32,43,46],less_than:15,lesser:27,let:[1,4,13,15,28,29,32,35,41,42],letter:[43,44],level:[1,4,13,19,21,22,24,27,41,42,43,46],lexicograph:43,lfloor:[22,23,43],lib64:7,lib:[35,43],libari:35,libenzi:6,librari:[2,5,7,13,18,27,29,30,31,32,33,35,36,43,45],lie:[22,23,41],lies:44,lifetim:4,light:[41,46],lighter:47,like:[1,2,3,4,5,7,8,13,14,15,17,19,21,22,23,27,28,29,30,31,32,35,36,42,43,47],likelihood:[15,22,23],limit:[13,21,22,25],line:[1,2,22,26,35,36,43],line_search_fn:37,linear:[8,19,24,25,28,29,3
0,36,42,43,46],linearfunct:29,linearli:[22,23,30],lineartransform:47,liner:22,linewidth:43,link:[7,15,22,23,31],linker:7,linspac:43,linux:[14,17,20],list:[1,3,4,5,7,13,14,15,17,22,23,29,35,36,37,38,39,40,41,42,43,44,46,47,48],listconstruct:19,listofproperti:41,literatur:22,littl:29,live:[19,22,30,37],llion:22,load:[1,7,19,20,22,31,34,35,36,37,43,44,45,46],load_inlin:7,load_nvprof:1,load_state_dict:[17,22,34,37,43],load_state_dict_from_url:[17,20],load_url:[20,46],loadabl:17,loadann:44,loaded_weight:42,loader:[13,44],loc:[15,43],local:[14,17,19,21,22,23,30,41,44],local_rank:22,locallr_0:41,localresponsenorm:23,locat:[1,7,8,15,17,19,20,22,23,37,38,41,42,43,44,46,47],lock:[4,13,14,15,28,32],log10:[42,43],log10_:42,log1p:[42,43],log1p_:42,log2:[36,42,43],log2_:42,log:[7,13,15,22,23,36,41,42,43],log_:[42,43],log_abs_det_jacobian:15,log_dir:41,log_input:[22,23],log_norm:15,log_normal_:[42,43],log_pob:22,log_prob:[15,22,23],log_sigmoid:36,log_softmax:[22,36],logabsdet:43,logarithm:[22,23,43],logdet:[42,43],logdir:41,logic:[3,13,29],logist:[15,22],logit:[15,22,23],logsoftmax:23,logspac:43,logsumexp:[36,42,43],longer:1,longest:[22,30],longtensor:[22,23,38,40,42,43],look:[2,4,5,15,19,22,23,31,32,35,36,43],lookup:[15,22,23,27],loop:[8,19,27,30,36,41,47],loop_count:36,loop_in_traced_fn:19,loop_rang:36,loopmodel2:36,loopmodel:36,loos:31,lorentz:15,loss:[15,30,37,41,44,46],loss_fn:[32,37],lost:[22,43],lot:[4,21,32,41],low:[4,15,21,22,42,43],lower:[1,8,14,15,19,22,23,24,25,33,37,43],lower_bound:15,lower_choleski:15,lower_triangular:15,lowercas:14,lowercholeski:15,lowercholeskytransform:15,lowest:43,lowrank_multivariate_norm:15,lppool1d:23,lppool2d:23,lr_0:41,lr_decai:37,lr_lambda:37,lr_schedul:37,lrelu:22,lrn:22,lru:[28,43],lstm:[3,36,41],lstsq:[42,43],lsun:45,lt_:42,lu_data:[42,43],lu_pivot:[42,43],lu_solv:[42,43],lu_unpack:43,lukasz:22,lvert:[22,23,43],macbook:41,machin:[14,22,31],maco:21,maddison:15,made:[1,5,19,22,35,37,41,47],mae:22,magma:[35,43],magma_2:35,magma_hom:35,magnitud:[22,24,43],mai:[1,2,4,7,8,9,10,11,13,14,15,19,22,23,26,28,30,35,36,37,38,39,42,43,47],main:[13,14,15,21,23,25,34,35,41,42,43],main_tag:41,mainli:[15,22,23],mainta:47,maintain:[4,14,15,22],major:[4,8,22,23,36,38],make:[1,2,3,7,8,11,13,14,15,17,19,21,22,23,25,26,28,29,30,32,33,35,36,37,40,41,42,43,44,47,48],make_grid:[41,48],manag:[1,4,22,30,31,36,43],mandatorili:13,mani:[1,4,5,13,14,19,22,24,25,26,27,29,31,33,40,41,42,43,45],manipul:30,manner:[3,26,42],mantissa:42,manual:[13,14,19,21,22,23,28,30,33,35,36,41],manual_se:[8,33,43],manual_seed_al:8,map:[7,15,19,22,23,35,36,39,43],map_:42,map_loc:[19,20,22,43],margin:[22,23,41],marginrankingloss:23,mark:[8,19,22,25,42],marker:8,market:[4,5],marten:24,mask:[22,42,43],masked_fil:[36,42],masked_fill_:42,masked_scatt:42,masked_scatter_:42,masked_select:[42,43],maskrcnn_resnet50_fpn:46,mass:15,master:[4,17,36],master_addr:14,master_port:14,mat1:[38,42,43],mat2:[38,42,43],mat:[38,41,42,43,44],match:[1,8,14,15,19,22,23,26,36,37,40,42,43,44,47],math:[13,19,22,23],mathbb:22,mathbf:[15,22],mathbin:43,mathcal:[22,24,43],mathemat:[22,23,43],mathrm:[15,22,43],matmul:[22,42,43],matplotlib:41,matric:[15,23,38,42,43],matrix:[15,22,23,24,38,41,42,43,47],matrix_pow:[42,43],matrix_rank:43,matter:[1,2,5,19,25,43],max:[13,14,19,22,23,26,30,36,37,42,43,47,48,49],max_:22,max_bin:41,max_ev:37,max_indic:43,max_it:37,max_lr:37,max_memory_alloc:[8,28],max_memory_cach:[8,28],max_momentum:37,max_norm:[22,23],max_queu:41,max_siz:28,max_val:[22,23],max_valu:22,maxim:[22,37,43],maximum:[8,15,22,23,28,37,43,47],ma
xnorm:[42,43],maxpool1d:[23,36],maxpool2d:[23,36],maxpool3d:[23,36],maxpool:[22,36],maxunpool1d:23,maxunpool2d:23,maxunpool3d:23,may04_22:41,mayb:4,mean:[1,3,4,8,13,14,15,19,21,22,23,24,30,33,35,36,37,42,43,46,47],mean_vector:47,meant:[1,22],meantim:[22,23],measur:[8,15,22,23,31,37],mechan:[18,21,31],median:[15,42,43],medium:4,meet:28,megabyt:22,member:[4,5,13,14,19,22,30],membership:5,memo:22,memoiz:15,memori:[1,3,15,16,19,21,22,23,25,32,37,39,40,41,42,43,46],memory_alloc:[8,28],memory_cach:[8,28],memory_effici:46,memory_key_padding_mask:22,memory_mask:22,mendoza:6,mention:[17,19,28],mere:4,merg:[4,5,13,22],mesh:41,meshgrid:43,messag:[4,8,17,19,30,36,37],messmer:6,meta:41,metadata:[19,41,43],metadata_head:41,meter:46,meth:43,method:[1,4,7,8,13,14,15,17,21,22,23,24,28,29,30,32,36,37,38,40,41,42,43,44,46],metric:[8,37,41],michael:6,microwav:46,middl:36,might:[1,2,5,17,19,22,25,27,28,31,42,43,44],mileston:37,millisecond:8,min:[13,14,22,23,36,37,42,43,47,48,49],min_indic:43,min_lr:37,min_siz:46,min_val:[22,23],min_valu:22,min_x:43,mind:22,minfunc:37,mini:[13,22,23,46,48],minibatch:[13,22,23,43],minim:[1,4,17,32,37,43],minimum:[7,22,37,43,46],minkowski:23,minlength:[42,43],minor:[5,8],minu:43,minut:[4,14,41],mismatch:[19,30,43,47],miss:[22,35,36],missing_kei:22,mistak:30,mix:[7,15,22,27,36],mkl:[27,35,43],mkl_2018:35,mkl_fft:35,mkl_num_thread:27,mkl_thread:27,mkldnn:42,mkldnn_thread:27,mmap:21,mnasnet0_5:46,mnasnet0_75:46,mnasnet1_0:46,mnasnet1_3:46,mnist:[41,45],mnist_train:41,mnt:14,mobil:46,mobilenet_v2:46,mobilenetv2:46,mod:19,mode:[1,2,13,14,15,19,22,23,24,30,33,36,37,42,43,44,46,47],model:[1,2,3,8,14,18,19,20,22,23,25,27,28,32,33,36,37,41,43,45,47],model_dir:20,model_zoo:[18,46],moder:3,modif:[1,42,43],modifi:[1,13,19,22,23,25,28,36,37,42],modul:[1,3,7,17,18,21,23,25,27,28,30,31,32,35,36,41,42,43,46,47],module_kwarg:23,modulelist:19,modulu:43,momemtum:22,moment:[1,21,36,37],momentum:[22,23,25,37],monitor:[8,28,37,43],monoton:15,mont:15,moor:43,more:[1,2,5,7,8,13,14,15,19,20,21,22,23,24,25,27,28,29,30,31,36,37,38,40,41,42,43,44,46,47],moreov:[42,43],most:[1,2,4,8,13,14,15,17,19,21,22,23,25,28,32,37,38,40,42,43],mostli:[4,15],motion:44,motiv:4,motorbik:46,motorcycl:46,mountain:44,mous:46,moustapha:22,move:[3,19,20,21,22,23,28,32,37,39,41,42,43],moviepi:41,mpi:14,mrshenli:6,mseloss:23,msg:8,msys2:35,much:[1,2,4,5,13,22,28,42,47],mul:[1,19,36,38,42,43],mul_:[38,42],mulconst:29,mult:13,multi:[2,8,19,36,40,42,43],multicast:14,multidimension:22,multihead:22,multihead_attn:22,multilabelmarginloss:23,multilabelsoftmarginloss:23,multilay:22,multilin:41,multilinear:43,multimarginloss:23,multinomi:[42,43],multipl:[8,13,14,15,17,19,21,22,23,27,28,29,31,32,35,37,38,43,44,47],multipli:[22,23,38,43,46,47],multiplicand:43,multiprocess:[13,14,18,22,44],multiprocessing_context:13,multisteplr:37,multivari:[15,43],multivariate_norm:15,must:[1,7,8,13,14,15,19,21,22,23,24,26,29,32,33,36,37,39,42,43,47],mutabl:19,mutat:[19,42,47],mutual:[13,14],mvlgamma:[42,43],mvlgamma_:42,mvn:15,my_api:31,my_constraint:15,my_dict:19,my_experi:41,my_factori:15,my_imag:41,my_image_batch:41,my_image_hwc:41,my_lib:35,my_lib_add_backward_cuda:35,my_lib_add_forward_cuda:35,my_list:19,my_lstm:30,my_mesh:41,my_modul:19,my_module_inst:19,my_paramet:19,my_registri:15,my_script_modul:19,my_segmentation_transform:47,my_submodul:19,my_transform:15,my_variable_nam:19,myconstraint:15,myconstraintclass:15,myfunc:1,myiterabledataset:13,mymodel:32,mymodul:[19,22,30],mypi:19,myscriptmodul:19,mytransform:15,n_0:23,n_1:43,n_2:43,n_class:22,n_d:43,
n_fft:[42,43],n_i:[22,43],n_iter:41,n_k:[23,43],n_power_iter:22,n_t:22,naiv:13,name:[1,7,8,14,15,17,19,20,21,22,24,31,36,39,41,43,44,45,49],named_buff:22,named_children:22,named_modul:22,named_paramet:22,namedtupl:[13,22,43],namespac:19,nan:[1,43],narrow:[36,42,43],narrow_copi:[38,42],nasdaq:41,natalia:6,nativ:[19,21],natur:[1,2,4,15,22,33,43],nbatch:22,nccl2:22,nccl:22,nccl_debug:14,nccl_debug_subsi:14,nccl_socket_ifnam:14,nchannel:22,nchw:41,ncrop:47,ndarrai:[36,42,43,47],ndim:42,ndimens:42,ne_:42,nearest:[22,23,47],nearli:[1,32,42],necessari:[1,7,13,19,22,25,26,28,35,40,42,43],necessarili:[14,15,22,28,36,43],need:[1,4,5,8,13,14,15,19,21,22,23,25,28,29,30,31,32,33,35,36,37,38,39,42,43,44],need_weight:22,needs_input:31,needs_input_grad:[1,29],neeraj:6,neerajprad:6,neg:[8,13,15,19,22,23,24,36,42,43,47],neg_:42,negative_binomi:15,negative_slop:[22,23,24],neglig:[1,36],negoti:5,neighbor:[22,43],neighborhood:22,neighbour:[22,23],neither:[13,14],nelement:[22,42],neq:[22,43],nest:[1,8,19,22,42],nesterov:37,net:[19,22,28,41],netlifi:4,network:[4,15,19,22,23,24,25,28,36,37,47],neural:[4,19,22,24,28,37,46],neuron:22,never:[1,3,4,14,22,25,42],new_:[28,42],new_data:36,new_empti:42,new_ful:[28,42],new_group:[14,22],new_lr:37,new_on:42,new_stat:[8,43],new_strategi:21,new_tensor:[28,42],new_zero:42,newer:[27,28],newli:[4,25],next:[1,13,14,15,21,22,23,27,32,36,40,41,42,43,44],next_stat:15,nfs:14,ngimel:6,nhead:22,nhwc:41,nice:[1,22],niederreit:43,nielsen:15,nightli:41,niki:22,nine:[40,42],ninja:[7,35],nist:44,nll:22,nllloss:23,nlp:22,nnz:[1,38,42,43],no_grad:[1,3,36,43],no_sync:22,noam:22,noarch:35,nock:15,node:[14,22,36],non:[1,3,7,14,15,19,21,24,26,28,29,30,33,41,42,43,47],non_block:[22,28,39,42],noncontigu:1,nondet_tol:1,nondetermin:1,nondeterminist:[9,10,11,22,23,42,43],none:[1,7,8,13,14,15,19,20,21,22,23,24,28,29,32,36,37,38,39,41,42,43,44,46,47,48],nonexist:19,nonlinear:24,nonlinearli:4,nonneg:15,nonnegative_integ:15,nonzero:[1,36,42,43],noordhui:6,noplot:4,nor:[13,14,22],norm:[22,23,36,37,42,43],norm_typ:[22,23],normal:[1,17,19,24,28,37,41,42,43,46,47,48],normal_:[24,28,42,43],normalized_shap:[22,23],nose:46,notabl:47,notat:[42,43],note:[1,7,8,9,10,11,13,14,15,16,17,18,19,21,22,23,25,26,27,29,31,32,36,37,38,41,42,43,44,47],notebook:[4,48],noth:[4,7,8],notic:[19,22,43],notifi:5,notimplementederror:15,notion:[13,22],now:[1,3,22,26,28,29,36,37,42,43],nproc:21,nrow:48,nsdf3:44,nthread:44,nuanc:4,nuc:43,nuclear:43,num:[22,43],num_channel:22,num_class:[23,44,46],num_decoder_lay:22,num_direct:22,num_embed:[22,23],num_encoder_lay:22,num_featur:[22,23],num_group:22,num_head:22,num_keypoint:46,num_lay:[22,36],num_lin:44,num_output_channel:47,num_paramet:22,num_process:32,num_replica:13,num_sampl:[13,42,43],num_threshold:41,num_work:[13,35,44],number:[1,2,3,4,13,14,15,19,21,22,23,26,27,28,29,33,36,37,38,39,41,42,43,44,46,47,48,49],number_of_vertic:41,numel:[42,43],numer:[13,15,19,22,23,29,36,37,42,43,49],numpi:[13,26,30,35,36,41,42,43,44,47,49],nvcc:7,nvidia:[1,14,28,30,35,43],nvprof:[1,2],nvtx:[1,2],nvvp:1,o_t:22,obermey:6,obj:[8,19,35,43],object:[1,8,13,14,15,16,19,20,21,22,25,27,28,29,30,32,35,36,37,39,40,41,42,43,44,45,47,49],observ:[22,23],obtain:[1,13,14,15,21,22,23,27,42,43,46],obviou:[30,38],obvious:4,occas:[1,4,25],occasion:38,occupi:[8,22,23,28,49],occur:[8,13,19,22,23,28,30,36,42],occurr:43,odd:15,off:[1,4,8,9,10,22,23,27,31,42,43],offici:[5,14,22,35,46],offlin:[19,47],offset:[22,23,42,43,44,47],often:[1,2,4,7,13,14,15,19,22,23,30,31,36,37,41,42,43],old:[25,35,37,43],older:28,omagma:35,omega:43,om
ega_1:43,omega_d:43,omega_i:43,omit:[3,7,14,22,35,36,43,47],omkl:35,omp:27,omp_num_thread:27,onc:[1,4,13,14,16,21,22,25,27,28,29,31,36,37,41,43],one:[1,2,3,4,7,8,13,14,15,19,21,22,23,26,27,28,29,31,32,33,35,36,37,39,40,41,42,43,44,45,46,47],one_hot_categor:15,ones:[1,14,15,19,22,23,26,28,29,36,37,42,43],ones_:24,ones_lik:[28,36,43],onesid:[42,43],onfunctionent:31,onfunctionexit:31,onli:[1,2,3,4,5,8,13,14,15,16,19,21,22,23,24,25,27,29,30,31,32,34,35,36,37,38,41,42,43,46,47],onlin:37,only_input:1,onnx:[18,22],onnx_aten:36,onnx_aten_fallback:36,onnx_model:36,onnxruntim:36,onto:[8,19,21,30,43],opaqu:14,open:[1,5,15,19,21,35,43],openbla:35,openmp:[27,35],oper:[2,3,4,5,8,9,10,11,13,15,22,23,26,27,28,29,30,32,33,37,38,40,42,45,47],operand:43,operator_export_typ:36,operatorexporttyp:36,operatornam:[22,43],opinion:4,opnam:36,oppos:47,ops:[1,14,19,27,28,29,36,42,43],opset:36,opset_vers:36,opt:43,optim:[1,4,7,14,15,18,19,22,24,25,27,30,32,36],optimiz:[19,36],optimum:37,option:[1,3,7,8,13,14,15,17,20,22,23,24,29,30,33,36,38,40,41,42,43,44,46,47,48],optional_unwrap:19,orang:46,ord:43,order:[1,3,4,14,15,17,21,22,26,28,29,33,36,37,42,43,46,47],ordereddict:22,ordin:[40,42],ordinari:8,org:[2,4,17,22,35,41,46,47],organ:[4,5,31],orgqr:[42,43],orient:36,origin:[1,13,19,21,22,28,31,32,36,37,39,42,43,47],orign:47,orion:6,orionr:6,ormqr:[42,43],ort:36,ort_sess:36,orthogon:[24,43],orthogonal_:24,orthonorm:43,ossci:35,other:[1,2,4,5,7,8,13,15,19,21,23,25,26,27,28,29,30,32,33,34,36,37,41,42,46,47,48],otherwis:[1,4,5,7,14,22,23,32,39,42,43,44,46],otim:[23,43],our:[4,19,29,32,36,38],out:[1,4,5,17,19,21,22,23,25,26,32,36,38,40,41,42,43,47],out_caffe2:36,out_channel:22,out_featur:[22,29],out_j:22,out_ort:36,out_padh:23,out_padt:23,out_padw:23,out_unf:22,outer:[43,46],outlier:22,output1:[22,36],output2:22,output:[1,2,3,4,8,13,14,15,19,22,23,25,27,29,30,35,36,37,38,41,42,43,44,46,47],output_2d:22,output_4d:22,output_devic:[22,23],output_featur:29,output_nam:36,output_pad:[22,23],output_ratio:22,output_s:[22,23],output_tensor_list:14,outsid:[5,13,19,23,28,47],oven:46,over:[1,5,13,14,15,19,21,22,23,26,27,32,36,37,38,42,43,44,47,48],overal:[5,14,25,32,47],overall_end:13,overall_start:13,overflow:[23,43],overhead:[1,2,14,31,42],overheard:44,overlap:[1,13,22,28],overparameter:15,overrid:[7,14,15,22,23,36,37,41,43],overridden:[1,7,22],overrit:13,overshoot:23,overview:[21,25],overwhelm:4,overwrit:[13,22,25],owen:43,own:[4,5,14,15,22,28,36,43],owner:17,ownership:[4,5],p1d:23,p2d:23,p3d:23,p_c:22,p_i:22,p_tensor:42,pace:4,pack:[22,30,35,43],pack_padded_sequ:30,packag:[4,8,15,17,18,22,37,41,43,45],packagesnotfounderror:35,packed_input:30,packed_output:30,packedsequ:12,pad:[13,30,33,36,41,43,47,48],pad_if_need:47,pad_mod:[42,43],pad_packed_sequ:30,pad_valu:48,padded_input:30,padding_idx:[22,23],padding_input:30,padding_mod:[22,23,47],padding_valu:22,padh:23,padt:23,padw:23,page:[4,13,22,28],pai:35,pair:[19,22,23,37,38,41,43],pairwis:[15,22],pairwisedist:23,paper:[4,22,37,46],parallel:[0,13,14,22,23,27,28,33,35,47],parallel_info:[0,27],parallelli:44,param1:15,param2:15,param:[1,15,22,24,25,36,37,46],param_byt:37,param_group:37,param_shap:15,paramet:[1,3,7,8,13,14,15,16,17,20,21,23,24,25,29,31,32,34,36,38,39,41,42,43,44,45,46,47,48],parameter:[15,42],parameteriz:15,parametr:[15,29],parent:[21,35,41],park:46,parmar:22,pars:[1,14],parse_arg:[28,36],parser:28,part:[2,3,4,5,7,14,15,19,20,22,25,30,36,37,38,43,44],parti:[5,17],partial:[15,22,23,36,43],particip:[13,14,22],particular:[4,13,19,22,27,28,30,31,33,36,42,43,44],particularli:[13,19
,22],partit:22,partli:5,partner:4,pascal:[44,46],pass:[1,3,4,7,13,14,15,17,19,21,22,24,25,27,28,31,36,37,38,41,42,43,44,46,47],past:[14,30,46],paszk:[5,6],patch:[4,22],path:[1,2,7,14,17,19,25,34,44],path_importer_cach:17,path_to_hub_dir:17,pathwai:19,patienc:37,pattern:[14,19,22,28,29,30],pdb:19,pdf:[22,46,47],pdist:22,peak:[8,37],peer:[4,14,28],penalti:37,pend:41,penros:43,peopl:4,per:[7,8,13,14,22,23,27,31,33,41,43,46],per_index_weight:22,per_sample_weight:[22,23],per_work:13,perform:[1,3,11,12,13,14,15,19,21,22,23,24,25,28,29,33,37,38,39,40,41,42,43,47],period:[32,37,43],permit:38,permut:[13,36,42,43],perplex:15,persist:[3,4,12,22,35],person:[4,5,18,45],perspect:47,perturb:[1,43],peter:6,peterjc123:[6,35],phase:35,phenomenon:30,phi:23,phone:46,photo:44,phototour:45,php:44,phy:43,pic:47,pick:47,pickl:[13,21,22,43],pickle_load_arg:43,pickle_modul:43,pickle_protocol:43,pid:30,piec:4,pieter:6,pietern:6,pil:[44,45],pillow:[41,47],pin:[22,39,42,43],pin_memori:[13,28,39,42,43],pinvers:[42,43],pip:[35,41],pipelin:47,pivot:[42,43],pixel:[22,23,44,47,48],pixel_shuffl:[22,36],pixelshuffl:23,pixelwis:46,pizza:46,pjh5:6,pkg:35,place:[4,8,13,14,19,22,23,28,31,36,39,42,47],placehold:22,plai:14,plain:[7,22],plan:[4,14,22,43],plane:[22,23,43,44],plant:46,platform:[7,33,43,46],pleas:[1,2,4,5,9,10,11,14,15,19,22,23,29,35,36,37,41,42,43,44],plenti:30,plot:41,plu:47,plume:44,pmf:15,png:44,point:[1,4,5,8,13,19,22,23,25,33,37,40,41,42,43,44,49],pointer:8,pointwis:[15,26],poisson:[22,23],poissonnllloss:23,poli:44,polici:[15,37],policy_network:15,polosukhin:22,polygon:44,polymorph:19,pool:[27,29,32,33],pop:[8,22],popul:[1,15,42],popular:45,popularli:47,port:14,portion:[22,23,37,43],pos_weight:[22,23],posit:[1,13,15,17,22,23,36,42,43,47,49],positive_definit:15,positive_integ:15,positivedefinit:15,possess:5,possibl:[5,7,13,15,19,21,22,23,24,25,29,32,35,40,42,43,44],post:[4,30,35,46,47],postprocess:46,pot:46,potenti:[11,14,21,22,23,25,36],potential_energi:15,pottedpl:46,pow:[1,36,42,43],pow_:42,powbackward0:1,power:[22,23,37,43,47],powertransform:15,pr_curv:41,practic:[13,15,18,19,21,46],pradhan:6,pre:[1,17,22,36,37,42,44,46],preced:27,precis:[1,7,15,22,36,41,43,46],precision_matrix:15,precompil:31,predict:[22,41,46],predict_net:36,preemptiv:22,prefer:[5,13,22],preferr:43,prefix:[17,22,38],prelu:36,prepar:36,prepend:[7,13,22,26,43],preprocess:[42,46],presenc:5,present:[5,14,20,21,22,25,40,43,44,46],preserv:[13,19,22,23,24,28,42,47],preserve_rng_st:3,pressur:[1,25],pretrain:[17,22,25,36,46],pretrained_backbon:46,pretti:[19,43],prevent:[4,8,13,14,21,22,23,38,43],previou:[14,22,35,42,43],previous:[1,19,26,28,42],prim:19,primari:5,primarili:[15,42],primit:[14,19,27],print:[1,13,17,19,22,23,27,28,29,36,37,41,42,43,44],printable_graph:36,printer:19,prior:[4,5,26,37],prioriti:[4,8],pro:[35,41],prob:15,probabl:[13,21,22,23,29,35,36,41,42,43,47],problem:[4,14,21,22,30,32,33,35,43],problemat:[4,19],proce:28,procedur:[19,44],proceed:8,process:[1,7,8,14,15,17,19,21,22,23,27,28,31,32,33,35,38,39,44,46],process_group:22,process_id:22,processgroup:14,prod:[22,36,42,43],prod_:[22,43],prod_d:22,produc:[4,7,8,13,19,21,22,23,26,28,31,35,36,38,43],producer_info:31,product:[1,14,15,19,22,23,31,42,43,47],prof:1,profil:[2,43],program:[1,2,8,13,14,19,25,28,30,31,32,35,41],programm:19,progress:[8,17,20,37,46],project:[4,17,34],projector:41,promot:22,prompt:35,prone:[21,32],propag:[15,21,36,38,42],proper:[22,28,35],properli:[4,22,32,40,43],properti:[1,13,15,19,22,23,28,37,40,49],proport:[22,47],proportion:[22,23],propos:[5,6,37],protobuf:36
,protocol:[13,35,43],prototyp:40,prove:21,proven:[4,22],provid:[1,4,5,7,13,14,15,17,19,21,22,23,28,36,37,38,39,40,41,42,43,46,47,49],pseudo:43,pseudoinvers:15,pseudorandom:33,psi:43,pth:[17,19,20],publish:4,puhrsch:6,pull:5,purchas:5,pure:19,purg:41,purge_step:41,purpos:[14,22,42,43],push:[4,5,8],pushcallback:31,put:[4,13,17,21,22,28,32,37,42,43,44],put_:42,pybind11:[7,19],pyc:31,pycapsul:16,pyplot:41,python2:[14,43],python3:[14,43],python:[1,2,5,7,8,13,14,17,21,22,23,25,26,27,28,29,30,31,32,36,38,42,43,47],pytorch:[0,1,2,7,8,13,15,17,19,20,22,26,27,28,30,31,32,35,37,40,41,42,43,44,46,49],pytorch_jit:19,q_scale:42,q_zero_point:42,qmnist:45,qscheme:42,qtensor:42,quad:22,quadrat:30,qualiti:[4,44],quantiti:37,quantiz:[1,42],quasirandom:43,queri:[8,13,22,28],question:[4,18],queu:[8,28],queue:[4,21,41],queue_2:21,quick:[1,4],quickli:4,quit:[4,30],qw_i:22,r_t:22,racket:46,rais:[1,4,15,19,21,25,28,42,43,47],raise_except:1,ram:[41,43],rand:[1,19,23,36,41,42,43,46],rand_lik:43,randint:[22,23,38,41,42,43,47],randint_lik:43,randn:[1,19,22,23,25,26,28,29,36,38,40,41,42,43],randn_lik:[36,43],random:[15,17,22,23,33,36,41,42,44,46,47],random_:[22,23,42,43],random_devic:43,random_offset:44,random_split:13,randomaffin:47,randomappli:47,randomchoic:47,randomcrop:[44,47],randomeras:47,randomgrayscal:47,randomhorizontalflip:47,randomli:[1,13,22,23,31,44,47],randomord:47,randomperspect:47,randomresizedcrop:47,randomrot:47,randomsampl:13,randomsizedcrop:47,randomverticalflip:47,randperm:43,rang:[1,8,13,14,15,19,22,23,30,32,36,37,41,42,43,44,46,47,48],range_pop:8,range_push:8,rangl:15,rank:[13,14,15,22,32,43],rapidli:30,rare:4,rate:[15,22,31,41,46],rather:[1,3,7,19,23,26,36,41,42,43,48],ratio:[15,22,47],raw:[19,22,36],rbrace:43,rceil:43,rcond:43,rdinat:[38,43],reach:[4,5,13,32,37],reachabl:14,read:[13,14,19,26,28,31,36,37,42,43],readabl:[0,13,36],readi:[4,7,22,43],readlin:[19,43],real:[13,15,17,22,43,47],real_vector:15,realiti:2,realli:[1,4,25,43],realloc:43,realvector:15,rearrang:[22,23],reason:[13,14,19,23,25,36,40],rebas:4,reblitz:6,rebuild:4,rebuilt:4,recal:[22,29,41,46],receiv:[1,4,13,14,15,21,22,32],recent:[1,4],recip:22,reciproc:[36,42,43],reciprocal_:42,recogn:13,recognit:[44,46],recommend:[1,13,14,17,19,21,22,24,25,27,28,29,32,36,42,43],recomput:[3,22,37],reconstruct:[8,22],record:[1,8,19,22,25,36,41,42,43],record_ev:8,record_shap:1,recordfunct:31,recov:[22,43],recreat:25,rectangl:47,rectifi:[22,23,24],recurr:[14,19,28,37],recurs:[15,19,22],recv:14,redistribut:35,reduc:[1,8,14,21,22,23,35,37,38,42,43],reduce_add:8,reduce_multigpu:14,reduce_op:14,reducelronplateau:37,reduceop:14,reduct:[14,22,23],redund:[14,43],reevalu:37,refactor:[4,34,35],refcount:[21,32],refer:[8,13,14,15,17,18,21,22,23,25,29,30,32,41,42,43,45,46],referenc:[19,25,43],reflect:[19,22,23,30,42,43,47],reflection_pad:36,reflectionpad2d:23,reflector:43,refriger:46,regard:[19,22,23,43],region:[15,19,21,22,23,28,43,47],regist:[1,15,21,22,29,31,36,42,43],register_backward_hook:22,register_buff:[19,22,29],register_custom_op_symbol:36,register_forward_hook:22,register_forward_pre_hook:22,register_hook:[1,22,42],register_kl:15,register_packag:43,register_paramet:[22,29],registr:22,regress:[4,22,46],regular:[1,2,22,31,36,37],regularli:4,reimplement:22,reinforc:15,reiniti:17,reinterpret:[15,42],reinterpreted_batch_ndim:15,rel:[1,5,7,15,22,27,28,31,37,43],relat:[4,5,13],relationship:1,relative_path_to_checkpoint:17,relative_to:35,relax:[15,19],relaxed_bernoulli:15,relaxed_categor:15,releas:[4,8,14,17,21,22,23,28,33,35,36,41,43],relev:[5,42],reli:[5,1
3,22,33],reload:17,relu1:22,relu2:22,relu:[19,24,36],relu_:23,rem:35,remain:[1,15,21,30,43,44],remaind:[42,43],remainder_:42,remap:[19,20,43],rememb:[30,32],remot:[13,14,46],remov:[1,5,14,19,22,23,42,43],removablehandl:22,render:[4,41],renorm:[22,23,42,43],renorm_:42,rep:36,repackag:30,reparameter:[15,22],reparametr:[15,23],reparametriz:15,repeat:[15,23,36,42,43,47],repeat_interleav:[42,43],repeatedli:[28,38,43],repetit:43,repl:1,replac:[7,13,19,22,25,31,32,35,36,42,43],replic:[13,22,23],replica:[13,14,22],replication_pad:36,replicationpad2d:23,repo:[4,17,35,37,46],repo_nam:17,repo_own:17,report:[1,2,5,28,46],repositori:[5,17,29,32],repr:43,repres:[1,8,13,15,16,19,22,25,29,31,36,37,38,40,43,49],represent:[19,22,36,38,42,49],reproduc:[4,9,10,11,17,18,22,23,37,42,43],request:[5,14,25,28],requir:[1,3,5,7,13,14,15,17,19,21,22,23,25,27,28,29,30,31,32,36,37,41,42,43,44,46],require_grad:1,require_grad_:42,requires_grad:[1,15,22,23,29,38,42,43],requires_grad_:[1,22,23,38,42,43],rerun:3,res:43,resampl:47,rescal:[22,23,47],research:[4,17],reset:[8,22,36,43],reset_max_memory_alloc:8,reset_max_memory_cach:8,reshap:[22,36,38,41,42,43,47],reshape_a:[36,42],reshuffl:13,resid:[14,22,28,42,43],residu:[43,46],resili:37,resiz:[22,23,39,42,43,46,47],resize_:[1,19,39,42,43],resize_as_:[1,42],resizeas_:38,resized_crop:47,resnet101:46,resnet152:46,resnet18:[17,19,20,25,46],resnet34:46,resnet50:[17,41,46],resnet:[17,19,36,41],resnext101_32x8d:46,resnext50_32x4d:46,resolut:[22,23],resolv:[4,5,15,19,22,35,36],resourc:[13,21,44],respect:[1,14,15,22,37,39,42,43,44,47],respond:4,respons:[2,4,5,14,15,22,23,28],rest:[4,13,38],restart:[21,37,41],restor:[3,34,43],restrict:[13,19,22],restructur:4,result:[1,2,4,7,8,13,14,15,19,22,23,24,25,26,27,28,29,30,33,36,37,38,40,41,42,43,46,47],result_avg:47,resum:[37,41],retain:[1,21,32,42,43],retain_grad:[1,42],retain_graph:[1,42],rethink:46,retreiv:3,retriev:[1,13,22,23,31],return_count:[42,43],return_indic:[22,23],return_invers:[42,43],return_typ:43,reus:[1,14,25,47],reveal:38,revers:[15,19,22,25,42,43,47],revert:[5,22],review:5,reward:15,rewrit:25,rfft:[42,43],rfloor:[22,23,43],rgb:[22,41,46,47],rgba:47,rho:37,riba:22,richard:[6,15],richardson:6,right:[4,14,17,21,22,23,36,37,43,47],right_ankl:46,right_ear:46,right_elbow:46,right_ey:46,right_hip:46,right_kne:46,right_should:46,right_wrist:46,risk:4,riski:4,rmsprop:37,rng:[3,8,13,30,33,43],rnn:[19,30,36,41],robin:14,robust:21,roll:[42,43],roof:1,root:[25,38,43,44],ross:22,rot90:[42,43],rotat:[15,43,47],rough:4,roughli:[13,43],round:[14,36,42,43],round_:42,roundtrip:4,routin:43,row:[13,23,38,41,42,43,48],row_limit:1,rpn:46,rprop:37,rrelu:36,rrelu_:23,rsampl:15,rsqrt:[42,43],rsqrt_:42,rst:4,rsub:36,rtol:[1,19,42,43],rule:[1,14,15,19,22,25,26,42,43],run:[1,2,3,4,13,14,19,21,22,25,27,28,30,31,32,33,35,36,37,41,43],run_14h:41,run_fn:[1,3],runnabl:41,running_mean:[22,23],running_var:[22,23],runtim:[1,3,7,14,21,32],runtimeerror:[1,19,26,35,36,42,43],runtimewarn:15,rv0:19,rv1:19,rvert:43,rvert_p:[22,23],s_min:22,s_n:22,sacrif:46,safe:[8,19,22,31],safest:[7,38],sai:[4,19,30,36,42,44],sam:[5,6],same:[1,4,7,8,13,14,15,17,19,21,22,23,25,26,27,28,30,31,32,33,36,38,39,41,42,43,46,47,48],sampl:[13,15,22,23,24,31,33,41,42,44,47],sample_input_cpu:19,sample_input_gpu:19,sample_n:15,sample_r:41,sample_shap:15,sandwich:46,sane:43,satisfi:[1,12,15,22,37,42,43],satur:[28,47],saturation_factor:47,save:[1,3,4,14,19,20,22,25,32,36,37,41,42,43,44,48],save_for_backward:[1,29],save_imag:48,saved_tensor:[1,25,29],saved_weight:42,sax:24,sbd:45,sbdataset:44,sbu:4
5,sbucaptionedphotodataset:44,scalar:[1,19,22,23,24,36,37,38,41,42,43],scalar_valu:41,scale:[4,13,15,18,22,23,24,30,37,42,43,46,47,48],scale_each:48,scale_factor:[22,23],scale_fn:37,scale_grad_by_freq:[22,23],scale_mod:37,scale_tril:15,scatter:[8,14,22,30,36,42],scatter_:[36,42],scatter_add:[36,42],scatter_add_:[33,42],scatter_list:14,scenario:[13,28,36],scene:41,schedul:[31,37],schema:19,scheme:42,schmidtm:37,sci_mod:43,scientif:43,scipi:[23,41,44],scissor:46,scope:[4,19,22,30,36],score:[22,46],scrambl:43,scratch:[4,25],script:[2,13,14,17,22,27,31],script_method:[19,36],scripted_fn:19,scripted_modul:19,scriptmodul:[19,36],scrutini:4,search:[4,19,46],seat:5,sebastian:6,second:[1,3,7,19,22,23,30,33,34,35,38,41,43,47],section:[4,13,15,19,21,22,29,32,41,42],see:[1,2,3,4,5,7,8,9,10,11,13,15,17,19,20,21,22,23,25,28,29,30,31,32,35,36,38,40,42,43,46,47,48],seed:[8,13,30,33,43,44],seed_al:8,seek:[19,43],seem:[4,36,47],seen:[1,15,22,37,42,43],segfault:21,segment:[3,44,45,47],select:[8,11,12,13,19,21,22,23,27,28,36,42,43,44,46,47],self:[1,13,19,22,25,26,27,29,30,36,37,39,42,43],self_cpu_time_tot:1,selu:36,semant:[5,8,18,19,36,43,44,45,47],semi:[22,24],semidefinit:43,send:[4,8,13,14,21,32,35,43],sender:14,sens:[2,15,43],sensit:[22,36],sent:[8,14,21,32,43],separ:[1,7,13,14,17,19,22,23,27,37,41,43,48],seq:[1,22,42,43],seq_len:22,sequenc:[1,8,13,15,19,22,23,28,30,36,37,42,43,47],sequenti:[3,13,21,36],sequentialsampl:13,sequnc:22,seri:22,serial:[13,18,19,20,28,31,32],serializ:[19,36],seriou:[21,34],serr:44,serv:[4,14],server:[4,13],sess:36,set:[0,1,3,5,7,8,11,13,14,15,17,19,20,21,22,23,24,26,27,28,29,30,31,32,33,35,36,37,41,42,43,44,46,47],set_:[1,42],set_default_dtyp:43,set_default_tensor_typ:43,set_detect_anomali:1,set_devic:[8,22,40],set_dir:17,set_flush_denorm:43,set_grad_en:[1,43],set_image_backend:45,set_num_interop_thread:[27,43],set_num_thread:[27,43],set_printopt:43,set_rng_stat:[8,43],set_rng_state_al:8,set_sharing_strategi:21,set_start_method:32,set_stat:43,set_trac:19,set_train:36,setapiusagehandl:31,setapiusagelogg:31,setexportmoduleextrafileshook:31,setsamplingprob:31,setup:7,setuptool:7,sever:[14,19,22,23,27,28,31,37,43,47],sgd:[13,22,25,37],sgdr:37,sgn:43,sha256:20,shadow:47,shall:22,shallow:22,shamelessli:43,shape:[1,8,15,19,22,23,25,26,30,36,38,41,42,43,44,46,47,48],shape_as_tensor:36,shard:[4,13],share:[1,4,7,8,13,15,16,22,27,32,35,36,39,42,43],share_memori:32,share_memory_:[21,39,42],shared_memori:21,sharedfil:14,shazeer:22,shear:47,sheep:46,shell:7,shen:6,shi:22,shift:[22,42,43,47,48],ship:27,shippabl:4,shm_open:21,shorter:36,shortest:22,shorttensor:[40,42],should:[1,2,3,4,5,7,8,13,14,15,17,19,20,21,22,23,28,29,30,32,33,35,36,37,38,41,42,43,44,46,47],shouldn:38,shout:36,show:[0,2,4,13,14,17,27,28,37,41],showcas:[5,22,28,32],shown:[8,19,29,30],shrinkag:[22,23],shuffl:[13,41,44],shufflenet_v2_x0_5:46,shufflenet_v2_x1_0:46,shufflenet_v2_x1_5:46,shufflenet_v2_x2_0:46,shufflenetv2:46,shut:13,side:[1,7,17,19,22,23,36,37,43,47],sigma:[15,22,23,42],sigmoid:[15,24,36,42,43],sigmoid_:42,sigmoidtransform:15,sign:[4,15,36,40,42,43,46],sign_:42,signal:[21,22,23,32,43],signal_2d:22,signal_4d:22,signal_ndim:[42,43],signal_s:[42,43],signatur:[1,13,22,42,43],signific:[1,25,28,37],significantli:22,silent:[8,19,22,43],sim:[22,23,43],similar:[4,13,19,21,22,23,27,29,38,42,43,44,49],similarli:[4,19,22,30,36,43],simon:6,simpl:[17,19,22,23,29,30,31,33,36],simplecustombatch:13,simplequeu:32,simpler:29,simplest:22,simplex:15,simpli:[1,7,13,15,19,22,25,38],simplifi:[19,22,37],simultan:25,sin:[7,36,41,42,43],sin
_:42,sin_add:7,sinc:[4,8,13,14,15,19,22,23,29,30,31,35,36,37,38,43,47],sine:43,singl:[7,14,15,17,19,21,22,23,25,27,28,29,32,36,37,39,40,42,43,47],singleton:[15,22,26,42,43],singular:43,sinh:[42,43],sinh_:42,sink:46,site:4,situat:[15,21,32],size:[1,4,8,13,14,15,19,22,23,25,26,28,29,30,31,36,37,38,39,41,42,43,44,46,47,48],size_averag:[22,23],sizedim:42,sizeof:39,skateboard:46,skew:[1,2],ski:46,skip:[29,37],sky:44,slack:4,slice:[19,22,23,36,42],slide:[22,23,43],slightli:[5,13,17,46],slogdet:[42,43],slope:[22,24],slow:[32,41],slower:[2,14,23,46],small:[1,4,5,8,13,14,15,19,22,23,28,29,30,43],smaller:[13,37,42,43,47],smallest:[38,43,49],smart:29,smessmer:6,smi:[8,28,30],smnt:44,smoke:44,smooth:[22,36,37],smoother:17,smoothl1loss:23,snd_tensor:41,snedecor:15,snippet:17,snow:44,snowboard:46,snowi:44,sobol:43,soboleng:43,sobolengin:43,socket:21,sofa:46,soft:[22,23,46],softmarginloss:23,softmax:[15,36],softmaxtransform:15,softplu:36,softshrinkag:22,softwar:37,solid:47,solut:[4,24,32,43],solv:[4,35,42,43],solver:43,some:[1,3,4,5,8,11,14,15,17,19,21,22,23,25,28,29,30,31,32,33,34,35,36,37,38,41,42,43,44,46,47],some_dict:19,someon:4,someth:[4,19,21,35,43],sometim:[4,19,21,22,23,30,32,43],somewher:31,sophist:37,sort:[1,22,30,42,43],sort_bi:1,sorted_indic:22,soumith:[5,6],sound:41,sourc:[0,1,2,3,7,8,13,14,15,17,19,21,22,23,24,30,31,33,36,37,38,39,41,42,43,44,45,46,47,48],space:[13,15,19,22,23,43,47],spadd:38,span:[8,22,42],spars:[1,18,24,37,40,42,43],sparse_:24,sparse_coo:[38,40,42,43],sparse_coo_tensor:[38,42,43],sparse_dim:[38,42,43],sparse_grad:43,sparse_mask:[38,42],sparseadam:[22,37],sparseaddmmbackward:38,sparsedim:42,sparsefloattensor:38,sparsetensor:[1,38,42,43],sparsiti:24,spatia:23,spatial:[22,23],spatio:22,spawn:[13,22,32,35],spawncontext:21,speak:[38,43],special:[4,22,29,31,41,43],specif:[1,3,4,5,7,8,14,15,17,19,20,22,28,32,33,34,36,37,41,42,43],specifi:[1,7,8,13,14,15,17,19,20,22,23,28,29,31,35,36,37,38,39,41,42,43,44,45,47,48],specifii:36,spectral:22,speed:[4,22,27,28,30,33,43],spend:[2,4],spent:[1,2,14],sphinx:4,split:[4,13,19,22,23,36,42,43,44],split_siz:[42,43],split_size_or_sect:43,spmm:38,sponsorship:5,spoon:46,sport:46,spotri:43,spread:[8,28],sqrt:[22,24,36,38,42,43],sqrt_:42,squar:[22,23,37,38,43,47],squeez:[29,36,38,42,43],squeeze_:42,squeezenet1_0:46,squeezenet1_1:46,src:[8,14,22,42,43],src_key_padding_mask:22,src_mask:22,src_tensor:14,src_vocab:22,srinivasan:6,ssa:19,sse3:43,ssnl:6,sspaddmm:38,sspmm:38,stabil:[22,37,43],stabl:[15,22,35,36,43],stack:[8,13,15,22,28,36,43,47],stacktransform:15,stage:4,stagnat:37,stai:[4,22,32],stand:19,standalon:19,standard:[4,15,19,22,24,27,32,36,42,43,44,47],star:22,start:[1,2,5,8,13,14,21,22,23,26,28,30,32,35,36,37,42,43],start_dim:[42,43],startpoint:47,startup:[2,27],stash:[1,3,29],stat:22,state:[1,3,8,13,15,19,22,28,32,35,37,43],state_dict:[17,20,22,32,34,36,37],statement:[25,29,32,36],staticmethod:[1,29],statist:[8,15,22,30],statu:[21,43],std:[7,24,31,35,42,43,46,47],std_mean:43,stddev:15,stderr:[20,46],stdin:1,stdout:37,step:[2,5,7,13,14,15,19,22,23,28,30,32,33,35,41,42,43,44],step_between_clip:44,step_siz:37,step_size_down:37,step_size_up:37,steplr:37,stft:[42,43],stick:15,stickbreakingtransform:15,still:[1,14,15,19,21,22,28,30,35,37,43],stirl:[22,23],stl10:45,stl10_binari:44,stochast:[13,15,22,37],stop:[8,15,22,37,43,46],storag:[1,8,18,19,20,21,22,25,28,32,40,42,43],storage_offset:[42,43],storage_typ:42,storageshar:35,store:[1,3,7,14,17,19,22,30,31,38,41,42,43,44],store_tru:28,str:[1,7,14,19,21,22,23,37,39,41,42,44,47],straight:23,strategi:[4,
13,14,19,22],stream:[13,44],strict:[19,22],strictli:[5,13,22,25],stride:[1,22,23,36,40,41,42,43],strike:4,string:[0,1,7,8,14,17,19,20,22,23,31,36,39,40,41,42,43,44,45],stringio:[19,43],strip:[23,36],strip_doc_str:36,strive:4,strong:5,strong_wolf:37,strongli:[5,17,22,27],struct:31,structur:[4,5,13,19,22,28,29,32,34,35,36,41,42,43],student:15,studio:35,style:[19,43],styliz:22,sub:[19,22,36,38,42,43],sub_:[38,42],subclass:[1,7,13,15,19,22,29,42,44],subdir:44,subfold:7,subgradi:37,subgraph:22,subject:43,submatrix:15,submit:8,submodul:[19,22,36],subpackag:46,subprocess:[13,30,32],subsequ:[4,7,19,22],subset:[13,14,19,22,36,46],subsetrandomsampl:13,subspac:[22,42,43],substanti:5,substitut:40,subsystem:4,subtl:[4,22],subtleti:[13,22,30],subtli:37,subtract:[23,42,47],subtyp:19,succe:[14,35],succeed:43,success:[5,15,43],successfulli:[21,22,43],succinct:17,suffici:[7,15,17,36,43],suffix:[41,42],sugar:19,suggest:[5,17,19,22,30],suhan:6,suit:[19,36],suitabl:[13,15,37],suitcas:46,suitibl:41,sum:[1,8,13,14,15,23,28,29,36,37,38,42,43],sum_:[22,43],sum_i:22,sum_j:[22,23,43],sum_pair:19,sum_to_s:42,summar:[2,43],summari:[1,41,43,46],summarywrit:41,summat:43,sunset:[5,6],suo:6,superresolut:36,supervis:22,suppli:[3,4,7],support:[1,4,5,7,8,13,14,15,17,19,21,22,23,25,26,27,32,35,37,38,40,41,42,43,44,45],suppos:[13,38,43,47],sure:[1,4,13,14,19,22,25,30,35,36,37,41,43],surfboard:46,surg:43,surpass:24,surpris:17,surrog:15,surround:19,sutskev:37,svd:[42,43,47],svhn:45,svi:15,swap:[22,23,42,43],symbol:[35,36],symbolic_fn:36,symbolic_foo_forward:36,symbolic_help:36,symbolic_nam:36,symbolic_opset10:36,symbolic_opset9:36,symbolic_opset:36,symeig:[42,43],symmetr:[43,47],symmetri:43,sync:22,sync_bn_modul:22,sync_bn_network:22,synchron:[2,8,22,27,28,32],syntact:5,syntax:19,sys:17,system:[4,7,8,19,22,25,28,31,35,43],t4d:23,t_max:37,tabl:[1,14,19,22,23,46],tag:[1,4,14,17,31,41,43],tag_nam:17,tag_scalar_dict:41,taiwan:41,take:[1,2,4,5,7,8,13,15,17,19,21,22,23,27,29,30,33,35,36,40,41,42,43,44],taken:[15,22,23,28,30,31,36,43,44],talk:31,tall:42,tan:[36,41,42,43],tan_:42,tangent:43,tanh:[24,36,42,43],tanh_:42,tanx:41,tape:4,tar:44,tarbal:44,target:[22,23,32,37,41,42,44,46,47],target_length:[22,23],target_n:22,target_transform:44,target_typ:44,task:[1,4,22,27,35,46,47],tau:[23,43],tbb:27,tdr:35,team:[4,5],technic:[4,5,30],techniqu:22,teddi:46,tell:[1,4,19,42,43],temperatur:[15,23],tempor:[22,23],temporari:[7,22,30],temporarili:36,ten:[19,41],ten_crop:47,tencrop:47,tend:4,teng:6,tenni:46,tensor1:[42,43],tensor2:[42,43],tensor:[3,4,7,8,13,14,15,16,18,19,22,23,24,25,26,27,28,29,30,32,33,35,36,37,38,39,41,44,45,46,48],tensor_a:43,tensor_b:43,tensor_list:14,tensorboard:18,tensordataset:13,tensordot:43,tensorflow:[15,41],term:[5,15,22,23,30,37,42,43],termin:[21,37],terminolog:22,test10k:44,test50k:44,test:[7,19,21,28,29,41,43,44,46,47],text:[4,15,22,23,24,41,42,43,47],text_str:41,texttt:[42,43],tgt:[13,22],tgt_key_padding_mask:22,tgt_mask:22,tgt_vocab:22,thalloc:35,than:[1,2,3,5,7,8,13,14,17,19,22,23,24,25,26,28,29,30,32,33,36,37,38,41,42,43,44,45,46,47,48],thank:[15,29],thc:35,thc_state:35,thcstate:35,thcudacheck:35,thcudatensor:35,thcudatensor_cadd:35,thcudatensor_fil:35,thcudatensor_issamesizea:35,thcudatensor_resizea:35,the_model:34,thei:[1,3,4,5,8,13,14,15,19,21,22,23,28,29,32,35,36,37,38,42,43,44,46,47],them:[1,3,4,13,14,17,19,21,22,23,25,26,29,30,31,35,37,38,41,42,43,44],themodelclass:34,themselv:[1,43],therebi:13,therefor:[1,3,13,14,15,19,22,23,30,31,37,38,42,43,47],theta:[15,23],thi:[1,2,3,4,5,7,8,9,10,11,13,14,15,17,19,21,22,
23,24,25,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49],thin:43,thing:[1,4,23,25,30,32,38],think:4,third:[15,22,43],thoma:6,those:[1,2,8,13,19,22,23,28,37,43],though:[14,19,32],thrash:14,thread:[1,8,13,14,18,19,22,31,32,36,43],three:[14,19,22,36,37,41,44],threej:41,threshold:[36,37,41,43,46],threshold_:23,threshold_mod:37,through:[1,3,4,5,13,15,17,19,21,22,23,30,35,42,43,49],throughout:22,thrown:[42,43],thtensor:42,thtensorrandom:43,thu:[1,13,14,15,19,22,23,30,31,36,42,43],thumb:14,tie:[15,46],tile:42,time:[1,2,4,7,8,13,14,15,19,21,22,23,24,25,27,30,31,32,35,36,37,38,41,42,43,46,47],timedelta:14,timelin:[1,2],timeout:[13,14,21],tini:[42,49],tip:4,tl_flip:47,tmp:[1,7],to_dens:38,to_dlpack:16,to_grayscal:47,to_mkldnn:42,to_pil_imag:47,to_spars:[38,42],to_tensor:47,toaster:46,todens:38,togeth:[13,14,15,22,30,31,41,43,47],toilet:46,token:17,tol:43,toler:[1,19,37,43],tolerance_chang:37,tolerance_grad:37,tolist:[39,42],too:[4,22,23,30,32,35,38],tool:[1,2,5,17,19,35],toothbrush:46,top:[1,13,15,21,22,23,29,43,44,46,47],topic:[5,31],topilimag:47,topk:[36,42,43],topolog:1,torch:[5,11,12,18,19,25,26,27,28,30,31,32,33,34,35,44,45,46],torch_14808_1591070686:35,torch_extens:7,torch_extensions_dir:7,torch_hom:[17,20],torch_model_zoo:46,torch_shm_manag:21,torchscript:[18,36],torchvis:[17,18,19,25,36,41],toronto:22,total:[1,2,4,13,17,22,23,37,43],total_averag:1,total_count:15,total_length:[22,30],total_loss:30,totensor:[41,44,47],touch:[4,36],toward:[5,36,43],tr_flip:47,trace:[1,4,13,25,28,42,43],trace_modul:19,trace_nam:1,traceabl:19,traceback:[1,21],traced_bar:19,traced_cpu:19,traced_fn:19,traced_foo:19,traced_gpu:19,tracer:36,tracerwarn:19,track:[1,3,8,21,22,28,29,30,31,42],track_running_stat:22,tracker:[4,5],trade:[3,22],traffic:46,trail:[22,24,26,29],train2017:46,train:[8,13,14,17,19,22,23,24,25,30,36,37,41,44,46,47],train_batch:37,train_extra:44,train_load:28,train_nov:44,trainabl:37,trainload:41,trainset:41,trainval:44,tranform:15,transb:36,transfer:[13,14,21,28],transform:[13,18,23,25,41,43,44,45,46],transform_input:46,transform_to:15,transformation_matrix:47,transformed_distribut:15,transformer_decod:22,transformer_encod:22,transformer_model:22,transit:19,translat:47,transpos:[22,23,25,36,38,42,43],transpose_:[1,38,42],transposed_data:13,trapezoid:43,trapz:43,travers:[22,29],treat:[15,19,22,23,37,40,42,43],tree:[4,19,22,44],tri:[4,19,21,22,42],triag:5,trial:15,triangl:41,triangular2:37,triangular:[15,23,37,43],triangular_solv:[42,43],trick:[15,22,23,31,46],tricki:25,trigger:[1,4,5,31,42,43],tril:[42,43],tril_:42,tril_indic:43,trilinear:[22,23],trim:43,tripl:23,triplet:22,triplet_loss:22,tripletmarginloss:23,triu:[42,43],triu_:42,triu_indic:43,trivial:43,trou:22,troubleshoot:4,truck:46,truli:36,trunc:[42,43],trunc_:42,truncat:[30,43],truth:[41,46],tseq:15,tune:[14,37],tup:19,tupl:[1,3,8,13,21,22,23,29,36,37,38,41,42,43,44,47,48],tuple_or_list:19,turn:[7,13,19,22,36],tutori:[5,29,31,36],tvmonitor:46,twice:[30,46],two:[1,2,7,8,13,14,15,17,19,22,23,25,26,27,28,29,33,34,35,37,38,41,42,43,44,46],twse:41,txhxwxc:44,txt:19,type:[1,7,8,14,15,17,18,22,23,27,28,31,35,36,38,39,40,41,42,43,44,46,47],type_a:[36,42],type_p:15,type_q:15,typic:[7,13,15,19,22,27,49],typo:4,ubc:37,ucf101:45,ucf:44,uint8:[40,41,42,43,47,49],uint8_t:42,uint8tensor:46,ultim:[5,7],umbrella:46,unabl:[4,37],unbalanc:22,unbatch:43,unbias:[42,43],unbind:[36,42,43],unchang:[22,42,43,47],uncoalesc:[38,43],uncondition:17,unconstrain:15,undefin:[14,19,28,42,43],under:[1,2,13,21,22,23,25,28,32,35,41,43],underli:[8,15,19,23,30,
42,43],underscor:[17,38,42],understand:[4,5,22,24,25,41],understood:43,undertak:4,underwai:1,undesir:[11,22,23],undetermin:33,unequ:22,unexpect:[14,19,22],unexpected_kei:22,unexpectedli:42,unfold:[19,36,42],unfortun:[1,3,5,22],unicodedecodeerror:43,uniform:[22,24,42,43],uniform_:[24,29,42,43],uniformli:[15,43,47],uniniti:[42,43],uniqu:[14,19,20,42,43],unique_consecut:[42,43],unit:[22,23,43],unit_interv:15,unitari:43,unitriangular:[42,43],univari:15,univers:19,unix:[13,21],unlabel:44,unless:[1,2,4,13,14,22,25,28,42,43],unlik:[4,19,21,22,32,42],unmask:22,unnecessari:28,unnorm:[22,23],unnot:21,unoccupi:8,unord:22,unpack:[22,29,30,43],unpack_data:43,unpack_pivot:43,unpickl:[13,43],unpool:22,unpooled_output:22,unreduc:22,unrel:4,unresolv:35,unrol:[19,36],unseg:22,unsign:[40,42],unsort:22,unsorted_indic:22,unspecifi:[14,42,43],unsqueez:[22,29,36,41,42,43],unsqueeze_:42,unstabl:[15,23,43],unsuccess:14,unsupport:19,until:[4,8,14,21,23,25,28,30,41],untouch:13,untrack:19,untrain:36,unus:[8,22,28],unused_argument1:22,unused_argument2:22,unusu:4,upcal:19,upcom:14,updat:[1,5,17,19,22,32,35,36,37,41,42],upgrad:37,upon:[13,21],upper:[15,22,23,24,37,42,43,47],upper_bound:15,uppercas:14,ups:4,upsample_nearest1d:36,upsample_nearest2d:36,upsample_nearest3d:36,upsample_trilinear:23,upscal:22,upscale_factor:[22,23],upstream:35,url:[1,14,17,20],usa:41,usag:[1,2,4,8,13,15,25,30,36,41,42,43],use:[1,3,4,5,7,8,13,15,17,19,21,22,23,24,25,27,28,29,30,31,32,33,35,36,41,42,43,44,46],use_cuda:1,use_gpu:19,use_input_stat:23,use_mkldnn:27,use_openmp:27,use_tbb:27,used:[1,2,4,7,8,12,13,14,15,17,19,20,22,23,24,27,28,29,31,32,34,35,36,37,38,40,41,42,43,44,45,46,47,48],useful:[1,4,13,15,17,19,22,23,25,27,29,31,37,43,47],user:[1,3,8,13,14,15,17,21,22,28,31,32,36,41,42,43,47],userwarn:[26,36],uses:[1,2,8,13,14,17,19,22,23,27,28,29,30,31,35,37,43,44,45],using:[1,3,4,5,7,9,10,11,13,14,15,17,18,19,21,22,23,24,25,27,28,29,30,31,32,33,35,36,37,41,42,43,44,46,47],usp:45,usual:[1,4,7,13,19,22,27,30,31,36,41,42,43],uszkoreit:22,util:[4,8,18,26,27,28,29,30,31,36,37,44,45,46],v100:[12,22,46],v_1:22,v_2:22,val2017:46,val:[24,42,44],val_loss:37,valid:[1,14,15,19,22,36,37,43,44],valid_fil:44,validate_arg:15,valu:[1,3,4,5,7,13,14,15,17,20,21,22,23,24,25,27,28,29,30,33,36,37,38,41,42,43,44,46,47,48],valueerror:22,var1:37,var2:37,var_mean:43,vari:[22,37],variabl:[3,7,8,15,17,20,22,27,28,30,33,35,36,37,41,42,43,46],variabletyp:36,varianc:[15,22,24,33,37,43],variant:[31,37,43],variat:15,variou:[3,5,7,13,21,32,34,37],vase:46,vaswani:22,vc2017:35,vdim:22,vec1:[42,43],vec2:[42,43],vec:[22,42,43],vector:[1,15,22,23,41,42,43,44,47],vehicl:5,vein:19,veloc:37,verbos:[7,36,37,41],veri:[1,2,4,19,21,22,25,29,30,32,35,36,37,46],verifi:[7,19,20,29,36],verify_ninja_avail:7,versa:[22,39,42,43],version:[3,15,17,19,22,23,25,26,28,29,31,35,36,37,42,43,44,47],versu:[4,22],vert:[22,23],vertex:41,vertic:[41,47],vertical_flip:47,vertices_tensor:41,vflip:47,vgg11:46,vgg11_bn:46,vgg13:46,vgg13_bn:46,vgg16:46,vgg16_bn:46,vgg19:46,vgg19_bn:46,vgg:36,via:[1,4,7,8,13,14,15,19,21,22,24,30,32,36,37,40,43],vice:[22,39,42,43],vid_tensor:41,video:[22,41,44],videoclip:44,viehmann:6,view:[1,4,5,13,19,21,22,23,26,36,40,42,43,44,47],view_a:42,violat:5,virtual:36,vishwak:6,vishwakftw:6,visibl:[8,14,22,46],vision:[5,17,45,46],visual:[1,22,35,41],vitali:6,vitalyfedyunin:6,voc2012:44,voc:[45,46],vocdetect:44,vocsegment:44,volumetr:[22,23],vs2017:35,vs2017_runtim:35,vw_i:22,vychisl:43,w_hf:22,w_hg:22,w_hi:22,w_hn:22,w_ho:22,w_hr:22,w_hz:22,w_if:22,w_ig:22,w_ii:22,w_in:22,w_io:22,w_ir:22
,w_iz:22,w_j:22,w_n:22,w_y:27,w_z:27,wai:[1,3,4,5,7,13,14,15,19,21,22,23,29,30,32,33,34,35,36,37,38,42,43,44,46],wait:[1,8,14,21,22,27,37],wait_ev:8,wait_stream:[8,28],walltim:41,wang:6,want:[4,8,13,14,17,19,22,23,25,28,29,36,37,38,42,43],warm:37,warmup:1,warn:[26,36,43],wasn:43,wast:4,weaker:15,weight:[13,17,19,23,24,25,29,30,36,37,41,42,43,46],weight_decai:37,weight_g:22,weight_hh:22,weight_hh_l:22,weight_ih:22,weight_ih_l:22,weight_u:22,weight_v:22,weighted_kernel_sum:19,weightedrandomsampl:13,weird:[22,46],welcom:4,well:[1,4,7,13,14,19,22,23,25,32,33,36,38,41,43,46],were:[1,14,19,22,29,36,38,42,43],what:[1,3,4,5,15,17,19,22,25,29,36,37,38,44],whatev:[42,43],wheel:35,when:[1,2,4,5,7,8,9,10,11,13,14,15,17,19,21,22,23,25,26,28,29,30,31,32,33,34,35,36,37,38,41,42,43,44,46,47],whenev:[5,21,22,23],where:[1,2,4,5,7,13,14,15,19,20,21,22,23,24,25,26,28,33,36,37,38,40,41,42,43,44,46,48],wherev:5,whether:[1,4,5,7,13,14,15,17,20,22,23,28,29,36,37,38,39,41,42,43,44],which:[1,2,4,5,7,8,13,15,17,19,20,21,22,23,25,26,27,28,30,31,33,35,36,37,38,39,40,41,42,43,44,46,47,49],whilst:[15,28],white:47,whiten:47,who:4,whole:[13,14,22,32],whose:[13,15,22,25,36,41,43,44],why:[4,36],wide:27,wide_resnet101_2:46,wide_resnet50_2:46,width:[15,22,23,36,43,47],wikipedia:23,willing:5,win:43,win_length:[42,43],window:[13,18,22,23,42,43],window_length:43,wine:46,wip:4,wise:[14,15,22,23,27,43],wish:19,wit:19,with_cuda:[7,35],with_replac:43,within:[1,3,5,8,13,14,15,19,22,27,28,31,36,41,43],without:[1,3,4,5,8,13,15,17,19,21,22,23,26,28,32,36,41,42,43,46,47,49],won:[3,17,22,23,25,29,36,43],woodburi:15,word:[1,14,19,22,23,30],word_language_model:36,work:[1,3,4,5,7,8,14,15,17,19,21,22,23,25,27,28,29,32,35,38,42,43],worker:[4,13,14,22,44],worker_id:13,worker_info:13,worker_init_fn:[13,30],workflow:17,workground:35,workload:[13,14,31],workspac:[7,36],world:[14,22],world_siz:[14,22],worth:17,would:[1,3,5,13,14,15,19,22,26,27,28,36,38,42,43],wrap:[1,13,19,22,35,37,42],wrapper:[8,14,19,21,22,29],write:[4,5,19,23,25,28,30,36,37,38,41,42,43],writer:41,written:[1,19,22,36,37,39,41,43],wrong:[32,35,37],wrote:4,www:[22,37,41,44],x86:43,x86_x64:35,x_0:43,x_1:[22,23,43],x_2:[22,23,43],x_3:43,x_clone:21,x_cpu:28,x_cpu_long:28,x_gpu:28,x_i:[22,23,43],x_j:[22,23],x_n:22,x_t:22,xavier_normal_:24,xavier_uniform_:24,xcosx:41,xdg_cache_hom:[17,20],xing:43,xml:44,xsinx:41,xxx:44,xxy:44,xxz:44,y_cpu:28,y_cpu_long:28,y_gpu:28,y_hard:23,y_i:[22,43],y_n:22,y_soft:23,yang:[5,6],ycbcr:47,year:44,yes:4,yet:[8,43],yf225:6,yield:[13,22,43],yinghai:6,you:[1,2,3,4,5,7,8,11,13,14,15,17,19,21,22,23,25,26,28,29,30,31,32,33,35,36,37,38,40,41,42,43,44,46,47],your:[1,2,4,7,8,13,14,15,19,21,22,25,26,28,29,30,31,33,35,36,37,38,41,42,43,46,47],yourself:[32,35],z_t:22,zach:6,zdevito:6,zebra:46,zero:[1,8,15,19,21,22,23,24,28,35,36,38,41,42,43,47],zero_:[1,23,38,42],zero_grad:[22,30,32,37],zero_infin:[22,23],zero_point:42,zeros_:24,zeros_lik:[28,36,43],zhang:6,zhong:47,zip:[13,31],zipf:22,zou3519:6,zou:6},titles:["torch.__config__","Automatic differentiation package - torch.autograd","torch.utils.bottleneck","torch.utils.checkpoint","PyTorch Contribution Guide","PyTorch Governance","PyTorch Governance | Persons of Interest","torch.utils.cpp_extension","torch.cuda","<no title>","<no title>","<no title>","<no title>","torch.utils.data","Distributed communication package - torch.distributed","Probability distributions - torch.distributions","torch.utils.dlpack","torch.hub","PyTorch documentation","TorchScript","torch.utils.model_zoo","Multiprocessing package - 
torch.multiprocessing","torch.nn","torch.nn.functional","torch.nn.init","Autograd mechanics","Broadcasting semantics","CPU threading and TorchScript inference","CUDA semantics","Extending PyTorch","Frequently Asked Questions","Features for large-scale deployments","Multiprocessing best practices","Reproducibility","Serialization semantics","Windows FAQ","torch.onnx","torch.optim","torch.sparse","torch.Storage","Tensor Attributes","torch.utils.tensorboard","torch.Tensor","torch","torchvision.datasets","torchvision","torchvision.models","torchvision.transforms","torchvision.utils","Type Info"],titleterms:{"case":[4,19],"default":[13,19],"function":[1,5,14,15,19,22,23,36,38,47],"import":[17,35],"new":4,"return":30,Adding:[4,29,36],One:35,Ops:43,The:4,Use:[19,28],__config__:0,about:4,access:19,activ:[22,23],adaptive_avg_pool1d:23,adaptive_avg_pool2d:23,adaptive_avg_pool3d:23,adaptive_max_pool1d:23,adaptive_max_pool2d:23,adaptive_max_pool3d:23,adaptiveavgpool1d:22,adaptiveavgpool2d:22,adaptiveavgpool3d:22,adaptivelogsoftmaxwithloss:22,adaptivemaxpool1d:22,adaptivemaxpool2d:22,adaptivemaxpool3d:22,adjust:37,affine_grid:23,agnost:28,alexnet:[36,46],algorithm:37,alpha_dropout:23,alphadropout:22,amd:6,anomali:1,api:[27,31],approach:34,arithmet:19,ask:[19,30,36],asynchron:[14,28,32],aten:36,attach:31,attribut:[19,40],autograd:[1,6,25,29],automat:[1,13,19],avg_pool1d:23,avg_pool2d:23,avg_pool3d:23,avgpool1d:22,avgpool2d:22,avgpool3d:22,avoid:[4,32],backend:14,backward:[25,26],basic:14,batch:13,batch_norm:23,batchnorm1d:22,batchnorm2d:22,batchnorm3d:22,bceloss:22,bcewithlogitsloss:22,behavior:13,bernoulli:15,best:[28,32,34],beta:15,bilinear:[22,23],binary_cross_entropi:23,binary_cross_entropy_with_logit:23,binomi:15,bla:43,bottleneck:2,broadcast:26,broken:35,buffer:[28,32],bug:4,build:[4,6,27,31,35],builtin:19,c10:6,cach:[17,28],call:19,caption:44,cast:19,categor:15,cauchi:15,celu:[22,23],cffi:35,chang:5,channel:35,characterist:46,check:[1,19,25],checkpoint:3,chi2:15,choos:14,cifar:44,cityscap:44,classif:46,claus:35,clip_grad_norm_:22,clip_grad_value_:22,closur:37,cnn:46,coco:44,code:[4,19,28],codebas:4,collate_fn:13,collect:[8,14],come:14,common:[4,14,31],commun:[8,14],comparison:[19,43],compat:26,compon:35,comput:[1,43],consider:31,constant:19,constantpad1d:22,constantpad2d:22,constantpad3d:22,constraint:15,construct:[19,37],contain:22,contribut:4,controversi:5,conv1d:[22,23],conv2d:[22,23],conv3d:[22,23],conv_transpose1d:23,conv_transpose2d:23,conv_transpose3d:23,convers:47,convolut:[22,23,46],convtranspose1d:22,convtranspose2d:22,convtranspose3d:22,core:5,correct:[1,25],cosine_embedding_loss:23,cosine_similar:23,cosineembeddingloss:22,cosinesimilar:22,cpp:35,cpp_extens:7,cpu:[6,27],creat:19,creation:43,cross_entropi:23,crossentropyloss:22,ctc_loss:23,ctcloss:22,cuda:[6,8,21,28,30,32,35],cudnn:33,cufft:28,custom:[29,36],data:[13,30],data_parallel:23,dataload:6,dataparallel:[22,23,28],dataset:[13,44],datasetfold:44,deadlock:32,debug:19,decis:5,deeplabv3:46,defin:19,densenet:46,deploy:31,deprec:1,deriv:15,descriptor:21,detect:[1,44,46],develop:[4,5],devic:[28,40],dict:19,differenti:1,dirichlet:15,disabl:[1,13,19,43],discuss:4,distanc:[22,23],distribut:[6,14,15,22,23],distributeddataparallel:22,diverg:15,dlpack:16,doc:4,document:[4,18],doesn:30,down:35,download:17,driver:35,dropout2d:[22,23],dropout3d:[22,23],dropout:[22,23],dtype:40,edg:19,elu:[22,23],embed:[22,23],embedding_bag:23,embeddingbag:22,emnist:44,encod:25,end:36,engin:6,entrypoint:17,environ:[14,31],error:[30,35],event:8,exampl:36,exclud:25
,execut:28,exponenti:15,exponentialfamili:15,express:19,extend:29,extens:[8,29,31,35],fakedata:44,faq:[5,35],fashion:44,faster:46,featur:[4,31],fight:32,file:[14,21],file_descriptor:21,file_system:21,finfo:49,fishersnedecor:15,fix:4,flatten:22,fleet:31,flickr:44,fold:[22,23],found:35,fractionalmaxpool2d:22,freed:30,frequent:[19,30,36],from:[17,25,35,36],fulli:46,gamma:15,gelu:23,gener:[6,8,26,43,47],geometr:15,get:4,glu:23,googlenet:46,govern:[5,6],gpu:[14,22,23,30],gradient:[1,43],graph:19,grid_sampl:23,group:14,groupnorm:22,gru:22,grucel:22,guid:[4,5],gumbel:15,gumbel_softmax:23,halfcauchi:15,halfnorm:15,hardshrink:[22,23],hardtanh:[22,23],hinge_embedding_loss:23,hingeembeddingloss:22,hip:6,histori:25,hmdb51:44,hogwild:32,how:[17,25,37],hub:17,ident:[22,30],iinfo:49,imag:47,imagefold:44,imagenet:44,implement:[4,17],improv:4,incept:46,includ:35,independ:15,index:43,indic:18,infer:27,info:49,init:24,initi:14,inspect:19,instal:35,instanc:46,instance_norm:23,instancenorm1d:22,instancenorm2d:22,instancenorm3d:22,instead:28,interest:6,interfac:14,interpol:23,interpret:19,ipc:35,isn:30,issu:4,iter:13,jit:[6,19],join:43,kei:[5,35],keypoint:46,kinet:44,kl_div:23,kldivloss:22,kmnist:44,known:17,l1_loss:23,l1loss:22,languag:19,lapack:43,laplac:15,larg:31,launch:14,layer:22,layer_norm:23,layernorm:22,layout:40,leaky_relu:23,leakyrelu:22,learn:37,level:6,limit:[17,36],linear:[22,23],list:19,liter:19,load:[13,17],loader:30,local:[1,43],local_response_norm:23,localresponsenorm:22,log:31,log_softmax:23,logic:[17,19],logitrelaxedbernoulli:15,lognorm:15,logsigmoid:[22,23],logsoftmax:22,lookup:19,loss:[22,23],lowrankmultivariatenorm:15,lp_pool1d:23,lp_pool2d:23,lppool1d:22,lppool2d:22,lstm:22,lstmcell:22,lsun:44,maintain:[5,6],make:[4,5],manag:[8,21,28],map:13,margin_ranking_loss:23,marginrankingloss:22,mask:46,math:43,max_pool1d:23,max_pool2d:23,max_pool3d:23,max_unpool1d:23,max_unpool2d:23,max_unpool3d:23,maxpool1d:22,maxpool2d:22,maxpool3d:22,maxunpool1d:22,maxunpool2d:22,maxunpool3d:22,mechan:25,memori:[8,13,28,30],metadata:31,method:19,mistak:4,mix:19,mkldnn:6,mnasnet:46,mnist:44,mobilenet:46,model:[17,30,31,34,46],model_zoo:20,moder:5,modul:[6,19,22,29],moduledict:22,modulelist:22,more:4,mse_loss:23,mseloss:22,multi:[13,14,22,23],multi_margin_loss:23,multiheadattent:22,multilabel_margin_loss:23,multilabel_soft_margin_loss:23,multilabelmarginloss:22,multilabelsoftmarginloss:22,multimarginloss:22,multinomi:15,multiprocess:[6,21,28,32,35],multivariatenorm:15,mutat:43,nccl:14,negativebinomi:15,network:[14,30,46],nll_loss:23,nllloss:22,non:[13,22,23,36],nonlinear:22,normal:[15,22,23],notic:17,number:[8,30],numer:1,numpi:33,nvidia:8,nvtx:8,object:46,one_hot:23,onehotcategor:15,onlin:4,onnx:[6,36],open:4,oper:[1,14,19,25,31,35,36,43],optim:37,option:[19,27,35,37],order:13,other:[14,22,43],out:30,overview:4,pack_padded_sequ:22,pack_sequ:22,packag:[1,14,21,35],packedsequ:22,pad:[22,23],pad_packed_sequ:22,pad_sequ:22,pairwise_dist:23,pairwisedist:22,parallel:[30,43],paramet:[19,22,37],parameterdict:22,parameterlist:22,parameters_to_vector:22,pareto:15,particip:4,pass:32,pathwis:15,pdist:23,peopl:5,per:37,perform:6,person:[6,46],philosophi:5,phototour:44,pil:47,pin:[13,28],pipe:35,pixel_shuffl:23,pixelshuffl:22,place:[1,25,26,43],plan:28,platform:13,point:[14,31],pointwis:43,poisson:15,poisson_nll_loss:23,poissonnllloss:22,pool:[22,23],ppc:6,practic:[28,32,34],prelu:[22,23],probabl:15,process:[4,5,13],profil:[1,31],project:5,promot:4,properli:30,propos:4,protect:35,publish:17,pull:4,python:[4,19,35],pytorch:[4,5
,6,14,18,29,33,36],qmnist:44,quasi:43,question:[19,30,36],queue:32,random:[8,13,30,43],rate:37,readabl:4,recommend:34,recurr:[22,30],reduct:43,refer:19,refin:19,reflectionpad1d:22,reflectionpad2d:22,registri:15,relaxedbernoulli:15,relaxedonehotcategor:15,relu6:[22,23],relu:[22,23],remove_spectral_norm:22,remove_weight_norm:22,replicationpad1d:22,replicationpad2d:22,replicationpad3d:22,report:[4,30],reproduc:33,request:4,requires_grad:25,resnet:46,resnext:46,resolut:19,reus:32,review:4,rng:6,rnn:22,rnncell:22,robust:4,rocm:6,rrelu:[22,23],run:17,runtim:[27,30,46],sampl:43,sampler:13,save:[17,31,34],sbd:44,sbu:44,scale:31,score:15,script:[19,35,36],segment:46,selu:[22,23],semant:[26,28,34,46],sequenti:22,serial:[34,43],share:[14,21],shufflenet:46,shut:35,sigmoid:[22,23],simd:6,singl:13,slice:43,smooth_l1_loss:23,smoothl1loss:22,soft_margin_loss:23,softmarginloss:22,softmax2d:22,softmax:[22,23],softmin:[22,23],softplu:[22,23],softshrink:[22,23],softsign:[22,23],sourc:[4,35],spars:[22,23,38],spawn:[14,21],specif:13,spectral:43,spectral_norm:22,speed:35,squeezenet:46,start:4,statement:19,step:37,stl10:44,storag:39,strategi:21,stream:[8,28],studentt:15,style:13,subgraph:25,submit:4,subprocess:21,subscript:19,sum:22,support:36,svhn:44,syncbatchnorm:22,synchron:14,system:[14,21],tabl:18,take:37,tanh:[22,23],tanhshrink:[22,23],tcp:14,tenet:5,tensor:[1,21,40,42,43,47],tensorboard:41,ternari:19,test:4,thread:27,threshold:[22,23],through:32,tip:32,tool:8,torch:[0,1,2,3,6,7,8,13,14,15,16,17,20,21,22,23,24,29,36,37,38,39,40,41,42,43,47,49],torchscript:[19,27,31],torchvis:[44,45,46,47,48],trace:[19,36],tracer:19,train:32,transform:[15,22,47],transformeddistribut:15,transformerdecod:22,transformerdecoderlay:22,transformerencod:22,transformerencoderlay:22,triag:4,triplet_margin_loss:23,tripletmarginloss:22,tupl:19,tutori:4,type:[13,19,49],ucf101:44,uncontroversi:5,unfold:[22,23],uniform:15,upsampl:[22,23],upsample_bilinear:23,upsample_nearest:23,upsamplingbilinear2d:22,upsamplingnearest2d:22,usag:[31,35],use:[14,37],user:19,usp:44,util:[2,3,7,13,14,16,20,22,41,43,48],valu:19,variabl:[1,14,19],vector_to_paramet:22,vgg:46,vision:[22,23],voc:44,warn:19,weibul:15,weight:22,weight_norm:22,where:17,which:14,why:35,wide:[31,46],win:35,window:[6,35],without:35,work:[13,30],worker:30,write:29,xla:6,zeropad2d:22}}) \ No newline at end of file 
+Search.setIndex({docnames:["__config__","autograd","bottleneck","checkpoint","community/contribution_guide","community/governance","community/persons_of_interest","cpp_extension","cuda","cuda_deterministic","cuda_deterministic_backward","cudnn_deterministic","cudnn_persistent_rnn","data","distributed","distributions","dlpack","hub","index","jit","model_zoo","multiprocessing","nn","nn.functional","nn.init","notes/autograd","notes/broadcasting","notes/cpu_threading_torchscript_inference","notes/cuda","notes/extending","notes/faq","notes/large_scale_deployments","notes/multiprocessing","notes/randomness","notes/serialization","notes/windows","onnx","optim","sparse","storage","tensor_attributes","tensorboard","tensors","torch","torchvision/datasets","torchvision/index","torchvision/io","torchvision/models","torchvision/ops","torchvision/transforms","torchvision/utils","type_info"],envversion:{"sphinx.domains.c":1,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":1,"sphinx.domains.javascript":1,"sphinx.domains.math":2,"sphinx.domains.python":1,"sphinx.domains.rst":1,"sphinx.domains.std":1,"sphinx.ext.intersphinx":1,"sphinx.ext.todo":1,"sphinx.ext.viewcode":1,sphinx:56},filenames:["__config__.rst","autograd.rst","bottleneck.rst","checkpoint.rst","community/contribution_guide.rst","community/governance.rst","community/persons_of_interest.rst","cpp_extension.rst","cuda.rst","cuda_deterministic.rst","cuda_deterministic_backward.rst","cudnn_deterministic.rst","cudnn_persistent_rnn.rst","data.rst","distributed.rst","distributions.rst","dlpack.rst","hub.rst","index.rst","jit.rst","model_zoo.rst","multiprocessing.rst","nn.rst","nn.functional.rst","nn.init.rst","notes/autograd.rst","notes/broadcasting.rst","notes/cpu_threading_torchscript_inference.rst","notes/cuda.rst","notes/extending.rst","notes/faq.rst","notes/large_scale_deployments.rst","notes/multiprocessing.rst","notes/randomness.rst","notes/serialization.rst","notes/windows.rst","onnx.rst","optim.rst","sparse.rst","storage.rst","tensor_attributes.rst","tensorboard.rst","tensors.rst","torch.rst","torchvision/datasets.rst","torchvision/index.rst","torchvision/io.rst","torchvision/models.rst","torchvision/ops.rst","torchvision/transforms.rst","torchvision/utils.rst","type_info.rst"],objects:{"":{"PYTORCH_JIT=1":[19,5,1,"-"],torch:[43,0,0,"-"],torchvision:[45,0,0,"-"]},"torch.ByteTensor":{all:[42,2,1,""],any:[42,2,1,""]},"torch.FloatStorage":{"byte":[39,2,1,""],"char":[39,2,1,""],"double":[39,2,1,""],"float":[39,2,1,""],"int":[39,2,1,""],"long":[39,2,1,""],"new":[39,2,1,""],"short":[39,2,1,""],bfloat16:[39,2,1,""],bool:[39,2,1,""],clone:[39,2,1,""],copy_:[39,2,1,""],cpu:[39,2,1,""],cuda:[39,2,1,""],data_ptr:[39,2,1,""],device:[39,3,1,""],dtype:[39,3,1,""],element_size:[39,2,1,""],fill_:[39,2,1,""],from_buffer:[39,2,1,""],from_file:[39,2,1,""],half:[39,2,1,""],is_cuda:[39,3,1,""],is_pinned:[39,2,1,""],is_shared:[39,2,1,""],is_sparse:[39,3,1,""],pin_memory:[39,2,1,""],resize_:[39,2,1,""],share_memory_:[39,2,1,""],size:[39,2,1,""],tolist:[39,2,1,""],type:[39,2,1,""]},"torch.Tensor":{"byte":[42,2,1,""],"char":[42,2,1,""],"double":[42,2,1,""],"float":[42,2,1,""],"int":[42,2,1,""],"long":[42,2,1,""],"short":[42,2,1,""],"var":[42,2,1,""],T:[42,3,1,""],abs:[42,2,1,""],abs_:[42,2,1,""],acos:[42,2,1,""],acos_:[42,2,1,""],add:[42,2,1,""],add_:[42,2,1,""],addbmm:[42,2,1,""],addbmm_:[42,2,1,""],addcdiv:[42,2,1,""],addcdiv_:[42,2,1,""],addcmul:[42,2,1,""],addcmul_:[42,2,1,""],addmm:[42,2,1,""],addmm_:[42,2,1,""],addmv:[42,2,1,""],addm
v_:[42,2,1,""],addr:[42,2,1,""],addr_:[42,2,1,""],allclose:[42,2,1,""],apply_:[42,2,1,""],argmax:[42,2,1,""],argmin:[42,2,1,""],argsort:[42,2,1,""],as_strided:[42,2,1,""],asin:[42,2,1,""],asin_:[42,2,1,""],atan2:[42,2,1,""],atan2_:[42,2,1,""],atan:[42,2,1,""],atan_:[42,2,1,""],backward:[42,2,1,""],baddbmm:[42,2,1,""],baddbmm_:[42,2,1,""],bernoulli:[42,2,1,""],bernoulli_:[42,2,1,""],bfloat16:[42,2,1,""],bincount:[42,2,1,""],bitwise_not:[42,2,1,""],bitwise_not_:[42,2,1,""],bmm:[42,2,1,""],bool:[42,2,1,""],cauchy_:[42,2,1,""],ceil:[42,2,1,""],ceil_:[42,2,1,""],cholesky:[42,2,1,""],cholesky_inverse:[42,2,1,""],cholesky_solve:[42,2,1,""],chunk:[42,2,1,""],clamp:[42,2,1,""],clamp_:[42,2,1,""],clone:[42,2,1,""],contiguous:[42,2,1,""],copy_:[42,2,1,""],cos:[42,2,1,""],cos_:[42,2,1,""],cosh:[42,2,1,""],cosh_:[42,2,1,""],cpu:[42,2,1,""],cross:[42,2,1,""],cuda:[42,2,1,""],cumprod:[42,2,1,""],cumsum:[42,2,1,""],data_ptr:[42,2,1,""],dense_dim:[42,2,1,""],dequantize:[42,2,1,""],det:[42,2,1,""],detach:[42,2,1,""],detach_:[42,2,1,""],device:[42,3,1,""],diag:[42,2,1,""],diag_embed:[42,2,1,""],diagflat:[42,2,1,""],diagonal:[42,2,1,""],digamma:[42,2,1,""],digamma_:[42,2,1,""],dim:[42,2,1,""],dist:[42,2,1,""],div:[42,2,1,""],div_:[42,2,1,""],dot:[42,2,1,""],eig:[42,2,1,""],element_size:[42,2,1,""],eq:[42,2,1,""],eq_:[42,2,1,""],equal:[42,2,1,""],erf:[42,2,1,""],erf_:[42,2,1,""],erfc:[42,2,1,""],erfc_:[42,2,1,""],erfinv:[42,2,1,""],erfinv_:[42,2,1,""],exp:[42,2,1,""],exp_:[42,2,1,""],expand:[42,2,1,""],expand_as:[42,2,1,""],expm1:[42,2,1,""],expm1_:[42,2,1,""],exponential_:[42,2,1,""],fft:[42,2,1,""],fill_:[42,2,1,""],fill_diagonal_:[42,2,1,""],flatten:[42,2,1,""],flip:[42,2,1,""],floor:[42,2,1,""],floor_:[42,2,1,""],fmod:[42,2,1,""],fmod_:[42,2,1,""],frac:[42,2,1,""],frac_:[42,2,1,""],gather:[42,2,1,""],ge:[42,2,1,""],ge_:[42,2,1,""],gels:[42,2,1,""],geometric_:[42,2,1,""],geqrf:[42,2,1,""],ger:[42,2,1,""],get_device:[42,2,1,""],grad:[42,3,1,""],gt:[42,2,1,""],gt_:[42,2,1,""],half:[42,2,1,""],hardshrink:[42,2,1,""],histc:[42,2,1,""],ifft:[42,2,1,""],index_add:[42,2,1,""],index_add_:[42,2,1,""],index_copy:[42,2,1,""],index_copy_:[42,2,1,""],index_fill:[42,2,1,""],index_fill_:[42,2,1,""],index_put:[42,2,1,""],index_put_:[42,2,1,""],index_select:[42,2,1,""],indices:[42,2,1,""],int_repr:[42,2,1,""],inverse:[42,2,1,""],irfft:[42,2,1,""],is_contiguous:[42,2,1,""],is_cuda:[42,3,1,""],is_floating_point:[42,2,1,""],is_leaf:[42,2,1,""],is_pinned:[42,2,1,""],is_set_to:[42,2,1,""],is_shared:[42,2,1,""],is_signed:[42,2,1,""],is_sparse:[42,2,1,""],item:[42,2,1,""],kthvalue:[42,2,1,""],le:[42,2,1,""],le_:[42,2,1,""],lerp:[42,2,1,""],lerp_:[42,2,1,""],log10:[42,2,1,""],log10_:[42,2,1,""],log1p:[42,2,1,""],log1p_:[42,2,1,""],log2:[42,2,1,""],log2_:[42,2,1,""],log:[42,2,1,""],log_:[42,2,1,""],log_normal_:[42,2,1,""],logdet:[42,2,1,""],logsumexp:[42,2,1,""],lstsq:[42,2,1,""],lt:[42,2,1,""],lt_:[42,2,1,""],lu:[42,2,1,""],lu_solve:[42,2,1,""],map_:[42,2,1,""],masked_fill:[42,2,1,""],masked_fill_:[42,2,1,""],masked_scatter:[42,2,1,""],masked_scatter_:[42,2,1,""],masked_select:[42,2,1,""],matmul:[42,2,1,""],matrix_power:[42,2,1,""],max:[42,2,1,""],mean:[42,2,1,""],median:[42,2,1,""],min:[42,2,1,""],mm:[42,2,1,""],mode:[42,2,1,""],mul:[42,2,1,""],mul_:[42,2,1,""],multinomial:[42,2,1,""],mv:[42,2,1,""],mvlgamma:[42,2,1,""],mvlgamma_:[42,2,1,""],narrow:[42,2,1,""],narrow_copy:[42,2,1,""],ndim:[42,3,1,""],ndimension:[42,2,1,""],ne:[42,2,1,""],ne_:[42,2,1,""],neg:[42,2,1,""],neg_:[42,2,1,""],nelement:[42,2,1,""],new_empty:[42,2,1,""],new
_full:[42,2,1,""],new_ones:[42,2,1,""],new_tensor:[42,2,1,""],new_zeros:[42,2,1,""],nonzero:[42,2,1,""],norm:[42,2,1,""],normal_:[42,2,1,""],numel:[42,2,1,""],numpy:[42,2,1,""],orgqr:[42,2,1,""],ormqr:[42,2,1,""],permute:[42,2,1,""],pin_memory:[42,2,1,""],pinverse:[42,2,1,""],pow:[42,2,1,""],pow_:[42,2,1,""],prod:[42,2,1,""],put_:[42,2,1,""],q_scale:[42,2,1,""],q_zero_point:[42,2,1,""],qr:[42,2,1,""],qscheme:[42,2,1,""],random_:[42,2,1,""],reciprocal:[42,2,1,""],reciprocal_:[42,2,1,""],register_hook:[42,2,1,""],remainder:[42,2,1,""],remainder_:[42,2,1,""],renorm:[42,2,1,""],renorm_:[42,2,1,""],repeat:[42,2,1,""],repeat_interleave:[42,2,1,""],requires_grad:[42,2,1,""],requires_grad_:[42,2,1,""],reshape:[42,2,1,""],reshape_as:[42,2,1,""],resize_:[42,2,1,""],resize_as_:[42,2,1,""],retain_grad:[42,2,1,""],rfft:[42,2,1,""],roll:[42,2,1,""],rot90:[42,2,1,""],round:[42,2,1,""],round_:[42,2,1,""],rsqrt:[42,2,1,""],rsqrt_:[42,2,1,""],scatter:[42,2,1,""],scatter_:[42,2,1,""],scatter_add:[42,2,1,""],scatter_add_:[42,2,1,""],select:[42,2,1,""],set_:[42,2,1,""],share_memory_:[42,2,1,""],sigmoid:[42,2,1,""],sigmoid_:[42,2,1,""],sign:[42,2,1,""],sign_:[42,2,1,""],sin:[42,2,1,""],sin_:[42,2,1,""],sinh:[42,2,1,""],sinh_:[42,2,1,""],size:[42,2,1,""],slogdet:[42,2,1,""],solve:[42,2,1,""],sort:[42,2,1,""],sparse_dim:[42,2,1,""],sparse_mask:[42,2,1,""],split:[42,2,1,""],sqrt:[42,2,1,""],sqrt_:[42,2,1,""],squeeze:[42,2,1,""],squeeze_:[42,2,1,""],std:[42,2,1,""],stft:[42,2,1,""],storage:[42,2,1,""],storage_offset:[42,2,1,""],storage_type:[42,2,1,""],stride:[42,2,1,""],sub:[42,2,1,""],sub_:[42,2,1,""],sum:[42,2,1,""],sum_to_size:[42,2,1,""],svd:[42,2,1,""],symeig:[42,2,1,""],t:[42,2,1,""],t_:[42,2,1,""],take:[42,2,1,""],tan:[42,2,1,""],tan_:[42,2,1,""],tanh:[42,2,1,""],tanh_:[42,2,1,""],to:[42,2,1,""],to_mkldnn:[42,2,1,""],to_sparse:[42,2,1,""],tolist:[42,2,1,""],topk:[42,2,1,""],trace:[42,2,1,""],transpose:[42,2,1,""],transpose_:[42,2,1,""],triangular_solve:[42,2,1,""],tril:[42,2,1,""],tril_:[42,2,1,""],triu:[42,2,1,""],triu_:[42,2,1,""],trunc:[42,2,1,""],trunc_:[42,2,1,""],type:[42,2,1,""],type_as:[42,2,1,""],unbind:[42,2,1,""],unfold:[42,2,1,""],uniform_:[42,2,1,""],unique:[42,2,1,""],unique_consecutive:[42,2,1,""],unsqueeze:[42,2,1,""],unsqueeze_:[42,2,1,""],values:[42,2,1,""],view:[42,2,1,""],view_as:[42,2,1,""],where:[42,2,1,""],zero_:[42,2,1,""]},"torch._C":{Generator:[43,1,1,""]},"torch._C.Generator":{device:[43,3,1,""],get_state:[43,2,1,""],initial_seed:[43,2,1,""],manual_seed:[43,2,1,""],seed:[43,2,1,""],set_state:[43,2,1,""]},"torch.__config__":{parallel_info:[0,4,1,""],show:[0,4,1,""]},"torch.autograd":{Function:[1,1,1,""],backward:[1,4,1,""],detect_anomaly:[1,1,1,""],enable_grad:[1,1,1,""],grad:[1,4,1,""],gradcheck:[1,4,1,""],gradgradcheck:[1,4,1,""],no_grad:[1,1,1,""],set_detect_anomaly:[1,1,1,""],set_grad_enabled:[1,1,1,""]},"torch.autograd.Function":{backward:[1,2,1,""],forward:[1,2,1,""]},"torch.autograd.profiler":{emit_nvtx:[1,1,1,""],load_nvprof:[1,4,1,""],profile:[1,1,1,""]},"torch.autograd.profiler.profile":{export_chrome_trace:[1,2,1,""],key_averages:[1,2,1,""],self_cpu_time_total:[1,2,1,""],table:[1,2,1,""],total_average:[1,2,1,""]},"torch.cuda":{Event:[8,1,1,""],Stream:[8,1,1,""],current_blas_handle:[8,4,1,""],current_device:[8,4,1,""],current_stream:[8,4,1,""],default_stream:[8,4,1,""],device:[8,1,1,""],device_count:[8,4,1,""],device_of:[8,1,1,""],empty_cache:[8,4,1,""],get_device_capability:[8,4,1,""],get_device_name:[8,4,1,""],get_rng_state:[8,4,1,""],get_rng_state_all:[8,4,1,""],init:[8
,4,1,""],initial_seed:[8,4,1,""],ipc_collect:[8,4,1,""],is_available:[8,4,1,""],manual_seed:[8,4,1,""],manual_seed_all:[8,4,1,""],max_memory_allocated:[8,4,1,""],max_memory_cached:[8,4,1,""],memory_allocated:[8,4,1,""],memory_cached:[8,4,1,""],reset_max_memory_allocated:[8,4,1,""],reset_max_memory_cached:[8,4,1,""],seed:[8,4,1,""],seed_all:[8,4,1,""],set_device:[8,4,1,""],set_rng_state:[8,4,1,""],set_rng_state_all:[8,4,1,""],stream:[8,4,1,""],synchronize:[8,4,1,""]},"torch.cuda.Event":{elapsed_time:[8,2,1,""],from_ipc_handle:[8,2,1,""],ipc_handle:[8,2,1,""],query:[8,2,1,""],record:[8,2,1,""],synchronize:[8,2,1,""],wait:[8,2,1,""]},"torch.cuda.Stream":{query:[8,2,1,""],record_event:[8,2,1,""],synchronize:[8,2,1,""],wait_event:[8,2,1,""],wait_stream:[8,2,1,""]},"torch.cuda.comm":{broadcast:[8,4,1,""],broadcast_coalesced:[8,4,1,""],gather:[8,4,1,""],reduce_add:[8,4,1,""],scatter:[8,4,1,""]},"torch.cuda.nvtx":{mark:[8,4,1,""],range_pop:[8,4,1,""],range_push:[8,4,1,""]},"torch.distributed":{Backend:[14,1,1,""],ReduceOp:[14,1,1,""],all_gather:[14,4,1,""],all_gather_multigpu:[14,4,1,""],all_reduce:[14,4,1,""],all_reduce_multigpu:[14,4,1,""],barrier:[14,4,1,""],broadcast:[14,4,1,""],broadcast_multigpu:[14,4,1,""],gather:[14,4,1,""],get_backend:[14,4,1,""],get_rank:[14,4,1,""],get_world_size:[14,4,1,""],init_process_group:[14,4,1,""],irecv:[14,4,1,""],is_initialized:[14,4,1,""],is_mpi_available:[14,4,1,""],is_nccl_available:[14,4,1,""],isend:[14,4,1,""],launch:[14,0,0,"-"],new_group:[14,4,1,""],recv:[14,4,1,""],reduce:[14,4,1,""],reduce_multigpu:[14,4,1,""],reduce_op:[14,1,1,""],scatter:[14,4,1,""],send:[14,4,1,""]},"torch.distributions":{constraint_registry:[15,0,0,"-"],constraints:[15,0,0,"-"],kl:[15,0,0,"-"],transforms:[15,0,0,"-"]},"torch.distributions.bernoulli":{Bernoulli:[15,1,1,""]},"torch.distributions.bernoulli.Bernoulli":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,3,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.beta":{Beta:[15,1,1,""]},"torch.distributions.beta.Beta":{arg_constraints:[15,3,1,""],concentration0:[15,2,1,""],concentration1:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.binomial":{Binomial:[15,1,1,""]},"torch.distributions.binomial.Binomial":{arg_constraints:[15,3,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,3,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.categorical":{Categorical:[15,1,1,""]},"torch.distributions.categorical.Categorical":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,3,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.cauchy":{Cauchy:[15,1,1,""]},"torch.distributions.cauchy.Cauchy":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.di
stributions.chi2":{Chi2:[15,1,1,""]},"torch.distributions.chi2.Chi2":{arg_constraints:[15,3,1,""],df:[15,2,1,""],expand:[15,2,1,""]},"torch.distributions.constraint_registry":{ConstraintRegistry:[15,1,1,""]},"torch.distributions.constraint_registry.ConstraintRegistry":{register:[15,2,1,""]},"torch.distributions.constraints":{Constraint:[15,1,1,""],cat:[15,3,1,""],dependent_property:[15,3,1,""],greater_than:[15,3,1,""],greater_than_eq:[15,3,1,""],half_open_interval:[15,3,1,""],integer_interval:[15,3,1,""],interval:[15,3,1,""],less_than:[15,3,1,""],stack:[15,3,1,""]},"torch.distributions.constraints.Constraint":{check:[15,2,1,""]},"torch.distributions.dirichlet":{Dirichlet:[15,1,1,""]},"torch.distributions.dirichlet.Dirichlet":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.distribution":{Distribution:[15,1,1,""]},"torch.distributions.distribution.Distribution":{arg_constraints:[15,2,1,""],batch_shape:[15,2,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],event_shape:[15,2,1,""],expand:[15,2,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],perplexity:[15,2,1,""],rsample:[15,2,1,""],sample:[15,2,1,""],sample_n:[15,2,1,""],stddev:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.exp_family":{ExponentialFamily:[15,1,1,""]},"torch.distributions.exp_family.ExponentialFamily":{entropy:[15,2,1,""]},"torch.distributions.exponential":{Exponential:[15,1,1,""]},"torch.distributions.exponential.Exponential":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],stddev:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.fishersnedecor":{FisherSnedecor:[15,1,1,""]},"torch.distributions.fishersnedecor.FisherSnedecor":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.gamma":{Gamma:[15,1,1,""]},"torch.distributions.gamma.Gamma":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.geometric":{Geometric:[15,1,1,""]},"torch.distributions.geometric.Geometric":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.gumbel":{Gumbel:[15,1,1,""]},"torch.distributions.gumbel.Gumbel":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],stddev:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.half_cauchy":{HalfCauchy:[15,1,1,""]},"torch.distributions.half_cauchy.HalfCauchy":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],scale:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.half_normal":{HalfNormal:[15,1,1,""]},"torch.distributions.half_normal.HalfNormal":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,
1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],scale:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.independent":{Independent:[15,1,1,""]},"torch.distributions.independent.Independent":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,2,1,""],has_rsample:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],sample:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.kl":{kl_divergence:[15,4,1,""],register_kl:[15,4,1,""]},"torch.distributions.laplace":{Laplace:[15,1,1,""]},"torch.distributions.laplace.Laplace":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],stddev:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.log_normal":{LogNormal:[15,1,1,""]},"torch.distributions.log_normal.LogNormal":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],loc:[15,2,1,""],mean:[15,2,1,""],scale:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.lowrank_multivariate_normal":{LowRankMultivariateNormal:[15,1,1,""]},"torch.distributions.lowrank_multivariate_normal.LowRankMultivariateNormal":{arg_constraints:[15,3,1,""],covariance_matrix:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],precision_matrix:[15,3,1,""],rsample:[15,2,1,""],scale_tril:[15,3,1,""],support:[15,3,1,""],variance:[15,3,1,""]},"torch.distributions.multinomial":{Multinomial:[15,1,1,""]},"torch.distributions.multinomial.Multinomial":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],logits:[15,2,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,2,1,""],sample:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.multivariate_normal":{MultivariateNormal:[15,1,1,""]},"torch.distributions.multivariate_normal.MultivariateNormal":{arg_constraints:[15,3,1,""],covariance_matrix:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],precision_matrix:[15,3,1,""],rsample:[15,2,1,""],scale_tril:[15,3,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.negative_binomial":{NegativeBinomial:[15,1,1,""]},"torch.distributions.negative_binomial.NegativeBinomial":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.normal":{Normal:[15,1,1,""]},"torch.distributions.normal.Normal":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],sample:[15,2,1,""],stddev:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.one_hot_categorical":{OneHotCategorical:[15,1,1,""]},"torch.distributions.one_hot_categorical.OneHotCategorical":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,3,1,""],log_prob:[15,2,1,""],logits:[15,2,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,2,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.pareto":{Pareto:[15,1,1,""]},"torch.distributions.pareto.Pareto":{arg_constrai
nts:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],mean:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.poisson":{Poisson:[15,1,1,""]},"torch.distributions.poisson.Poisson":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.relaxed_bernoulli":{LogitRelaxedBernoulli:[15,1,1,""],RelaxedBernoulli:[15,1,1,""]},"torch.distributions.relaxed_bernoulli.LogitRelaxedBernoulli":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],rsample:[15,2,1,""],support:[15,3,1,""]},"torch.distributions.relaxed_bernoulli.RelaxedBernoulli":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],logits:[15,2,1,""],probs:[15,2,1,""],support:[15,3,1,""],temperature:[15,2,1,""]},"torch.distributions.relaxed_categorical":{RelaxedOneHotCategorical:[15,1,1,""]},"torch.distributions.relaxed_categorical.RelaxedOneHotCategorical":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],logits:[15,2,1,""],probs:[15,2,1,""],support:[15,3,1,""],temperature:[15,2,1,""]},"torch.distributions.studentT":{StudentT:[15,1,1,""]},"torch.distributions.studentT.StudentT":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.transformed_distribution":{TransformedDistribution:[15,1,1,""]},"torch.distributions.transformed_distribution.TransformedDistribution":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,2,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],rsample:[15,2,1,""],sample:[15,2,1,""],support:[15,2,1,""]},"torch.distributions.transforms":{AbsTransform:[15,1,1,""],AffineTransform:[15,1,1,""],CatTransform:[15,1,1,""],ComposeTransform:[15,1,1,""],ExpTransform:[15,1,1,""],LowerCholeskyTransform:[15,1,1,""],PowerTransform:[15,1,1,""],SigmoidTransform:[15,1,1,""],SoftmaxTransform:[15,1,1,""],StackTransform:[15,1,1,""],StickBreakingTransform:[15,1,1,""],Transform:[15,1,1,""]},"torch.distributions.transforms.Transform":{inv:[15,2,1,""],log_abs_det_jacobian:[15,2,1,""],sign:[15,2,1,""]},"torch.distributions.uniform":{Uniform:[15,1,1,""]},"torch.distributions.uniform.Uniform":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],stddev:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.weibull":{Weibull:[15,1,1,""]},"torch.distributions.weibull.Weibull":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],mean:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.hub":{help:[17,4,1,""],list:[17,4,1,""],load:[17,4,1,""],set_dir:[17,4,1,""]},"torch.jit":{ScriptModule:[19,1,1,""],load:[19,4,1,""],save:[19,4,1,""],script:[19,4,1,""],trace:[19,4,1,""]},"torch.multiprocessing":{SpawnContext:[21,1,1,""],get_all_sharing_strategies:[21,4,1,""],get_sharing_strategy:[21,4,1,""],set_sharing_strategy:[21,4,1,""],spawn:[21,4,1,""]},"torch.multiprocessing.SpawnContext":{join:[21,2,1,""]},"torch.nn":{AdaptiveAvgPool1d:[22,1,1,""],AdaptiveAvgPool2d:[22,1,1,""],AdaptiveAvgPool3d:[22,1,1,""],AdaptiveLogSoftmaxWithLoss:[22,1,1,""],AdaptiveMaxPool1d:[22,1,1,""],AdaptiveMaxPool2d:[22,1,1,""],AdaptiveMaxPool3d:[22,1,1,""],AlphaDropout:[22,1,1,""],AvgPool1d:[
22,1,1,""],AvgPool2d:[22,1,1,""],AvgPool3d:[22,1,1,""],BCELoss:[22,1,1,""],BCEWithLogitsLoss:[22,1,1,""],BatchNorm1d:[22,1,1,""],BatchNorm2d:[22,1,1,""],BatchNorm3d:[22,1,1,""],Bilinear:[22,1,1,""],CELU:[22,1,1,""],CTCLoss:[22,1,1,""],ConstantPad1d:[22,1,1,""],ConstantPad2d:[22,1,1,""],ConstantPad3d:[22,1,1,""],Conv1d:[22,1,1,""],Conv2d:[22,1,1,""],Conv3d:[22,1,1,""],ConvTranspose1d:[22,1,1,""],ConvTranspose2d:[22,1,1,""],ConvTranspose3d:[22,1,1,""],CosineEmbeddingLoss:[22,1,1,""],CosineSimilarity:[22,1,1,""],CrossEntropyLoss:[22,1,1,""],DataParallel:[22,1,1,""],Dropout2d:[22,1,1,""],Dropout3d:[22,1,1,""],Dropout:[22,1,1,""],ELU:[22,1,1,""],Embedding:[22,1,1,""],EmbeddingBag:[22,1,1,""],Fold:[22,1,1,""],FractionalMaxPool2d:[22,1,1,""],GRU:[22,1,1,""],GRUCell:[22,1,1,""],GroupNorm:[22,1,1,""],Hardshrink:[22,1,1,""],Hardtanh:[22,1,1,""],HingeEmbeddingLoss:[22,1,1,""],Identity:[22,1,1,""],InstanceNorm1d:[22,1,1,""],InstanceNorm2d:[22,1,1,""],InstanceNorm3d:[22,1,1,""],KLDivLoss:[22,1,1,""],L1Loss:[22,1,1,""],LPPool1d:[22,1,1,""],LPPool2d:[22,1,1,""],LSTM:[22,1,1,""],LSTMCell:[22,1,1,""],LayerNorm:[22,1,1,""],LeakyReLU:[22,1,1,""],Linear:[22,1,1,""],LocalResponseNorm:[22,1,1,""],LogSigmoid:[22,1,1,""],LogSoftmax:[22,1,1,""],MSELoss:[22,1,1,""],MarginRankingLoss:[22,1,1,""],MaxPool1d:[22,1,1,""],MaxPool2d:[22,1,1,""],MaxPool3d:[22,1,1,""],MaxUnpool1d:[22,1,1,""],MaxUnpool2d:[22,1,1,""],MaxUnpool3d:[22,1,1,""],Module:[22,1,1,""],ModuleDict:[22,1,1,""],ModuleList:[22,1,1,""],MultiLabelMarginLoss:[22,1,1,""],MultiLabelSoftMarginLoss:[22,1,1,""],MultiMarginLoss:[22,1,1,""],MultiheadAttention:[22,1,1,""],NLLLoss:[22,1,1,""],PReLU:[22,1,1,""],PairwiseDistance:[22,1,1,""],Parameter:[22,1,1,""],ParameterDict:[22,1,1,""],ParameterList:[22,1,1,""],PixelShuffle:[22,1,1,""],PoissonNLLLoss:[22,1,1,""],RNN:[22,1,1,""],RNNCell:[22,1,1,""],RReLU:[22,1,1,""],ReLU6:[22,1,1,""],ReLU:[22,1,1,""],ReflectionPad1d:[22,1,1,""],ReflectionPad2d:[22,1,1,""],ReplicationPad1d:[22,1,1,""],ReplicationPad2d:[22,1,1,""],ReplicationPad3d:[22,1,1,""],SELU:[22,1,1,""],Sequential:[22,1,1,""],Sigmoid:[22,1,1,""],SmoothL1Loss:[22,1,1,""],SoftMarginLoss:[22,1,1,""],Softmax2d:[22,1,1,""],Softmax:[22,1,1,""],Softmin:[22,1,1,""],Softplus:[22,1,1,""],Softshrink:[22,1,1,""],Softsign:[22,1,1,""],SyncBatchNorm:[22,1,1,""],Tanh:[22,1,1,""],Tanhshrink:[22,1,1,""],Threshold:[22,1,1,""],Transformer:[22,1,1,""],TransformerDecoder:[22,1,1,""],TransformerDecoderLayer:[22,1,1,""],TransformerEncoder:[22,1,1,""],TransformerEncoderLayer:[22,1,1,""],TripletMarginLoss:[22,1,1,""],Unfold:[22,1,1,""],Upsample:[22,1,1,""],UpsamplingBilinear2d:[22,1,1,""],UpsamplingNearest2d:[22,1,1,""],ZeroPad2d:[22,1,1,""]},"torch.nn.AdaptiveLogSoftmaxWithLoss":{log_prob:[22,2,1,""],predict:[22,2,1,""]},"torch.nn.Embedding":{from_pretrained:[22,2,1,""]},"torch.nn.EmbeddingBag":{from_pretrained:[22,2,1,""]},"torch.nn.Module":{"double":[22,2,1,""],"float":[22,2,1,""],add_module:[22,2,1,""],apply:[22,2,1,""],buffers:[22,2,1,""],children:[22,2,1,""],cpu:[22,2,1,""],cuda:[22,2,1,""],dump_patches:[22,3,1,""],eval:[22,2,1,""],extra_repr:[22,2,1,""],forward:[22,2,1,""],half:[22,2,1,""],load_state_dict:[22,2,1,""],modules:[22,2,1,""],named_buffers:[22,2,1,""],named_children:[22,2,1,""],named_modules:[22,2,1,""],named_parameters:[22,2,1,""],parameters:[22,2,1,""],register_backward_hook:[22,2,1,""],register_buffer:[22,2,1,""],register_forward_hook:[22,2,1,""],register_forward_pre_hook:[22,2,1,""],register_parameter:[22,2,1,""],requires_grad_:[22,2,1,""],state_dict:[22,2,1,""],to:[22,
2,1,""],train:[22,2,1,""],type:[22,2,1,""],zero_grad:[22,2,1,""]},"torch.nn.ModuleDict":{clear:[22,2,1,""],items:[22,2,1,""],keys:[22,2,1,""],pop:[22,2,1,""],update:[22,2,1,""],values:[22,2,1,""]},"torch.nn.ModuleList":{append:[22,2,1,""],extend:[22,2,1,""],insert:[22,2,1,""]},"torch.nn.MultiheadAttention":{forward:[22,2,1,""]},"torch.nn.ParameterDict":{clear:[22,2,1,""],items:[22,2,1,""],keys:[22,2,1,""],pop:[22,2,1,""],update:[22,2,1,""],values:[22,2,1,""]},"torch.nn.ParameterList":{append:[22,2,1,""],extend:[22,2,1,""]},"torch.nn.SyncBatchNorm":{convert_sync_batchnorm:[22,2,1,""]},"torch.nn.Transformer":{forward:[22,2,1,""],generate_square_subsequent_mask:[22,2,1,""]},"torch.nn.TransformerDecoder":{forward:[22,2,1,""]},"torch.nn.TransformerDecoderLayer":{forward:[22,2,1,""]},"torch.nn.TransformerEncoder":{forward:[22,2,1,""]},"torch.nn.TransformerEncoderLayer":{forward:[22,2,1,""]},"torch.nn.functional":{adaptive_avg_pool1d:[23,4,1,""],adaptive_avg_pool2d:[23,4,1,""],adaptive_avg_pool3d:[23,4,1,""],adaptive_max_pool1d:[23,4,1,""],adaptive_max_pool2d:[23,4,1,""],adaptive_max_pool3d:[23,4,1,""],affine_grid:[23,4,1,""],alpha_dropout:[23,4,1,""],avg_pool1d:[23,4,1,""],avg_pool2d:[23,4,1,""],avg_pool3d:[23,4,1,""],batch_norm:[23,4,1,""],bilinear:[23,4,1,""],binary_cross_entropy:[23,4,1,""],binary_cross_entropy_with_logits:[23,4,1,""],celu:[23,4,1,""],conv1d:[23,4,1,""],conv2d:[23,4,1,""],conv3d:[23,4,1,""],conv_transpose1d:[23,4,1,""],conv_transpose2d:[23,4,1,""],conv_transpose3d:[23,4,1,""],cosine_embedding_loss:[23,4,1,""],cosine_similarity:[23,4,1,""],cross_entropy:[23,4,1,""],ctc_loss:[23,4,1,""],dropout2d:[23,4,1,""],dropout3d:[23,4,1,""],dropout:[23,4,1,""],elu:[23,4,1,""],elu_:[23,4,1,""],embedding:[23,4,1,""],embedding_bag:[23,4,1,""],fold:[23,4,1,""],gelu:[23,4,1,""],glu:[23,4,1,""],grid_sample:[23,4,1,""],gumbel_softmax:[23,4,1,""],hardshrink:[23,4,1,""],hardtanh:[23,4,1,""],hardtanh_:[23,4,1,""],hinge_embedding_loss:[23,4,1,""],instance_norm:[23,4,1,""],interpolate:[23,4,1,""],kl_div:[23,4,1,""],l1_loss:[23,4,1,""],layer_norm:[23,4,1,""],leaky_relu:[23,4,1,""],leaky_relu_:[23,4,1,""],linear:[23,4,1,""],local_response_norm:[23,4,1,""],log_softmax:[23,4,1,""],logsigmoid:[23,4,1,""],lp_pool1d:[23,4,1,""],lp_pool2d:[23,4,1,""],margin_ranking_loss:[23,4,1,""],max_pool1d:[23,4,1,""],max_pool2d:[23,4,1,""],max_pool3d:[23,4,1,""],max_unpool1d:[23,4,1,""],max_unpool2d:[23,4,1,""],max_unpool3d:[23,4,1,""],mse_loss:[23,4,1,""],multi_margin_loss:[23,4,1,""],multilabel_margin_loss:[23,4,1,""],multilabel_soft_margin_loss:[23,4,1,""],nll_loss:[23,4,1,""],normalize:[23,4,1,""],one_hot:[23,4,1,""],pad:[23,4,1,""],pairwise_distance:[23,4,1,""],pdist:[23,4,1,""],pixel_shuffle:[23,4,1,""],poisson_nll_loss:[23,4,1,""],prelu:[23,4,1,""],relu6:[23,4,1,""],relu:[23,4,1,""],relu_:[23,4,1,""],rrelu:[23,4,1,""],rrelu_:[23,4,1,""],selu:[23,4,1,""],sigmoid:[23,4,1,""],smooth_l1_loss:[23,4,1,""],soft_margin_loss:[23,4,1,""],softmax:[23,4,1,""],softmin:[23,4,1,""],softplus:[23,4,1,""],softshrink:[23,4,1,""],softsign:[23,4,1,""],tanh:[23,4,1,""],tanhshrink:[23,4,1,""],threshold:[23,4,1,""],threshold_:[23,4,1,""],triplet_margin_loss:[23,4,1,""],unfold:[23,4,1,""],upsample:[23,4,1,""],upsample_bilinear:[23,4,1,""],upsample_nearest:[23,4,1,""]},"torch.nn.init":{calculate_gain:[24,4,1,""],constant_:[24,4,1,""],dirac_:[24,4,1,""],eye_:[24,4,1,""],kaiming_normal_:[24,4,1,""],kaiming_uniform_:[24,4,1,""],normal_:[24,4,1,""],ones_:[24,4,1,""],orthogonal_:[24,4,1,""],sparse_:[24,4,1,""],uniform_:[24,4,1,""],xavier_normal_
:[24,4,1,""],xavier_uniform_:[24,4,1,""],zeros_:[24,4,1,""]},"torch.nn.parallel":{DistributedDataParallel:[22,1,1,""],data_parallel:[23,4,1,""]},"torch.nn.parallel.DistributedDataParallel":{no_sync:[22,2,1,""]},"torch.nn.utils":{clip_grad_norm_:[22,4,1,""],clip_grad_value_:[22,4,1,""],parameters_to_vector:[22,4,1,""],remove_spectral_norm:[22,4,1,""],remove_weight_norm:[22,4,1,""],spectral_norm:[22,4,1,""],vector_to_parameters:[22,4,1,""],weight_norm:[22,4,1,""]},"torch.nn.utils.rnn":{PackedSequence:[22,4,1,""],pack_padded_sequence:[22,4,1,""],pack_sequence:[22,4,1,""],pad_packed_sequence:[22,4,1,""],pad_sequence:[22,4,1,""]},"torch.onnx":{"export":[36,4,1,""],is_in_onnx_export:[36,4,1,""],register_custom_op_symbolic:[36,4,1,""],set_training:[36,4,1,""]},"torch.onnx.operators":{shape_as_tensor:[36,4,1,""]},"torch.optim":{ASGD:[37,1,1,""],Adadelta:[37,1,1,""],Adagrad:[37,1,1,""],Adam:[37,1,1,""],AdamW:[37,1,1,""],Adamax:[37,1,1,""],LBFGS:[37,1,1,""],Optimizer:[37,1,1,""],RMSprop:[37,1,1,""],Rprop:[37,1,1,""],SGD:[37,1,1,""],SparseAdam:[37,1,1,""]},"torch.optim.ASGD":{step:[37,2,1,""]},"torch.optim.Adadelta":{step:[37,2,1,""]},"torch.optim.Adagrad":{step:[37,2,1,""]},"torch.optim.Adam":{step:[37,2,1,""]},"torch.optim.AdamW":{step:[37,2,1,""]},"torch.optim.Adamax":{step:[37,2,1,""]},"torch.optim.LBFGS":{step:[37,2,1,""]},"torch.optim.Optimizer":{add_param_group:[37,2,1,""],load_state_dict:[37,2,1,""],state_dict:[37,2,1,""],step:[37,2,1,""],zero_grad:[37,2,1,""]},"torch.optim.RMSprop":{step:[37,2,1,""]},"torch.optim.Rprop":{step:[37,2,1,""]},"torch.optim.SGD":{step:[37,2,1,""]},"torch.optim.SparseAdam":{step:[37,2,1,""]},"torch.optim.lr_scheduler":{CosineAnnealingLR:[37,1,1,""],CyclicLR:[37,1,1,""],ExponentialLR:[37,1,1,""],LambdaLR:[37,1,1,""],MultiStepLR:[37,1,1,""],ReduceLROnPlateau:[37,1,1,""],StepLR:[37,1,1,""]},"torch.optim.lr_scheduler.CyclicLR":{get_lr:[37,2,1,""]},"torch.optim.lr_scheduler.LambdaLR":{load_state_dict:[37,2,1,""],state_dict:[37,2,1,""]},"torch.quasirandom":{SobolEngine:[43,1,1,""]},"torch.quasirandom.SobolEngine":{draw:[43,2,1,""],fast_forward:[43,2,1,""],reset:[43,2,1,""]},"torch.sparse":{FloatTensor:[38,1,1,""],addmm:[38,4,1,""],mm:[38,4,1,""],sum:[38,4,1,""]},"torch.sparse.FloatTensor":{_indices:[38,2,1,""],_nnz:[38,2,1,""],_values:[38,2,1,""],add:[38,2,1,""],add_:[38,2,1,""],clone:[38,2,1,""],coalesce:[38,2,1,""],dim:[38,2,1,""],div:[38,2,1,""],div_:[38,2,1,""],get_device:[38,2,1,""],hspmm:[38,2,1,""],is_coalesced:[38,2,1,""],mm:[38,2,1,""],mul:[38,2,1,""],mul_:[38,2,1,""],narrow_copy:[38,2,1,""],resizeAs_:[38,2,1,""],size:[38,2,1,""],spadd:[38,2,1,""],spmm:[38,2,1,""],sspaddmm:[38,2,1,""],sspmm:[38,2,1,""],sub:[38,2,1,""],sub_:[38,2,1,""],t_:[38,2,1,""],toDense:[38,2,1,""],transpose:[38,2,1,""],transpose_:[38,2,1,""],zero_:[38,2,1,""]},"torch.torch":{default_generator:[43,3,1,""],device:[40,1,1,""],dtype:[40,1,1,""],finfo:[51,1,1,""],iinfo:[51,1,1,""],layout:[40,1,1,""]},"torch.utils":{data:[13,0,0,"-"],model_zoo:[20,0,0,"-"]},"torch.utils.checkpoint":{checkpoint:[3,4,1,""],checkpoint_sequential:[3,4,1,""]},"torch.utils.cpp_extension":{BuildExtension:[7,4,1,""],CUDAExtension:[7,4,1,""],CppExtension:[7,4,1,""],check_compiler_abi_compatibility:[7,4,1,""],include_paths:[7,4,1,""],load:[7,4,1,""],load_inline:[7,4,1,""],verify_ninja_availability:[7,4,1,""]},"torch.utils.data":{BatchSampler:[13,1,1,""],ChainDataset:[13,1,1,""],ConcatDataset:[13,1,1,""],DataLoader:[13,1,1,""],Dataset:[13,1,1,""],IterableDataset:[13,1,1,""],RandomSampler:[13,1,1,""],Sampler:[13,1,1,""],Seque
ntialSampler:[13,1,1,""],Subset:[13,1,1,""],SubsetRandomSampler:[13,1,1,""],TensorDataset:[13,1,1,""],WeightedRandomSampler:[13,1,1,""],get_worker_info:[13,4,1,""],random_split:[13,4,1,""]},"torch.utils.data.distributed":{DistributedSampler:[13,1,1,""]},"torch.utils.dlpack":{from_dlpack:[16,4,1,""],to_dlpack:[16,4,1,""]},"torch.utils.model_zoo":{load_url:[20,4,1,""]},"torch.utils.tensorboard.writer":{SummaryWriter:[41,1,1,""]},"torch.utils.tensorboard.writer.SummaryWriter":{__init__:[41,2,1,""],add_audio:[41,2,1,""],add_custom_scalars:[41,2,1,""],add_embedding:[41,2,1,""],add_figure:[41,2,1,""],add_graph:[41,2,1,""],add_histogram:[41,2,1,""],add_image:[41,2,1,""],add_images:[41,2,1,""],add_mesh:[41,2,1,""],add_pr_curve:[41,2,1,""],add_scalar:[41,2,1,""],add_scalars:[41,2,1,""],add_text:[41,2,1,""],add_video:[41,2,1,""],close:[41,2,1,""],flush:[41,2,1,""]},"torchvision.datasets":{CIFAR100:[44,1,1,""],CIFAR10:[44,1,1,""],Cityscapes:[44,1,1,""],CocoCaptions:[44,1,1,""],CocoDetection:[44,1,1,""],DatasetFolder:[44,1,1,""],EMNIST:[44,1,1,""],FakeData:[44,1,1,""],FashionMNIST:[44,1,1,""],Flickr30k:[44,1,1,""],Flickr8k:[44,1,1,""],HMDB51:[44,1,1,""],ImageFolder:[44,1,1,""],ImageNet:[44,1,1,""],KMNIST:[44,1,1,""],Kinetics400:[44,1,1,""],LSUN:[44,1,1,""],MNIST:[44,1,1,""],PhotoTour:[44,1,1,""],QMNIST:[44,1,1,""],SBDataset:[44,1,1,""],SBU:[44,1,1,""],STL10:[44,1,1,""],SVHN:[44,1,1,""],UCF101:[44,1,1,""],USPS:[44,1,1,""],VOCDetection:[44,1,1,""],VOCSegmentation:[44,1,1,""]},"torchvision.datasets.CIFAR10":{__getitem__:[44,2,1,""]},"torchvision.datasets.Cityscapes":{__getitem__:[44,2,1,""]},"torchvision.datasets.CocoCaptions":{__getitem__:[44,2,1,""]},"torchvision.datasets.CocoDetection":{__getitem__:[44,2,1,""]},"torchvision.datasets.DatasetFolder":{__getitem__:[44,2,1,""]},"torchvision.datasets.Flickr30k":{__getitem__:[44,2,1,""]},"torchvision.datasets.Flickr8k":{__getitem__:[44,2,1,""]},"torchvision.datasets.ImageFolder":{__getitem__:[44,2,1,""]},"torchvision.datasets.LSUN":{__getitem__:[44,2,1,""]},"torchvision.datasets.PhotoTour":{__getitem__:[44,2,1,""]},"torchvision.datasets.SBU":{__getitem__:[44,2,1,""]},"torchvision.datasets.STL10":{__getitem__:[44,2,1,""]},"torchvision.datasets.SVHN":{__getitem__:[44,2,1,""]},"torchvision.datasets.USPS":{__getitem__:[44,2,1,""]},"torchvision.datasets.VOCDetection":{__getitem__:[44,2,1,""]},"torchvision.datasets.VOCSegmentation":{__getitem__:[44,2,1,""]},"torchvision.io":{read_video:[46,4,1,""],read_video_timestamps:[46,4,1,""],write_video:[46,4,1,""]},"torchvision.models":{alexnet:[47,4,1,""],densenet121:[47,4,1,""],densenet161:[47,4,1,""],densenet169:[47,4,1,""],densenet201:[47,4,1,""],googlenet:[47,4,1,""],inception_v3:[47,4,1,""],mnasnet0_5:[47,4,1,""],mnasnet0_75:[47,4,1,""],mnasnet1_0:[47,4,1,""],mnasnet1_3:[47,4,1,""],mobilenet_v2:[47,4,1,""],resnet101:[47,4,1,""],resnet152:[47,4,1,""],resnet18:[47,4,1,""],resnet34:[47,4,1,""],resnet50:[47,4,1,""],resnext101_32x8d:[47,4,1,""],resnext50_32x4d:[47,4,1,""],shufflenet_v2_x0_5:[47,4,1,""],shufflenet_v2_x1_0:[47,4,1,""],shufflenet_v2_x1_5:[47,4,1,""],shufflenet_v2_x2_0:[47,4,1,""],squeezenet1_0:[47,4,1,""],squeezenet1_1:[47,4,1,""],vgg11:[47,4,1,""],vgg11_bn:[47,4,1,""],vgg13:[47,4,1,""],vgg13_bn:[47,4,1,""],vgg16:[47,4,1,""],vgg16_bn:[47,4,1,""],vgg19:[47,4,1,""],vgg19_bn:[47,4,1,""],wide_resnet101_2:[47,4,1,""],wide_resnet50_2:[47,4,1,""]},"torchvision.models.detection":{fasterrcnn_resnet50_fpn:[47,4,1,""],keypointrcnn_resnet50_fpn:[47,4,1,""],maskrcnn_resnet50_fpn:[47,4,1,""]},"torchvision.models.segmentatio
n":{deeplabv3_resnet101:[47,4,1,""],deeplabv3_resnet50:[47,4,1,""],fcn_resnet101:[47,4,1,""],fcn_resnet50:[47,4,1,""]},"torchvision.ops":{RoIAlign:[48,1,1,""],RoIPool:[48,1,1,""],nms:[48,4,1,""],roi_align:[48,4,1,""],roi_pool:[48,4,1,""]},"torchvision.transforms":{CenterCrop:[49,1,1,""],ColorJitter:[49,1,1,""],Compose:[49,1,1,""],FiveCrop:[49,1,1,""],Grayscale:[49,1,1,""],Lambda:[49,1,1,""],LinearTransformation:[49,1,1,""],Normalize:[49,1,1,""],Pad:[49,1,1,""],RandomAffine:[49,1,1,""],RandomApply:[49,1,1,""],RandomChoice:[49,1,1,""],RandomCrop:[49,1,1,""],RandomErasing:[49,1,1,""],RandomGrayscale:[49,1,1,""],RandomHorizontalFlip:[49,1,1,""],RandomOrder:[49,1,1,""],RandomPerspective:[49,1,1,""],RandomResizedCrop:[49,1,1,""],RandomRotation:[49,1,1,""],RandomSizedCrop:[49,1,1,""],RandomVerticalFlip:[49,1,1,""],Resize:[49,1,1,""],Scale:[49,1,1,""],TenCrop:[49,1,1,""],ToPILImage:[49,1,1,""],ToTensor:[49,1,1,""],functional:[49,0,0,"-"]},"torchvision.transforms.Normalize":{__call__:[49,2,1,""]},"torchvision.transforms.ToPILImage":{__call__:[49,2,1,""]},"torchvision.transforms.ToTensor":{__call__:[49,2,1,""]},"torchvision.transforms.functional":{adjust_brightness:[49,4,1,""],adjust_contrast:[49,4,1,""],adjust_gamma:[49,4,1,""],adjust_hue:[49,4,1,""],adjust_saturation:[49,4,1,""],affine:[49,4,1,""],crop:[49,4,1,""],erase:[49,4,1,""],five_crop:[49,4,1,""],hflip:[49,4,1,""],normalize:[49,4,1,""],pad:[49,4,1,""],perspective:[49,4,1,""],resize:[49,4,1,""],resized_crop:[49,4,1,""],rotate:[49,4,1,""],ten_crop:[49,4,1,""],to_grayscale:[49,4,1,""],to_pil_image:[49,4,1,""],to_tensor:[49,4,1,""],vflip:[49,4,1,""]},"torchvision.utils":{make_grid:[50,4,1,""],save_image:[50,4,1,""]},torch:{"var":[43,4,1,""],ByteTensor:[42,1,1,""],FloatStorage:[39,1,1,""],Tensor:[42,1,1,""],__config__:[0,0,0,"-"],abs:[43,4,1,""],acos:[43,4,1,""],add:[43,4,1,""],addbmm:[43,4,1,""],addcdiv:[43,4,1,""],addcmul:[43,4,1,""],addmm:[43,4,1,""],addmv:[43,4,1,""],addr:[43,4,1,""],allclose:[43,4,1,""],arange:[43,4,1,""],argmax:[43,4,1,""],argmin:[43,4,1,""],argsort:[43,4,1,""],as_strided:[43,4,1,""],as_tensor:[43,4,1,""],asin:[43,4,1,""],atan2:[43,4,1,""],atan:[43,4,1,""],autograd:[1,0,0,"-"],baddbmm:[43,4,1,""],bartlett_window:[43,4,1,""],bernoulli:[43,4,1,""],bincount:[43,4,1,""],bitwise_not:[43,4,1,""],blackman_window:[43,4,1,""],bmm:[43,4,1,""],broadcast_tensors:[43,4,1,""],cartesian_prod:[43,4,1,""],cat:[43,4,1,""],ceil:[43,4,1,""],chain_matmul:[43,4,1,""],cholesky:[43,4,1,""],cholesky_inverse:[43,4,1,""],cholesky_solve:[43,4,1,""],chunk:[43,4,1,""],clamp:[43,4,1,""],combinations:[43,4,1,""],compiled_with_cxx11_abi:[43,4,1,""],cos:[43,4,1,""],cosh:[43,4,1,""],cross:[43,4,1,""],cuda:[8,0,0,"-"],cumprod:[43,4,1,""],cumsum:[43,4,1,""],det:[43,4,1,""],diag:[43,4,1,""],diag_embed:[43,4,1,""],diagflat:[43,4,1,""],diagonal:[43,4,1,""],digamma:[43,4,1,""],dist:[43,4,1,""],distributed:[14,0,0,"-"],distributions:[15,0,0,"-"],div:[43,4,1,""],dot:[43,4,1,""],eig:[43,4,1,""],einsum:[43,4,1,""],empty:[43,4,1,""],empty_like:[43,4,1,""],empty_strided:[43,4,1,""],eq:[43,4,1,""],equal:[43,4,1,""],erf:[43,4,1,""],erfc:[43,4,1,""],erfinv:[43,4,1,""],exp:[43,4,1,""],expm1:[43,4,1,""],eye:[43,4,1,""],fft:[43,4,1,""],flatten:[43,4,1,""],flip:[43,4,1,""],floor:[43,4,1,""],fmod:[43,4,1,""],frac:[43,4,1,""],from_numpy:[43,4,1,""],full:[43,4,1,""],full_like:[43,4,1,""],gather:[43,4,1,""],ge:[43,4,1,""],gels:[43,4,1,""],geqrf:[43,4,1,""],ger:[43,4,1,""],get_default_dtype:[43,4,1,""],get_num_interop_threads:[43,4,1,""],get_num_threads:[43,4,1,""],get_rng_state:[4
3,4,1,""],gt:[43,4,1,""],hamming_window:[43,4,1,""],hann_window:[43,4,1,""],histc:[43,4,1,""],hub:[17,0,0,"-"],ifft:[43,4,1,""],index_select:[43,4,1,""],initial_seed:[43,4,1,""],inverse:[43,4,1,""],irfft:[43,4,1,""],is_floating_point:[43,4,1,""],is_storage:[43,4,1,""],is_tensor:[43,4,1,""],isfinite:[43,4,1,""],isinf:[43,4,1,""],isnan:[43,4,1,""],jit:[19,0,0,"-"],kthvalue:[43,4,1,""],le:[43,4,1,""],lerp:[43,4,1,""],linspace:[43,4,1,""],load:[43,4,1,""],log10:[43,4,1,""],log1p:[43,4,1,""],log2:[43,4,1,""],log:[43,4,1,""],logdet:[43,4,1,""],logspace:[43,4,1,""],logsumexp:[43,4,1,""],lstsq:[43,4,1,""],lt:[43,4,1,""],lu:[43,4,1,""],lu_solve:[43,4,1,""],lu_unpack:[43,4,1,""],manual_seed:[43,4,1,""],masked_select:[43,4,1,""],matmul:[43,4,1,""],matrix_power:[43,4,1,""],matrix_rank:[43,4,1,""],max:[43,4,1,""],mean:[43,4,1,""],median:[43,4,1,""],meshgrid:[43,4,1,""],min:[43,4,1,""],mm:[43,4,1,""],mode:[43,4,1,""],mul:[43,4,1,""],multinomial:[43,4,1,""],multiprocessing:[21,0,0,"-"],mv:[43,4,1,""],mvlgamma:[43,4,1,""],narrow:[43,4,1,""],ne:[43,4,1,""],neg:[43,4,1,""],nn:[22,0,0,"-"],nonzero:[43,4,1,""],norm:[43,4,1,""],normal:[43,4,1,""],numel:[43,4,1,""],ones:[43,4,1,""],ones_like:[43,4,1,""],onnx:[36,0,0,"-"],optim:[37,0,0,"-"],orgqr:[43,4,1,""],ormqr:[43,4,1,""],pinverse:[43,4,1,""],pow:[43,4,1,""],prod:[43,4,1,""],qr:[43,4,1,""],rand:[43,4,1,""],rand_like:[43,4,1,""],randint:[43,4,1,""],randint_like:[43,4,1,""],randn:[43,4,1,""],randn_like:[43,4,1,""],randperm:[43,4,1,""],range:[43,4,1,""],reciprocal:[43,4,1,""],remainder:[43,4,1,""],renorm:[43,4,1,""],repeat_interleave:[43,4,1,""],reshape:[43,4,1,""],rfft:[43,4,1,""],roll:[43,4,1,""],rot90:[43,4,1,""],round:[43,4,1,""],rsqrt:[43,4,1,""],save:[43,4,1,""],seed:[43,4,1,""],set_default_dtype:[43,4,1,""],set_default_tensor_type:[43,4,1,""],set_flush_denormal:[43,4,1,""],set_num_interop_threads:[43,4,1,""],set_num_threads:[43,4,1,""],set_printoptions:[43,4,1,""],set_rng_state:[43,4,1,""],sigmoid:[43,4,1,""],sign:[43,4,1,""],sin:[43,4,1,""],sinh:[43,4,1,""],slogdet:[43,4,1,""],solve:[43,4,1,""],sort:[43,4,1,""],sparse_coo_tensor:[43,4,1,""],split:[43,4,1,""],sqrt:[43,4,1,""],squeeze:[43,4,1,""],stack:[43,4,1,""],std:[43,4,1,""],std_mean:[43,4,1,""],stft:[43,4,1,""],sum:[43,4,1,""],svd:[43,4,1,""],symeig:[43,4,1,""],t:[43,4,1,""],take:[43,4,1,""],tan:[43,4,1,""],tanh:[43,4,1,""],tensor:[43,4,1,""],tensordot:[43,4,1,""],topk:[43,4,1,""],trace:[43,4,1,""],transpose:[43,4,1,""],trapz:[43,4,1,""],triangular_solve:[43,4,1,""],tril:[43,4,1,""],tril_indices:[43,4,1,""],triu:[43,4,1,""],triu_indices:[43,4,1,""],trunc:[43,4,1,""],unbind:[43,4,1,""],unique:[43,4,1,""],unique_consecutive:[43,4,1,""],unsqueeze:[43,4,1,""],var_mean:[43,4,1,""],where:[43,4,1,""],zeros:[43,4,1,""],zeros_like:[43,4,1,""]},torchvision:{get_image_backend:[45,4,1,""],set_image_backend:[45,4,1,""]}},objnames:{"0":["py","module","Python module"],"1":["py","class","Python class"],"2":["py","method","Python method"],"3":["py","attribute","Python attribute"],"4":["py","function","Python function"],"5":["std","envvar","environment 
variable"]},objtypes:{"0":"py:module","1":"py:class","2":"py:method","3":"py:attribute","4":"py:function","5":"std:envvar"},terms:{"00000e":43,"0000e":[42,43],"041m":1,"048m":1,"0545e":42,"0705e":43,"0949e":42,"10k":44,"10x7":22,"1239e":43,"13x12":22,"1428e":43,"148m":1,"1921e":43,"1_batch_16":41,"1e6":37,"1hr":4,"1st":[15,26],"1x1":47,"20l":22,"224x224":47,"2gb":17,"2nd":[15,22,23,26,42],"2x3":[22,38],"32x4d":47,"32x8d":47,"3493e":43,"3842e":43,"3rd":[26,37,42],"3x4":22,"3xhxw":41,"4064e":43,"427l":44,"483m":1,"4842e":[42,43],"4th":[26,44],"4us":1,"50k":44,"50x":47,"54_":41,"5751e":43,"5765e":42,"5955e":43,"5c106cde":[17,20],"5mb":47,"5x2":38,"5x7":22,"5x7x9":22,"60k":44,"640l":44,"6503e":43,"6531e":43,"727m":1,"7x7":22,"7x7x7":22,"7x9x8":22,"8000e":43,"816u":1,"8182e":42,"88131e":43,"9073e":[22,43],"9683e":43,"abstract":[13,15],"boolean":[1,8,15,19,22,23,29,37,40,42,43,44,49],"break":[4,15,19,34,37,43],"byte":[8,15,19,37,39,42,43],"case":[1,2,8,13,14,17,21,22,23,25,26,27,28,30,32,34,36,37,38,41,42,43,44,49,51],"catch":19,"char":[39,42],"ciss\u00e9":22,"class":[1,8,13,14,15,19,21,22,23,29,30,32,33,34,36,37,38,39,40,41,42,43,44,47,48,49,51],"const":[31,36],"default":[1,3,7,8,14,17,20,21,22,23,24,25,27,28,29,30,35,36,37,38,39,41,42,43,44,47,48,49,50,51],"enum":[14,36],"export":[1,8,14,16,19,22,29,36],"final":[14,15,22,43,47,49,50],"float":[1,13,15,19,21,22,23,30,33,36,37,39,40,41,42,43,46,48,49,50,51],"function":[3,4,7,8,13,17,18,20,21,24,25,26,27,28,29,30,31,33,37,40,41,42,43,44,45,46],"herv\u00e9":22,"import":[1,4,5,7,8,13,14,19,21,22,25,28,29,30,31,32,33,36,37,41,42,43,44,47,49],"int":[8,13,14,15,19,21,22,23,35,36,37,38,39,40,41,42,43,44,46,48,49,50,51],"j\u00e9gou":22,"long":[4,5,13,21,22,23,26,29,30,32,36,39,40,41,42,43],"new":[1,3,5,8,13,14,15,17,19,21,22,25,28,29,31,32,35,37,39,41,42,43],"return":[0,1,3,7,8,13,14,15,16,17,19,20,21,22,23,24,27,28,29,31,35,36,37,38,39,40,42,43,44,46,47,48,49,51],"short":[19,22,23,26,39,40,42,43,49],"static":[1,19,31,36,39],"super":[13,19,22,29,36],"switch":[9,10,13,21,23,25,42,43,47],"throw":[22,42,43],"true":[1,3,7,8,11,13,14,15,17,19,20,21,22,23,25,26,28,29,30,31,33,36,37,38,39,40,41,42,43,44,47,49,50],"try":[2,4,11,14,17,19,22,23,30,32,36,37],"var":[1,22,42,43],"void":[31,43],"while":[5,13,14,15,19,22,23,25,30,32,37,41,42,43,49],Abs:36,And:[22,35,43,49],But:[1,4,19],For:[1,2,3,4,5,7,8,13,14,15,17,19,22,23,25,26,27,28,30,32,36,37,38,39,40,41,42,43,44,47,49],Going:47,Has:[22,23,43],Its:[22,37],NFS:14,NMS:48,NOT:[19,36,38,43],Not:[19,29],One:[14,19,22,23,26,27,31,33,36,37,41,43,44,47],Ops:[2,28,42],PRs:[4,5],RHS:43,Such:[7,13,43],That:[43,49],The:[1,3,5,7,8,13,14,15,16,17,19,20,21,22,23,24,26,27,28,30,31,32,34,35,36,37,39,40,41,42,43,44,45,46,47,49,50,51],Then:[1,26,34,36,37,49],There:[1,4,5,14,17,19,22,25,28,29,30,31,32,33,34,35,36,42,43],These:[7,13,14,15,19,22,29,36,38,40,43,44,47],Use:[8,13,14,22,23,32,41,42,43,49],Used:[13,43],Useful:[8,22],Uses:8,Using:[13,15,19,22,32],WITH:36,Will:[6,14,19,43,49],With:[13,15,22,23,28,36,37,41],__background__:47,__call__:49,__config__:[18,27],__constants__:19,__dict__:37,__file__:[17,35],__getitem__:[13,44],__init__:[1,13,15,19,22,29,30,36,41],__iter__:13,__len__:[13,44],__main__:[13,26,32,35],__name__:[13,32,35],__new__:19,_back:[22,23],_bottom:[22,23],_call:15,_cat:15,_channel:[22,23],_class:22,_compilation_unit:19,_cpp_modul:19,_dependentproperti:15,_dim:22,_direct:22,_ext:35,_extra_fil:[19,31],_factor:22,_featur:[22,23],_fft:43,_force_outplac:19,_fork:27,_formatt:43,_forward_cl:1,_frames_up:19,_front:[22,23],_
glibcxx_use_cxx11_abi:43,_greaterthan:15,_greaterthaneq:15,_halfopeninterv:15,_if_scalar_type_a:36,_in:24,_index:22,_indic:[38,43],_instanc:15,_integerinterv:15,_interv:15,_invers:15,_key_padding_mask:22,_layer:22,_left:[22,23],_length:[22,43],_lessthan:15,_like:42,_load_from_state_dict:22,_log_api_usage_onc:31,_mask:22,_metadata:22,_module_class:19,_nnz:38,_onnx_master_opset:36,_onnx_stable_opset:36,_out:24,_pad:22,_qualified_nam:19,_random_sampl:22,_rcb:19,_resnet18:17,_retain_param_nam:36,_right:[22,23],_sampl:43,_scalar:36,_shape:22,_size:22,_slope:[22,23,24],_stack:15,_stacklevel:23,_tensor:42,_top:[22,23],_valu:[22,38,43],_wait:27,_weight:22,a3c:32,a_big:43,a_dict:19,a_i:22,a_l:43,a_lu:43,a_tupl:19,a_u:43,aaa:41,abc:22,abi:7,abil:[5,31],abl:[4,19,22,36,43],abnorm:[21,32],about:[1,5,8,13,19,22,27,29,30,31,32,36,42,49],abov:[1,15,17,19,22,26,27,28,29,36,37,43,44,49],abridg:30,abruptli:21,abs:[15,22,23,36,37,42,43],abs_:42,absolut:[1,5,7,22,23,42,43,49],abstransform:15,acc:47,acceler:[4,22,37],accept:[1,4,5,14,19,22,29,36,37,40,41,42,43],access:[5,13,14,21,22,25,28,30,31,40,42,51],accident:4,accimag:45,accommod:22,accompani:4,accomplish:4,accord:[22,24,35,36,37,43,44,47,48],accordingli:[42,44,47],accoridng:22,account:[2,22],accumul:[1,19,22,30,42,43],accumulategrad:1,accur:[8,36,43],accuraci:[41,47],achiev:[13,15,22,23,31,36],aco:[36,42,43],acos_:42,acquaint:4,across:[1,8,13,14,19,22,23,28,30,31,32,33,39,41,42,43,44],act:[15,22,49],action:[5,15,28,44],activ:[1,3,4,8,21,28,29],actual:[1,13,17,19,22,25,27,28,29,32,35,36,37],actual_input_1:36,acycl:25,adadelta:37,adagrad:[22,37],adam:[5,6,15,37],adamax:37,adamw:37,adapt:[19,22,23,37,43,48],adaptive_avg_pool1d:36,adaptive_avg_pool2d:36,adaptive_avg_pool3d:36,adaptive_max_pool1d:36,adaptive_max_pool2d:36,adaptive_max_pool3d:36,adaptiveavgpool1d:23,adaptiveavgpool2d:23,adaptiveavgpool3d:23,adaptivelogsoftmaxwithloss:19,adaptivemaxpool1d:23,adaptivemaxpool2d:23,adaptivemaxpool3d:23,add:[1,4,8,17,19,22,23,26,29,31,36,37,38,41,42,43,47],add_:[1,26,38,42],add_argu:28,add_audio:41,add_bias_kv:22,add_custom_scalar:41,add_embed:41,add_figur:41,add_graph:41,add_histogram:41,add_imag:41,add_mesh:41,add_modul:22,add_param_group:37,add_pr_curv:41,add_scalar:41,add_text:41,add_video:41,add_zero_attn:22,addbmm:[42,43],addbmm_:42,addcdiv:[42,43],addcdiv_:42,addcmul:[42,43],addcmul_:42,added:[4,7,22,23,31,36,37,38,41,42,43],adding:[13,14,17,19,22,29,36,42,43],addit:[1,4,5,7,15,19,22,23,27,28,29,31,32,33,35,37,38,42,43],addition:[1,13,14,15,22,30,42,43,49],additionali:22,addmm:[36,38,42,43],addmm_:42,addmv:[42,43],addmv_:42,addr:[42,43],addr_:42,address:[1,13,14,21,42,47],adher:5,adjac:[22,43],adjust:[22,49],adjust_bright:49,adjust_contrast:49,adjust_gamma:49,adjust_hu:49,adjust_satur:49,admit:28,adopt:5,advanc:[3,22,25,32,36,41],advantag:[14,22,30],adventur:36,adversari:22,advic:4,advis:[32,43],advisori:4,aeroplan:47,affect:[1,4,8,22,23,39,43],affin:[15,22,23,25,42,49],affinetransform:15,aforement:32,afram:46,after:[4,7,8,13,14,17,19,21,22,24,28,30,31,32,34,37,40,41,42,43,48,49],afterward:[1,22],again:[3,13,14,43,44],against:[1,2,14,19,43,49],aggreg:[22,23,47],aggress:[1,25],ahead:4,aid:[4,25],aidan:22,ail:6,ailzhang:6,aim:4,airplan:47,aka:1,akin:31,alban:6,alband:6,alex:6,alfredo:6,algorithm:[4,11,12,15,22,23,43],alia:[15,42],alias:29,alican:6,alicanb:6,align:[22,23,48],align_corn:[22,23],aliv:[30,32],all:[1,3,4,5,7,8,13,14,15,17,19,21,22,23,25,27,28,29,30,32,33,35,36,37,38,39,40,41,42,43,44,47,48,49,50],all_gath:14,all_gather_multigpu:14,all_reduc:14,all_r
educe_multigpu:14,allclos:[1,42,43],alloc:[1,2,8,15,21,25,28,30,40,42,43],allow:[1,4,5,7,13,14,15,17,19,22,25,26,27,28,31,32,36,37,40,41,42,43,44],allow_unreach:1,allow_unus:1,almost:[35,43,44],alon:19,along:[7,8,13,14,15,17,19,22,23,26,30,31,37,42,43],alpha:[15,22,23,36,37,38,42,43],alpha_f:36,alphabet:[23,43,44],alphadropout:23,alreadi:[8,13,14,17,19,20,22,29,32,36,37,39,42,43,44],also:[1,3,4,5,7,8,13,14,15,17,19,21,22,23,24,25,27,28,29,30,31,32,33,35,36,37,38,41,42,43,44,49],altern:[13,17,19,22,23,35,43],although:[4,15,22],alwai:[1,8,13,14,19,21,22,26,27,28,29,31,36,38,40,42,43],amazonaw:[20,35],ambigu:[15,22],among:[8,13,14,15,22,36,43],amount:[1,2,4,8,22,25,28,30,49,50],amplitud:37,amsgrad:37,an_error:19,anaconda:35,analog:[37,43],analogu:19,analyt:[1,15],anchor:[22,23],angl:[22,49],ani:[1,2,3,4,5,8,13,14,15,19,21,22,23,25,27,28,29,31,32,33,36,37,41,42,43,49],anm:43,ann_fil:44,anneal:37,annfil:44,annot:[1,19,44],annotation_path:44,anoth:[4,8,13,14,19,22,27,28,29,32,35,36,42,43,48],another_input:22,anothermodel:36,answer:[4,5,22],anticip:3,anymor:[1,14,22,42],anyon:5,anyth:[3,4,14,19],aoa:35,apaszk:[5,6],api:[1,5,8,17,19,21,28,36,38,41,42,44],aplli:49,appear:[2,14,15,22,29,37,43],append:[1,14,19,22,32,35,41,42,43],appl:47,appli:[1,3,14,15,19,21,22,23,25,29,36,37,42,43,49],applic:[8,14,15,22,25,27,28,42,49],apply_:42,apprear:43,appreci:4,approach:[14,19,21,22,43],appropri:[4,14,15,19,22,43,47],approv:5,approxim:[1,22,23,29,37],arang:[13,22,23,36,41,42,43],arbitrari:[1,14,19,22,23,25,31,42,43],arccosin:43,architechtur:22,architectur:[22,43,45,47],archiv:[19,31],arcsin:43,arctang:43,area:[4,5,23,49],arg0:1,arg1:1,arg:[1,2,3,7,14,15,17,21,22,23,28,32,34,36,39,42,43,44,49],arg_constraint:15,argmax:[22,36,42,43],argmin:[36,42,43],argpars:28,argsort:[42,43],argument:[1,2,3,7,8,13,14,15,17,19,21,22,23,26,28,29,30,31,36,37,39,40,41,42,43,44,50,51],argumentpars:28,ari:36,aris:15,arithmet:43,armand:22,around:[1,4,5,8,14,19,21,28,42,49],arrai:[13,22,23,36,39,41,42,43,44],arrang:44,array_lik:[42,43],art:43,articul:5,artifact:31,artifici:1,arxiv:[22,47,49],as_strid:[42,43],as_tensor:[41,42,43],as_tupl:43,asap:21,ascend:43,ascent:15,ascii:[8,43],asd932_:44,asgd:37,ashish:22,asin:[36,42,43],asin_:42,ask:[4,14,18],aspect:[4,49],assembl:13,assert:[13,15,36],assert_allclos:19,assign:[4,13,14,19,22,29,30,36,41,44],assign_x:19,associ:[1,8,19,22,23,40,42,43],assum:[13,14,15,19,22,23,29,31,36,37,43,49],assumpt:[22,49],ast_1:[22,23],ast_2:[22,23],astyp:36,asuhan:6,async:[14,39,42],async_op:14,asynchron:[2,22,27,39,41,42],atan2:[42,43],atan2_:42,atan:[36,42,43],atan_:42,aten:[19,27,35,43],aten_thread:27,atol:[1,19,29,42,43],atom:33,atomicadd:33,attach:17,attempt:[19,28,35,43],attend:22,attent:[4,22,35],attn:22,attn_mask:22,attn_output:22,attn_output_weight:22,attr1:36,attr1_f:36,attr2:36,attr2_i:36,attr:[15,22,23,36,43],attribut:[1,13,14,18,22,25,28,29,36,42,51],audio:[41,44,46],audio_fp:46,aug_add_x:19,augment:49,auto:[14,22,41],autoencod:15,autograd:[2,3,4,15,18,22,23,30,31,36,42,43],autograd_tensor:1,autom:[19,36],automat:[7,8,14,22,25,26,28,29,32,36,41,42,43],aux_logit:47,aux_loss:47,auxiliari:[17,31,47],avaialbl:17,avail:[7,8,13,14,17,19,21,22,23,28,29,35,36,43,44],averag:[1,14,22,23,37],avg:[1,49],avg_pool1d:36,avg_pool2d:36,avg_pool3d:36,avgpool1d:23,avgpool2d:23,avgpool3d:23,avmgithub:6,avoid:[5,13,15,22,23,30,33,41,42,43,49],awai:23,awar:[4,47],axbc:22,axes:36,axi:[36,42,43,49],b_hf:22,b_hg:22,b_hh:22,b_hi:22,b_hn:22,b_ho:22,b_hr:22,b_hz:22,b_if:22,b_ig:22,b_ih:22,b_ii:22,b_in:22,b_io:22,b_ir:22,b_iz:2
2,back:[17,32,36,38,43,49],backbon:47,backcompat:26,backend:[1,9,10,11,19,22,23,27,28,33,36,42,43,45],backend_str:14,background:[9,10,11,22,23,32,42,43,44],backpack:47,backprop:43,backpropag:[1,15,30,37],backward:[1,3,5,10,15,22,23,24,29,30,32,33,37,38,42,43],bad:21,baddbmm:[42,43],baddbmm_:42,bag:[22,23],bai:6,balanc:[43,44],ball:47,balnta:22,banana:47,bar:[4,19,20,47],bare:7,barrier:14,bartlett:43,bartlett_window:43,base:[1,4,5,7,8,13,15,19,22,25,27,36,37,41,42,43,44,49],base_distribut:15,base_lr:37,base_momentum:37,base_se:13,basebal:47,basedistribut:15,basep:15,baseq:15,basi:[15,31,37],basic:[4,22,37,41],bat:47,batch1:[42,43],batch2:[42,43],batch:[15,22,23,28,30,32,33,36,37,41,43,44,47,48,49,50],batch_first:[22,30],batch_ndx:13,batch_sampl:13,batch_shap:15,batch_siz:[13,15,22,41,44],batchmean:[22,23],batchnorm1d:23,batchnorm2d:23,batchnorm3d:23,batchnorm:[22,36],batchnormnd:22,batchsampl:13,batchsiz:[22,23],batchwis:22,bbb:41,bceloss:23,bcewithlogitsloss:23,bckenstler:37,bddppq:6,beam:19,bear:47,becaus:[1,2,4,13,15,19,21,22,26,28,30,31,35,36,41,42,43],becom:[1,4,5,13,15,22,23,36,42,43],bed:47,bedroom_train:44,been:[1,8,14,15,21,22,27,32,35,37,41,43,47,48],befor:[1,4,8,13,14,15,19,21,22,23,25,27,28,29,31,35,36,37,38,41,42,43],beforehand:4,begin:[4,8,22,31,36,37,42,43],behav:[7,19,42],behavior:[4,7,14,17,19,22,23,26,28,36,37,42,43,47],behaviour:[1,9,10,23,42,43],behind:44,being:[1,5,13,15,19,22,23,29,32,36,42,43,49],belong:[3,8,14,15,28,37,49],below:[1,7,13,14,15,19,21,22,23,28,29,32,35,36,43,49],ben:22,bench:47,benchmark:33,benefit:[4,14,21,37],bengio:24,bernoulli:[22,23,42,43],bernoulli_:[42,43],besid:41,bessel:43,best:[1,4,13,14,18,19,21,30,37,43],beta:[22,23,36,37,38,42,43],better:[4,5,8,13,19,22,23,27,35,41,43],between:[1,4,8,14,15,19,21,22,23,28,32,33,37,39,41,42,43,44,47,49],bewar:4,beyond:[5,30,37,43],bfg:37,bfloat16:[39,42],bia:[5,22,23,29,41],bias:[22,43],bias_hh:22,bias_hh_l:22,bias_ih:22,bias_ih_l:22,bicub:[22,23,49],bicycl:47,bidirect:[22,36],big:[4,43],bij:43,biject:15,biject_to:15,bik:43,bilinear:[43,49],bin:[41,42,43,48],binari:[15,19,22,23,31,35,36,41,42,43,44,47],bincount:[33,42,43],bind:[7,8,36],bird:47,bit:[4,35,40,42,43,51],bitwis:[14,43],bitwise_not:[42,43],bitwise_not_:42,bjk:43,bl_flip:49,bla:27,black:49,blackman:43,blackman_window:43,blank:[22,23],blob:[31,36,41],blobnam:41,block0:[19,36],block1:19,block:[4,8,13,14,19,21,22,23,36,47],blog:4,blow:30,blue:44,bmm:[42,43],board:5,boat:47,bodi:19,boil:4,book:47,bool:[1,3,8,13,14,15,17,19,20,21,22,23,36,37,39,40,41,42,43,44,47,49,50],booltensor:[40,42],bootcamp:4,bootstrap:35,border:[23,49],both:[1,8,13,14,15,19,22,23,26,29,33,36,38,42,43,44,46,47,49],bottl:47,bottleneck:[18,47],bottom:[1,23,49],bound:[2,22,23,24,34,37,42,43],boundari:[22,23,37,44],bowl:47,box:[47,48],bozkurt:6,bptt:30,br_flip:49,branch:[4,17,19,47],brand:14,bregman:15,breviti:[1,36],brief:21,bright:[44,49],brightness_factor:49,broadcast:[8,14,15,18,22,36,42,43],broadcast_buff:22,broadcast_coalesc:8,broadcast_multigpu:14,broadcast_tensor:43,broadcast_warn:26,broader:[5,31,43],broccoli:47,broken:4,brokenpipeerror:35,brown:44,bucket:22,bucket_cap_mb:22,buf:22,buffer:[1,2,8,13,19,22,25,29,43],buffer_s:8,bug:[5,32],bugfix:4,build:[7,14,15,19,23,25,41,49],build_directori:7,build_ext:7,buildextens:7,built:[4,14,19,27,32,37,43],builtin:43,bulk:13,bump:22,bundl:31,bus:47,byclass:44,bymerg:44,bypass:28,byte_arrai:43,bytecod:13,bytesio:[19,43],bytetensor:[8,22,40,42,43],bz2:44,c10:31,c10_log_api_usage_onc:31,c99:35,c_0:22,c_1:22,c_j:22,c_n:22,c_t:22,cach:[8,15,20
,21,22,30,43,47],cache_s:15,caffe2:[36,41],cake:47,calcul:[1,3,13,22,23,26,35,37,43],calculate_gain:24,call:[1,7,8,13,14,15,17,21,22,23,27,28,29,30,31,32,35,36,37,40,41,42,43,47,50,51],callabl:[13,15,17,19,37,42,43,44],callback:31,caller:28,camera:[31,41],can:[1,2,3,4,5,7,8,11,12,13,14,15,16,17,19,21,22,23,25,26,27,28,29,30,31,32,33,34,35,36,37,38,40,41,42,43,44,46,47,49,51],candid:1,cannot:[1,13,15,17,19,22,23,35,38,39,42,43],cap:44,capabl:[8,14,31,43],capac:28,capacit:15,captur:[8,19,36],car:47,card:35,cardin:15,care:[4,7,15,21,22,28,30,32,38,43],carlo:15,carri:26,carrier:15,carrot:47,cartesian:[15,43],cartesian_prod:43,cast:[1,22,23,36,39,42,43],cat:[15,19,22,36,38,42,43,44,47],categor:[4,23],categori:[15,43,44,47],categorynam:41,cattransform:15,cauchi:[42,43],cauchy_:[42,43],caught:21,caus:[1,3,13,14,19,21,23,26,30,32,35,36,42,43],caveat:[21,28],ccc:41,cdf:15,cdot:[22,23,43],ceil:[13,22,23,36,42,43,48],ceil_:42,ceil_mod:[22,23],cell:[22,47],center:[23,37,41,42,43,49],center_flip:49,centercrop:49,central:[31,49],cerr:31,certain:[13,14,19,22,23,26,31,38,43],certainli:43,chain:[1,13,15,22,25,42,43,49],chain_matmul:43,chaindataset:13,chair:47,challeng:4,chanan:[5,6],chanc:[4,15],chang:[1,4,8,15,19,21,22,23,25,26,28,35,36,37,38,39,41,42,43,47,49],channel:[5,13,22,23,24,36,41,44,46,47,49],charact:[23,43],chart:41,chartensor:[40,42],chartnam:41,cheap:[15,22],cheaper:13,check:[2,7,8,13,14,15,17,22,29,30,31,36,37,41,42,43,44],check_compiler_abi_compat:7,check_input:19,check_model:36,check_reduct:22,check_sparse_nnz:1,check_toler:19,check_trac:19,checker:[19,36],checkout:36,checkpoint:[1,17,18,20,22,43],checkpoint_sequenti:3,child:[13,21,22,35],children:[21,22],chintala:[5,6],choic:[19,22,27,36],choleski:[15,42,43],cholesky_invers:[42,43],cholesky_solv:[42,43],choos:[1,24,41],chosen:[43,49],christian:6,chrome:1,chunk:[3,8,13,19,22,42,43],chunk_siz:8,church_train:44,chw:41,cifar100:44,cifar10:44,cifar:45,circleci:4,circular:23,circumst:[11,19,22,23],cityscap:45,claim:4,clamp:[23,36,42,43],clamp_:42,clamp_max:36,clamp_min:36,class_i:44,class_index:[13,44],class_x:44,classif:[22,23,24,44,45],classifi:[25,36,37,41],classmethod:[8,22],clean:[8,14,17,21],cleaner:25,clear:[17,22,28,37],click:43,clip:[22,44],clip_valu:22,clock:47,clockwis:49,clone:[1,13,21,23,38,39,42,43],cloned_coeffici:43,close:[8,29,41],closest:[23,43],cloud:41,clp:44,clr:[37,43],cluster:[22,41],clutter:41,cmake:35,cmake_gener:35,cmake_include_path:35,cmdclass:7,cmyk:49,cnn:[22,25,48],coalesc:[8,38,42],coars:44,coco:[45,47],coco_instance_category_nam:47,coco_person_keypoint_nam:47,coco_util:47,cococapt:44,cocodetect:44,code:[1,2,5,7,13,14,15,17,22,26,29,30,31,32,33,35,36,37,38,40,42,43],codebas:5,codec:43,codomain:15,coeffici:[37,43],cohes:5,col2im:22,col:[43,44],colesburi:[5,6],collat:13,collate_wrapp:13,collect:[1,4,13,37,41,43,44],color:[22,41,44,49],colorjitt:49,colors_tensor:41,column:[1,22,23,24,42,43,48,49],com:[4,5,20,35,36],combin:[13,19,22,23,28,36,43],combinations_with_replac:43,come:[4,13,22,31,44],comm:8,comma:[14,43],command:[1,2,35],comment:[4,19,29,41],commit:[4,5,17,33],committ:5,common:[13,22,28,30,32,43,44,45,49],commonli:[14,15,37,40],commun:[4,5,18],compani:5,compar:[1,3,13,19,22,29,35,41,43],comparison:29,compat:[7,13,15,19,21,39,42,43,44],compil:[7,19,27,31,35],compilationunit:19,compiled_with_cxx11_abi:43,complementari:[43,49],complet:[4,8,14,21,25,33,43,49],complex:[4,22,32,43,49],complic:[2,26],compon:[4,14,15,22,31,43],compos:[15,19,22,23,36,41,43,49],composetransform:15,composit:[15,19],compris:3,comput:[3,
4,8,13,14,15,19,22,23,25,27,28,29,30,33,36,37,38,42,45,47,48,49,50],compute_uv:[42,43],compute_z:27,concat:[22,36],concatdataset:13,concaten:[7,8,13,22,23,43],concentr:15,concentrarion:15,concentration0:15,concentration1:15,concept:[4,36,40],conceptu:[1,25],concern:[13,21],concret:[15,19,22,23,32],concurr:[27,28],cond:36,conda:[35,36,43],condit:[1,12,19,22,29,36,41,42,43],condition:1,conduct:[5,22],confer:5,confid:[4,41],config:35,config_dict:41,configur:[0,4,13,14,22,28,35,41,43,47],confirm:[4,19,36],conform:22,conjug:[37,43],conjunct:[13,23],connect:[14,21,22,25,47],connectionist:[22,23],conquer:43,consecut:[14,42,43],consensu:4,consid:[17,19,22,23,26,29,30,37,42,43,44],consider:[4,22],consist:[13,19,36,37,43,45],consol:41,constant:[13,22,23,29,36,37,43,49],constant_:24,constantpad2d:23,constantpadnd:36,constrain:[15,22],constraint:22,constraint_registri:15,constraintregistri:15,construct:[1,13,15,22,25,32,38,40,41,42,43,47],construct_transform:15,constructor:[7,13,22,28,38,42,47,51],consum:[13,16,21,32,36,41],consumpt:[1,41],contain:[1,3,7,8,13,14,15,19,23,25,29,30,31,36,37,39,40,41,42,43,44,46,47,48,49],content:[4,19,20,21,37,41,42,43],contenti:5,context:[1,8,21,22,28,29,31,32,36,43],contigu:[22,23,39,42,43],continu:[13,15,19,22,36,42,43],continuum:35,contract:43,contrail:44,contrain:43,contrari:[4,27],contrast:[15,37,47,49],contrast_factor:49,contribut:[1,5,18,22,23,43],contributor:[4,5],control:[13,19,22,25,27,28,32,36,43,49],conv1:[19,22,41],conv2:[19,22],conv2d:[19,36,41],conv4:22,conv5:22,conv:[19,22,24,36],conveni:[4,7,17,19,28,29,31],convent:[1,20,22,36,42,43],converg:37,convers:[4,25,36,42,45],convert:[1,13,19,22,29,36,41,42,43,49],convert_sync_batchnorm:22,convolut:[24,27],convolv:[22,23],convtranspos:22,convtranspose1d:23,convtranspose2d:23,convtranspose3d:23,coo:[38,40,42,43],cooldown:37,coordin:[4,15,38,41,42,43,48,49],cope:32,copi:[4,8,13,14,19,21,22,26,28,32,39,42,43],copy_:[1,19,22,28,39,42],core:[4,19,27,36],corner:[22,23,49],corpor:[4,5],correct:[2,4,14,15,19,22,39,42,43,49],correctli:[3,14,19,22,23,32,36],correl:[1,15,22],correspond:[1,4,8,13,15,17,19,22,23,29,31,36,37,39,41,42,43,48,49],corrupt:[22,32,44],cos:[22,36,37,41,42,43],cos_:42,cosh:[42,43],cosh_:42,cosin:[22,23,37,43],cosineannealinglr:37,cosineembeddingloss:23,cost:[1,2,11,22,23,43],couch:47,could:[2,4,8,13,15,21,35,43],couldn:[35,36],count:[1,8,15,43],count_include_pad:[22,23],counter:[1,8,21,22,25,49],counterpart:43,coupl:[31,33],cours:[2,17,37],courtesi:15,cov_diag:15,cov_factor:15,covari:[15,22,49],covariance_matrix:15,cover:[29,31,44],coverag:4,cow:47,cpp:[4,5,7,43],cpp_extens:[18,29],cpp_sourc:7,cppdoc:4,cppextens:7,cprofil:2,cpu:[1,2,8,14,18,19,21,22,23,28,32,33,35,36,39,40,42,43],cpu_model:19,cpu_tim:1,cpu_time_tot:1,cpuhrsch:6,crack:4,crash:[21,41],crcv:44,creat:[1,3,4,7,8,13,14,15,21,22,25,28,32,36,39,41,42,43,44,51],create_extens:35,create_graph:[1,42],creation:[1,8,13,19,21,22,28,42,44],creator:25,crelu:22,criterion:[22,23,30],critic:22,crop:[47,48,49],cross:[4,15,22,23,28,35,42,43],crossentropyloss:23,crossmaplrn2d:19,csrc:[35,36],ctc_loss:[22,33],ctcloss:23,ctx:[1,29],cube:[22,43],cubla:8,cublashandle_t:8,cuda0:[28,42],cuda100:35,cuda101:35,cuda1:40,cuda2:28,cuda80:35,cuda90:35,cuda92:35,cuda:[1,2,3,7,9,10,11,13,14,18,19,22,23,29,33,36,37,39,40,42,43,47],cuda_extens:7,cuda_hom:7,cuda_launch_block:28,cuda_prefix:35,cuda_runtim:7,cuda_sourc:7,cuda_tim:1,cuda_time_tot:1,cuda_visible_devic:[8,28],cudaev:1,cudaeventsynchron:8,cudaextens:7,cudart:[7,35],cudastreamsynchron:8,cudastreamwaitev:8,cudnn:
[11,12,22,23,47],cufft:43,cufft_plan_cach:28,cuh:7,cultur:5,cumprod:[42,43],cumsum:[42,43],cumul:[15,22,23,43],cup:47,cur:37,curl:35,current:[1,3,5,7,8,13,14,19,21,22,23,28,31,33,35,36,37,38,39,40,41,42,43,44,46,48],current_blas_handl:8,current_datetime_hostnam:41,current_devic:[8,40],current_stream:8,curv:41,custom:[7,13,14,21,22,31,35,37,42],custom_decod:22,custom_encod:22,custom_loop:36,custom_op:36,cut:4,cutoff:[22,43],cxx:7,cycl:37,cycle_momentum:37,cyclic:[37,49],cycliclr:37,d_1:[22,23],d_2:[22,23],d_k:[22,23],d_model:22,daemon:21,dag:1,dai:4,dampen:37,dark:49,darker:49,dart:44,data1:44,data2:44,data:[1,4,12,14,15,18,19,21,22,23,25,26,28,29,31,32,34,35,36,37,38,39,40,41,42,43,44,49],data_load:[32,37,44],data_parallel:30,data_ptr:[39,42],data_sourc:13,databas:[13,44],dataformat:41,dataload:[13,22,28,30,35,37,41,42,44],dataparallel:[14,30,32],dataset:[18,22,30,31,35,37,41,45,47,49],dataset_it:13,datasetfold:45,datatyp:[22,36,43],datetim:14,datset:44,david:[6,22],dcgan:36,ddp:22,ddp_sync_bn_network:22,deactiv:49,deadlock:[14,22],deal:[4,21,30,43,49],dealloc:[21,28,30],debug:[1,2,13,14,25,27,35,36],decai:[22,37],decemb:43,decent:13,decid:[2,4,44],decis:19,declar:[1,7,13,19,36],decod:[16,22,43,46],decoder_lay:22,decomposit:[15,43],deconvolut:[22,23],decor:[1,15,19],decoupl:[22,37],decreas:[15,22,37,43,48],decreasingli:22,deep:[4,5,18,22,24,37,47],deeper:47,deeplabv3_resnet101:47,deeplabv3_resnet50:47,def:[1,13,15,17,19,22,27,29,30,32,35,36,37,42,49],default_gener:43,default_load:44,default_stream:8,defin:[1,7,13,15,17,21,22,23,35,36,37,38,42,43,44,49],define_macro:35,definit:[4,13,15,17,19,22,23,36,41,43,47],degre:[15,22,43,49],del:[21,30],delet:[14,17,21],deliv:5,delta:[15,22,24,37],delv:24,demand:[8,31],demonstr:22,denomin:[22,37,43],denorm:43,denot:[1,15,19,22,37],dens:[22,38,40,42,43,47],dense_dim:[38,42,43],densenet121:47,densenet161:47,densenet169:47,densenet201:47,densenet:36,densiti:15,depend:[1,2,3,13,14,15,17,19,21,22,23,27,28,33,36,37,38,41,42,43,47],dependent_properti:15,deploi:[4,31],deploy:18,deprec:[14,22,23,26,36,39,42,43,49],depth:[8,22,23,47,49],depthwis:22,dequant:42,deriv:[1,5,19,22,29,42,43],derivedp:15,derivedq:15,descend:[22,42,43],descent:[15,37],describ:[3,4,8,13,19,22,23,24,30,31,36,42,43,47,48],descript:[0,4,7,19,28,29,31,36,51],descriptor:[13,22,36,44],deseri:[20,43],design:[1,4,5,13,15,17,20,47],desir:[8,13,14,15,22,23,28,36,38,39,42,43,49],desmaison:6,despit:19,destin:[8,14,22,39,42,43],destroi:22,destructor:21,det:[15,42,43],detach:[1,19,22,23,30,42,43],detach_:[1,42],detail:[0,1,4,8,13,15,19,22,23,29,30,31,37,38,41,42,43,47,49],detect:[3,7,14,21,36,45],detect_anomali:1,detector:22,determin:[1,5,7,8,13,15,22,23,28,33,41,43,49],determinist:[3,11,15,19,22,23,33,37,43],dev:5,dev_idx:14,develop:[28,31,36],deviat:[15,22,24,42,43,49],devic:[1,3,8,14,19,22,23,30,33,36,37,39,42,43],device_count:[8,14],device_id:[22,23,43],device_of:8,devito:6,df1:15,df2:15,dfrac:[22,23,42],diag:[15,42,43],diag_emb:[42,43],diagflat:[42,43],diagn:15,diagnost:19,diagon:[15,23,42,43],dict:[15,20,22,29,36,37,41,43,46,47],dictat:22,dictionari:[7,13,15,22,23,36,37,41,44,47],did:[1,4,19],didn:[25,29,35,37],dies:21,dieterich:6,diff:[4,19],differ:[1,3,7,8,13,14,15,17,19,21,22,23,26,27,28,29,32,33,35,36,37,38,40,41,42,43,44,47],differenti:[15,22,23,25,29,30,42],difficult:[1,4],difficulti:[4,24],digamma:[42,43],digamma_:42,digit:[20,31,43,44],dilat:[22,23,36],dim0:[42,43],dim1:[42,43],dim2:[42,43],dim:[8,15,19,22,23,30,36,38,42,43],dim_arang:36,dim_feedforward:22,dimems:42,dimens:[1,8,13,15,19,2
2,23,24,26,30,38,40,41,42,43],dimension:[1,15,22,23,24,26,39,40,42,43],dims_i:36,dine:47,diningt:47,dir:[17,36,44],dirac:24,dirac_:24,direct:[4,5,22,25,29,43,49],directli:[4,5,7,13,14,15,19,22,23,28,31,32,36,38,41,43],directori:[7,14,20,31,34,41,44,47],dirnam:17,dirti:25,disabl:[22,28],disable_cuda:28,disable_jit_exampl:19,disadvantag:19,discard:[17,19,48],discourag:[1,8,25],discov:14,discrep:43,discret:[15,22,23,42,43],discrimin:22,discuss:[5,15],disjoint:19,disk:[1,13,41,43],dispatch:[14,36],displai:[20,23,36,47,50],displaystyl:43,dissimilar:22,dist:[14,15,42,43],distanc:[37,43,44],distinct:43,distort:49,distortion_scal:49,distribut:[13,18,24,38,41,42,43],distributed_test:14,distributeddataparallel:[13,14],distributedsampl:13,div:[22,36,38,42,43],div_:[38,42],div_valu:22,diverg:[19,22,23,36],divid:[3,8,22,23,43],dividend:43,divis:[13,22,23,43],divisor:[22,23,42,43],divisor_overrid:[22,23],dlibenzi:6,dll:35,dlpack:18,dltensor:16,dmytro:[5,6],dnn:27,do_constant_fold:36,doc:[2,21,22,29,36,41],doc_str:36,docstr:[7,17],document:[8,13,14,17,19,21,22,29,30,36,43,50],doe:[1,2,3,4,5,8,14,15,17,19,21,22,23,26,27,28,32,36,38,42,43,45,49],doesn:[1,3,4,8,13,14,19,22,23,26,29,31,32,35,36,37,43],dog:[44,47],doing:[4,13,19,23,35],domain:[5,15],don:[1,2,4,14,17,21,22,23,25,29,30,32,35,36,37,42,43,49],done:[13,15,19,21,22,30,33,36,42,43,49],donut:47,dot:[22,42,43,49],doubl:[1,22,23,29,39,40,42,43],doubler:1,doubletensor:[40,42,43],dow:41,down:[1,4,13,15,23,32,41],download:[20,35,41,44,47],downsampl:22,doxygen:4,dp_m:30,dpotri:43,draw:[13,41,42,43],drawn:[13,24,42,43],drier:47,drive:[5,14],driven:5,drop:[13,22,32,44],drop_last:13,dropout:[3,36],dset:44,dst1:8,dst2:8,dst:14,dst_tensor:14,dst_type:22,dtype:[12,13,19,22,23,28,29,36,38,39,41,42,43,49,51],due:[2,3,4,15,19,22,28,33,43],dummi:13,dummy_input1:36,dummy_input2:36,dummy_input:36,dump:35,dump_patch:22,duplic:[13,22,30,38,42,43],dure:[1,3,7,14,19,22,23,27,28,31,36,38,42,43,47],dynam:[7,13,19,24,36,37,43],dynamic_ax:36,dynamic_threshold:37,dzhulgakov:[5,6],each:[1,3,7,8,13,14,15,16,17,19,22,23,24,25,26,27,28,29,30,31,32,36,37,38,40,41,42,43,44,46,47,48,49,50],eager:43,eagerli:8,earli:[19,22],earlier:[1,30,36],eas:[1,27],easi:[13,19,30,31,32,36],easier:[4,13,19,22,26,29,43],easili:[4,9,10,14,22,23,37,41,42,43],ecosystem:31,edg:[1,23,49],edgeitem:43,edouard:22,edu:[22,44],edward:[5,6],effect:[1,4,7,13,17,19,22,23,28,37,39,41,42],effici:[1,13,15,22,25,29,38,40,42,43,47],eig:[42,43],eigenvalu:43,eigenvector:[42,43],eight:20,einstein:43,einsum:43,either:[1,7,13,14,15,17,19,22,23,24,26,28,29,31,32,36,37,42,43,51],elaps:8,elapsed_tim:8,eleg:32,element:[1,8,13,14,15,22,23,24,26,27,38,39,40,41,42,43,44,48,49],element_s:[39,42],elementari:43,elementwis:[8,22,23,43],elementwise_affin:22,eleph:47,elf:30,elif:19,elimin:[14,42,43],ell:22,ell_c:22,ellips:43,ellipsi:43,elman:22,els:[4,7,13,15,19,21,22,23,28,29,39,42,43,44,49],elsewher:[17,43],elu:36,elu_:23,embed:[27,36,41],embed_dim:22,embedding_bag:33,embedding_dim:[22,23],embedding_matrix:23,embedding_sum:22,embeddingbag:23,emerg:35,emit:[1,7,19,44],emit_nvtx:[1,2],emnist:45,empir:22,emploi:37,employe:5,empti:[14,19,22,23,24,26,28,36,38,42,43],empty_cach:[8,28],empty_lik:43,empty_strid:43,emptydatastructur:19,enabl:[1,12,13,14,22,26,27,28,31,35,37,42,43],enable_grad:[1,43],enable_tim:8,encod:[14,19,22,29,43],encoder_lay:22,encount:[14,22,23,43],encourag:5,end:[4,5,8,13,14,19,21,22,30,35,42,43,46],end_dim:[42,43],end_ev:8,end_pt:46,endl:31,endocd:22,endpoint:49,enforc:22,enforce_sort:22,engin:[1,42,43],enhanc:49,enoug
h:[19,21,25,29,35,37,43,49],enqueu:[8,28],ensur:[1,2,4,5,13,14,19,20,21,22,25,28,32,33,42,43,47],enter:14,entir:[3,4,7,13,19,22,23,30,31,34,49],entiti:44,entranc:4,entri:[1,14,15,25,37,38,41,43,44],entropi:[15,22,23],entrypoint:21,entrypoint_nam:17,enumer:[13,15,22,28,35,41],enumerate_support:15,env:[14,15],enviro:4,environ:[4,7,15,17,19,20,27,28,35,47],environment:8,epoch:[13,37,41],eps:[1,22,23,29,37,43,51],epsilon:[22,23,43],eq_:42,equal:[8,14,15,22,23,26,41,42,43],equal_nan:[42,43],equat:[43,49],equival:[3,13,15,19,22,23,36,40,42,43],eras:49,erf:[36,42,43],erf_:42,erfc:[42,43],erfc_:42,erfinv:[42,43],erfinv_:42,errno:35,error:[1,4,13,15,19,21,22,23,25,28,29,36,42,43,47],especi:[5,13,14,23,25,36,42,43],essenti:[13,35],estim:[15,22,37,43],eta:37,eta_:37,eta_min:37,eta_t:37,etaminu:37,etapli:37,etc:[13,14,15,19,22,29,30,32,36,37,41,43],eth0:14,eth1:14,eth2:14,eth3:14,ethernet:14,euclidean:23,eval:[19,22,36,47],evalu:[2,15,22,23,25,29,37,43,44,47],even:[1,13,14,19,23,28,29,30,32,33,40,42,43],event:[1,15,19,21,41],event_dim:15,event_file_writ:41,event_nam:31,event_shap:15,eventfilewrit:41,eventlist:1,eventu:[1,4],everi:[1,13,14,15,22,23,25,28,29,36,37,39,41,42,43,44,47],everyon:4,everyth:[4,13,15,19,21],everytim:14,everywher:[23,43],evid:4,evolv:36,exact:[1,22,24,32,34,38,43],exactli:[1,7,14,15,22,23,25,28,36,43,44,48],examin:36,exampl:[1,3,4,7,8,13,14,15,17,19,20,22,23,24,25,26,27,28,29,30,31,32,35,37,38,40,41,42,43,44,47,49,50],example_forward_input:19,example_input:19,example_output:36,example_weight:19,except:[1,3,4,14,15,19,21,22,23,28,35,36,38,42,43,47,49],exchang:[14,43],exclud:[19,22,23,43,44],exclus:[13,14,15,25,43],execut:[1,2,3,4,7,8,13,14,19,22,25,26,27,30,32,33,35,36,43],exist:[1,4,5,7,13,14,15,17,19,21,22,26,31,36,42,43,44],exit:[1,2,21,22,32,36],exp1:41,exp2:41,exp:[1,15,22,23,36,42,43],exp_:42,exp_famili:15,exp_rang:37,expand:[1,5,15,17,26,36,42,43,49],expand_a:[29,36,42,43],expans:49,expect:[1,4,13,14,19,22,23,30,37,41,43,44,47,48,49],expens:[2,13,15,31],experi:[22,41],experiment:[35,36,38,40],expert:5,explain:[17,28],explan:29,explicit:[28,36,43],explicitli:[8,14,19,23,28,31,36,38,43],explod:[22,41],explor:17,expm1:[42,43],expm1_:42,expon:[15,22,23,42,43],exponenti:[22,42,43],exponential_:[42,43],exponentiallr:37,export_chrome_trac:1,export_param:36,export_raw_ir:36,expos:[1,28,31],express:[25,42,43],exptransform:15,ext:[20,44],ext_modul:7,extend:[1,15,18,22,31,32,36],extens:[7,15,43,44],extension_kernel:7,extent:27,extern:[19,27,35],extra:[4,14,19,22,29,30,31,43,44],extra_cflag:7,extra_compile_arg:[7,35],extra_cuda_cflag:7,extra_fil:19,extra_include_path:7,extra_ldflag:7,extra_repr:[22,29],extract:[22,23],extrafilesmap:[19,31],extrem:[2,41],extrud:30,eye:[15,43],eye_:24,eyes:4,ezyang:[5,6],f_t:22,face:41,facebook:5,faces_tensor:41,facil:[21,43],facilit:[17,19,43],fact:[1,19,29,42,43],factor:[15,22,23,24,37,43,48,49],factori:[1,15,28],fail:[1,4,15,19,21,32,35,36,43],failur:[1,5,14,15,19,21,22],fake:44,fakedata:45,fall:[4,22,23,36,43],fallback:14,fals:[1,3,7,8,13,14,15,17,19,21,22,23,25,30,33,36,37,39,41,42,43,44,47,49,50],famili:15,familiar:[19,25],fan:24,fan_in:24,fan_out:24,fang:6,faq:[13,18,22],far:23,fashion:[4,14,19,42,45],fashionmnist:44,fast:[4,13,22,28,40,43,48],fast_forward:43,faster:[13,22,23,28,45],fasterrcnn_resnet50_fpn:47,fastest:[22,43],fatal:[21,32],favor:[8,22,23,43,49],favour:43,fcn:47,fcn_resnet101:47,fcn_resnet50:47,fcntl:14,featur:[5,18,19,22,23,29,30,36,41],featuredropout:36,fed:41,fedyunin:6,feed:[19,30],feedback:4,feedforward:[22,24],feel:4,feng:6,fe
tch:13,few:[1,4,17,25,30,35,42,43],fewer:[15,26,38,42,43,47],ffi:35,fft:[28,42,43],field:[4,14,22,23,32,36,46,47],figur:[4,27,36,41],file:[1,4,5,7,8,13,17,19,20,31,35,36,39,41,43,44,46,50],filenam:[7,19,20,39,41,46,50],filename_suffix:41,fileno:36,filesytem:[17,20],fill:[14,22,23,24,28,42,43,49],fill_:[22,39,42,43],fill_diagonal_:42,fill_row_zero:19,fill_valu:[22,28,42,43],fillcolor:49,filter:[22,23,42,43,49],financi:5,find:[1,4,5,7,14,17,21,22,27,28,29,30,32,36,40,41,43],find_unused_paramet:22,fine:[7,14,17,19,21,22,25,37,44,49],finetun:[22,25],finish:[14,28,35],finit:[1,2,15,29,43],fire:[31,47],first:[1,2,3,4,5,7,8,13,14,15,19,20,21,22,23,28,30,32,34,35,36,37,38,41,42,43,44,48,49],fisher:15,fit:[1,37,42,43],five_crop:49,fivecrop:49,fix:[19,22,23,30,32,33,35,36,43,44],flag:[1,7,19,21,22,25,28,41,42,43,49],flat:[36,43],flatten:[24,36,42,43,49],flickr30k:44,flickr8k:44,flickr:45,flip:[42,43,49],float16:[12,22,40,42,43,51],float32:[13,22,23,36,40,42,43,51],float64:[22,40,42,43,51],floatstorag:39,floattensor:[1,14,22,38,40,42,43,47,49],floor:[22,23,36,42,43],floor_:42,flow:[19,23,25,36],flush:[1,19,41,43],flush_sec:41,fly:[13,44],fmod:[42,43],fmod_:42,focu:37,focus:19,fold:[19,36,44],folder:[4,7,13,17,41,44],folk:4,follow:[1,4,5,12,13,14,15,17,19,20,21,22,23,24,26,27,28,30,32,35,36,38,40,42,43,44,47,49,51],foo:[17,19,36],foo_forward:36,foomodel:36,foomodul:36,foral:42,forc:[1,7,8,17,19,28,41],force_reload:17,forev:21,forg:[35,36],forget:[19,22],forgotten:35,fork:[13,21,22,27,30,32,35,47],forkingpickl:35,forkserv:[21,22,32],form:[1,4,5,13,15,19,22,23,29,33,36,37,41,43],format:[1,12,17,19,22,29,36,38,40,41,42,43,44,46,47,48],former:22,formul:[22,23],formula:[1,15,22,23,29,37,43],forth:[17,37],fortun:30,forum:[4,5,30,32],forward:[1,3,7,19,21,22,23,24,25,27,28,29,30,33,36,38,43],found:[19,22,32,41,43,47],four:49,fourier:43,fp16:22,fp32:22,fpn:47,fps:[41,46],frac:[15,22,23,24,37,42,43,49],frac_:42,fraction:[13,22,24,43,49],frame:[41,43,44,46],frames_per_clip:44,framework:[4,5,15,36,37],frank:15,free:[1,4,14,15,24,25,30,32,35],freed:[1,21,28,42],freedom:15,freez:[22,25],freeze_support:35,frequenc:[22,23,37,43],frequent:[4,18,22],fresh:17,frisbe:47,fritz:6,fritzo:6,fro:[42,43],frobeniu:43,from:[1,4,5,7,8,13,14,15,19,21,22,23,24,28,29,30,31,32,37,38,40,41,42,43,44,46,47,48,49,50],from_buff:39,from_dlpack:16,from_fil:39,from_ipc_handl:8,from_numpi:[42,43],from_pretrain:22,front:[22,42],frozen:[25,35,37],full:[13,14,15,17,19,22,23,36,43,44],full_lik:[36,43],fulli:[13,14,22,25,28,29],func:[1,19,42],functioneventavg:1,functor:15,fundament:4,further:[1,5,7,14,22,32,33,41,43],furthermor:[7,22,33],fuse:49,fusion:19,fut:27,futur:[1,8,19,23,27,36,37,38,42],g_cpu:43,g_cpu_oth:43,g_cuda:43,g_t:22,gain:[5,24,49],galleri:4,gamma:[22,37,43,49],gamma_:43,gan:22,gap:43,garbag:13,gate:[22,23],gather:[8,14,30,31,36,42,43],gather_list:14,gaussian:[15,23],gchanan:[5,6],ge_:42,gel:[42,43],gemm:[27,36],gen_non_contig_grad_output:1,gener:[1,4,7,13,14,15,19,22,23,24,28,30,31,33,35,36,37,38,40,41,42,44,45,47],generate_square_subsequent_mask:22,geometr:[23,42,43],geometri:[28,43],geometric_:[42,43],geq:[22,23,24,43],geqrf:[42,43],ger:[42,43],gesdd:43,gesvd:43,get:[1,7,8,13,14,19,21,22,25,29,30,31,36,37,41,42,43,44,45],get_all_sharing_strategi:21,get_backend:14,get_context:32,get_default_dtyp:[43,51],get_devic:[38,40,42],get_device_cap:8,get_device_nam:8,get_image_backend:45,get_info:[42,43],get_input:36,get_lr:37,get_num_interop_thread:[27,43],get_num_thread:[27,43],get_rank:14,get_rng_stat:[8,43],get_rng_state_al:8,get_sharin
g_strategi:21,get_stat:43,get_worker_info:13,get_world_s:14,getenv:31,getsourc:31,gil:[13,14,28],gimelshein:6,giraff:47,girshick:22,github:[4,5,17,29,36,37],give:[1,2,4,13,17,19,21,22,25,28,29,36,37,43,44,49],given:[1,4,5,7,8,13,14,15,19,20,21,22,23,24,27,29,31,36,37,38,41,42,43,44,49,50],glass:47,global:[3,13,14,15,19,31,32,41,43,47],global_step:41,globalcontext:35,gloo:[14,22],gloo_socket_ifnam:14,glorot:24,glove:47,glu:36,goe:[22,30],going:[4,14,21,25,27,31,35,41],goldsborough:6,gomez:22,good:[4,7,17,21,22,27,31,43],govern:[4,18],gpu1:22,gpu:[1,2,4,8,12,13,18,19,28,33,35,37,39,42,43,47],gpu_model:19,gpudirect:14,grad:[1,3,15,19,22,32,38,42],grad_bia:29,grad_fn:[1,25,38,42],grad_input:[22,29,35],grad_output:[1,22,29,35],grad_tensor:1,grad_vari:1,grad_weight:29,gradcheck:[1,29,43],gradgradcheck:1,gradient:[3,13,14,15,19,22,23,25,29,30,37,38,42],graham:22,grai:49,grain:[14,25,49],grangier:22,grant:5,graph:[1,3,15,22,25,29,36,41,42,43],graphic:35,graphroot:1,grave:22,grayscal:[41,49],great:4,greater:[2,22,23,25,36,43,48],greater_than:15,greater_than_eq:15,greaterthan:15,greaterthaneq:15,greg:[5,6],gregori:6,grep:30,grid:[23,41,43,48,50],grid_i:43,grid_x:43,gross:[5,6],ground:[4,41,47],group:[1,5,17,20,21,22,23,36,37,41,42,43],group_by_input_shap:1,group_nam:14,grow:[4,38],gru:19,gt_:42,gtcoars:44,gtfine:44,guarante:[3,13,14,15,22,27,33],guard:32,guid:[1,13,18],guidanc:4,guidelin:[5,47],gumbel:23,h_0:22,h_1:22,h_i:22,h_n:22,h_t:22,hack:4,had:[4,19],hadamard:22,hair:47,half:[15,22,23,37,39,40,42,43],half_cauchi:15,half_norm:15,half_open_interv:15,halfopeninterv:15,halftensor:[40,42],ham:[23,43],hamiltonian:15,hamming_window:43,hand:[1,2,19,22,36,43],handbag:47,handi:28,handl:[1,3,8,13,14,17,21,22,23,28,30,32,36,42,43,44],handler:31,hang:22,hann:43,hann_window:43,happen:[1,4,5,14,15,21,22,29,30,32,35,42],happi:4,hard:[1,4,19,22,23,25,36],harder:22,hardshrink:42,hardtanh:36,hardtanh_:23,has:[1,3,4,5,8,12,13,14,15,19,21,22,23,25,26,27,29,31,32,35,36,37,38,39,40,41,42,43,44,47,49],has_enumerate_support:15,has_rsampl:15,hash:20,hasn:37,hat:22,have:[1,3,4,5,8,13,14,15,17,19,21,22,23,24,25,26,28,29,30,31,32,33,35,36,37,38,40,41,42,43,44,47,48,49],head:22,head_1:22,head_bia:22,head_h:22,head_i:22,header:[1,7,35],health:5,healthi:[4,5],heart:13,heavi:[1,14,25,35],heavili:[2,29,37],hei:4,height:[22,23,36,48,49],held:8,hellemn:6,hello:19,help:[1,2,4,5,13,17,22,25,26,28,36,43],helper:[3,14,17,19,22,28,36],henc:[22,28,43,44],here:[1,4,5,13,14,15,17,19,22,23,29,30,31,35,36,42,43,44,47,50],hessian:24,heurist:[7,13],hflip:49,hidden:[3,22,28,41],hidden_s:22,hierarch:41,high:[2,4,15,21,22,41,42,43],higher:[1,4,8,14,22,29,42,48],highest:[22,43],highli:[5,17,22,36],hing:22,hingeembeddingloss:23,hinton:37,his:37,histc:[42,43],histogram:[41,43],histor:27,histori:[1,29,30,37],history_s:37,hit:3,hmc:15,hmdb51:45,hmdb:44,hold:[1,22,26,29,30,32,37,40,42,43,49],holist:4,hood:[1,21,32],hook:[1,22,31,42],hop:43,hop_length:[42,43],horizont:49,horizontal:49,hors:47,host:[13,14,22,28,39,42],hot:[15,23,47],houseroad:6,how:[3,4,5,13,14,19,20,21,22,23,29,30,32,36,41,43,47,49],howev:[2,3,4,5,8,13,14,15,19,22,28,32,33,34,35,36,38,42,43,44],hspmm:38,hsv:49,html:[2,4,37,41],http:[2,4,17,20,22,35,36,37,41,44,47,49],hub:[18,20],hub_dir:17,hubconf:17,huber:22,hue:49,hue_factor:49,human:[0,24,36,44],hundr:31,hwc:41,hybrid:38,hydrant:47,hyper:19,hyperbol:43,i_0:43,i_d:43,i_n:43,i_t:22,icdf:15,icml_2006:22,idea:[4,22,31],ident:[1,7,13,14,15,23,24,33,38,42,43],identifi:[4,14,21,22,26,31,41,43],identity_transform:15,idiom:35,ids:[22,23],id
x:[13,22],iff:15,ifft:[42,43],ignor:[4,8,19,22,23,29,37,42,43],ignore_index:[22,23],ignored_cod:19,illia:22,im2col:22,imag:[13,22,23,41,44,45,47,50],image_s:44,image_set:44,imagefold:45,imagenet:[14,24,45,47],imagenet_data:44,imagenet_root:44,imaginari:43,imbalanc:22,img:[41,44,49],img_batch:41,img_height:49,img_hwc:41,img_tensor:41,img_width:49,immedi:[4,5,22],impact:33,imper:14,implement:[1,3,8,13,14,15,19,21,22,23,25,27,29,30,31,32,35,36,37,38,42,43,44,47,48],impli:21,implic:19,implicit:[22,23,36],implicit_cast:36,implicitcasttyp:36,implicitli:[4,19,22,43],importerror:35,impos:21,improb:13,improv:[5,12,14,22,29,36,37,47],in1:22,in1_featur:22,in2:22,in2_featur:22,in_channel:22,in_featur:[22,29],inaccuraci:1,inc:19,incas:42,incept:[36,49],inception_v3:47,includ:[1,2,3,4,5,7,13,14,17,19,21,22,23,27,28,30,31,33,42,43,44,47],include_path:7,inclus:[15,42,43],incom:[21,22,23],incompat:[5,7,26],incomplet:13,inconsist:43,incorrect:[2,19,23,28,42,43],increas:[1,4,8,11,15,22,23,25,28,37,49],increment:[1,19,22,25],incur:[3,32,43],inde:19,independ:[5,8,13,14,19,22,23,42],index:[8,13,15,18,19,21,22,23,25,28,36,37,38,40,41,42,44,48],index_add:42,index_add_:[33,42],index_copi:[36,42],index_copy_:42,index_fil:[36,42],index_fill_:42,index_put:42,index_put_:42,index_select:[36,42,43],indic:[1,8,13,14,15,19,22,23,29,36,37,38,41,42,43,48],individu:[4,5,13,19,22,31,33,42,43,46],induc:[9,10,23,42,43],inf:[15,22,43],infer:[1,18,19,23,36,38,42,43,47],inferencesess:36,infin:[22,37],infiniband:[14,22],infinit:[13,22,23],influenc:5,info:[8,14,18,43,46],inform:[1,2,4,13,14,19,22,27,29,31,36,40,41,42,43,44,47,49],infrastructur:5,infti:[22,23],ingredi:22,inherit:[19,29,32],init:[8,14,18,22,31],init_method:[14,22],init_process_group:[14,22],init_weight:22,initi:[2,8,13,19,22,23,24,28,29,31,35,37,42,43,47],initial_accumulator_valu:37,initial_se:[8,13,43],inject:31,inlin:[7,19,27],inline_extens:7,inner:[36,43],innermost:15,inp:[1,13,22],inp_unf:22,inplac:[22,23,36,49],inplace_assign:36,inplaceindexedassign:36,inplaceindexedassignmentonnx:36,input1:[22,23,35,36],input2:[22,23,35,36,42,43],input3:[42,43],input:[1,3,5,8,12,13,14,15,19,22,23,24,25,27,28,29,30,31,36,37,38,42,43,44,47,48,49],input_1:36,input_2:36,input_3x3:22,input_data:36,input_featur:29,input_length:[22,23,30],input_nam:36,input_s:22,input_tensor_list:14,input_to_model:41,input_tupl:19,input_var:[3,22],insensit:22,insert:[15,19,22,43],insid:[1,4,19,28,31],insight:4,inspect:[1,31],inspir:37,inst:44,instal:[1,7,14,17,36,41,43,44],instanc:[13,15,19,21,22,23,30,43,44,45],instance_norm:36,instancenorm1d:23,instancenorm2d:23,instancenorm3d:23,instancenorm:22,instantan:8,instanti:[17,19,22,23,29],instead:[1,3,5,13,14,15,19,22,23,30,32,35,36,37,38,42,43,49],instruct:[2,19,36],instrument:31,insuffici:8,int16:[40,42,43,51],int32:[22,40,42,43,51],int64:[22,23,28,36,40,42,43,48,51],int64tensor:47,int8:[40,42,43,51],int_:43,int_a:43,int_b:43,int_repr:42,integ:[8,13,14,15,19,22,23,36,37,40,42,43,49,51],integer_interv:15,integergreaterthan:15,integr:[13,22,31,36,37,42,43],intel:[35,45],intel_openmp:35,intens:[37,49],intent:[4,19],inter:[27,43],interact:[1,5,8,36,41],interchang:[15,19],interconnect:14,interest:[4,5,18,44,48],interfac:[29,36,37,41],intermedi:[3,19,22,23,25,30,36],intermediari:15,intern:[5,13,15,22,25,27,28,36,38,43,44,47],internet:[4,44],interop:43,interoper:27,interpol:[22,43,48,49],interpret:[13,14,21,23,27,38,43],interprocess:8,interrupt:21,intersect:48,interv:[15,43,49],intra:27,introduc:[15,17,22,26,42,43],introduct:[26,41],inttensor:[40,42,43],intuit:3
6,inv:[15,43],invalid:43,invari:[15,22,38,43,49],invers:[15,22,23,37,42,43],inverse_indic:43,invert:[15,22,43,47],invest:5,investig:4,invis:28,invoc:[3,19,29,31,36],invok:[19,22,27,31,37],involv:[4,5,13,28,30,33],iotamudelta:6,iou:[47,48],iou_threshold:48,ipc:8,ipc_collect:8,ipc_handl:8,ipp:45,irecv:14,irfft:[42,43],irrelev:1,irrespect:[28,43],is_avail:[8,28,43],is_coalesc:38,is_complet:14,is_contigu:42,is_cuda:[39,42],is_floating_point:[40,42,43],is_in_onnx_export:36,is_initi:14,is_leaf:[1,42],is_mpi_avail:14,is_nccl_avail:14,is_pin:[13,39,42],is_python_modul:7,is_set_to:42,is_shar:[39,42],is_sign:42,is_spars:[39,42],is_storag:43,is_tensor:43,is_train:[1,43],is_valid_fil:44,isend:14,isfinit:43,isinf:43,isinst:15,isn:[4,13,28],isnan:[36,43],isol:21,issu:[5,21,22,28,32,35,36],itch:4,item:[13,17,22,33,41,42,43,44],iter:[4,8,14,15,21,22,25,26,37,41,48],iter_end:13,iter_start:13,iterabledataset:[13,31],itertool:[15,43],its:[1,2,4,5,7,8,13,14,15,19,21,22,25,26,28,29,30,35,36,37,38,40,42,43,44,47,49],itself:[3,4,19,21,22,23],ivalu:31,jacobian:[1,15,43],jakob:22,jang:15,jess:6,jit:[7,18,27,31,36,43],jitter:49,job:[14,22,31,37,41],johann:6,johnson:6,join:[4,14,17,21,32],jointli:[15,22],jone:22,joulin:22,journal:43,jpeg:31,json:[19,31,44],juggl:3,jump:[40,42],junji:6,just:[1,4,7,15,17,19,21,22,28,31,36,42,43],k_0:43,kaiming_normal_:24,kaiming_uniform_:24,kaiser:22,kdim:22,keep:[1,4,13,17,19,21,22,23,25,28,30,32,37,41,43,48,49],keep_var:22,keepdim:[22,23,42,43],kei:[1,13,14,19,22,31,36,37,39,41,42,43],kept:[21,22,23,48],kernel:[2,7,8,22,23,29,36],kernel_s:[22,23,41],kernel_shap:36,key_averag:1,key_padding_mask:22,keyboard:47,keypoint:45,keypointrcnn_resnet50_fpn:47,keyword:[1,17,19,22,36,37,41,43],kill:[21,30],kind:[14,22,29,32,36],kinet:45,kinetics400:44,kite:47,kl_diverg:15,kl_normal_norm:15,kl_version1:15,kl_version2:15,kldivloss:23,kmnist:45,knife:47,know:[3,4,19,25,36],known:[4,14,19,21,22,24,27,28,36,43,49],knuth:4,kth:43,kthvalu:[42,43],kullback:[15,22,23],kuzushiji:44,kw_i:22,kwarg:[1,3,7,17,22,23,34,36,39,42,43,44,47,49,50],kwlist:41,l1loss:23,l_1:22,l_c:22,l_n:22,l_p:23,lab:44,label:[4,13,22,23,32,41,44,47],label_img:41,lambd:[22,23,37,42,49],lambda1:37,lambda2:37,lambda:[1,13,15,22,23,37,42,43,49],lambdalr:37,langl:15,languag:[7,22,23,30,36],laptop:47,larg:[4,13,18,21,22,23,27,28,30,38,42,43,44,47,49],larger:[1,5,22,23,30,31,41,42,43,47,49],largest:[19,23,42,43,51],last:[1,3,13,19,22,23,25,37,43,47,49],last_epoch:37,later:[1,4,19,22,27,28,34,43],latest:[4,14,15,17,36],latin1:43,latter:[22,32],launch:[2,13,22,25,27,28],law:[22,49],layer:[14,23,24,25,29,30,37,47],layer_count:36,layer_norm:36,layernorm:23,layout:[17,19,20,38,41,42,43],lazi:37,lazili:8,lbfg:37,lbrace:43,lceil:43,ldot:[15,22,43],le_:42,lead:[1,4,35,42,43],leadership:5,leaf:[1,42,43],leak:21,leaki:[22,23,24],leaky_relu:[24,36],leaky_relu_:23,leakyrelu:23,learn:[4,15,18,22,24,44,47],learnabl:[22,23],learned_0:36,learned_14:36,learned_15:36,learned_1:36,learned_2:36,learned_3:36,learned_:36,least:[15,17,22,24,26,30,39,42,43,47],leav:[1,19,25,42,43,44],left:[19,22,23,36,42,43,49],left_ankl:47,left_ear:47,left_elbow:47,left_ey:47,left_hip:47,left_kne:47,left_should:47,left_wrist:47,leftimg8bit:44,legaci:[23,40],legitim:22,leibler:[15,22,23],lemma:15,len:[13,14,19,22,23,38,41,43,44],length:[1,8,13,14,15,19,22,23,26,30,36,42,43,49],leq:[22,23,43],lerp:[42,43],lerp_:42,less:[1,4,8,13,15,17,22,29,32,43,47],less_than:15,lesser:27,let:[1,4,13,15,28,29,32,35,41,42],letter:[43,44],level:[1,4,13,19,21,22,24,27,41,42,43,47],lexicograph:43,lfl
oor:[22,23,43],lib64:7,lib:[35,43],libari:35,libenzi:6,librari:[2,5,7,13,18,27,29,30,31,32,33,35,36,43,45],libx264:46,lie:[22,23,41],lies:44,lifetim:4,light:[41,47],lighter:49,like:[1,2,3,4,5,7,8,13,14,15,17,19,21,22,23,27,28,29,30,31,32,35,36,42,43,49],likelihood:[15,22,23],likewis:48,limit:[13,21,22,25],line:[1,2,22,26,35,36,43],line_search_fn:37,linear:[8,19,24,25,28,29,30,36,42,43,47],linearfunct:29,linearli:[22,23,30],lineartransform:49,liner:22,linewidth:43,link:[7,15,22,23,31],linker:7,linspac:43,linux:[14,17,20],list:[1,3,4,5,7,13,14,15,17,22,23,29,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50],listconstruct:19,listofproperti:41,literatur:22,littl:29,live:[19,22,30,37],llion:22,load:[1,7,19,20,22,31,34,35,36,37,43,44,45,47],load_inlin:7,load_nvprof:1,load_state_dict:[17,22,34,37,43],load_state_dict_from_url:[17,20],load_url:[20,47],loadabl:17,loadann:44,loaded_weight:42,loader:[13,44],loc:[15,43],local:[14,17,19,21,22,23,30,41,44],local_rank:22,locallr_0:41,localresponsenorm:23,locat:[1,7,8,15,17,19,20,22,23,37,38,41,42,43,44,47,49],lock:[4,13,14,15,28,32],log10:[42,43],log10_:42,log1p:[42,43],log1p_:42,log2:[36,42,43],log2_:42,log:[7,13,15,22,23,36,41,42,43],log_:[42,43],log_abs_det_jacobian:15,log_dir:41,log_input:[22,23],log_norm:15,log_normal_:[42,43],log_pob:22,log_prob:[15,22,23],log_sigmoid:36,log_softmax:[22,36],logabsdet:43,logarithm:[22,23,43],logdet:[42,43],logdir:41,logic:[3,13,29],logist:[15,22],logit:[15,22,23],logsoftmax:23,logspac:43,logsumexp:[36,42,43],longer:1,longest:[22,30],longtensor:[22,23,38,40,42,43],look:[2,4,5,15,19,22,23,31,32,35,36,43],lookup:[15,22,23,27],loop:[8,19,27,30,36,41,49],loop_count:36,loop_in_traced_fn:19,loop_rang:36,loopmodel2:36,loopmodel:36,loos:31,lorentz:15,loss:[15,30,37,41,44,47],loss_fn:[32,37],lost:[22,43],lot:[4,21,32,41],low:[4,15,21,22,42,43],lower:[1,8,14,15,19,22,23,24,25,33,37,43,48],lower_bound:15,lower_choleski:15,lower_triangular:15,lowercas:14,lowercholeski:15,lowercholeskytransform:15,lowest:43,lowrank_multivariate_norm:15,lppool1d:23,lppool2d:23,lr_0:41,lr_decai:37,lr_lambda:37,lr_schedul:37,lrelu:22,lrn:22,lru:[28,43],lstm:[3,36,41],lstsq:[42,43],lsun:45,lt_:42,lu_data:[42,43],lu_pivot:[42,43],lu_solv:[42,43],lu_unpack:43,lukasz:22,lvert:[22,23,43],macbook:41,machin:[14,22,31],maco:21,maddison:15,made:[1,5,19,22,35,37,41,49],mae:22,magma:[35,43],magma_2:35,magma_hom:35,magnitud:[22,24,43],mai:[1,2,4,7,8,9,10,11,13,14,15,19,22,23,26,28,30,35,36,37,38,39,42,43,49],main:[13,14,15,21,23,25,34,35,41,42,43],main_tag:41,mainli:[15,22,23],mainta:49,maintain:[4,14,15,22],major:[4,8,22,23,36,38],make:[1,2,3,7,8,11,13,14,15,17,19,21,22,23,25,26,28,29,30,32,33,35,36,37,40,41,42,43,44,49,50],make_grid:[41,50],manag:[1,4,22,30,31,36,43],mandatorili:13,mani:[1,4,5,13,14,19,22,24,25,26,27,29,31,33,40,41,42,43,45],manipul:30,manner:[3,26,42],mantissa:42,manual:[13,14,19,21,22,23,28,30,33,35,36,41],manual_se:[8,33,43],manual_seed_al:8,map:[7,15,19,22,23,35,36,39,43,48],map_:42,map_loc:[19,20,22,43],margin:[22,23,41],marginrankingloss:23,mark:[8,19,22,25,42],marker:8,market:[4,5],marten:24,mask:[22,42,43,48],masked_fil:[36,42],masked_fill_:42,masked_scatt:42,masked_scatter_:42,masked_select:[42,43],maskrcnn_resnet50_fpn:47,mass:15,master:[4,17,36],master_addr:14,master_port:14,mat1:[38,42,43],mat2:[38,42,43],mat:[38,41,42,43,44],match:[1,8,14,15,19,22,23,26,36,37,40,42,43,44,49],math:[13,19,22,23],mathbb:22,mathbf:[15,22],mathbin:43,mathcal:[22,24,43],mathemat:[22,23,43],mathrm:[15,22,43],matmul:[22,42,43],matplotlib:41,matric:[15,23,38,42
,43],matrix:[15,22,23,24,38,41,42,43,49],matrix_pow:[42,43],matrix_rank:43,matter:[1,2,5,19,25,43],max:[13,14,19,22,23,26,30,36,37,42,43,49,50,51],max_:22,max_bin:41,max_ev:37,max_indic:43,max_it:37,max_lr:37,max_memory_alloc:[8,28],max_memory_cach:[8,28],max_momentum:37,max_norm:[22,23],max_queu:41,max_siz:28,max_val:[22,23],max_valu:22,maxim:[22,37,43],maximum:[8,15,22,23,28,37,43,48,49],maxnorm:[42,43],maxpool1d:[23,36],maxpool2d:[23,36],maxpool3d:[23,36],maxpool:[22,36],maxunpool1d:23,maxunpool2d:23,maxunpool3d:23,may04_22:41,mayb:4,mean:[1,3,4,8,13,14,15,19,21,22,23,24,30,33,35,36,37,42,43,47,49],mean_vector:49,meant:[1,22],meantim:[22,23],measur:[8,15,22,23,31,37],mechan:[18,21,31],median:[15,42,43],medium:4,meet:28,megabyt:22,member:[4,5,13,14,19,22,30],membership:5,memo:22,memoiz:15,memori:[1,3,15,16,19,21,22,23,25,32,37,39,40,41,42,43,47],memory_alloc:[8,28],memory_cach:[8,28],memory_effici:47,memory_key_padding_mask:22,memory_mask:22,mendoza:6,mention:[17,19,28],mere:4,merg:[4,5,13,22],mesh:41,meshgrid:43,messag:[4,8,17,19,30,36,37],messmer:6,meta:41,metadata:[19,41,43,46],metadata_head:41,meter:47,meth:43,method:[1,4,7,8,13,14,15,17,21,22,23,24,28,29,30,32,36,37,38,40,41,42,43,44,47],metric:[8,37,41],michael:6,microwav:47,middl:36,might:[1,2,5,17,19,22,25,27,28,31,42,43,44],mileston:37,millisecond:8,min:[13,14,22,23,36,37,42,43,49,50,51],min_indic:43,min_lr:37,min_siz:47,min_val:[22,23],min_valu:22,min_x:43,mind:22,minfunc:37,mini:[13,22,23,47,50],minibatch:[13,22,23,43],minim:[1,4,17,32,37,43],minimum:[7,22,37,43,47],minkowski:23,minlength:[42,43],minor:[5,8],minu:43,minut:[4,14,41],mismatch:[19,30,43,49],miss:[22,35,36],missing_kei:22,mistak:30,mix:[7,15,22,27,36],mkl:[27,35,43],mkl_2018:35,mkl_fft:35,mkl_num_thread:27,mkl_thread:27,mkldnn:42,mkldnn_thread:27,mmap:21,mnasnet0_5:47,mnasnet0_75:47,mnasnet1_0:47,mnasnet1_3:47,mnist:[41,45],mnist_train:41,mnt:14,mobil:47,mobilenet_v2:47,mobilenetv2:47,mod:19,mode:[1,2,13,14,15,19,22,23,24,30,33,36,37,42,43,44,47,49],model:[1,2,3,8,14,18,19,20,22,23,25,27,28,32,33,36,37,41,43,45,49],model_dir:20,model_zoo:[18,47],moder:3,modif:[1,42,43],modifi:[1,13,19,22,23,25,28,36,37,42],modul:[1,3,7,17,18,21,23,25,27,28,30,31,32,35,36,41,42,43,47,49],module_kwarg:23,modulelist:19,modulu:43,momemtum:22,moment:[1,21,36,37],momentum:[22,23,25,37],monitor:[8,28,37,43],monoton:15,mont:15,moor:43,more:[1,2,5,7,8,13,14,15,19,20,21,22,23,24,25,27,28,29,30,31,36,37,38,40,41,42,43,44,47,49],moreov:[42,43],most:[1,2,4,8,13,14,15,17,19,21,22,23,25,28,32,37,38,40,42,43],mostli:[4,15],motion:44,motiv:4,motorbik:47,motorcycl:47,mountain:44,mous:47,moustapha:22,move:[3,19,20,21,22,23,28,32,37,39,41,42,43],moviepi:41,mpi:14,mrshenli:6,mseloss:23,msg:8,msys2:35,much:[1,2,4,5,13,22,28,42,49],mul:[1,19,36,38,42,43],mul_:[38,42],mulconst:29,mult:13,multi:[2,8,19,36,40,42,43],multicast:14,multidimension:22,multihead:22,multihead_attn:22,multilabelmarginloss:23,multilabelsoftmarginloss:23,multilay:22,multilin:41,multilinear:43,multimarginloss:23,multinomi:[42,43],multipl:[8,13,14,15,17,19,21,22,23,27,28,29,31,32,35,37,38,43,44,49],multipli:[22,23,38,43,47,49],multiplicand:43,multiprocess:[13,14,18,22,44],multiprocessing_context:13,multisteplr:37,multivari:[15,43],multivariate_norm:15,must:[1,7,8,13,14,15,19,21,22,23,24,26,29,32,33,36,37,39,42,43,49],mutabl:19,mutat:[19,42,49],mutual:[13,14],mvlgamma:[42,43],mvlgamma_:42,mvn:15,my_api:31,my_constraint:15,my_dict:19,my_experi:41,my_factori:15,my_imag:41,my_image_batch:41,my_image_hwc:41,my_lib:35,my_lib_add_backward_cud
a:35,my_lib_add_forward_cuda:35,my_list:19,my_lstm:30,my_mesh:41,my_modul:19,my_module_inst:19,my_paramet:19,my_registri:15,my_script_modul:19,my_segmentation_transform:49,my_submodul:19,my_transform:15,my_variable_nam:19,myconstraint:15,myconstraintclass:15,myfunc:1,myiterabledataset:13,mymodel:32,mymodul:[19,22,30],mypi:19,myscriptmodul:19,mytransform:15,n_0:23,n_1:43,n_2:43,n_class:22,n_d:43,n_fft:[42,43],n_i:[22,43],n_iter:41,n_k:[23,43],n_power_iter:22,n_t:22,naiv:13,name:[1,7,8,14,15,17,19,20,21,22,24,31,36,39,41,43,44,45,51],named_buff:22,named_children:22,named_modul:22,named_paramet:22,namedtupl:[13,22,43],namespac:19,nan:[1,43],narrow:[36,42,43],narrow_copi:[38,42],nasdaq:41,natalia:6,nativ:[19,21],natur:[1,2,4,15,22,33,43],nbatch:22,nccl2:22,nccl:22,nccl_debug:14,nccl_debug_subsi:14,nccl_socket_ifnam:14,nchannel:22,nchw:41,ncrop:49,ndarrai:[36,42,43,49],ndim:42,ndimens:42,ne_:42,nearest:[22,23,49],nearli:[1,32,42],necessari:[1,7,13,19,22,25,26,28,35,40,42,43],necessarili:[14,15,22,28,36,43],need:[1,4,5,8,13,14,15,19,21,22,23,25,28,29,30,31,32,33,35,36,37,38,39,42,43,44],need_weight:22,needs_input:31,needs_input_grad:[1,29],neeraj:6,neerajprad:6,neg:[8,13,15,19,22,23,24,36,42,43,49],neg_:42,negative_binomi:15,negative_slop:[22,23,24],neglig:[1,36],negoti:5,neighbor:[22,43],neighborhood:22,neighbour:[22,23],neither:[13,14],nelement:[22,42],neq:[22,43],nest:[1,8,19,22,42],nesterov:37,net:[19,22,28,41],netlifi:4,network:[4,15,19,22,23,24,25,28,36,37,49],neural:[4,19,22,24,28,37,47],neuron:22,never:[1,3,4,14,22,25,42],new_:[28,42],new_data:36,new_empti:42,new_ful:[28,42],new_group:[14,22],new_lr:37,new_on:42,new_stat:[8,43],new_strategi:21,new_tensor:[28,42],new_zero:42,newer:[27,28],newli:[4,25],next:[1,13,14,15,21,22,23,27,32,36,40,41,42,43,44],next_stat:15,nfs:14,ngimel:6,nhead:22,nhwc:41,nice:[1,22],niederreit:43,nielsen:15,nightli:41,niki:22,nine:[40,42],ninja:[7,35],nist:44,nll:22,nllloss:23,nlp:22,nms:48,nnz:[1,38,42,43],no_grad:[1,3,36,43],no_sync:22,noam:22,noarch:35,nock:15,node:[14,22,36],non:[1,3,7,14,15,19,21,24,26,28,29,30,33,41,42,43,48,49],non_block:[22,28,39,42],noncontigu:1,nondet_tol:1,nondetermin:1,nondeterminist:[9,10,11,22,23,42,43],none:[1,7,8,13,14,15,19,20,21,22,23,24,28,29,32,36,37,38,39,41,42,43,44,46,47,49,50],nonexist:19,nonlinear:24,nonlinearli:4,nonneg:15,nonnegative_integ:15,nonzero:[1,36,42,43],noordhui:6,noplot:4,nor:[13,14,22],norm:[22,23,36,37,42,43],norm_typ:[22,23],normal:[1,17,19,24,28,37,41,42,43,47,49,50],normal_:[24,28,42,43],normalized_shap:[22,23],nose:47,notabl:49,notat:[42,43],note:[1,7,8,9,10,11,13,14,15,16,17,18,19,21,22,23,25,26,27,29,31,32,36,37,38,41,42,43,44,46,49],notebook:[4,50],noth:[4,7,8],notic:[19,22,43],notifi:5,notimplementederror:15,notion:[13,22],now:[1,3,22,26,28,29,36,37,42,43],nproc:21,nrow:50,nsdf3:44,nthread:44,nuanc:4,nuc:43,nuclear:43,num:[22,43],num_channel:22,num_class:[23,44,47],num_decoder_lay:22,num_direct:22,num_embed:[22,23],num_encoder_lay:22,num_featur:[22,23],num_group:22,num_head:22,num_keypoint:47,num_lay:[22,36],num_lin:44,num_output_channel:49,num_paramet:22,num_process:32,num_replica:13,num_sampl:[13,42,43],num_threshold:41,num_work:[13,35,44],number:[1,2,3,4,13,14,15,19,21,22,23,26,27,28,29,33,36,37,38,39,41,42,43,44,46,47,48,49,50,51],number_of_vertic:41,numel:[42,43],numer:[13,15,19,22,23,29,36,37,42,43,51],numpi:[13,26,30,35,36,41,42,43,44,49,51],nvcc:7,nvidia:[1,14,28,30,35,43],nvprof:[1,2],nvtx:[1,2],nvvp:1,o_t:22,obermey:6,obj:[8,19,35,43],object:[1,8,13,14,15,16,19,20,21,22,25,27,28,29,30,32,35
,36,37,39,40,41,42,43,44,45,49,51],observ:[22,23],obtain:[1,13,14,15,21,22,23,27,42,43,47],obviou:[30,38],obvious:4,occas:[1,4,25],occasion:38,occupi:[8,22,23,28,51],occur:[8,13,19,22,23,28,30,36,42],occurr:43,odd:15,off:[1,4,8,9,10,22,23,27,31,42,43],offici:[5,14,22,35,47],offlin:[19,49],offset:[22,23,42,43,44,49],often:[1,2,4,7,13,14,15,19,22,23,30,31,36,37,41,42,43],old:[25,35,37,43],older:28,omagma:35,omega:43,omega_1:43,omega_d:43,omega_i:43,omit:[3,7,14,22,35,36,43,49],omkl:35,omp:27,omp_num_thread:27,onc:[1,4,13,14,16,21,22,25,27,28,29,31,36,37,41,43],one:[1,2,3,4,7,8,13,14,15,19,21,22,23,26,27,28,29,31,32,33,35,36,37,39,40,41,42,43,44,45,46,47,48,49],one_hot_categor:15,ones:[1,14,15,19,22,23,26,28,29,36,37,42,43],ones_:24,ones_lik:[28,36,43],onesid:[42,43],onfunctionent:31,onfunctionexit:31,onli:[1,2,3,4,5,8,13,14,15,16,19,21,22,23,24,25,27,29,30,31,32,34,35,36,37,38,41,42,43,47,49],onlin:37,only_input:1,onnx:[18,22],onnx_aten:36,onnx_aten_fallback:36,onnx_model:36,onnxruntim:36,onto:[8,19,21,30,43],opaqu:14,open:[1,5,15,19,21,35,43],openbla:35,openmp:[27,35],oper:[2,3,4,5,8,9,10,11,13,15,22,23,26,27,28,29,30,32,33,37,38,40,42,45,46,48,49],operand:43,operator_export_typ:36,operatorexporttyp:36,operatornam:[22,43],opinion:4,opnam:36,oppos:49,ops:[1,14,18,19,27,28,29,36,42,43,45],opset:36,opset_vers:36,opt:43,optim:[1,4,7,14,15,18,19,22,24,25,27,30,32,36],optimiz:[19,36],optimum:37,option:[1,3,7,8,13,14,15,17,20,22,23,24,29,30,33,36,38,40,41,42,43,44,46,47,49,50],optional_unwrap:19,orang:47,ord:43,order:[1,3,4,14,15,17,21,22,26,28,29,33,36,37,42,43,47,48,49],ordereddict:22,ordin:[40,42],ordinari:8,org:[2,4,17,22,35,41,47,49],organ:[4,5,31],orgqr:[42,43],orient:36,origin:[1,13,19,21,22,28,31,32,36,37,39,42,43,49],orign:49,orion:6,orionr:6,ormqr:[42,43],ort:36,ort_sess:36,orthogon:[24,43],orthogonal_:24,orthonorm:43,ossci:35,other:[1,2,4,5,7,8,13,15,19,21,23,25,26,27,28,29,30,32,33,34,36,37,41,42,47,49,50],otherwis:[1,4,5,7,14,22,23,32,39,42,43,44,47],otim:[23,43],our:[4,19,29,32,36,38],out:[1,4,5,17,19,21,22,23,25,26,32,36,38,40,41,42,43,49],out_caffe2:36,out_channel:22,out_featur:[22,29],out_j:22,out_ort:36,out_padh:23,out_padt:23,out_padw:23,out_unf:22,outer:[43,47],outlier:22,output1:[22,36],output2:22,output:[1,2,3,4,8,13,14,15,19,22,23,25,27,29,30,35,36,37,38,41,42,43,44,47,48,49],output_2d:22,output_4d:22,output_devic:[22,23],output_featur:29,output_nam:36,output_pad:[22,23],output_ratio:22,output_s:[22,23,48],output_tensor_list:14,outsid:[5,13,19,23,28,49],oven:47,over:[1,5,13,14,15,19,21,22,23,26,27,32,36,37,38,42,43,44,48,49,50],overal:[5,14,25,32,49],overall_end:13,overall_start:13,overflow:[23,43],overhead:[1,2,14,31,42],overheard:44,overlap:[1,13,22,28,48],overparameter:15,overrid:[7,14,15,22,23,36,37,41,43],overridden:[1,7,22],overrit:13,overshoot:23,overview:[21,25],overwhelm:4,overwrit:[13,22,25],owen:43,own:[4,5,14,15,22,28,36,43],owner:17,ownership:[4,5],p1d:23,p2d:23,p3d:23,p_c:22,p_i:22,p_tensor:42,pace:4,pack:[22,30,35,43],pack_padded_sequ:30,packag:[4,8,15,17,18,22,37,41,43,45,46],packagesnotfounderror:35,packed_input:30,packed_output:30,packedsequ:12,pad:[13,30,33,36,41,43,49,50],pad_if_need:49,pad_mod:[42,43],pad_packed_sequ:30,pad_valu:50,padded_input:30,padding_idx:[22,23],padding_input:30,padding_mod:[22,23,49],padding_valu:22,padh:23,padt:23,padw:23,page:[4,13,22,28],pai:35,pair:[19,22,23,37,38,41,43],pairwis:[15,22],pairwisedist:23,paper:[4,22,37,47],parallel:[0,13,14,22,23,27,28,33,35,49],parallel_info:[0,27],parallelli:44,param1:15,param2:15,param:[1,15,22
,24,25,36,37,47],param_byt:37,param_group:37,param_shap:15,paramet:[1,3,7,8,13,14,15,16,17,20,21,23,24,25,29,31,32,34,36,38,39,41,42,43,44,45,46,47,48,49,50],parameter:[15,42],parameteriz:15,parametr:[15,29],parent:[21,35,41],park:47,parmar:22,pars:[1,14],parse_arg:[28,36],parser:28,part:[2,3,4,5,7,14,15,19,20,22,25,30,36,37,38,43,44],parti:[5,17],partial:[15,22,23,36,43],particip:[13,14,22],particular:[4,13,19,22,27,28,30,31,33,36,42,43,44],particularli:[13,19,22],partit:22,partli:5,partner:4,pascal:[44,47],pass:[1,3,4,7,13,14,15,17,19,21,22,24,25,27,28,31,36,37,38,41,42,43,44,47,48,49],past:[14,30,47],paszk:[5,6],patch:[4,22],path:[1,2,7,14,17,19,25,34,44,46],path_importer_cach:17,path_to_hub_dir:17,pathwai:19,patienc:37,pattern:[14,19,22,28,29,30],pdb:19,pdf:[22,47,49],pdist:22,peak:[8,37],peer:[4,14,28],penalti:37,pend:41,penros:43,peopl:4,per:[7,8,13,14,22,23,27,31,33,41,43,46,47],per_index_weight:22,per_sample_weight:[22,23],per_work:13,perform:[1,3,11,12,13,14,15,19,21,22,23,24,25,28,29,33,37,38,39,40,41,42,43,46,48,49],period:[32,37,43],permit:38,permut:[13,36,42,43],perplex:15,persist:[3,4,12,22,35],person:[4,5,18,45],perspect:49,perturb:[1,43],peter:6,peterjc123:[6,35],phase:35,phenomenon:30,phi:23,phone:47,photo:44,phototour:45,php:44,phy:43,pic:49,pick:49,pickl:[13,21,22,43],pickle_load_arg:43,pickle_modul:43,pickle_protocol:43,pid:30,piec:4,pieter:6,pietern:6,pil:[44,45],pillow:[41,49],pin:[22,39,42,43],pin_memori:[13,28,39,42,43],pinvers:[42,43],pip:[35,41],pipelin:49,pivot:[42,43],pixel:[22,23,44,49,50],pixel_shuffl:[22,36],pixelshuffl:23,pixelwis:47,pizza:47,pjh5:6,pkg:35,place:[4,8,13,14,19,22,23,28,31,36,39,42,49],placehold:22,plai:14,plain:[7,22],plan:[4,14,22,43],plane:[22,23,43,44],plant:47,platform:[7,33,43,47],pleas:[1,2,4,5,9,10,11,14,15,19,22,23,29,35,36,37,41,42,43,44],plenti:30,plot:41,plu:49,plume:44,pmf:15,png:44,point:[1,4,5,8,13,19,22,23,25,33,37,40,41,42,43,44,46,48,51],pointer:8,pointwis:[15,26],poisson:[22,23],poissonnllloss:23,poli:44,polici:[15,37],policy_network:15,polosukhin:22,polygon:44,polymorph:19,pool:[27,29,32,33,48],pooled_w:48,pop:[8,22],popul:[1,15,42],popular:45,popularli:49,port:14,portion:[22,23,37,43],pos_weight:[22,23],posit:[1,13,15,17,22,23,36,42,43,49,51],positive_definit:15,positive_integ:15,positivedefinit:15,possess:5,possibl:[5,7,13,15,19,21,22,23,24,25,29,32,35,40,42,43,44],post:[4,30,35,47,49],postprocess:47,pot:47,potenti:[11,14,21,22,23,25,36],potential_energi:15,pottedpl:47,pow:[1,36,42,43],pow_:42,powbackward0:1,power:[22,23,37,43,49],powertransform:15,pr_curv:41,practic:[13,15,18,19,21,47],pradhan:6,pre:[1,17,22,36,37,42,44,47],preced:27,precis:[1,7,15,22,36,41,43,47],precision_matrix:15,precompil:31,predict:[22,41,47],predict_net:36,preemptiv:22,prefer:[5,13,22],preferr:43,prefix:[17,22,38],prelu:36,prepar:36,prepend:[7,13,22,26,43],preprocess:[42,47],presenc:5,present:[5,14,20,21,22,25,40,43,44,46,47],preserv:[13,19,22,23,24,28,42,49],preserve_rng_st:3,pressur:[1,25],pretrain:[17,22,25,36,47],pretrained_backbon:47,pretti:[19,43],prevent:[4,8,13,14,21,22,23,38,43],previou:[14,22,35,42,43],previous:[1,19,26,28,42],prim:19,primari:5,primarili:[15,42],primit:[14,19,27],print:[1,13,17,19,22,23,27,28,29,36,37,41,42,43,44],printable_graph:36,printer:19,prior:[4,5,26,37],prioriti:[4,8],pro:[35,41],prob:15,probabl:[13,21,22,23,29,35,36,41,42,43,49],problem:[4,14,21,22,30,32,33,35,43],problemat:[4,19],proce:28,procedur:[19,44],proceed:8,process:[1,7,8,14,15,17,19,21,22,23,27,28,31,32,33,35,38,39,44,47],process_group:22,process_id:22,
processgroup:14,prod:[22,36,42,43],prod_:[22,43],prod_d:22,produc:[4,7,8,13,19,21,22,23,26,28,31,35,36,38,43],producer_info:31,product:[1,14,15,19,22,23,31,42,43,49],prof:1,profil:[2,43],program:[1,2,8,13,14,19,25,28,30,31,32,35,41],programm:19,progress:[8,17,20,37,47],project:[4,17,34],projector:41,promot:22,prompt:35,prone:[21,32],propag:[15,21,36,38,42],proper:[22,28,35],properli:[4,22,32,40,43],properti:[1,13,15,19,22,23,28,37,40,51],proport:[22,49],proportion:[22,23],propos:[5,6,37],protobuf:36,protocol:[13,35,43],prototyp:40,prove:21,proven:[4,22],provid:[1,4,5,7,13,14,15,17,19,21,22,23,28,36,37,38,39,40,41,42,43,46,47,49,51],pseudo:43,pseudoinvers:15,pseudorandom:33,psi:43,pth:[17,19,20],pts:46,publish:4,puhrsch:6,pull:5,purchas:5,pure:19,purg:41,purge_step:41,purpos:[14,22,42,43],push:[4,5,8],pushcallback:31,put:[4,13,17,21,22,28,32,37,42,43,44],put_:42,pybind11:[7,19],pyc:31,pycapsul:16,pyplot:41,python2:[14,43],python3:[14,43],python:[1,2,5,7,8,13,14,17,21,22,23,25,26,27,28,29,30,31,32,36,38,42,43,49],pytorch:[0,1,2,7,8,13,15,17,19,20,22,26,27,28,30,31,32,35,37,40,41,42,43,44,47,51],pytorch_jit:19,q_scale:42,q_zero_point:42,qmnist:45,qscheme:42,qtensor:42,quad:22,quadrat:30,qualiti:[4,44],quantiti:37,quantiz:[1,42],quasirandom:43,queri:[8,13,22,28],question:[4,18],queu:[8,28],queue:[4,21,41],queue_2:21,quick:[1,4],quickli:4,quit:[4,30],qw_i:22,r_t:22,racket:47,rais:[1,4,15,19,21,25,28,42,43,49],raise_except:1,ram:[41,43],rand:[1,19,23,36,41,42,43,47],rand_lik:43,randint:[22,23,38,41,42,43,49],randint_lik:43,randn:[1,19,22,23,25,26,28,29,36,38,40,41,42,43],randn_lik:[36,43],random:[15,17,22,23,33,36,41,42,44,47,49],random_:[22,23,42,43],random_devic:43,random_offset:44,random_split:13,randomaffin:49,randomappli:49,randomchoic:49,randomcrop:[44,49],randomeras:49,randomgrayscal:49,randomhorizontalflip:49,randomli:[1,13,22,23,31,44,49],randomord:49,randomperspect:49,randomresizedcrop:49,randomrot:49,randomsampl:13,randomsizedcrop:49,randomverticalflip:49,randperm:43,rang:[1,8,13,14,15,19,22,23,30,32,36,37,41,42,43,44,47,49,50],range_pop:8,range_push:8,rangl:15,rank:[13,14,15,22,32,43],rapidli:30,rare:4,rate:[15,22,31,41,46,47],rather:[1,3,7,19,23,26,36,41,42,43,50],ratio:[15,22,49],raw:[19,22,36],rbrace:43,rceil:43,rcond:43,rdinat:[38,43],reach:[4,5,13,32,37],reachabl:14,read:[13,14,19,26,28,31,36,37,42,43,46],read_video:46,read_video_timestamp:46,readabl:[0,13,36],readi:[4,7,22,43],readlin:[19,43],real:[13,15,17,22,43,49],real_vector:15,realiti:2,realli:[1,4,25,43],realloc:43,realvector:15,rearrang:[22,23],reason:[13,14,19,23,25,36,40],rebas:4,reblitz:6,rebuild:4,rebuilt:4,recal:[22,29,41,47],receiv:[1,4,13,14,15,21,22,32],recent:[1,4],recip:22,reciproc:[36,42,43],reciprocal_:42,recogn:13,recognit:[44,47],recommend:[1,13,14,17,19,21,22,24,25,27,28,29,32,36,42,43],recomput:[3,22,37],reconstruct:[8,22],record:[1,8,19,22,25,36,41,42,43],record_ev:8,record_shap:1,recordfunct:31,recov:[22,43],recreat:25,rectangl:49,rectifi:[22,23,24],recurr:[14,19,28,37],recurs:[15,19,22],recv:14,redistribut:35,reduc:[1,8,14,21,22,23,35,37,38,42,43],reduce_add:8,reduce_multigpu:14,reduce_op:14,reducelronplateau:37,reduceop:14,reduct:[14,22,23],redund:[14,43],reevalu:37,refactor:[4,34,35],refcount:[21,32],refer:[8,13,14,15,17,18,21,22,23,25,29,30,32,41,42,43,45,47],referenc:[19,25,43],reflect:[19,22,23,30,42,43,49],reflection_pad:36,reflectionpad2d:23,reflector:43,refriger:47,regard:[19,22,23,43],region:[15,19,21,22,23,28,43,48,49],regist:[1,15,21,22,29,31,36,42,43],register_backward_hook:22,register_buff:
[19,22,29],register_custom_op_symbol:36,register_forward_hook:22,register_forward_pre_hook:22,register_hook:[1,22,42],register_kl:15,register_packag:43,register_paramet:[22,29],registr:22,regress:[4,22,47],regular:[1,2,22,31,36,37],regularli:4,reimplement:22,reinforc:15,reiniti:17,reinterpret:[15,42],reinterpreted_batch_ndim:15,rel:[1,5,7,15,22,27,28,31,37,43],relat:[4,5,13],relationship:1,relative_path_to_checkpoint:17,relative_to:35,relax:[15,19],relaxed_bernoulli:15,relaxed_categor:15,releas:[4,8,14,17,21,22,23,28,33,35,36,41,43],relev:[5,42],reli:[5,13,22,33],reload:17,relu1:22,relu2:22,relu:[19,24,36],relu_:23,rem:35,remain:[1,15,21,30,43,44],remaind:[42,43],remainder_:42,remap:[19,20,43],rememb:[30,32],remot:[13,14,47],remov:[1,5,14,19,22,23,42,43,48],removablehandl:22,render:[4,41],renorm:[22,23,42,43],renorm_:42,rep:36,repackag:30,reparameter:[15,22],reparametr:[15,23],reparametriz:15,repeat:[15,23,36,42,43,49],repeat_interleav:[42,43],repeatedli:[28,38,43],repetit:43,repl:1,replac:[7,13,19,22,25,31,32,35,36,42,43],replic:[13,22,23],replica:[13,14,22],replication_pad:36,replicationpad2d:23,repo:[4,17,35,37,47],repo_nam:17,repo_own:17,report:[1,2,5,28,47],repositori:[5,17,29,32],repr:43,repres:[1,8,13,15,16,19,22,25,29,31,36,37,38,40,43,51],represent:[19,22,36,38,42,51],reproduc:[4,9,10,11,17,18,22,23,37,42,43],request:[5,14,25,28],requir:[1,3,5,7,13,14,15,17,19,21,22,23,25,27,28,29,30,31,32,36,37,41,42,43,44,47],require_grad:1,require_grad_:42,requires_grad:[1,15,22,23,29,38,42,43],requires_grad_:[1,22,23,38,42,43],rerun:3,res:43,resampl:49,rescal:[22,23,49],research:[4,17],reset:[8,22,36,43],reset_max_memory_alloc:8,reset_max_memory_cach:8,reshap:[22,36,38,41,42,43,49],reshape_a:[36,42],reshuffl:13,resid:[14,22,28,42,43],residu:[43,47],resili:37,resiz:[22,23,39,42,43,47,49],resize_:[1,19,39,42,43],resize_as_:[1,42],resizeas_:38,resized_crop:49,resnet101:47,resnet152:47,resnet18:[17,19,20,25,47],resnet34:47,resnet50:[17,41,47],resnet:[17,19,36,41],resnext101_32x8d:47,resnext50_32x4d:47,resolut:[22,23],resolv:[4,5,15,19,22,35,36],resourc:[13,21,44],respect:[1,14,15,22,37,39,42,43,44,49],respond:4,respons:[2,4,5,14,15,22,23,28],rest:[4,13,38],restart:[21,37,41],restor:[3,34,43],restrict:[13,19,22],restructur:4,result:[1,2,4,7,8,13,14,15,19,22,23,24,25,26,27,28,29,30,33,36,37,38,40,41,42,43,47,49],result_avg:49,resum:[37,41],retain:[1,21,32,42,43],retain_grad:[1,42],retain_graph:[1,42],rethink:47,retreiv:3,retriev:[1,13,22,23,31],return_count:[42,43],return_indic:[22,23],return_invers:[42,43],return_typ:43,reus:[1,14,25,49],reveal:38,revers:[15,19,22,25,42,43,49],revert:[5,22],review:5,reward:15,rewrit:25,rfft:[42,43],rfloor:[22,23,43],rgb:[22,41,47,49],rgba:49,rho:37,riba:22,richard:[6,15],richardson:6,right:[4,14,17,21,22,23,36,37,43,49],right_ankl:47,right_ear:47,right_elbow:47,right_ey:47,right_hip:47,right_kne:47,right_should:47,right_wrist:47,risk:4,riski:4,rmsprop:37,rng:[3,8,13,30,33,43],rnn:[19,30,36,41],robin:14,robust:21,roi:48,roi_align:48,roi_pool:48,roi_width:48,roialign:48,roipool:48,roll:[42,43],roof:1,root:[25,38,43,44],ross:22,rot90:[42,43],rotat:[15,43,49],rough:4,roughli:[13,43],round:[14,36,42,43],round_:42,roundtrip:4,routin:43,row:[13,23,38,41,42,43,50],row_limit:1,rpn:47,rprop:37,rrelu:36,rrelu_:23,rsampl:15,rsqrt:[42,43],rsqrt_:42,rst:4,rsub:36,rtol:[1,19,42,43],rule:[1,14,15,19,22,25,26,42,43],run:[1,2,3,4,13,14,19,21,22,25,27,28,30,31,32,33,35,36,37,41,43],run_14h:41,run_fn:[1,3],runnabl:41,running_mean:[22,23],running_var:[22,23],runtim:[1,3,7,14,21,32],runt
[searchindex.js: auto-generated, minified Sphinx search index payload]
\ No newline at end of file
diff --git a/docs/stable/torch.html b/docs/stable/torch.html
index bd7fd29104e6..dec3d55b31f6 100644
--- a/docs/stable/torch.html
+++ b/docs/stable/torch.html
@@ -1120,6 +1120,53 @@

    Tensors +
    +torch.empty_strided(size, stride, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) → Tensor
    +

Returns a tensor filled with uninitialized data. The shape and strides of the tensor are +defined by the variable arguments size and stride respectively. +torch.empty_strided(size, stride) is equivalent to +torch.empty(size).as_strided(size, stride).

    +
    +

    Warning

    +

    More than one element of the created tensor may refer to a single memory +location. As a result, in-place operations (especially ones that are +vectorized) may result in incorrect behavior. If you need to write to +the tensors, please clone them first.

    +
    +
    +
    Parameters
    +
      +
• size (tuple of ints) – the shape of the output tensor

    • +
• stride (tuple of ints) – the strides of the output tensor

    • +
    • dtype (torch.dtype, optional) – the desired data type of returned tensor. +Default: if None, uses a global default (see torch.set_default_tensor_type()).

    • +
    • layout (torch.layout, optional) – the desired layout of returned Tensor. +Default: torch.strided.

    • +
    • device (torch.device, optional) – the desired device of returned tensor. +Default: if None, uses the current device for the default tensor type +(see torch.set_default_tensor_type()). device will be the CPU +for CPU tensor types and the current CUDA device for CUDA tensor types.

    • +
    • requires_grad (bool, optional) – If autograd should record operations on the +returned tensor. Default: False.

    • +
• pin_memory (bool, optional) – If set, the returned tensor is allocated in +pinned memory. Works only for CPU tensors. Default: False.

    • +
    +
    +
    +

    Example:

    +
    >>> a = torch.empty_strided((2, 3), (1, 2))
    +>>> a
    +tensor([[8.9683e-44, 4.4842e-44, 5.1239e+07],
    +        [0.0000e+00, 0.0000e+00, 3.0705e-41]])
    +>>> a.stride()
    +(1, 2)
    +>>> a.size()
    +torch.Size([2, 3])
    +
    +
    +
    +
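A minimal sketch of the aliasing that the warning above describes, assuming a zero stride along the first dimension (values illustrative):

>>> b = torch.empty_strided((3, 2), (0, 1))  # stride 0: all rows alias the same storage
>>> b[0, 0] = 7.0
>>> b[:, 0]  # every row reads the one shared memory location
tensor([7., 7., 7.])
>>> safe = b.clone()  # clone first if independent writes are needed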
    torch.full(size, fill_value, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor
    diff --git a/docs/stable/torchvision/datasets.html b/docs/stable/torchvision/datasets.html index dbbb498748ae..afb1ca3b4995 100644 --- a/docs/stable/torchvision/datasets.html +++ b/docs/stable/torchvision/datasets.html @@ -34,7 +34,7 @@ - + @@ -1437,7 +1437,7 @@

    UCF101 - + diff --git a/docs/stable/torchvision/index.html b/docs/stable/torchvision/index.html index 679e0887f1cb..f13fa272bf61 100644 --- a/docs/stable/torchvision/index.html +++ b/docs/stable/torchvision/index.html @@ -296,12 +296,17 @@

    torchvisionUCF101

  • +
  • torchvision.io +
  • torchvision.models
  • +
  • torchvision.ops
  • torchvision.transforms
  • diff --git a/docs/stable/torchvision/ops.html b/docs/stable/torchvision/ops.html new file mode 100644 index 000000000000..cb10167974d1 --- /dev/null +++ b/docs/stable/torchvision/ops.html @@ -0,0 +1,633 @@ + + + + + + + + + + + + torchvision.ops — PyTorch master documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

    torchvision.ops

    +

torchvision.ops implements operators that are specific to Computer Vision.

    +
    +

    Note

    +

    Those operators currently do not support TorchScript.

    +
    +
    +
    +torchvision.ops.nms(boxes, scores, iou_threshold)[source]
    +

    Performs non-maximum suppression (NMS) on the boxes according +to their intersection-over-union (IoU).

    +

    NMS iteratively removes lower scoring boxes which have an +IoU greater than iou_threshold with another (higher scoring) +box.

    +
    +
    Parameters
    +
      +
• boxes (Tensor[N, 4]) – boxes to perform NMS on. They +are expected to be in (x1, y1, x2, y2) format

    • +
    • scores (Tensor[N]) – scores for each one of the boxes

    • +
• iou_threshold (float) – discards all overlapping +boxes with IoU > iou_threshold

    • +
    +
    +
    Returns
    +

    keep – int64 tensor with the indices +of the elements that have been kept +by NMS, sorted in decreasing order of scores

    +
    +
    Return type
    +

    Tensor

    +
    +
    +
    + +
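A minimal doctest-style sketch of nms, assuming illustrative boxes and scores: the first two boxes overlap with IoU 0.81, so the lower-scoring one is suppressed, and the kept indices come back in decreasing score order.

>>> import torch
>>> from torchvision import ops
>>> boxes = torch.tensor([[ 0.,  0., 10., 10.],
...                       [ 1.,  1., 10., 10.],    # IoU 0.81 with the first box
...                       [20., 20., 30., 30.]])   # disjoint from both
>>> scores = torch.tensor([0.9, 0.8, 0.7])
>>> ops.nms(boxes, scores, iou_threshold=0.5)
tensor([0, 2])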
    +
    +torchvision.ops.roi_align(input, boxes, output_size, spatial_scale=1.0, sampling_ratio=-1)[source]
    +

Performs the Region of Interest (RoI) Align operator described in Mask R-CNN

    +
    +
    Parameters
    +
      +
    • input (Tensor[N, C, H, W]) – input tensor

    • +
    • boxes (Tensor[K, 5] or List[Tensor[L, 4]]) – the box coordinates in (x1, y1, x2, y2) +format where the regions will be taken from. If a single Tensor is passed, +then the first column should contain the batch index. If a list of Tensors +is passed, then each Tensor will correspond to the boxes for an element i +in a batch

    • +
    • output_size (int or Tuple[int, int]) – the size of the output after the cropping +is performed, as (height, width)

    • +
    • spatial_scale (float) – a scaling factor that maps the input coordinates to +the box coordinates. Default: 1.0

    • +
• sampling_ratio (int) – number of sampling points in the interpolation grid +used to compute the output value of each pooled output bin. If > 0, +then exactly sampling_ratio x sampling_ratio grid points are used. If +<= 0, then an adaptive number of grid points is used (computed as +ceil(roi_width / pooled_w), and likewise for height). Default: -1

    • +
    +
    +
    Returns
    +

    output (Tensor[K, C, output_size[0], output_size[1]])

    +
    +
    +
    + +
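A minimal sketch of roi_align with the Tensor[K, 5] box format, continuing the imports from the nms sketch above (the first column of each row is the batch index; shapes illustrative). With the default sampling_ratio=-1, this 16-pixel-wide box pooled to width 7 gets ceil(16 / 7) = 3 adaptive sampling points per bin in each direction.

>>> x = torch.rand(1, 3, 32, 32)                   # N=1 feature map
>>> rois = torch.tensor([[0., 4., 4., 20., 20.]])  # batch index + (x1, y1, x2, y2)
>>> ops.roi_align(x, rois, output_size=(7, 7)).shape
torch.Size([1, 3, 7, 7])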
    +
    +torchvision.ops.roi_pool(input, boxes, output_size, spatial_scale=1.0)[source]
    +

Performs the Region of Interest (RoI) Pool operator described in Fast R-CNN

    +
    +
    Parameters
    +
      +
    • input (Tensor[N, C, H, W]) – input tensor

    • +
    • boxes (Tensor[K, 5] or List[Tensor[L, 4]]) – the box coordinates in (x1, y1, x2, y2) +format where the regions will be taken from. If a single Tensor is passed, +then the first column should contain the batch index. If a list of Tensors +is passed, then each Tensor will correspond to the boxes for an element i +in a batch

    • +
    • output_size (int or Tuple[int, int]) – the size of the output after the cropping +is performed, as (height, width)

    • +
    • spatial_scale (float) – a scaling factor that maps the input coordinates to +the box coordinates. Default: 1.0

    • +
    +
    +
    Returns
    +

    output (Tensor[K, C, output_size[0], output_size[1]])

    +
    +
    +
    + +
    +
    +class torchvision.ops.RoIAlign(output_size, spatial_scale, sampling_ratio)[source]
    +

    See roi_align

    +
    + +
    +
    +class torchvision.ops.RoIPool(output_size, spatial_scale)[source]
    +

    See roi_pool

    +
    + +
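The RoIAlign and RoIPool modules above wrap the corresponding functionals; a minimal sketch reusing x and rois from the roi_align sketch:

>>> pool = ops.RoIPool(output_size=(7, 7), spatial_scale=1.0)
>>> pool(x, rois).shape
torch.Size([1, 3, 7, 7])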
\ No newline at end of file
diff --git a/docs/stable/torchvision/transforms.html b/docs/stable/torchvision/transforms.html
index 819f8d2c614f..e7f6a24bba1d 100644
--- a/docs/stable/torchvision/transforms.html
+++ b/docs/stable/torchvision/transforms.html
@@ -35,7 +35,7 @@ - + @@ -1427,7 +1427,7 @@

    Functional Transforms - + From 955ee0d26b76a6aba7e56023213644c5953aae4c Mon Sep 17 00:00:00 2001 From: pytorchbot Date: Mon, 5 Aug 2019 02:46:14 +0000 Subject: [PATCH 08/12] auto-generating sphinx docs --- docs/stable/objects.inv | Bin 11574 -> 11574 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/docs/stable/objects.inv b/docs/stable/objects.inv index 6c29ce27842dc3410e6be19ad596b1927305c9c5..e1cdb415d0b590e3de284e82c6fe06cb84c92bb2 100644 GIT binary patch delta 18 ZcmdlMwJmCbH@l^AqD4wd;>MsSIsiq&2Sfk> delta 18 ZcmdlMwJmCbH@k_sg^@*C%Eq84IsiiX2JHX< From df60286741374e730abfb9e8de29b1d07b4502bf Mon Sep 17 00:00:00 2001 From: pytorchbot Date: Mon, 5 Aug 2019 05:09:29 +0000 Subject: [PATCH 09/12] auto-generating sphinx docs --- .../_modules/torch/utils/data/dataloader.html | 5 ++++- docs/stable/objects.inv | Bin 11574 -> 11574 bytes 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/stable/_modules/torch/utils/data/dataloader.html b/docs/stable/_modules/torch/utils/data/dataloader.html index b00b2784d4b7..12c8b76a6735 100644 --- a/docs/stable/_modules/torch/utils/data/dataloader.html +++ b/docs/stable/_modules/torch/utils/data/dataloader.html @@ -1159,7 +1159,10 @@

    Source code for torch.utils.data.dataloader

     
                     # Exit workers now.
                     self.workers_done_event.set()
    -                for worker_id in range(self.num_workers):
    +                for worker_id in range(len(self.workers)):
    +                    # Get number of workers from `len(self.workers)` instead of
    +                    # `self.num_workers` in case we error before starting all
    +                    # workers.
                         if self.workers_status[worker_id]:
                             self._shutdown_worker(worker_id)
                     for w in self.workers:
    diff --git a/docs/stable/objects.inv b/docs/stable/objects.inv
    index e1cdb415d0b590e3de284e82c6fe06cb84c92bb2..3cd7d5d5ea74277c7cdcc937ca656df5b28929e4 100644
    GIT binary patch
    delta 17
    YcmdlMwJmCbH=AK%vY}<_#=xgK06#GX*#H0l
    
    delta 17
    YcmdlMwJmCbH=Ct#qD4x|#=xgK06zf+;s5{u
    
    
    From 1d0c85a0d6797629bd03b593f02fe5cd8713f71e Mon Sep 17 00:00:00 2001
    From: pytorchbot 
    Date: Mon, 5 Aug 2019 20:09:51 +0000
    Subject: [PATCH 10/12] auto-generating sphinx docs
    
    ---
     docs/stable/_modules/torch/functional.html    |  12 ++++++------
     .../models/detection/mask_rcnn.html           |   4 ++--
     docs/stable/objects.inv                       | Bin 11574 -> 11574 bytes
     docs/stable/torch.html                        |   8 ++++----
     docs/stable/torchvision/models.html           |   2 +-
     5 files changed, 13 insertions(+), 13 deletions(-)
    
    diff --git a/docs/stable/_modules/torch/functional.html b/docs/stable/_modules/torch/functional.html
    index a39f8d5a50f6..01c909f57d3c 100644
    --- a/docs/stable/_modules/torch/functional.html
    +++ b/docs/stable/_modules/torch/functional.html
    @@ -472,12 +472,12 @@ 

    Source code for torch.functional

             tensor (Tensor): A tensor to check
     
         Returns:
    -        Tensor: A ``torch.ByteTensor`` containing a 1 at each location of finite elements and 0 otherwise
+        Tensor: A ``torch.Tensor`` with dtype ``torch.bool`` containing ``True`` at each location of finite elements and ``False`` otherwise
     
         Example::
     
             >>> torch.isfinite(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
    -        tensor([ 1,  0,  1,  0,  0], dtype=torch.uint8)
    +        tensor([True,  False,  True,  False,  False])
         """
         if not isinstance(tensor, torch.Tensor):
             raise TypeError("The argument is not a tensor: {}".format(repr(tensor)))
    @@ -487,7 +487,7 @@ 

    Source code for torch.functional

         # have a similar concept. It's safe to assume any created LongTensor doesn't
         # overflow and it's finite.
         if not tensor.is_floating_point():
    -        return torch.ones_like(tensor, dtype=torch.uint8)
    +        return torch.ones_like(tensor, dtype=torch.bool)
         return (tensor == tensor) & (tensor.abs() != inf)
    @@ -498,17 +498,17 @@

    Source code for torch.functional

             tensor (Tensor): A tensor to check
     
         Returns:
    -        Tensor: A ``torch.ByteTensor`` containing a 1 at each location of `+/-INF` elements and 0 otherwise
+        Tensor: A ``torch.Tensor`` with dtype ``torch.bool`` containing ``True`` at each location of `+/-INF` elements and ``False`` otherwise
     
         Example::
     
             >>> torch.isinf(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
    -        tensor([ 0,  1,  0,  1,  0], dtype=torch.uint8)
    +        tensor([False,  True,  False,  True,  False])
         """
         if not isinstance(tensor, torch.Tensor):
             raise TypeError("The argument is not a tensor: {}".format(repr(tensor)))
         if tensor.dtype in [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]:
    -        return torch.zeros_like(tensor, dtype=torch.uint8)
    +        return torch.zeros_like(tensor, dtype=torch.bool)
         return tensor.abs() == inf
    diff --git a/docs/stable/_modules/torchvision/models/detection/mask_rcnn.html b/docs/stable/_modules/torchvision/models/detection/mask_rcnn.html index 8fd31fa535db..1ae6f1dd33fd 100644 --- a/docs/stable/_modules/torchvision/models/detection/mask_rcnn.html +++ b/docs/stable/_modules/torchvision/models/detection/mask_rcnn.html @@ -294,7 +294,7 @@

    Source code for torchvision.models.detection.mask_rcnn

    - boxes (FloatTensor[N, 4]): the ground-truth boxes in [x1, y1, x2, y2] format, with values between 0 and H and 0 and W - labels (Int64Tensor[N]): the class label for each ground-truth box - - masks (UInt8Tensor[N, 1, H, W]): the segmentation binary masks for each instance + - masks (UInt8Tensor[N, H, W]): the segmentation binary masks for each instance The model returns a Dict[Tensor] during training, containing the classification and regression losses for both the RPN and the R-CNN, and the mask loss. @@ -541,7 +541,7 @@

    Source code for torchvision.models.detection.mask_rcnn

    - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with values between ``0`` and ``H`` and ``0`` and ``W`` - labels (``Int64Tensor[N]``): the class label for each ground-truth box - - masks (``UInt8Tensor[N, 1, H, W]``): the segmentation binary masks for each instance + - masks (``UInt8Tensor[N, H, W]``): the segmentation binary masks for each instance The model returns a ``Dict[Tensor]`` during training, containing the classification and regression losses for both the RPN and the R-CNN, and the mask loss. diff --git a/docs/stable/objects.inv b/docs/stable/objects.inv index 3cd7d5d5ea74277c7cdcc937ca656df5b28929e4..f572cfb59ef1fc6bd1d9ba8e8077ebc5fdbcb5ae 100644 GIT binary patch delta 18 ZcmdlMwJmCbH+x!=sX>yd(Z--BIsiqe2MhoJ delta 18 ZcmdlMwJmCbH@jhCvY};a;>MsSIsirK2Ri@& diff --git a/docs/stable/torch.html b/docs/stable/torch.html index dec3d55b31f6..f6524f064611 100644 --- a/docs/stable/torch.html +++ b/docs/stable/torch.html @@ -5311,7 +5311,7 @@

    Comparison Ops

    tensor (Tensor) – A tensor to check

    Returns
    -

    A torch.ByteTensor containing a 1 at each location of finite elements and 0 otherwise

    +

A torch.Tensor with dtype torch.bool containing True at each location of finite elements and False otherwise

    Return type

    Tensor

    @@ -5319,7 +5319,7 @@

    Comparison Ops
    >>> torch.isfinite(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
    -tensor([ 1,  0,  1,  0,  0], dtype=torch.uint8)
    +tensor([True,  False,  True,  False,  False])
     

    @@ -5333,7 +5333,7 @@

    Comparison Ops

    tensor (Tensor) – A tensor to check

    Returns
    -

    A torch.ByteTensor containing a 1 at each location of +/-INF elements and 0 otherwise

    +

A torch.Tensor with dtype torch.bool containing True at each location of +/-INF elements and False otherwise

    Return type

    Tensor

    @@ -5341,7 +5341,7 @@

    Comparison Ops
    >>> torch.isinf(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
    -tensor([ 0,  1,  0,  1,  0], dtype=torch.uint8)
    +tensor([False,  True,  False,  True,  False])
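One practical consequence of the uint8-to-bool change, as a minimal sketch: the result now indexes tensors directly as a boolean mask.

>>> t = torch.tensor([1., float('inf'), 2., float('nan')])
>>> t[torch.isfinite(t)]  # bool tensors act as masks
tensor([1., 2.])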
     

    diff --git a/docs/stable/torchvision/models.html b/docs/stable/torchvision/models.html index ffcad2953758..483bc6feceea 100644 --- a/docs/stable/torchvision/models.html +++ b/docs/stable/torchvision/models.html @@ -1394,7 +1394,7 @@

    Mask R-CNNFloatTensor[N, 4]): the ground-truth boxes in [x1, y1, x2, y2] format, with values between 0 and H and 0 and W

  • labels (Int64Tensor[N]): the class label for each ground-truth box

  • -
  • masks (UInt8Tensor[N, 1, H, W]): the segmentation binary masks for each instance

  • +
  • masks (UInt8Tensor[N, H, W]): the segmentation binary masks for each instance

  • The model returns a Dict[Tensor] during training, containing the classification and regression From c031e76413fb42bf0500405bd00de9b9d3ace236 Mon Sep 17 00:00:00 2001 From: pytorchbot Date: Thu, 8 Aug 2019 05:20:59 +0000 Subject: [PATCH 11/12] auto-generating sphinx docs --- docs/stable/.buildinfo | 2 +- docs/stable/__config__.html | 4 +- docs/stable/_modules/index.html | 5 +- docs/stable/_modules/torch.html | 8 +- docs/stable/_modules/torch/__config__.html | 4 +- docs/stable/_modules/torch/_tensor_str.html | 4 +- docs/stable/_modules/torch/_utils.html | 4 +- docs/stable/_modules/torch/autograd.html | 4 +- .../_modules/torch/autograd/anomaly_mode.html | 4 +- .../_modules/torch/autograd/function.html | 4 +- .../_modules/torch/autograd/grad_mode.html | 4 +- .../_modules/torch/autograd/gradcheck.html | 4 +- .../_modules/torch/autograd/profiler.html | 4 +- docs/stable/_modules/torch/cuda.html | 4 +- docs/stable/_modules/torch/cuda/comm.html | 4 +- docs/stable/_modules/torch/cuda/nvtx.html | 4 +- docs/stable/_modules/torch/cuda/random.html | 4 +- docs/stable/_modules/torch/cuda/streams.html | 4 +- docs/stable/_modules/torch/distributed.html | 4 +- .../torch/distributed/distributed_c10d.html | 4 +- .../torch/distributions/bernoulli.html | 4 +- .../_modules/torch/distributions/beta.html | 4 +- .../torch/distributions/binomial.html | 4 +- .../torch/distributions/categorical.html | 4 +- .../_modules/torch/distributions/cauchy.html | 4 +- .../_modules/torch/distributions/chi2.html | 4 +- .../distributions/constraint_registry.html | 4 +- .../torch/distributions/constraints.html | 4 +- .../torch/distributions/dirichlet.html | 4 +- .../torch/distributions/distribution.html | 4 +- .../torch/distributions/exp_family.html | 4 +- .../torch/distributions/exponential.html | 4 +- .../torch/distributions/fishersnedecor.html | 4 +- .../_modules/torch/distributions/gamma.html | 4 +- .../torch/distributions/geometric.html | 4 +- .../_modules/torch/distributions/gumbel.html | 4 +- .../torch/distributions/half_cauchy.html | 4 +- .../torch/distributions/half_normal.html | 4 +- .../torch/distributions/independent.html | 4 +- .../_modules/torch/distributions/kl.html | 4 +- .../_modules/torch/distributions/laplace.html | 4 +- .../torch/distributions/log_normal.html | 4 +- .../lowrank_multivariate_normal.html | 4 +- .../torch/distributions/multinomial.html | 4 +- .../distributions/multivariate_normal.html | 4 +- .../distributions/negative_binomial.html | 4 +- .../_modules/torch/distributions/normal.html | 4 +- .../distributions/one_hot_categorical.html | 4 +- .../_modules/torch/distributions/pareto.html | 4 +- .../_modules/torch/distributions/poisson.html | 4 +- .../distributions/relaxed_bernoulli.html | 4 +- .../distributions/relaxed_categorical.html | 4 +- .../torch/distributions/studentT.html | 4 +- .../transformed_distribution.html | 4 +- .../torch/distributions/transforms.html | 4 +- .../_modules/torch/distributions/uniform.html | 4 +- .../_modules/torch/distributions/weibull.html | 4 +- docs/stable/_modules/torch/functional.html | 4 +- docs/stable/_modules/torch/hub.html | 4 +- docs/stable/_modules/torch/jit.html | 11 +- .../_modules/torch/multiprocessing.html | 4 +- .../_modules/torch/multiprocessing/spawn.html | 4 +- docs/stable/_modules/torch/nn/functional.html | 4 +- docs/stable/_modules/torch/nn/init.html | 4 +- .../_modules/torch/nn/modules/activation.html | 4 +- .../_modules/torch/nn/modules/adaptive.html | 4 +- .../_modules/torch/nn/modules/batchnorm.html | 4 +- 
.../_modules/torch/nn/modules/container.html | 4 +- .../_modules/torch/nn/modules/conv.html | 4 +- .../_modules/torch/nn/modules/distance.html | 4 +- .../_modules/torch/nn/modules/dropout.html | 4 +- .../_modules/torch/nn/modules/fold.html | 4 +- .../torch/nn/modules/instancenorm.html | 4 +- .../_modules/torch/nn/modules/linear.html | 4 +- .../_modules/torch/nn/modules/loss.html | 4 +- .../_modules/torch/nn/modules/module.html | 14 +- .../torch/nn/modules/normalization.html | 4 +- .../_modules/torch/nn/modules/padding.html | 4 +- .../torch/nn/modules/pixelshuffle.html | 4 +- .../_modules/torch/nn/modules/pooling.html | 4 +- .../stable/_modules/torch/nn/modules/rnn.html | 84 +- .../_modules/torch/nn/modules/sparse.html | 4 +- .../torch/nn/modules/transformer.html | 4 +- .../_modules/torch/nn/modules/upsampling.html | 4 +- .../torch/nn/parallel/data_parallel.html | 4 +- .../torch/nn/parallel/distributed.html | 4 +- docs/stable/_modules/torch/nn/parameter.html | 4 +- .../_modules/torch/nn/utils/clip_grad.html | 4 +- .../torch/nn/utils/convert_parameters.html | 4 +- docs/stable/_modules/torch/nn/utils/rnn.html | 9 +- .../torch/nn/utils/spectral_norm.html | 4 +- .../_modules/torch/nn/utils/weight_norm.html | 4 +- docs/stable/_modules/torch/onnx.html | 4 +- .../stable/_modules/torch/onnx/operators.html | 4 +- .../stable/_modules/torch/optim/adadelta.html | 4 +- docs/stable/_modules/torch/optim/adagrad.html | 4 +- docs/stable/_modules/torch/optim/adam.html | 4 +- docs/stable/_modules/torch/optim/adamax.html | 4 +- docs/stable/_modules/torch/optim/adamw.html | 4 +- docs/stable/_modules/torch/optim/asgd.html | 4 +- docs/stable/_modules/torch/optim/lbfgs.html | 4 +- .../_modules/torch/optim/lr_scheduler.html | 6 +- .../_modules/torch/optim/optimizer.html | 4 +- docs/stable/_modules/torch/optim/rmsprop.html | 4 +- docs/stable/_modules/torch/optim/rprop.html | 4 +- docs/stable/_modules/torch/optim/sgd.html | 4 +- .../_modules/torch/optim/sparse_adam.html | 4 +- docs/stable/_modules/torch/quasirandom.html | 4 +- docs/stable/_modules/torch/random.html | 18 +- docs/stable/_modules/torch/serialization.html | 4 +- docs/stable/_modules/torch/sparse.html | 4 +- docs/stable/_modules/torch/storage.html | 4 +- docs/stable/_modules/torch/tensor.html | 4 +- .../_modules/torch/utils/checkpoint.html | 4 +- .../_modules/torch/utils/cpp_extension.html | 4 +- .../torch/utils/data/_utils/worker.html | 4 +- .../_modules/torch/utils/data/dataloader.html | 4 +- .../_modules/torch/utils/data/dataset.html | 4 +- .../torch/utils/data/distributed.html | 4 +- .../_modules/torch/utils/data/sampler.html | 4 +- .../torch/utils/tensorboard/writer.html | 4 +- docs/stable/_modules/torchvision.html | 4 +- .../_modules/torchvision/datasets/cifar.html | 4 +- .../torchvision/datasets/cityscapes.html | 4 +- .../_modules/torchvision/datasets/coco.html | 4 +- .../torchvision/datasets/fakedata.html | 4 +- .../_modules/torchvision/datasets/flickr.html | 4 +- .../_modules/torchvision/datasets/folder.html | 4 +- .../_modules/torchvision/datasets/hmdb51.html | 4 +- .../torchvision/datasets/imagenet.html | 4 +- .../torchvision/datasets/kinetics.html | 4 +- .../_modules/torchvision/datasets/lsun.html | 4 +- .../_modules/torchvision/datasets/mnist.html | 4 +- .../torchvision/datasets/phototour.html | 4 +- .../_modules/torchvision/datasets/sbd.html | 4 +- .../_modules/torchvision/datasets/sbu.html | 4 +- .../_modules/torchvision/datasets/stl10.html | 4 +- .../_modules/torchvision/datasets/svhn.html | 4 +- 
.../_modules/torchvision/datasets/ucf101.html | 4 +- .../_modules/torchvision/datasets/usps.html | 4 +- .../_modules/torchvision/datasets/voc.html | 4 +- .../stable/_modules/torchvision/io/video.html | 28 +- .../_modules/torchvision/models/alexnet.html | 4 +- .../_modules/torchvision/models/densenet.html | 4 +- .../models/detection/faster_rcnn.html | 4 +- .../models/detection/keypoint_rcnn.html | 4 +- .../models/detection/mask_rcnn.html | 4 +- .../torchvision/models/googlenet.html | 10 +- .../torchvision/models/inception.html | 4 +- .../_modules/torchvision/models/mnasnet.html | 4 +- .../torchvision/models/mobilenet.html | 4 +- .../_modules/torchvision/models/resnet.html | 14 +- .../models/segmentation/segmentation.html | 4 +- .../torchvision/models/shufflenetv2.html | 4 +- .../torchvision/models/squeezenet.html | 4 +- .../_modules/torchvision/models/vgg.html | 20 +- .../torchvision/models/video/resnet.html | 853 ++++ .../_modules/torchvision/ops/boxes.html | 4 +- .../_modules/torchvision/ops/roi_align.html | 4 +- .../_modules/torchvision/ops/roi_pool.html | 4 +- .../torchvision/transforms/functional.html | 4 +- .../torchvision/transforms/transforms.html | 4 +- docs/stable/_modules/torchvision/utils.html | 4 +- docs/stable/_sources/index.rst.txt | 1 + docs/stable/_sources/random.rst.txt | 21 + docs/stable/_sources/tensors.rst.txt | 4 +- .../_sources/torchvision/models.rst.txt | 52 +- docs/stable/_static/katex_autorenderer.js | 11 - docs/stable/autograd.html | 4 +- docs/stable/bottleneck.html | 8 +- docs/stable/checkpoint.html | 4 +- docs/stable/community/contribution_guide.html | 4 +- docs/stable/community/governance.html | 4 +- .../stable/community/persons_of_interest.html | 4 +- docs/stable/cpp_extension.html | 4 +- docs/stable/cuda.html | 6 +- docs/stable/cuda_deterministic.html | 4 +- docs/stable/cuda_deterministic_backward.html | 4 +- docs/stable/cudnn_deterministic.html | 4 +- docs/stable/cudnn_persistent_rnn.html | 4 +- docs/stable/data.html | 4 +- docs/stable/distributed.html | 4 +- docs/stable/distributions.html | 124 +- docs/stable/dlpack.html | 4 +- docs/stable/genindex.html | 52 +- docs/stable/hub.html | 4 +- docs/stable/index.html | 5 +- docs/stable/jit.html | 4 +- docs/stable/model_zoo.html | 4 +- docs/stable/multiprocessing.html | 8 +- docs/stable/nn.functional.html | 491 +- docs/stable/nn.html | 3999 ++++++++++++----- docs/stable/nn.init.html | 128 +- docs/stable/notes/autograd.html | 4 +- docs/stable/notes/broadcasting.html | 4 +- .../cpu_threading_torchscript_inference.html | 4 +- docs/stable/notes/cuda.html | 4 +- docs/stable/notes/extending.html | 4 +- docs/stable/notes/faq.html | 8 +- .../stable/notes/large_scale_deployments.html | 4 +- docs/stable/notes/multiprocessing.html | 4 +- docs/stable/notes/randomness.html | 4 +- docs/stable/notes/serialization.html | 4 +- docs/stable/notes/windows.html | 4 +- docs/stable/objects.inv | Bin 11574 -> 11651 bytes docs/stable/onnx.html | 4 +- docs/stable/optim.html | 35 +- docs/stable/py-modindex.html | 9 +- docs/stable/random.html | 662 +++ docs/stable/search.html | 4 +- docs/stable/searchindex.js | 2 +- docs/stable/sparse.html | 24 +- docs/stable/storage.html | 4 +- docs/stable/tensor_attributes.html | 4 +- docs/stable/tensorboard.html | 58 +- docs/stable/tensors.html | 154 +- docs/stable/torch.html | 1659 +++++-- docs/stable/torchvision/datasets.html | 4 +- docs/stable/torchvision/index.html | 5 +- docs/stable/torchvision/io.html | 4 +- docs/stable/torchvision/models.html | 197 +- docs/stable/torchvision/ops.html | 4 +- 
docs/stable/torchvision/transforms.html | 19 +- docs/stable/torchvision/utils.html | 4 +- docs/stable/type_info.html | 4 +- 225 files changed, 7037 insertions(+), 2523 deletions(-) create mode 100644 docs/stable/_modules/torchvision/models/video/resnet.html create mode 100644 docs/stable/_sources/random.rst.txt delete mode 100644 docs/stable/_static/katex_autorenderer.js create mode 100644 docs/stable/random.html diff --git a/docs/stable/.buildinfo b/docs/stable/.buildinfo index 2259a36b125f..eb1b6338127b 100644 --- a/docs/stable/.buildinfo +++ b/docs/stable/.buildinfo @@ -1,4 +1,4 @@ # Sphinx build info version 1 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: 151805b264b841595033b97d1a80e49c +config: 36d820e17ec7e30989030bc36496501e tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/docs/stable/__config__.html b/docs/stable/__config__.html index 132b82bdc08b..06760e520120 100644 --- a/docs/stable/__config__.html +++ b/docs/stable/__config__.html @@ -177,6 +177,7 @@

  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
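The `torch.random` entry being added to each navigation menu in this patch tracks the newly documented `torch.random` module (`docs/stable/random.html` in the diffstat above). Not part of the patch itself, but for orientation, a short sketch of the documented API as it ships in 1.2.0:

    import torch

    torch.manual_seed(42)                  # seed the global RNG
    state = torch.random.get_rng_state()   # snapshot the CPU RNG state
    a = torch.rand(2)
    torch.random.set_rng_state(state)      # restore the snapshot...
    b = torch.rand(2)
    assert torch.equal(a, b)               # ...so the same draw repeats

    # fork_rng() restores the RNG state on exit, so seeding inside the
    # block does not leak into the surrounding program.
    with torch.random.fork_rng():
        torch.manual_seed(0)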
@@ -342,9 +343,6 @@
-
-
-
diff --git a/docs/stable/_modules/index.html b/docs/stable/_modules/index.html
index c9d54de7adff..62277fb335b7 100644
--- a/docs/stable/_modules/index.html
+++ b/docs/stable/_modules/index.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -412,6 +413,7 @@

    All modules for which code is available

  • torchvision.models.shufflenetv2
  • torchvision.models.squeezenet
  • torchvision.models.vgg
  • +
  • torchvision.models.video.resnet
  • torchvision.ops.boxes
  • torchvision.ops.roi_align
  • torchvision.ops.roi_pool
  • @@ -470,9 +472,6 @@

    All modules for which code is available

-
-
-
diff --git a/docs/stable/_modules/torch.html b/docs/stable/_modules/torch.html
index bd757d6d5d89..13e426689b5d 100644
--- a/docs/stable/_modules/torch.html
+++ b/docs/stable/_modules/torch.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -285,9 +286,9 @@

    Source code for torch

         'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack', 'matmul',
         'no_grad', 'enable_grad', 'rand', 'randn',
         'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage',
    -    'ShortStorage', 'CharStorage', 'ByteStorage',
    +    'ShortStorage', 'CharStorage', 'ByteStorage', 'BoolStorage',
         'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor',
    -    'ShortTensor', 'CharTensor', 'ByteTensor', 'Tensor',
    +    'ShortTensor', 'CharTensor', 'ByteTensor', 'BoolTensor', 'Tensor',
     ]
     
     ################################################################################
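The two `__all__` additions above export the bool storage/tensor types that are new in this release. A minimal sketch of what they enable (not part of the patch):

    import torch

    mask = torch.BoolTensor([True, False, True])  # newly exported name
    assert mask.dtype == torch.bool
    x = torch.arange(3)
    print(x[mask])    # boolean mask indexing -> tensor([0, 2])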
    @@ -652,9 +653,6 @@ 

    Source code for torch

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/__config__.html b/docs/stable/_modules/torch/__config__.html
    index 97d75211ffdd..ee3501153336 100644
    --- a/docs/stable/_modules/torch/__config__.html
    +++ b/docs/stable/_modules/torch/__config__.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -330,9 +331,6 @@

    Source code for torch.__config__

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/_tensor_str.html b/docs/stable/_modules/torch/_tensor_str.html
    index e40b3ac72e40..d6ed4f63abe6 100644
    --- a/docs/stable/_modules/torch/_tensor_str.html
    +++ b/docs/stable/_modules/torch/_tensor_str.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -626,9 +627,6 @@

    Source code for torch._tensor_str

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/_utils.html b/docs/stable/_modules/torch/_utils.html
    index 509074f5bc03..4a13d5be7b2c 100644
    --- a/docs/stable/_modules/torch/_utils.html
    +++ b/docs/stable/_modules/torch/_utils.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -682,9 +683,6 @@

    Source code for torch._utils

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/autograd.html b/docs/stable/_modules/torch/autograd.html
    index 01d4cf76ee74..871051d1f420 100644
    --- a/docs/stable/_modules/torch/autograd.html
    +++ b/docs/stable/_modules/torch/autograd.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -489,9 +490,6 @@

    Source code for torch.autograd

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/autograd/anomaly_mode.html b/docs/stable/_modules/torch/autograd/anomaly_mode.html
    index ad7308b0e2b6..cf8294b87462 100644
    --- a/docs/stable/_modules/torch/autograd/anomaly_mode.html
    +++ b/docs/stable/_modules/torch/autograd/anomaly_mode.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -414,9 +415,6 @@

    Source code for torch.autograd.anomaly_mode

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/autograd/function.html b/docs/stable/_modules/torch/autograd/function.html
    index 3a85ef3d4bf7..97141557f1eb 100644
    --- a/docs/stable/_modules/torch/autograd/function.html
    +++ b/docs/stable/_modules/torch/autograd/function.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -705,9 +706,6 @@

    Source code for torch.autograd.function

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/autograd/grad_mode.html b/docs/stable/_modules/torch/autograd/grad_mode.html
    index c294a20dc65f..e20a1716fda4 100644
    --- a/docs/stable/_modules/torch/autograd/grad_mode.html
    +++ b/docs/stable/_modules/torch/autograd/grad_mode.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -461,9 +462,6 @@

    Source code for torch.autograd.grad_mode

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/autograd/gradcheck.html b/docs/stable/_modules/torch/autograd/gradcheck.html
    index 7d1a9148306f..d5a670db4c9e 100644
    --- a/docs/stable/_modules/torch/autograd/gradcheck.html
    +++ b/docs/stable/_modules/torch/autograd/gradcheck.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -719,9 +720,6 @@

    Source code for torch.autograd.gradcheck

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/autograd/profiler.html b/docs/stable/_modules/torch/autograd/profiler.html
    index 667cd17a6dfa..aa9c8aee31dd 100644
    --- a/docs/stable/_modules/torch/autograd/profiler.html
    +++ b/docs/stable/_modules/torch/autograd/profiler.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -1144,9 +1145,6 @@

    Source code for torch.autograd.profiler

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/cuda.html b/docs/stable/_modules/torch/cuda.html
    index d556ef54d0bd..f55e5809c08f 100644
    --- a/docs/stable/_modules/torch/cuda.html
    +++ b/docs/stable/_modules/torch/cuda.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -995,9 +996,6 @@

    Source code for torch.cuda

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/cuda/comm.html b/docs/stable/_modules/torch/cuda/comm.html
    index 354bcb4518e6..3e2598d46285 100644
    --- a/docs/stable/_modules/torch/cuda/comm.html
    +++ b/docs/stable/_modules/torch/cuda/comm.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -480,9 +481,6 @@

    Source code for torch.cuda.comm

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/cuda/nvtx.html b/docs/stable/_modules/torch/cuda/nvtx.html
    index 9617d548dc4d..77a916db1b1a 100644
    --- a/docs/stable/_modules/torch/cuda/nvtx.html
    +++ b/docs/stable/_modules/torch/cuda/nvtx.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -390,9 +391,6 @@

    Source code for torch.cuda.nvtx

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/cuda/random.html b/docs/stable/_modules/torch/cuda/random.html
    index f5834808908f..9b204bb49f3b 100644
    --- a/docs/stable/_modules/torch/cuda/random.html
    +++ b/docs/stable/_modules/torch/cuda/random.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -475,9 +476,6 @@

    Source code for torch.cuda.random

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/cuda/streams.html b/docs/stable/_modules/torch/cuda/streams.html
    index 47243555a726..56d69a7a1d29 100644
    --- a/docs/stable/_modules/torch/cuda/streams.html
    +++ b/docs/stable/_modules/torch/cuda/streams.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -513,9 +514,6 @@

    Source code for torch.cuda.streams

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/distributed.html b/docs/stable/_modules/torch/distributed.html
    index c4a2d3704064..b7c0f91f980c 100644
    --- a/docs/stable/_modules/torch/distributed.html
    +++ b/docs/stable/_modules/torch/distributed.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -330,9 +331,6 @@

    Source code for torch.distributed

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/distributed/distributed_c10d.html b/docs/stable/_modules/torch/distributed/distributed_c10d.html
    index 520f89115ac0..bc576235fe93 100644
    --- a/docs/stable/_modules/torch/distributed/distributed_c10d.html
    +++ b/docs/stable/_modules/torch/distributed/distributed_c10d.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -1757,9 +1758,6 @@

    Source code for torch.distributed.distributed_c10d

-
-
-
diff --git a/docs/stable/_modules/torch/distributions/bernoulli.html b/docs/stable/_modules/torch/distributions/bernoulli.html
index 36a7a164469e..976b46e00ee5 100644
--- a/docs/stable/_modules/torch/distributions/bernoulli.html
+++ b/docs/stable/_modules/torch/distributions/bernoulli.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -425,9 +426,6 @@

    Source code for torch.distributions.bernoulli

-
-
-
diff --git a/docs/stable/_modules/torch/distributions/beta.html b/docs/stable/_modules/torch/distributions/beta.html
index 7cfd442156a9..9015f14ef27f 100644
--- a/docs/stable/_modules/torch/distributions/beta.html
+++ b/docs/stable/_modules/torch/distributions/beta.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -403,9 +404,6 @@

    Source code for torch.distributions.beta

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/distributions/binomial.html b/docs/stable/_modules/torch/distributions/binomial.html
    index ea67a5c3a9e9..9902afd989bc 100644
    --- a/docs/stable/_modules/torch/distributions/binomial.html
    +++ b/docs/stable/_modules/torch/distributions/binomial.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -452,9 +453,6 @@

    Source code for torch.distributions.binomial

    
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/distributions/categorical.html b/docs/stable/_modules/torch/distributions/categorical.html
    index cd92bef6806e..696f76fb5e86 100644
    --- a/docs/stable/_modules/torch/distributions/categorical.html
    +++ b/docs/stable/_modules/torch/distributions/categorical.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -441,9 +442,6 @@

    Source code for torch.distributions.categorical

-
-
-
diff --git a/docs/stable/_modules/torch/distributions/cauchy.html b/docs/stable/_modules/torch/distributions/cauchy.html
index 3d2302861619..36e6381e35b5 100644
--- a/docs/stable/_modules/torch/distributions/cauchy.html
+++ b/docs/stable/_modules/torch/distributions/cauchy.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -390,9 +391,6 @@

    Source code for torch.distributions.cauchy

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/distributions/chi2.html b/docs/stable/_modules/torch/distributions/chi2.html
    index fad292ab419c..8a25e599ae67 100644
    --- a/docs/stable/_modules/torch/distributions/chi2.html
    +++ b/docs/stable/_modules/torch/distributions/chi2.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -343,9 +344,6 @@

    Source code for torch.distributions.chi2

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/distributions/constraint_registry.html b/docs/stable/_modules/torch/distributions/constraint_registry.html
    index 2bfe0795d854..34792a1e58f2 100644
    --- a/docs/stable/_modules/torch/distributions/constraint_registry.html
    +++ b/docs/stable/_modules/torch/distributions/constraint_registry.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -558,9 +559,6 @@

    Source code for torch.distributions.constraint_registry

-
-
-
diff --git a/docs/stable/_modules/torch/distributions/constraints.html b/docs/stable/_modules/torch/distributions/constraints.html
index 901e7a218ac5..d1ed4e936b86 100644
--- a/docs/stable/_modules/torch/distributions/constraints.html
+++ b/docs/stable/_modules/torch/distributions/constraints.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -678,9 +679,6 @@

    Source code for torch.distributions.constraints

-
-
-
diff --git a/docs/stable/_modules/torch/distributions/dirichlet.html b/docs/stable/_modules/torch/distributions/dirichlet.html
index ea552c36d52b..13feb8f0301c 100644
--- a/docs/stable/_modules/torch/distributions/dirichlet.html
+++ b/docs/stable/_modules/torch/distributions/dirichlet.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -408,9 +409,6 @@

    Source code for torch.distributions.dirichlet

-
-
-
diff --git a/docs/stable/_modules/torch/distributions/distribution.html b/docs/stable/_modules/torch/distributions/distribution.html
index e6140218bda4..3aa18fae27a1 100644
--- a/docs/stable/_modules/torch/distributions/distribution.html
+++ b/docs/stable/_modules/torch/distributions/distribution.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -580,9 +581,6 @@

    Source code for torch.distributions.distribution

-
-
-
diff --git a/docs/stable/_modules/torch/distributions/exp_family.html b/docs/stable/_modules/torch/distributions/exp_family.html
index 120b44752599..f7983323d92b 100644
--- a/docs/stable/_modules/torch/distributions/exp_family.html
+++ b/docs/stable/_modules/torch/distributions/exp_family.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -373,9 +374,6 @@

    Source code for torch.distributions.exp_family

-
-
-
diff --git a/docs/stable/_modules/torch/distributions/exponential.html b/docs/stable/_modules/torch/distributions/exponential.html
index 0cf9a5bd53cb..19ea7d69230d 100644
--- a/docs/stable/_modules/torch/distributions/exponential.html
+++ b/docs/stable/_modules/torch/distributions/exponential.html
@@ -175,6 +175,7 @@

  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -396,9 +397,6 @@

    Source code for torch.distributions.exponential

-
-
-
diff --git a/docs/stable/_modules/torch/distributions/fishersnedecor.html b/docs/stable/_modules/torch/distributions/fishersnedecor.html
index f28d66ad8547..214d62bb8f65 100644
--- a/docs/stable/_modules/torch/distributions/fishersnedecor.html
+++ b/docs/stable/_modules/torch/distributions/fishersnedecor.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -396,9 +397,6 @@

    Source code for torch.distributions.fishersnedecor

-
-
-
diff --git a/docs/stable/_modules/torch/distributions/gamma.html b/docs/stable/_modules/torch/distributions/gamma.html
index dfa1d8217d97..146daba1f6e2 100644
--- a/docs/stable/_modules/torch/distributions/gamma.html
+++ b/docs/stable/_modules/torch/distributions/gamma.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -394,9 +395,6 @@

    Source code for torch.distributions.gamma

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/distributions/geometric.html b/docs/stable/_modules/torch/distributions/geometric.html
    index bcde782a8b55..6ba00adf7ba9 100644
    --- a/docs/stable/_modules/torch/distributions/geometric.html
    +++ b/docs/stable/_modules/torch/distributions/geometric.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -409,9 +410,6 @@

    Source code for torch.distributions.geometric

-
-
-
diff --git a/docs/stable/_modules/torch/distributions/gumbel.html b/docs/stable/_modules/torch/distributions/gumbel.html
index 3ce77a7514de..c1fddbb6a8ad 100644
--- a/docs/stable/_modules/torch/distributions/gumbel.html
+++ b/docs/stable/_modules/torch/distributions/gumbel.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -381,9 +382,6 @@

    Source code for torch.distributions.gumbel

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/distributions/half_cauchy.html b/docs/stable/_modules/torch/distributions/half_cauchy.html
    index 54b7a3bcd3a0..fb7c9b4faf7c 100644
    --- a/docs/stable/_modules/torch/distributions/half_cauchy.html
    +++ b/docs/stable/_modules/torch/distributions/half_cauchy.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -376,9 +377,6 @@

    Source code for torch.distributions.half_cauchy

-
-
-
diff --git a/docs/stable/_modules/torch/distributions/half_normal.html b/docs/stable/_modules/torch/distributions/half_normal.html
index 425754967ac3..0b6f5bd38386 100644
--- a/docs/stable/_modules/torch/distributions/half_normal.html
+++ b/docs/stable/_modules/torch/distributions/half_normal.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -376,9 +377,6 @@

    Source code for torch.distributions.half_normal

-
-
-
diff --git a/docs/stable/_modules/torch/distributions/independent.html b/docs/stable/_modules/torch/distributions/independent.html
index 006ccc38833f..8ab2ef4b9a77 100644
--- a/docs/stable/_modules/torch/distributions/independent.html
+++ b/docs/stable/_modules/torch/distributions/independent.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -411,9 +412,6 @@

    Source code for torch.distributions.independent

-
-
-
diff --git a/docs/stable/_modules/torch/distributions/kl.html b/docs/stable/_modules/torch/distributions/kl.html
index 0e3638b2105c..f7b3a6fdd7e2 100644
--- a/docs/stable/_modules/torch/distributions/kl.html
+++ b/docs/stable/_modules/torch/distributions/kl.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -1052,9 +1053,6 @@

    Source code for torch.distributions.kl

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/distributions/laplace.html b/docs/stable/_modules/torch/distributions/laplace.html
    index 7fb26209e7ac..b5ba1ed1eb6e 100644
    --- a/docs/stable/_modules/torch/distributions/laplace.html
    +++ b/docs/stable/_modules/torch/distributions/laplace.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -397,9 +398,6 @@

    Source code for torch.distributions.laplace

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/distributions/log_normal.html b/docs/stable/_modules/torch/distributions/log_normal.html
    index 5ce4382f3404..f71ea2c07734 100644
    --- a/docs/stable/_modules/torch/distributions/log_normal.html
    +++ b/docs/stable/_modules/torch/distributions/log_normal.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -367,9 +368,6 @@

    Source code for torch.distributions.log_normal

-
-
-
diff --git a/docs/stable/_modules/torch/distributions/lowrank_multivariate_normal.html b/docs/stable/_modules/torch/distributions/lowrank_multivariate_normal.html
index e32b19bc50c5..22a99ef4d64e 100644
--- a/docs/stable/_modules/torch/distributions/lowrank_multivariate_normal.html
+++ b/docs/stable/_modules/torch/distributions/lowrank_multivariate_normal.html
@@ -175,6 +175,7 @@

  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -513,9 +514,6 @@

    Source code for torch.distributions.lowrank_multivariate_normal

-
-
-
diff --git a/docs/stable/_modules/torch/distributions/multinomial.html b/docs/stable/_modules/torch/distributions/multinomial.html
index 693088eb88d9..a95c9bc17249 100644
--- a/docs/stable/_modules/torch/distributions/multinomial.html
+++ b/docs/stable/_modules/torch/distributions/multinomial.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -422,9 +423,6 @@

    Source code for torch.distributions.multinomial

-
-
-
diff --git a/docs/stable/_modules/torch/distributions/multivariate_normal.html b/docs/stable/_modules/torch/distributions/multivariate_normal.html
index bec19b220da9..612b4b3f0c4f 100644
--- a/docs/stable/_modules/torch/distributions/multivariate_normal.html
+++ b/docs/stable/_modules/torch/distributions/multivariate_normal.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -531,9 +532,6 @@

    Source code for torch.distributions.multivariate_normal

-
-
-
diff --git a/docs/stable/_modules/torch/distributions/negative_binomial.html b/docs/stable/_modules/torch/distributions/negative_binomial.html
index 5498d1a81df9..4592b7e5645f 100644
--- a/docs/stable/_modules/torch/distributions/negative_binomial.html
+++ b/docs/stable/_modules/torch/distributions/negative_binomial.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -411,9 +412,6 @@

    Source code for torch.distributions.negative_binomial

-
-
-
diff --git a/docs/stable/_modules/torch/distributions/normal.html b/docs/stable/_modules/torch/distributions/normal.html
index 8a1dace8b205..812c26d0fec4 100644
--- a/docs/stable/_modules/torch/distributions/normal.html
+++ b/docs/stable/_modules/torch/distributions/normal.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -409,9 +410,6 @@

    Source code for torch.distributions.normal

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/distributions/one_hot_categorical.html b/docs/stable/_modules/torch/distributions/one_hot_categorical.html
    index 9af78369412a..2c7a196598ea 100644
    --- a/docs/stable/_modules/torch/distributions/one_hot_categorical.html
    +++ b/docs/stable/_modules/torch/distributions/one_hot_categorical.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -411,9 +412,6 @@

    Source code for torch.distributions.one_hot_categorical

-
-
-
diff --git a/docs/stable/_modules/torch/distributions/pareto.html b/docs/stable/_modules/torch/distributions/pareto.html
index be3d7c8442ad..28165514765c 100644
--- a/docs/stable/_modules/torch/distributions/pareto.html
+++ b/docs/stable/_modules/torch/distributions/pareto.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -366,9 +367,6 @@

    Source code for torch.distributions.pareto

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/distributions/poisson.html b/docs/stable/_modules/torch/distributions/poisson.html
    index ef5719614d7c..70e7b8353f40 100644
    --- a/docs/stable/_modules/torch/distributions/poisson.html
    +++ b/docs/stable/_modules/torch/distributions/poisson.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -383,9 +384,6 @@

    Source code for torch.distributions.poisson

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/distributions/relaxed_bernoulli.html b/docs/stable/_modules/torch/distributions/relaxed_bernoulli.html
    index 857e43bba7f4..3966b87346a1 100644
    --- a/docs/stable/_modules/torch/distributions/relaxed_bernoulli.html
    +++ b/docs/stable/_modules/torch/distributions/relaxed_bernoulli.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -450,9 +451,6 @@

    Source code for torch.distributions.relaxed_bernoulli

-
-
-
diff --git a/docs/stable/_modules/torch/distributions/relaxed_categorical.html b/docs/stable/_modules/torch/distributions/relaxed_categorical.html
index 01ff74d9fd6e..792d1673d5be 100644
--- a/docs/stable/_modules/torch/distributions/relaxed_categorical.html
+++ b/docs/stable/_modules/torch/distributions/relaxed_categorical.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -444,9 +445,6 @@

    Source code for torch.distributions.relaxed_categorical

-
-
-
diff --git a/docs/stable/_modules/torch/distributions/studentT.html b/docs/stable/_modules/torch/distributions/studentT.html
index a224558d8033..6289fb2661ef 100644
--- a/docs/stable/_modules/torch/distributions/studentT.html
+++ b/docs/stable/_modules/torch/distributions/studentT.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -404,9 +405,6 @@

    Source code for torch.distributions.studentT

    
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/distributions/transformed_distribution.html b/docs/stable/_modules/torch/distributions/transformed_distribution.html
    index 8be5a4fa3788..78d110d3c690 100644
    --- a/docs/stable/_modules/torch/distributions/transformed_distribution.html
    +++ b/docs/stable/_modules/torch/distributions/transformed_distribution.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -469,9 +470,6 @@

    Source code for torch.distributions.transformed_distribution

-
-
-
diff --git a/docs/stable/_modules/torch/distributions/transforms.html b/docs/stable/_modules/torch/distributions/transforms.html
index 84aeb98bd118..487431ee97e2 100644
--- a/docs/stable/_modules/torch/distributions/transforms.html
+++ b/docs/stable/_modules/torch/distributions/transforms.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -1011,9 +1012,6 @@

    Source code for torch.distributions.transforms

-
-
-
diff --git a/docs/stable/_modules/torch/distributions/uniform.html b/docs/stable/_modules/torch/distributions/uniform.html
index dd6ef6502c1b..20ec54880bb6 100644
--- a/docs/stable/_modules/torch/distributions/uniform.html
+++ b/docs/stable/_modules/torch/distributions/uniform.html
@@ -175,6 +175,7 @@

  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -403,9 +404,6 @@

    Source code for torch.distributions.uniform

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/distributions/weibull.html b/docs/stable/_modules/torch/distributions/weibull.html
    index 8a503aceb0bf..0ade56f5246c 100644
    --- a/docs/stable/_modules/torch/distributions/weibull.html
    +++ b/docs/stable/_modules/torch/distributions/weibull.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -375,9 +376,6 @@

    Source code for torch.distributions.weibull

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/functional.html b/docs/stable/_modules/torch/functional.html
    index 01c909f57d3c..590372da0171 100644
    --- a/docs/stable/_modules/torch/functional.html
    +++ b/docs/stable/_modules/torch/functional.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -1140,9 +1141,6 @@

    Source code for torch.functional

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/hub.html b/docs/stable/_modules/torch/hub.html
    index c2854f9713d8..38cce5118e24 100644
    --- a/docs/stable/_modules/torch/hub.html
    +++ b/docs/stable/_modules/torch/hub.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -776,9 +777,6 @@

    Source code for torch.hub

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/jit.html b/docs/stable/_modules/torch/jit.html
    index a790d2bc70d6..e19eeffa593d 100644
    --- a/docs/stable/_modules/torch/jit.html
    +++ b/docs/stable/_modules/torch/jit.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -1787,7 +1788,9 @@

    Source code for torch.jit

                 else:
                     self.__dict__['_c'] = torch._C.ScriptModule(_qualified_name, _compilation_unit, True)
     
    -            Module.__init__(self)
    +            Module._construct(self)
    +            Module.__setattr__(self, "training", True)
    +
                 self._parameters = OrderedParameterDict(self._c)
                 self._buffers = OrderedBufferDict(self._c)
                 self._modules = OrderedModuleDict(self._c)
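For context (not part of the patch): `ScriptModule` stops calling `Module.__init__`, which would also set `training` through the script-module attribute machinery, and instead reuses the new `Module._construct` hook defined in the `torch/nn/modules/module.html` diff further down, assigning `training` explicitly via `Module.__setattr__`. A condensed sketch of the pattern, with simplified stand-in classes:

    from collections import OrderedDict

    class Module:
        def __init__(self):
            self._construct()
            # `training` is set outside _construct() because ScriptModule
            # manages it differently (see below).
            self.training = True

        def _construct(self):
            # internal state shared by nn.Module and ScriptModule
            self._parameters = OrderedDict()
            self._buffers = OrderedDict()
            self._modules = OrderedDict()

    class ScriptModule(Module):
        def __init__(self):
            Module._construct(self)   # shared state only, no `training` yet
            Module.__setattr__(self, "training", True)  # bypass custom __setattr__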
    @@ -1834,7 +1837,7 @@ 

    Source code for torch.jit

                     # to improve invocation performance
                     self.__dict__[attr] = script_method
                     return script_method
    -            return Module.__getattr__(self, attr)
    +            return super(ScriptModule, self).__getattr__(attr)
     
             def __setattr__(self, attr, value):
                 if attr not in self._constants_set:
    @@ -2308,7 +2311,6 @@ 

    Source code for torch.jit

             (torch.nn.init._no_grad_normal_, "aten::_no_grad_normal_"),
             (torch.nn.init._no_grad_uniform_, "aten::_no_grad_uniform_"),
             (torch.nn.init._no_grad_zero_, "aten::_no_grad_zero_"),
    -        (torch.nn.utils.rnn.get_packed_sequence, "aten::_pack_sequence"),
             (torch._C._get_tracing_state, "aten::_get_tracing_state"),
             (warnings.warn, "aten::warn"),
         ]
    @@ -2450,9 +2452,6 @@ 

    Source code for torch.jit

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/multiprocessing.html b/docs/stable/_modules/torch/multiprocessing.html
    index 47d2adf9ab5f..78a94765e639 100644
    --- a/docs/stable/_modules/torch/multiprocessing.html
    +++ b/docs/stable/_modules/torch/multiprocessing.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -391,9 +392,6 @@

    Source code for torch.multiprocessing

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/multiprocessing/spawn.html b/docs/stable/_modules/torch/multiprocessing/spawn.html
    index 90a91b61a7d7..a901c7bc69d8 100644
    --- a/docs/stable/_modules/torch/multiprocessing/spawn.html
    +++ b/docs/stable/_modules/torch/multiprocessing/spawn.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -487,9 +488,6 @@

    Source code for torch.multiprocessing.spawn

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/nn/functional.html b/docs/stable/_modules/torch/nn/functional.html
    index 83c0519051c4..74281d053ee7 100644
    --- a/docs/stable/_modules/torch/nn/functional.html
    +++ b/docs/stable/_modules/torch/nn/functional.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -3589,9 +3590,6 @@

    Source code for torch.nn.functional

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/nn/init.html b/docs/stable/_modules/torch/nn/init.html
    index 5b0c979c2eff..e801dc737278 100644
    --- a/docs/stable/_modules/torch/nn/init.html
    +++ b/docs/stable/_modules/torch/nn/init.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -773,9 +774,6 @@

    Source code for torch.nn.init

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/nn/modules/activation.html b/docs/stable/_modules/torch/nn/modules/activation.html
    index 6c151caca9c4..ac2978c83b19 100644
    --- a/docs/stable/_modules/torch/nn/modules/activation.html
    +++ b/docs/stable/_modules/torch/nn/modules/activation.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -1365,9 +1366,6 @@

    Source code for torch.nn.modules.activation

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/nn/modules/adaptive.html b/docs/stable/_modules/torch/nn/modules/adaptive.html
    index ddf2a88fc7db..78ad21cea843 100644
    --- a/docs/stable/_modules/torch/nn/modules/adaptive.html
    +++ b/docs/stable/_modules/torch/nn/modules/adaptive.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -588,9 +589,6 @@

    Source code for torch.nn.modules.adaptive

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/nn/modules/batchnorm.html b/docs/stable/_modules/torch/nn/modules/batchnorm.html
    index 6b9a2101e8fa..737f427d6213 100644
    --- a/docs/stable/_modules/torch/nn/modules/batchnorm.html
    +++ b/docs/stable/_modules/torch/nn/modules/batchnorm.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -818,9 +819,6 @@

    Source code for torch.nn.modules.batchnorm

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/nn/modules/container.html b/docs/stable/_modules/torch/nn/modules/container.html
    index 110d18d20001..c6509afc8abe 100644
    --- a/docs/stable/_modules/torch/nn/modules/container.html
    +++ b/docs/stable/_modules/torch/nn/modules/container.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -883,9 +884,6 @@

    Source code for torch.nn.modules.container

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/nn/modules/conv.html b/docs/stable/_modules/torch/nn/modules/conv.html
    index fda0f5014263..bdf768a57d39 100644
    --- a/docs/stable/_modules/torch/nn/modules/conv.html
    +++ b/docs/stable/_modules/torch/nn/modules/conv.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -1248,9 +1249,6 @@

    Source code for torch.nn.modules.conv

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/nn/modules/distance.html b/docs/stable/_modules/torch/nn/modules/distance.html
    index 46a5993161ce..a18c3fffbbe3 100644
    --- a/docs/stable/_modules/torch/nn/modules/distance.html
    +++ b/docs/stable/_modules/torch/nn/modules/distance.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -381,9 +382,6 @@

    Source code for torch.nn.modules.distance

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/nn/modules/dropout.html b/docs/stable/_modules/torch/nn/modules/dropout.html
    index 35bb363284d6..20fe87a66942 100644
    --- a/docs/stable/_modules/torch/nn/modules/dropout.html
    +++ b/docs/stable/_modules/torch/nn/modules/dropout.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -499,9 +500,6 @@

    Source code for torch.nn.modules.dropout

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/nn/modules/fold.html b/docs/stable/_modules/torch/nn/modules/fold.html
    index 515223e78d01..b899949f6e77 100644
    --- a/docs/stable/_modules/torch/nn/modules/fold.html
    +++ b/docs/stable/_modules/torch/nn/modules/fold.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -535,9 +536,6 @@

    Source code for torch.nn.modules.fold

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/nn/modules/instancenorm.html b/docs/stable/_modules/torch/nn/modules/instancenorm.html
    index b7f9f6b25f85..9d049b64346a 100644
    --- a/docs/stable/_modules/torch/nn/modules/instancenorm.html
    +++ b/docs/stable/_modules/torch/nn/modules/instancenorm.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -591,9 +592,6 @@

    Source code for torch.nn.modules.instancenorm

-
-
-
diff --git a/docs/stable/_modules/torch/nn/modules/linear.html b/docs/stable/_modules/torch/nn/modules/linear.html
index dd285cf14ce0..7d93a559fe65 100644
--- a/docs/stable/_modules/torch/nn/modules/linear.html
+++ b/docs/stable/_modules/torch/nn/modules/linear.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -475,9 +476,6 @@

    Source code for torch.nn.modules.linear

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/nn/modules/loss.html b/docs/stable/_modules/torch/nn/modules/loss.html
    index e01080179285..3e603aedc579 100644
    --- a/docs/stable/_modules/torch/nn/modules/loss.html
    +++ b/docs/stable/_modules/torch/nn/modules/loss.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -1612,9 +1613,6 @@

    Source code for torch.nn.modules.loss

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/nn/modules/module.html b/docs/stable/_modules/torch/nn/modules/module.html
    index 3ac4ceeb571b..a2fa089ed6a0 100644
    --- a/docs/stable/_modules/torch/nn/modules/module.html
    +++ b/docs/stable/_modules/torch/nn/modules/module.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -332,6 +333,15 @@

    Source code for torch.nn.modules.module

         _version = 1
     
         def __init__(self):
    +        self._construct()
    +        # initialize self.training separately from the rest of the internal
    +        # state, as it is managed differently by nn.Module and ScriptModule
    +        self.training = True
    +
    +    def _construct(self):
    +        """
    +        Initializes internal Module state, shared by both nn.Module and ScriptModule.
    +        """
             torch._C._log_api_usage_once("python.nn_module")
             self._backend = thnn_backend
             self._parameters = OrderedDict()
    @@ -342,7 +352,6 @@ 

    Source code for torch.nn.modules.module

             self._state_dict_hooks = OrderedDict()
             self._load_state_dict_pre_hooks = OrderedDict()
             self._modules = OrderedDict()
    -        self.training = True
     
     
[docs]    def forward(self, *input):
        r"""Defines the computation performed at every call.
@@ -1472,9 +1481,6 @@

    Source code for torch.nn.modules.module

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/nn/modules/normalization.html b/docs/stable/_modules/torch/nn/modules/normalization.html
    index 84d46ac10ac7..0aac39c33cd8 100644
    --- a/docs/stable/_modules/torch/nn/modules/normalization.html
    +++ b/docs/stable/_modules/torch/nn/modules/normalization.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -547,9 +548,6 @@

    Source code for torch.nn.modules.normalization

-
-
-
diff --git a/docs/stable/_modules/torch/nn/modules/padding.html b/docs/stable/_modules/torch/nn/modules/padding.html
index 18be56d0d480..037fe7f3f6e2 100644
--- a/docs/stable/_modules/torch/nn/modules/padding.html
+++ b/docs/stable/_modules/torch/nn/modules/padding.html
@@ -175,6 +175,7 @@

  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -754,9 +755,6 @@

    Source code for torch.nn.modules.padding

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/nn/modules/pixelshuffle.html b/docs/stable/_modules/torch/nn/modules/pixelshuffle.html
    index 30b1aa9b09f0..31d506e6d57a 100644
    --- a/docs/stable/_modules/torch/nn/modules/pixelshuffle.html
    +++ b/docs/stable/_modules/torch/nn/modules/pixelshuffle.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -359,9 +360,6 @@

    Source code for torch.nn.modules.pixelshuffle

-
-
-
diff --git a/docs/stable/_modules/torch/nn/modules/pooling.html b/docs/stable/_modules/torch/nn/modules/pooling.html
index 8e28a3dd97ba..2cc5770843f0 100644
--- a/docs/stable/_modules/torch/nn/modules/pooling.html
+++ b/docs/stable/_modules/torch/nn/modules/pooling.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -1373,9 +1374,6 @@

    Source code for torch.nn.modules.pooling

              
              
              
    -         
    -         
    -         
          
     
       
    diff --git a/docs/stable/_modules/torch/nn/modules/rnn.html b/docs/stable/_modules/torch/nn/modules/rnn.html
    index f62c7b44ca8b..a78b3b3dacec 100644
    --- a/docs/stable/_modules/torch/nn/modules/rnn.html
    +++ b/docs/stable/_modules/torch/nn/modules/rnn.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -268,13 +269,12 @@

    Source code for torch.nn.modules.rnn

     
     from .module import Module
     from ..parameter import Parameter
    -from ..utils.rnn import PackedSequence, get_packed_sequence
    +from ..utils.rnn import PackedSequence
     from .. import init
     from .. import _VF
     from ..._jit_internal import _parameter_list
     
     _rnn_impls = {
    -    'GRU': _VF.gru,
         'RNN_TANH': _VF.rnn_tanh,
         'RNN_RELU': _VF.rnn_relu,
     }
    @@ -430,12 +430,14 @@ 

    Source code for torch.nn.modules.rnn

                 raise RuntimeError(msg.format(expected_hidden_size, tuple(hx.size())))
     
         def check_forward_args(self, input, hidden, batch_sizes):
    +        # type: (Tensor, Tensor, Optional[Tensor]) -> None
             self.check_input(input, batch_sizes)
             expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)
     
             self.check_hidden_size(hidden, expected_hidden_size)
     
         def permute_hidden(self, hx, permutation):
    +        # type: (Tensor, Optional[Tensor]) -> Tensor
             if permutation is None:
                 return hx
             return apply_permutation(hx, permutation)
    @@ -632,6 +634,18 @@ 

    Source code for torch.nn.modules.rnn

             super(RNN, self).__init__(mode, *args, **kwargs)
+# XXX: LSTM and GRU implementation is different from RNNBase, this is because:
+# 1. we want to support nn.LSTM and nn.GRU in TorchScript and TorchScript in
+# its current state could not support the python Union Type or Any Type
+# 2. TorchScript static typing does not allow a Function or Callable type in
+# Dict values, so we have to separately call _VF instead of using _rnn_impls
+# 3. This is temporary only and in the transition state that we want to make it
+# on time for the release
+#
+# More discussion details in https://github.com/pytorch/pytorch/pull/23266
+#
+# TODO: remove the overriding implementations for LSTM and GRU when TorchScript
+# support expressing these two modules generally.
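Concretely, the constraint in point 2 is that TorchScript cannot type a dict whose values are callables, which is exactly what `_rnn_impls` is. An illustrative sketch, not patch code:

    from typing import Callable, Dict

    # Fine in eager Python: dispatch through a dict of callables.
    impls: Dict[str, Callable[[int], int]] = {
        'tanh': lambda x: x + 1,
        'relu': lambda x: x * 2,
    }
    print(impls['tanh'](1))

    # TorchScript cannot express Dict[str, Callable] as a value type, so a
    # scripted module must call each op directly (as the GRU override below
    # does with _VF.gru) instead of looking it up in _rnn_impls.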
[docs]class LSTM(RNNBase):
    r"""Applies a multi-layer long short-term memory (LSTM) RNN to an
    input sequence.
@@ -796,14 +810,14 @@

    Source code for torch.nn.modules.rnn

     
         @torch._jit_internal.export
         def forward_packed(self, input, hx=None):
    -        # type: (Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]], Optional[Tuple[Tensor, Tensor]]) -> Tuple[Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]], Tuple[Tensor, Tensor]]  # noqa
    +        # type: (PackedSequence, Optional[Tuple[Tensor, Tensor]]) -> Tuple[PackedSequence, Tuple[Tensor, Tensor]]  # noqa
             input, batch_sizes, sorted_indices, unsorted_indices = input
             max_batch_size = batch_sizes[0]
             max_batch_size = int(max_batch_size)
     
             output, hidden = self.forward_impl(input, hx, batch_sizes, max_batch_size, sorted_indices)
     
    -        output = get_packed_sequence(output, batch_sizes, sorted_indices, unsorted_indices)
    +        output = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
             return output, self.permute_hidden(hidden, unsorted_indices)
     
         @torch._jit_internal.ignore
    @@ -813,7 +827,6 @@ 

    Source code for torch.nn.modules.rnn

             else:
                 return self.forward_tensor(input, hx)
    -
[docs]class GRU(RNNBase):
    r"""Applies a multi-layer gated recurrent unit (GRU) RNN to an
    input sequence.
@@ -918,9 +931,65 @@

    Source code for torch.nn.modules.rnn

             >>> h0 = torch.randn(2, 3, 20)
             >>> output, hn = rnn(input, h0)
         """
    +    __overloads__ = {'forward': ['forward_packed', 'forward_tensor']}
     
         def __init__(self, *args, **kwargs):
    -        super(GRU, self).__init__('GRU', *args, **kwargs)
+        super(GRU, self).__init__('GRU', *args, **kwargs)
+
+    def run_impl(self, input, hx, batch_sizes):
+        # type: (Tensor, Tensor, Optional[Tensor]) -> Tuple[Tensor, Tensor]
+        if batch_sizes is None:
+            result = _VF.gru(input, hx, self._get_flat_weights(), self.bias, self.num_layers,
+                             self.dropout, self.training, self.bidirectional, self.batch_first)
+        else:
+            result = _VF.gru(input, batch_sizes, hx, self._get_flat_weights(), self.bias,
+                             self.num_layers, self.dropout, self.training, self.bidirectional)
+        return result
+
+    def forward_impl(self, input, hx, batch_sizes, max_batch_size, sorted_indices):
+        # type: (Tensor, Optional[Tensor], Optional[Tensor], int, Optional[Tensor]) -> Tuple[Tensor, Tensor]  # noqa
+        if hx is None:
+            num_directions = 2 if self.bidirectional else 1
+            hx = torch.zeros(self.num_layers * num_directions,
+                             max_batch_size, self.hidden_size,
+                             dtype=input.dtype, device=input.device)
+        else:
+            # Each batch of the hidden state should match the input sequence that
+            # the user believes he/she is passing in.
+            hx = self.permute_hidden(hx, sorted_indices)
+
+        self.check_forward_args(input, hx, batch_sizes)
+        result = self.run_impl(input, hx, batch_sizes)
+        output = result[0]
+        hidden = result[1]
+        return output, hidden
+
+    @torch._jit_internal.export
+    def forward_packed(self, input, hx=None):
+        # type: (PackedSequence, Optional[Tensor]) -> Tuple[PackedSequence, Tensor]
+        input, batch_sizes, sorted_indices, unsorted_indices = input
+        max_batch_size = batch_sizes[0]
+        max_batch_size = int(max_batch_size)
+        output, hidden = self.forward_impl(input, hx, batch_sizes, max_batch_size, sorted_indices)
+        output = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
+        return output, self.permute_hidden(hidden, unsorted_indices)
+
+    @torch._jit_internal.export
+    def forward_tensor(self, input, hx=None):
+        # type: (Tensor, Optional[Tensor]) -> Tuple[Tensor, Tensor]
+        batch_sizes = None
+        max_batch_size = input.size(0) if self.batch_first else input.size(1)
+        sorted_indices = None
+        unsorted_indices = None
+        output, hidden = self.forward_impl(input, hx, batch_sizes, max_batch_size, sorted_indices)
+        return output, self.permute_hidden(hidden, unsorted_indices)
+
+    @torch._jit_internal.ignore
+    def forward(self, input, hx=None):
+        if isinstance(input, PackedSequence):
+            return self.forward_packed(input, hx)
+        else:
+            return self.forward_tensor(input, hx)
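From the caller's perspective the overload dispatch above is invisible: `forward` is ignored by the scripter and simply routes by input type. A usage sketch under the 1.2.0 API:

    import torch
    import torch.nn as nn
    from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence

    gru = nn.GRU(input_size=10, hidden_size=20, num_layers=2)

    x = torch.randn(5, 3, 10)        # (seq_len, batch, input_size)
    out, h = gru(x)                  # Tensor input -> forward_tensor

    packed = pack_padded_sequence(x, lengths=[5, 4, 2])  # sorted descending
    out_p, h_p = gru(packed)         # PackedSequence input -> forward_packed
    assert isinstance(out_p, PackedSequence)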
 class RNNCellBase(Module):
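Taken together, `__overloads__`, the two `@torch._jit_internal.export` entry points, and the `@torch._jit_internal.ignore`d `forward` give `GRU` the same packed-vs-padded dispatch the `LSTM` hunk above uses: plain tensors route through `forward_tensor`, `PackedSequence` inputs through `forward_packed`. A minimal sketch of the two call paths, using nothing beyond the documented `nn.GRU` and packing API (shapes follow the docstring's `GRU(10, 20, 2)` example):

    import torch
    import torch.nn as nn
    from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

    rnn = nn.GRU(10, 20, 2)           # input_size=10, hidden_size=20, num_layers=2
    h0 = torch.randn(2, 3, 20)        # (num_layers, batch, hidden_size)

    # Tensor path: forward() routes to forward_tensor().
    x = torch.randn(5, 3, 10)         # (seq_len, batch, input_size)
    output, hn = rnn(x, h0)
    assert output.shape == (5, 3, 20)

    # PackedSequence path: forward() routes to forward_packed().
    packed = pack_padded_sequence(x, lengths=[5, 4, 2])
    packed_out, hn = rnn(packed, h0)
    padded, lengths = pad_packed_sequence(packed_out)
    assert padded.shape == (5, 3, 20)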
    diff --git a/docs/stable/_modules/torch/nn/modules/sparse.html b/docs/stable/_modules/torch/nn/modules/sparse.html
    index 919ca8271f2a..96cc51e74dd9 100644
    --- a/docs/stable/_modules/torch/nn/modules/sparse.html
    +++ b/docs/stable/_modules/torch/nn/modules/sparse.html
@@ -175,6 +175,7 @@ (left sidebar: one navigation entry added; this markup-only hunk recurs in every page below and is elided from here on)
@@ -647,9 +648,6 @@ (page footer: three blank lines dropped; likewise recurring and elided below)
    diff --git a/docs/stable/_modules/torch/nn/modules/transformer.html b/docs/stable/_modules/torch/nn/modules/transformer.html
    index cc9f22157632..b90c8adf938a 100644
    --- a/docs/stable/_modules/torch/nn/modules/transformer.html
    +++ b/docs/stable/_modules/torch/nn/modules/transformer.html
    diff --git a/docs/stable/_modules/torch/nn/modules/upsampling.html b/docs/stable/_modules/torch/nn/modules/upsampling.html
    index 08ffb9d605e4..2bbae5d97a90 100644
    --- a/docs/stable/_modules/torch/nn/modules/upsampling.html
    +++ b/docs/stable/_modules/torch/nn/modules/upsampling.html
    diff --git a/docs/stable/_modules/torch/nn/parallel/data_parallel.html b/docs/stable/_modules/torch/nn/parallel/data_parallel.html
    index 6b357c5ffb6a..429ef03f439a 100644
    --- a/docs/stable/_modules/torch/nn/parallel/data_parallel.html
    +++ b/docs/stable/_modules/torch/nn/parallel/data_parallel.html
diff --git a/docs/stable/_modules/torch/nn/parallel/distributed.html b/docs/stable/_modules/torch/nn/parallel/distributed.html
index 5890abf783cc..b79511ed47fe 100644
--- a/docs/stable/_modules/torch/nn/parallel/distributed.html
+++ b/docs/stable/_modules/torch/nn/parallel/distributed.html
diff --git a/docs/stable/_modules/torch/nn/parameter.html b/docs/stable/_modules/torch/nn/parameter.html
index 6f9b0c5cc5ea..c1bad1fc5d32 100644
--- a/docs/stable/_modules/torch/nn/parameter.html
+++ b/docs/stable/_modules/torch/nn/parameter.html
    diff --git a/docs/stable/_modules/torch/nn/utils/clip_grad.html b/docs/stable/_modules/torch/nn/utils/clip_grad.html
    index 614efa5bfa16..99182d332997 100644
    --- a/docs/stable/_modules/torch/nn/utils/clip_grad.html
    +++ b/docs/stable/_modules/torch/nn/utils/clip_grad.html
    diff --git a/docs/stable/_modules/torch/nn/utils/convert_parameters.html b/docs/stable/_modules/torch/nn/utils/convert_parameters.html
    index f009174139b8..23f5765a1c2f 100644
    --- a/docs/stable/_modules/torch/nn/utils/convert_parameters.html
    +++ b/docs/stable/_modules/torch/nn/utils/convert_parameters.html
diff --git a/docs/stable/_modules/torch/nn/utils/rnn.html b/docs/stable/_modules/torch/nn/utils/rnn.html
index cb575860ebfe..9707ee055bda 100644
--- a/docs/stable/_modules/torch/nn/utils/rnn.html
+++ b/docs/stable/_modules/torch/nn/utils/rnn.html
@@ -650,6 +651,7 @@
     
     
     
 [docs]def pack_sequence(sequences, enforce_sorted=True):
+    # type: (List[Tensor], bool) -> PackedSequence
     r"""Packs a list of variable length Tensors

     ``sequences`` should be a list of Tensors of size ``L x *``, where `L` is
@@ -681,10 +683,6 @@

         """
         lengths = [v.size(0) for v in sequences]
         return pack_padded_sequence(pad_sequence(sequences), lengths, enforce_sorted=enforce_sorted)
-
-
-def get_packed_sequence(data, batch_sizes, sorted_indices, unsorted_indices):
-    return PackedSequence(data, batch_sizes, sorted_indices, unsorted_indices)
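The removed `get_packed_sequence` was a thin wrapper, so the RNN hunks earlier in this patch now construct `PackedSequence` directly. For reference, `pack_sequence` itself behaves as its docstring describes; this is the documented example, unchanged by this patch:

    import torch
    from torch.nn.utils.rnn import pack_sequence

    a = torch.tensor([1, 2, 3])
    b = torch.tensor([4, 5])
    c = torch.tensor([6])
    packed = pack_sequence([a, b, c])   # lengths 3, 2, 1: already sorted
    print(packed.data)                  # tensor([1, 4, 6, 2, 5, 3])
    print(packed.batch_sizes)           # tensor([3, 2, 1])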
    diff --git a/docs/stable/_modules/torch/nn/utils/spectral_norm.html b/docs/stable/_modules/torch/nn/utils/spectral_norm.html
    index c69c7989707b..a81172ae5db5 100644
    --- a/docs/stable/_modules/torch/nn/utils/spectral_norm.html
    +++ b/docs/stable/_modules/torch/nn/utils/spectral_norm.html
    diff --git a/docs/stable/_modules/torch/nn/utils/weight_norm.html b/docs/stable/_modules/torch/nn/utils/weight_norm.html
    index 1a23198494f0..1889e581c54d 100644
    --- a/docs/stable/_modules/torch/nn/utils/weight_norm.html
    +++ b/docs/stable/_modules/torch/nn/utils/weight_norm.html
    diff --git a/docs/stable/_modules/torch/onnx.html b/docs/stable/_modules/torch/onnx.html
    index 264310b44ed6..300dd0b522bf 100644
    --- a/docs/stable/_modules/torch/onnx.html
    +++ b/docs/stable/_modules/torch/onnx.html
    diff --git a/docs/stable/_modules/torch/onnx/operators.html b/docs/stable/_modules/torch/onnx/operators.html
    index 079c680c963c..275e33ca8c8e 100644
    --- a/docs/stable/_modules/torch/onnx/operators.html
    +++ b/docs/stable/_modules/torch/onnx/operators.html
    diff --git a/docs/stable/_modules/torch/optim/adadelta.html b/docs/stable/_modules/torch/optim/adadelta.html
    index eefec9861b15..b3658703c1ae 100644
    --- a/docs/stable/_modules/torch/optim/adadelta.html
    +++ b/docs/stable/_modules/torch/optim/adadelta.html
    diff --git a/docs/stable/_modules/torch/optim/adagrad.html b/docs/stable/_modules/torch/optim/adagrad.html
    index 5b01a4894130..335958939752 100644
    --- a/docs/stable/_modules/torch/optim/adagrad.html
    +++ b/docs/stable/_modules/torch/optim/adagrad.html
    diff --git a/docs/stable/_modules/torch/optim/adam.html b/docs/stable/_modules/torch/optim/adam.html
    index 50623a37eca9..4716bcaec080 100644
    --- a/docs/stable/_modules/torch/optim/adam.html
    +++ b/docs/stable/_modules/torch/optim/adam.html
    diff --git a/docs/stable/_modules/torch/optim/adamax.html b/docs/stable/_modules/torch/optim/adamax.html
    index 209cc4346c33..b499a18b337f 100644
    --- a/docs/stable/_modules/torch/optim/adamax.html
    +++ b/docs/stable/_modules/torch/optim/adamax.html
    diff --git a/docs/stable/_modules/torch/optim/adamw.html b/docs/stable/_modules/torch/optim/adamw.html
    index 679141f67854..b9d0ab18bc2d 100644
    --- a/docs/stable/_modules/torch/optim/adamw.html
    +++ b/docs/stable/_modules/torch/optim/adamw.html
    diff --git a/docs/stable/_modules/torch/optim/asgd.html b/docs/stable/_modules/torch/optim/asgd.html
    index 76d262c0f5d7..eeaf46c4da5c 100644
    --- a/docs/stable/_modules/torch/optim/asgd.html
    +++ b/docs/stable/_modules/torch/optim/asgd.html
    diff --git a/docs/stable/_modules/torch/optim/lbfgs.html b/docs/stable/_modules/torch/optim/lbfgs.html
    index 18fac532a870..d177b14f141c 100644
    --- a/docs/stable/_modules/torch/optim/lbfgs.html
    +++ b/docs/stable/_modules/torch/optim/lbfgs.html
    diff --git a/docs/stable/_modules/torch/optim/lr_scheduler.html b/docs/stable/_modules/torch/optim/lr_scheduler.html
    index faa854fd6779..dea9d4af9ad0 100644
    --- a/docs/stable/_modules/torch/optim/lr_scheduler.html
    +++ b/docs/stable/_modules/torch/optim/lr_scheduler.html
@@ -969,7 +970,7 @@
             self.T_mult = T_mult
             self.eta_min = eta_min
             super(CosineAnnealingWarmRestarts, self).__init__(optimizer, last_epoch)
    -        self.T_cur = last_epoch
    +        self.T_cur = self.last_epoch
     
         def get_lr(self):
             return [self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * self.T_cur / self.T_i)) / 2
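The one-line fix matters because `_LRScheduler.__init__` normalizes `last_epoch` (a fresh scheduler passes `-1`, which the base class steps to `0`), so `T_cur` must be read back from `self.last_epoch` after the `super().__init__` call rather than copied from the raw argument. A short sketch of the resulting restart schedule, using only the documented API (the single dummy parameter just satisfies SGD):

    import torch
    from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts

    params = [torch.nn.Parameter(torch.zeros(1))]
    optimizer = torch.optim.SGD(params, lr=0.1)
    # First restart after T_0 = 2 epochs, then after 4, 8, ... (T_mult = 2).
    scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=2, T_mult=2)
    for epoch in range(8):
        print(epoch, scheduler.get_lr())
        optimizer.step()
        scheduler.step()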
    diff --git a/docs/stable/_modules/torch/optim/optimizer.html b/docs/stable/_modules/torch/optim/optimizer.html
    index 6897119b0285..a1286f575849 100644
    --- a/docs/stable/_modules/torch/optim/optimizer.html
    +++ b/docs/stable/_modules/torch/optim/optimizer.html
    diff --git a/docs/stable/_modules/torch/optim/rmsprop.html b/docs/stable/_modules/torch/optim/rmsprop.html
    index b2881190442c..ea7fc8c31c7d 100644
    --- a/docs/stable/_modules/torch/optim/rmsprop.html
    +++ b/docs/stable/_modules/torch/optim/rmsprop.html
    diff --git a/docs/stable/_modules/torch/optim/rprop.html b/docs/stable/_modules/torch/optim/rprop.html
    index 5e6faaa7fb59..db2f4d233aa6 100644
    --- a/docs/stable/_modules/torch/optim/rprop.html
    +++ b/docs/stable/_modules/torch/optim/rprop.html
    diff --git a/docs/stable/_modules/torch/optim/sgd.html b/docs/stable/_modules/torch/optim/sgd.html
    index 5aef0b8eb97c..e08176ae4e2b 100644
    --- a/docs/stable/_modules/torch/optim/sgd.html
    +++ b/docs/stable/_modules/torch/optim/sgd.html
    diff --git a/docs/stable/_modules/torch/optim/sparse_adam.html b/docs/stable/_modules/torch/optim/sparse_adam.html
    index 427b3495a43c..811e1196bc50 100644
    --- a/docs/stable/_modules/torch/optim/sparse_adam.html
    +++ b/docs/stable/_modules/torch/optim/sparse_adam.html
    diff --git a/docs/stable/_modules/torch/quasirandom.html b/docs/stable/_modules/torch/quasirandom.html
    index 0677b501a31a..775f55415cb1 100644
    --- a/docs/stable/_modules/torch/quasirandom.html
    +++ b/docs/stable/_modules/torch/quasirandom.html
    diff --git a/docs/stable/_modules/torch/random.html b/docs/stable/_modules/torch/random.html
    index 07aa39991713..4553c27b533a 100644
    --- a/docs/stable/_modules/torch/random.html
    +++ b/docs/stable/_modules/torch/random.html
@@ -267,7 +268,7 @@
     from torch._C import default_generator
     
     
-[docs]def set_rng_state(new_state):
+[docs]def set_rng_state(new_state):
     r"""Sets the random number generator state.

     Args:
@@ -276,12 +277,12 @@
     default_generator.set_state(new_state)

-[docs]def get_rng_state():
+[docs]def get_rng_state():
     r"""Returns the random number generator state as a `torch.ByteTensor`."""
     return default_generator.get_state()

-[docs]def manual_seed(seed):
+[docs]def manual_seed(seed):
     r"""Sets the seed for generating random numbers. Returns a
     `torch.Generator` object.
@@ -297,7 +298,7 @@
     return default_generator.manual_seed(seed)

-[docs]def seed():
+[docs]def seed():
     r"""Sets the seed for generating random numbers to a non-deterministic
     random number. Returns a 64 bit number used to seed the RNG.
     """
@@ -310,7 +311,7 @@
     return seed

-[docs]def initial_seed():
+[docs]def initial_seed():
     r"""Returns the initial seed for generating random numbers as a
     Python `long`.
     """
@@ -320,7 +321,7 @@
 _fork_rng_warned_already = False


-@contextlib.contextmanager
+[docs]@contextlib.contextmanager
 def fork_rng(devices=None, enabled=True, _caller="fork_rng", _devices_kw="devices"):
     """
     Forks the RNG, so that when you return, the RNG is reset
@@ -381,7 +382,7 @@
     finally:
         torch.set_rng_state(cpu_rng_state)
         for device, gpu_rng_state in zip(devices, gpu_rng_states):
-            torch.cuda.set_rng_state(gpu_rng_state, device)
+            torch.cuda.set_rng_state(gpu_rng_state, device)
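`fork_rng`, which now carries a `[docs]` anchor, snapshots the CPU RNG state (and, per the loop above, each forked device's CUDA state) on entry and restores it on exit. A minimal CPU-only sketch:

    import torch

    torch.manual_seed(0)
    expected = torch.rand(1)

    torch.manual_seed(0)
    with torch.random.fork_rng(devices=[]):  # empty list: fork the CPU RNG only
        torch.rand(100)                      # perturb the generator freely
    actual = torch.rand(1)                   # outer state was restored on exit

    assert torch.equal(expected, actual)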
    diff --git a/docs/stable/_modules/torch/serialization.html b/docs/stable/_modules/torch/serialization.html
    index a186d363e20d..4b86a626b6d7 100644
    --- a/docs/stable/_modules/torch/serialization.html
    +++ b/docs/stable/_modules/torch/serialization.html
    diff --git a/docs/stable/_modules/torch/sparse.html b/docs/stable/_modules/torch/sparse.html
    index a314ada0d3ff..a3794b1d07e8 100644
    --- a/docs/stable/_modules/torch/sparse.html
    +++ b/docs/stable/_modules/torch/sparse.html
    diff --git a/docs/stable/_modules/torch/storage.html b/docs/stable/_modules/torch/storage.html
    index 46709ebfa72b..9fdc7f065977 100644
    --- a/docs/stable/_modules/torch/storage.html
    +++ b/docs/stable/_modules/torch/storage.html
    diff --git a/docs/stable/_modules/torch/tensor.html b/docs/stable/_modules/torch/tensor.html
    index 4339332358a5..c0b3d669ac9e 100644
    --- a/docs/stable/_modules/torch/tensor.html
    +++ b/docs/stable/_modules/torch/tensor.html
    diff --git a/docs/stable/_modules/torch/utils/checkpoint.html b/docs/stable/_modules/torch/utils/checkpoint.html
    index 2b186a834f9c..911555b1ce08 100644
    --- a/docs/stable/_modules/torch/utils/checkpoint.html
    +++ b/docs/stable/_modules/torch/utils/checkpoint.html
    diff --git a/docs/stable/_modules/torch/utils/cpp_extension.html b/docs/stable/_modules/torch/utils/cpp_extension.html
    index 21c1545dc859..c85bd18282d4 100644
    --- a/docs/stable/_modules/torch/utils/cpp_extension.html
    +++ b/docs/stable/_modules/torch/utils/cpp_extension.html
    diff --git a/docs/stable/_modules/torch/utils/data/_utils/worker.html b/docs/stable/_modules/torch/utils/data/_utils/worker.html
    index 5cf8f0d2237d..c5476d7348ec 100644
    --- a/docs/stable/_modules/torch/utils/data/_utils/worker.html
    +++ b/docs/stable/_modules/torch/utils/data/_utils/worker.html
diff --git a/docs/stable/_modules/torch/utils/data/dataloader.html b/docs/stable/_modules/torch/utils/data/dataloader.html
index 12c8b76a6735..4531e018c226 100644
--- a/docs/stable/_modules/torch/utils/data/dataloader.html
+++ b/docs/stable/_modules/torch/utils/data/dataloader.html

    diff --git a/docs/stable/_modules/torch/utils/data/dataset.html b/docs/stable/_modules/torch/utils/data/dataset.html
    index 4868b9a48895..e2a76a679c30 100644
    --- a/docs/stable/_modules/torch/utils/data/dataset.html
    +++ b/docs/stable/_modules/torch/utils/data/dataset.html
    diff --git a/docs/stable/_modules/torch/utils/data/distributed.html b/docs/stable/_modules/torch/utils/data/distributed.html
    index 8c1f7def5e3f..38efd410509e 100644
    --- a/docs/stable/_modules/torch/utils/data/distributed.html
    +++ b/docs/stable/_modules/torch/utils/data/distributed.html
    diff --git a/docs/stable/_modules/torch/utils/data/sampler.html b/docs/stable/_modules/torch/utils/data/sampler.html
    index f29341048634..e8a9b1bd5217 100644
    --- a/docs/stable/_modules/torch/utils/data/sampler.html
    +++ b/docs/stable/_modules/torch/utils/data/sampler.html
    diff --git a/docs/stable/_modules/torch/utils/tensorboard/writer.html b/docs/stable/_modules/torch/utils/tensorboard/writer.html
    index 8e8ee3cb621f..502d94bec460 100644
    --- a/docs/stable/_modules/torch/utils/tensorboard/writer.html
    +++ b/docs/stable/_modules/torch/utils/tensorboard/writer.html
diff --git a/docs/stable/_modules/torchvision.html b/docs/stable/_modules/torchvision.html
index e37569057c96..13b64497b3ed 100644
--- a/docs/stable/_modules/torchvision.html
+++ b/docs/stable/_modules/torchvision.html

    diff --git a/docs/stable/_modules/torchvision/datasets/cifar.html b/docs/stable/_modules/torchvision/datasets/cifar.html
    index 0d18d5c0cfaa..eec3ffed7b47 100644
    --- a/docs/stable/_modules/torchvision/datasets/cifar.html
    +++ b/docs/stable/_modules/torchvision/datasets/cifar.html
    diff --git a/docs/stable/_modules/torchvision/datasets/cityscapes.html b/docs/stable/_modules/torchvision/datasets/cityscapes.html
    index 69312efd6b2a..b9063c36350d 100644
    --- a/docs/stable/_modules/torchvision/datasets/cityscapes.html
    +++ b/docs/stable/_modules/torchvision/datasets/cityscapes.html
diff --git a/docs/stable/_modules/torchvision/datasets/coco.html b/docs/stable/_modules/torchvision/datasets/coco.html
index ebd65f916bd1..e3826159098d 100644
--- a/docs/stable/_modules/torchvision/datasets/coco.html
+++ b/docs/stable/_modules/torchvision/datasets/coco.html
    diff --git a/docs/stable/_modules/torchvision/datasets/fakedata.html b/docs/stable/_modules/torchvision/datasets/fakedata.html
    index 8b50bb3491b3..9da83ee98306 100644
    --- a/docs/stable/_modules/torchvision/datasets/fakedata.html
    +++ b/docs/stable/_modules/torchvision/datasets/fakedata.html
diff --git a/docs/stable/_modules/torchvision/datasets/flickr.html b/docs/stable/_modules/torchvision/datasets/flickr.html
index 3dd708501085..214339d999e5 100644
--- a/docs/stable/_modules/torchvision/datasets/flickr.html
+++ b/docs/stable/_modules/torchvision/datasets/flickr.html
    diff --git a/docs/stable/_modules/torchvision/datasets/folder.html b/docs/stable/_modules/torchvision/datasets/folder.html
    index e272c0d69ae6..340b1f8188f5 100644
    --- a/docs/stable/_modules/torchvision/datasets/folder.html
    +++ b/docs/stable/_modules/torchvision/datasets/folder.html
    diff --git a/docs/stable/_modules/torchvision/datasets/hmdb51.html b/docs/stable/_modules/torchvision/datasets/hmdb51.html
    index 3cd20e72b9a5..025f5f39af6b 100644
    --- a/docs/stable/_modules/torchvision/datasets/hmdb51.html
    +++ b/docs/stable/_modules/torchvision/datasets/hmdb51.html
    diff --git a/docs/stable/_modules/torchvision/datasets/imagenet.html b/docs/stable/_modules/torchvision/datasets/imagenet.html
    index 209564612c02..f17f5bbba467 100644
    --- a/docs/stable/_modules/torchvision/datasets/imagenet.html
    +++ b/docs/stable/_modules/torchvision/datasets/imagenet.html
diff --git a/docs/stable/_modules/torchvision/datasets/kinetics.html b/docs/stable/_modules/torchvision/datasets/kinetics.html
index afa4d8e433c2..1dd934196523 100644
--- a/docs/stable/_modules/torchvision/datasets/kinetics.html
+++ b/docs/stable/_modules/torchvision/datasets/kinetics.html
diff --git a/docs/stable/_modules/torchvision/datasets/lsun.html b/docs/stable/_modules/torchvision/datasets/lsun.html
index 150d51847ebb..e18c0ff1b10f 100644
--- a/docs/stable/_modules/torchvision/datasets/lsun.html
+++ b/docs/stable/_modules/torchvision/datasets/lsun.html
    diff --git a/docs/stable/_modules/torchvision/datasets/mnist.html b/docs/stable/_modules/torchvision/datasets/mnist.html
    index f85556e654a7..526183cb3721 100644
    --- a/docs/stable/_modules/torchvision/datasets/mnist.html
    +++ b/docs/stable/_modules/torchvision/datasets/mnist.html
    diff --git a/docs/stable/_modules/torchvision/datasets/phototour.html b/docs/stable/_modules/torchvision/datasets/phototour.html
    index 209112c56ef0..1f72a1a911ce 100644
    --- a/docs/stable/_modules/torchvision/datasets/phototour.html
    +++ b/docs/stable/_modules/torchvision/datasets/phototour.html
diff --git a/docs/stable/_modules/torchvision/datasets/sbd.html b/docs/stable/_modules/torchvision/datasets/sbd.html
index 22f20ded3118..362320e52f3b 100644
--- a/docs/stable/_modules/torchvision/datasets/sbd.html
+++ b/docs/stable/_modules/torchvision/datasets/sbd.html

    diff --git a/docs/stable/_modules/torchvision/datasets/sbu.html b/docs/stable/_modules/torchvision/datasets/sbu.html
    index b7a824173385..a3851d379bd0 100644
    --- a/docs/stable/_modules/torchvision/datasets/sbu.html
    +++ b/docs/stable/_modules/torchvision/datasets/sbu.html
    diff --git a/docs/stable/_modules/torchvision/datasets/stl10.html b/docs/stable/_modules/torchvision/datasets/stl10.html
    index 31c2deab371b..87a375882a6c 100644
    --- a/docs/stable/_modules/torchvision/datasets/stl10.html
    +++ b/docs/stable/_modules/torchvision/datasets/stl10.html
    diff --git a/docs/stable/_modules/torchvision/datasets/svhn.html b/docs/stable/_modules/torchvision/datasets/svhn.html
    index aef2a3cd52cd..b024c5b4508d 100644
    --- a/docs/stable/_modules/torchvision/datasets/svhn.html
    +++ b/docs/stable/_modules/torchvision/datasets/svhn.html
    diff --git a/docs/stable/_modules/torchvision/datasets/ucf101.html b/docs/stable/_modules/torchvision/datasets/ucf101.html
    index af15fa6df370..709178c25af0 100644
    --- a/docs/stable/_modules/torchvision/datasets/ucf101.html
    +++ b/docs/stable/_modules/torchvision/datasets/ucf101.html
    diff --git a/docs/stable/_modules/torchvision/datasets/usps.html b/docs/stable/_modules/torchvision/datasets/usps.html
    index 88da37f218c2..1ae4462d379a 100644
    --- a/docs/stable/_modules/torchvision/datasets/usps.html
    +++ b/docs/stable/_modules/torchvision/datasets/usps.html
    diff --git a/docs/stable/_modules/torchvision/datasets/voc.html b/docs/stable/_modules/torchvision/datasets/voc.html
    index c674676376e8..f35b7db1f6ea 100644
    --- a/docs/stable/_modules/torchvision/datasets/voc.html
    +++ b/docs/stable/_modules/torchvision/datasets/voc.html
    diff --git a/docs/stable/_modules/torchvision/io/video.html b/docs/stable/_modules/torchvision/io/video.html
    index 19863f6ae77e..ecb9dd6d8592 100644
    --- a/docs/stable/_modules/torchvision/io/video.html
    +++ b/docs/stable/_modules/torchvision/io/video.html
@@ -269,19 +270,31 @@
     try:
         import av
         av.logging.set_level(av.logging.ERROR)
    +    if not hasattr(av.video.frame.VideoFrame, 'pict_type'):
    +        av = ImportError("""\
    +Your version of PyAV is too old for the necessary video operations in torchvision.
    +If you are on Python 3.5, you will have to build from source (the conda-forge
    +packages are not up-to-date).  See
    +https://github.com/mikeboers/PyAV#installation for instructions on how to
    +install PyAV on your system.
    +""")
     except ImportError:
    -    av = None
    -
    -
    -def _check_av_available():
    -    if av is None:
    -        raise ImportError("""\
    +    av = ImportError("""\
     PyAV is not installed, and is necessary for the video operations in torchvision.
     See https://github.com/mikeboers/PyAV#installation for instructions on how to
     install PyAV on your system.
     """)
     
     
    +def _check_av_available():
    +    if isinstance(av, Exception):
    +        raise av
    +
    +
    +def _av_available():
    +    return not isinstance(av, Exception)
    +
    +
     # PyAV has some reference cycles
     _CALLED_TIMES = 0
     _GC_COLLECTION_INTERVAL = 10
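Storing the `ImportError` itself in `av` (instead of `None`) lets the single `_check_av_available()` raise a failure-specific message for both cases: PyAV absent, or PyAV present but too old. The same sentinel-exception pattern in isolation; module and attribute names here are illustrative only, not part of torchvision:

    # Keep *why* a soft dependency is unusable, not just *that* it is.
    try:
        import some_optional_dep as dep      # hypothetical module
        if not hasattr(dep, "required_feature"):
            dep = ImportError("some_optional_dep is too old")
    except ImportError:
        dep = ImportError("some_optional_dep is not installed")

    def _check_dep_available():
        if isinstance(dep, Exception):
            raise dep                        # original, specific message

    def _dep_available():
        return not isinstance(dep, Exception)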
    diff --git a/docs/stable/_modules/torchvision/models/alexnet.html b/docs/stable/_modules/torchvision/models/alexnet.html
    index 9c387967374b..ef76098f72a5 100644
    --- a/docs/stable/_modules/torchvision/models/alexnet.html
    +++ b/docs/stable/_modules/torchvision/models/alexnet.html
    diff --git a/docs/stable/_modules/torchvision/models/densenet.html b/docs/stable/_modules/torchvision/models/densenet.html
    index cef1c9bf9573..a943e0a4c4c3 100644
    --- a/docs/stable/_modules/torchvision/models/densenet.html
    +++ b/docs/stable/_modules/torchvision/models/densenet.html
    diff --git a/docs/stable/_modules/torchvision/models/detection/faster_rcnn.html b/docs/stable/_modules/torchvision/models/detection/faster_rcnn.html
    index 3a29158c381c..cab25d068e49 100644
    --- a/docs/stable/_modules/torchvision/models/detection/faster_rcnn.html
    +++ b/docs/stable/_modules/torchvision/models/detection/faster_rcnn.html
diff --git a/docs/stable/_modules/torchvision/models/detection/keypoint_rcnn.html b/docs/stable/_modules/torchvision/models/detection/keypoint_rcnn.html
index 3d73eecb9aa4..83189a95cb32 100644
--- a/docs/stable/_modules/torchvision/models/detection/keypoint_rcnn.html
+++ b/docs/stable/_modules/torchvision/models/detection/keypoint_rcnn.html
diff --git a/docs/stable/_modules/torchvision/models/detection/mask_rcnn.html b/docs/stable/_modules/torchvision/models/detection/mask_rcnn.html
index 1ae6f1dd33fd..7ca08885118f 100644
--- a/docs/stable/_modules/torchvision/models/detection/mask_rcnn.html
+++ b/docs/stable/_modules/torchvision/models/detection/mask_rcnn.html
diff --git a/docs/stable/_modules/torchvision/models/googlenet.html b/docs/stable/_modules/torchvision/models/googlenet.html
index 938d3694852c..b5f6b09fef11 100644
--- a/docs/stable/_modules/torchvision/models/googlenet.html
+++ b/docs/stable/_modules/torchvision/models/googlenet.html
@@ -474,11 +475,11 @@
    x = torch.flatten(x, 1)
             # N x 2048
             x = F.relu(self.fc1(x), inplace=True)
    -        # N x 2048
    +        # N x 1024
             x = F.dropout(x, 0.7, training=self.training)
    -        # N x 2048
    -        x = self.fc2(x)
             # N x 1024
    +        x = self.fc2(x)
    +        # N x 1000 (num_classes)
     
             return x
     
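The corrected comments now track the auxiliary classifier's real widths: `fc1` maps 2048 -> 1024, dropout preserves 1024, and `fc2` maps 1024 -> `num_classes` (1000 by default). A standalone shape check of the commented flow, with plain `nn.Linear` stand-ins rather than the `InceptionAux` module itself:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    fc1 = nn.Linear(2048, 1024)
    fc2 = nn.Linear(1024, 1000)                   # 1000 = num_classes

    x = torch.randn(4, 2048)                      # N x 2048
    x = F.relu(fc1(x), inplace=True)              # N x 1024
    x = F.dropout(x, 0.7, training=False)         # N x 1024
    x = fc2(x)                                    # N x 1000
    assert x.shape == (4, 1000)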
    diff --git a/docs/stable/_modules/torchvision/models/inception.html b/docs/stable/_modules/torchvision/models/inception.html
    index 7c994bba4891..86238820f384 100644
    --- a/docs/stable/_modules/torchvision/models/inception.html
    +++ b/docs/stable/_modules/torchvision/models/inception.html
    diff --git a/docs/stable/_modules/torchvision/models/mnasnet.html b/docs/stable/_modules/torchvision/models/mnasnet.html
    index 18798ecd0316..9e0683e21049 100644
    --- a/docs/stable/_modules/torchvision/models/mnasnet.html
    +++ b/docs/stable/_modules/torchvision/models/mnasnet.html
    diff --git a/docs/stable/_modules/torchvision/models/mobilenet.html b/docs/stable/_modules/torchvision/models/mobilenet.html
    index 99496a04a932..4ec96deb7b13 100644
    --- a/docs/stable/_modules/torchvision/models/mobilenet.html
    +++ b/docs/stable/_modules/torchvision/models/mobilenet.html
    diff --git a/docs/stable/_modules/torchvision/models/resnet.html b/docs/stable/_modules/torchvision/models/resnet.html
    index 1178421bab5c..3861144024c4 100644
    --- a/docs/stable/_modules/torchvision/models/resnet.html
    +++ b/docs/stable/_modules/torchvision/models/resnet.html
@@ -484,7 +485,7 @@
     
     
 [docs]def resnet18(pretrained=False, progress=True, **kwargs):
     r"""ResNet-18 model from
-    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>'_
+    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
@@ -496,7 +497,7 @@


 [docs]def resnet34(pretrained=False, progress=True, **kwargs):
     r"""ResNet-34 model from
-    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>'_
+    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
@@ -508,7 +509,7 @@


 [docs]def resnet50(pretrained=False, progress=True, **kwargs):
     r"""ResNet-50 model from
-    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>'_
+    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
@@ -520,7 +521,7 @@


 [docs]def resnet101(pretrained=False, progress=True, **kwargs):
     r"""ResNet-101 model from
-    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>'_
+    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
@@ -532,7 +533,7 @@


 [docs]def resnet152(pretrained=False, progress=True, **kwargs):
     r"""ResNet-152 model from
-    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>'_
+    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
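Besides the markup repair (each docstring link ended in a straight quote, `'_`, instead of the backquote-underscore that terminates a reStructuredText hyperlink), all five constructors share the documented signature. Typical use, downloading the ImageNet weights on first call:

    import torchvision.models as models

    model = models.resnet18(pretrained=True, progress=True)
    model.eval()   # switch to inference mode for the pre-trained weights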

    diff --git a/docs/stable/_modules/torchvision/models/segmentation/segmentation.html b/docs/stable/_modules/torchvision/models/segmentation/segmentation.html
    index 8dc77158554d..be0779503141 100644
    --- a/docs/stable/_modules/torchvision/models/segmentation/segmentation.html
    +++ b/docs/stable/_modules/torchvision/models/segmentation/segmentation.html
diff --git a/docs/stable/_modules/torchvision/models/shufflenetv2.html b/docs/stable/_modules/torchvision/models/shufflenetv2.html
index 1f78d6c76142..8dc5b8e53631 100644
--- a/docs/stable/_modules/torchvision/models/shufflenetv2.html
+++ b/docs/stable/_modules/torchvision/models/shufflenetv2.html
diff --git a/docs/stable/_modules/torchvision/models/squeezenet.html b/docs/stable/_modules/torchvision/models/squeezenet.html
index 675c12d1923f..478285f84be5 100644
--- a/docs/stable/_modules/torchvision/models/squeezenet.html
+++ b/docs/stable/_modules/torchvision/models/squeezenet.html
diff --git a/docs/stable/_modules/torchvision/models/vgg.html b/docs/stable/_modules/torchvision/models/vgg.html
index b8b9a1f9aba0..af17cef94a05 100644
--- a/docs/stable/_modules/torchvision/models/vgg.html
+++ b/docs/stable/_modules/torchvision/models/vgg.html
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -360,7 +361,7 @@

    Source code for torchvision.models.vgg

     
     
    [docs]def vgg11(pretrained=False, progress=True, **kwargs): r"""VGG 11-layer model (configuration "A") from - `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>'_ + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet @@ -371,7 +372,7 @@

    Source code for torchvision.models.vgg

     
     
    [docs]def vgg11_bn(pretrained=False, progress=True, **kwargs): r"""VGG 11-layer model (configuration "A") with batch normalization - `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>'_ + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet @@ -382,7 +383,7 @@

    Source code for torchvision.models.vgg

     
     
    [docs]def vgg13(pretrained=False, progress=True, **kwargs): r"""VGG 13-layer model (configuration "B") - `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>'_ + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet @@ -393,7 +394,7 @@

    Source code for torchvision.models.vgg

     
     
    [docs]def vgg13_bn(pretrained=False, progress=True, **kwargs): r"""VGG 13-layer model (configuration "B") with batch normalization - `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>'_ + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet @@ -404,7 +405,7 @@

    Source code for torchvision.models.vgg

     
     
    [docs]def vgg16(pretrained=False, progress=True, **kwargs): r"""VGG 16-layer model (configuration "D") - `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>'_ + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet @@ -415,7 +416,7 @@

    Source code for torchvision.models.vgg

     
     
 [docs]def vgg16_bn(pretrained=False, progress=True, **kwargs):
     r"""VGG 16-layer model (configuration "D") with batch normalization
-    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>'_
+    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
@@ -426,7 +427,7 @@

    Source code for torchvision.models.vgg

     
     
 [docs]def vgg19(pretrained=False, progress=True, **kwargs):
     r"""VGG 19-layer model (configuration "E")
-    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>'_
+    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
@@ -437,7 +438,7 @@

    Source code for torchvision.models.vgg

     
     
 [docs]def vgg19_bn(pretrained=False, progress=True, **kwargs):
     r"""VGG 19-layer model (configuration 'E') with batch normalization
-    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>'_
+    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
@@ -496,9 +497,6 @@

    Source code for torchvision.models.vgg

              
              
              
          
     
       
    diff --git a/docs/stable/_modules/torchvision/models/video/resnet.html b/docs/stable/_modules/torchvision/models/video/resnet.html
    new file mode 100644
    index 000000000000..45c3c31e66b2
    --- /dev/null
    +++ b/docs/stable/_modules/torchvision/models/video/resnet.html
    @@ -0,0 +1,853 @@
+  torchvision.models.video.resnet — PyTorch master documentation
+  (new page's head, navigation, and sidebar markup omitted)

    Source code for torchvision.models.video.resnet

    +import torch
    +import torch.nn as nn
    +
    +from ..utils import load_state_dict_from_url
    +
    +
    +__all__ = ['r3d_18', 'mc3_18', 'r2plus1d_18']
    +
    +model_urls = {
    +    'r3d_18': 'https://download.pytorch.org/models/r3d_18-b3b3357e.pth',
    +    'mc3_18': 'https://download.pytorch.org/models/mc3_18-a90a0ba3.pth',
    +    'r2plus1d_18': 'https://download.pytorch.org/models/r2plus1d_18-91a641e6.pth',
    +}
    +
    +
    +class Conv3DSimple(nn.Conv3d):
    +    def __init__(self,
    +                 in_planes,
    +                 out_planes,
    +                 midplanes=None,
    +                 stride=1,
    +                 padding=1):
    +
    +        super(Conv3DSimple, self).__init__(
    +            in_channels=in_planes,
    +            out_channels=out_planes,
    +            kernel_size=(3, 3, 3),
    +            stride=stride,
    +            padding=padding,
    +            bias=False)
    +
    +    @staticmethod
    +    def get_downsample_stride(stride):
    +        return (stride, stride, stride)
    +
    +
    +class Conv2Plus1D(nn.Sequential):
    +
    +    def __init__(self,
    +                 in_planes,
    +                 out_planes,
    +                 midplanes,
    +                 stride=1,
    +                 padding=1):
    +        super(Conv2Plus1D, self).__init__(
    +            nn.Conv3d(in_planes, midplanes, kernel_size=(1, 3, 3),
    +                      stride=(1, stride, stride), padding=(0, padding, padding),
    +                      bias=False),
    +            nn.BatchNorm3d(midplanes),
    +            nn.ReLU(inplace=True),
    +            nn.Conv3d(midplanes, out_planes, kernel_size=(3, 1, 1),
    +                      stride=(stride, 1, 1), padding=(padding, 0, 0),
    +                      bias=False))
    +
    +    @staticmethod
    +    def get_downsample_stride(stride):
    +        return (stride, stride, stride)
    +
    +
    +class Conv3DNoTemporal(nn.Conv3d):
    +
    +    def __init__(self,
    +                 in_planes,
    +                 out_planes,
    +                 midplanes=None,
    +                 stride=1,
    +                 padding=1):
    +
    +        super(Conv3DNoTemporal, self).__init__(
    +            in_channels=in_planes,
    +            out_channels=out_planes,
    +            kernel_size=(1, 3, 3),
    +            stride=(1, stride, stride),
    +            padding=(0, padding, padding),
    +            bias=False)
    +
    +    @staticmethod
    +    def get_downsample_stride(stride):
    +        return (1, stride, stride)
    +
    +
    +class BasicBlock(nn.Module):
    +
    +    expansion = 1
    +
    +    def __init__(self, inplanes, planes, conv_builder, stride=1, downsample=None):
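+        # Editor's note (not part of the original patch): midplanes below is chosen
+        # so that the factored (2+1)D convolution keeps roughly the same parameter
+        # count as a full 3x3x3 convolution, following the R(2+1)D paper
+        # (arXiv:1711.11248).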
    +        midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes)
    +
    +        super(BasicBlock, self).__init__()
    +        self.conv1 = nn.Sequential(
    +            conv_builder(inplanes, planes, midplanes, stride),
    +            nn.BatchNorm3d(planes),
    +            nn.ReLU(inplace=True)
    +        )
    +        self.conv2 = nn.Sequential(
    +            conv_builder(planes, planes, midplanes),
    +            nn.BatchNorm3d(planes)
    +        )
    +        self.relu = nn.ReLU(inplace=True)
    +        self.downsample = downsample
    +        self.stride = stride
    +
    +    def forward(self, x):
    +        residual = x
    +
    +        out = self.conv1(x)
    +        out = self.conv2(out)
    +        if self.downsample is not None:
    +            residual = self.downsample(x)
    +
    +        out += residual
    +        out = self.relu(out)
    +
    +        return out
    +
    +
    +class Bottleneck(nn.Module):
    +    expansion = 4
    +
    +    def __init__(self, inplanes, planes, conv_builder, stride=1, downsample=None):
    +
    +        super(Bottleneck, self).__init__()
    +        midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes)
    +
    +        # 1x1x1
    +        self.conv1 = nn.Sequential(
    +            nn.Conv3d(inplanes, planes, kernel_size=1, bias=False),
    +            nn.BatchNorm3d(planes),
    +            nn.ReLU(inplace=True)
    +        )
    +        # Second kernel
    +        self.conv2 = nn.Sequential(
    +            conv_builder(planes, planes, midplanes, stride),
    +            nn.BatchNorm3d(planes),
    +            nn.ReLU(inplace=True)
    +        )
    +
    +        # 1x1x1
    +        self.conv3 = nn.Sequential(
    +            nn.Conv3d(planes, planes * self.expansion, kernel_size=1, bias=False),
    +            nn.BatchNorm3d(planes * self.expansion)
    +        )
    +        self.relu = nn.ReLU(inplace=True)
    +        self.downsample = downsample
    +        self.stride = stride
    +
    +    def forward(self, x):
    +        residual = x
    +
    +        out = self.conv1(x)
    +        out = self.conv2(out)
    +        out = self.conv3(out)
    +
    +        if self.downsample is not None:
    +            residual = self.downsample(x)
    +
    +        out += residual
    +        out = self.relu(out)
    +
    +        return out
    +
    +
    +class BasicStem(nn.Sequential):
    +    """The default conv-batchnorm-relu stem
    +    """
    +    def __init__(self):
    +        super(BasicStem, self).__init__(
    +            nn.Conv3d(3, 64, kernel_size=(3, 7, 7), stride=(1, 2, 2),
    +                      padding=(1, 3, 3), bias=False),
    +            nn.BatchNorm3d(64),
    +            nn.ReLU(inplace=True))
    +
    +
    +class R2Plus1dStem(nn.Sequential):
    +    """R(2+1)D stem is different than the default one as it uses separated 3D convolution
    +    """
    +    def __init__(self):
    +        super(R2Plus1dStem, self).__init__(
    +            nn.Conv3d(3, 45, kernel_size=(1, 7, 7),
    +                      stride=(1, 2, 2), padding=(0, 3, 3),
    +                      bias=False),
    +            nn.BatchNorm3d(45),
    +            nn.ReLU(inplace=True),
    +            nn.Conv3d(45, 64, kernel_size=(3, 1, 1),
    +                      stride=(1, 1, 1), padding=(1, 0, 0),
    +                      bias=False),
    +            nn.BatchNorm3d(64),
    +            nn.ReLU(inplace=True))
    +
    +
    +class VideoResNet(nn.Module):
    +
    +    def __init__(self, block, conv_makers, layers,
    +                 stem, num_classes=400,
    +                 zero_init_residual=False):
    +        """Generic resnet video generator.
    +
    +        Args:
    +            block (nn.Module): resnet building block
    +            conv_makers (list(functions)): generator function for each layer
    +            layers (List[int]): number of blocks per layer
    +            stem (nn.Module, optional): Resnet stem, if None, defaults to conv-bn-relu. Defaults to None.
    +            num_classes (int, optional): Dimension of the final FC layer. Defaults to 400.
    +            zero_init_residual (bool, optional): Zero init bottleneck residual BN. Defaults to False.
    +        """
    +        super(VideoResNet, self).__init__()
    +        self.inplanes = 64
    +
    +        self.stem = stem()
    +
    +        self.layer1 = self._make_layer(block, conv_makers[0], 64, layers[0], stride=1)
    +        self.layer2 = self._make_layer(block, conv_makers[1], 128, layers[1], stride=2)
    +        self.layer3 = self._make_layer(block, conv_makers[2], 256, layers[2], stride=2)
    +        self.layer4 = self._make_layer(block, conv_makers[3], 512, layers[3], stride=2)
    +
    +        self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
    +        self.fc = nn.Linear(512 * block.expansion, num_classes)
    +
    +        # init weights
    +        self._initialize_weights()
    +
    +        if zero_init_residual:
    +            for m in self.modules():
    +                if isinstance(m, Bottleneck):
    +                    nn.init.constant_(m.bn3.weight, 0)
    +
    +    def forward(self, x):
    +        x = self.stem(x)
    +
    +        x = self.layer1(x)
    +        x = self.layer2(x)
    +        x = self.layer3(x)
    +        x = self.layer4(x)
    +
    +        x = self.avgpool(x)
    +        # Flatten the layer to fc
    +        x = x.flatten(1)
    +        x = self.fc(x)
    +
    +        return x
    +
    +    def _make_layer(self, block, conv_builder, planes, blocks, stride=1):
    +        downsample = None
    +
    +        if stride != 1 or self.inplanes != planes * block.expansion:
    +            ds_stride = conv_builder.get_downsample_stride(stride)
    +            downsample = nn.Sequential(
    +                nn.Conv3d(self.inplanes, planes * block.expansion,
    +                          kernel_size=1, stride=ds_stride, bias=False),
    +                nn.BatchNorm3d(planes * block.expansion)
    +            )
    +        layers = []
    +        layers.append(block(self.inplanes, planes, conv_builder, stride, downsample))
    +
    +        self.inplanes = planes * block.expansion
    +        for i in range(1, blocks):
    +            layers.append(block(self.inplanes, planes, conv_builder))
    +
    +        return nn.Sequential(*layers)
    +
    +    def _initialize_weights(self):
    +        for m in self.modules():
    +            if isinstance(m, nn.Conv3d):
    +                nn.init.kaiming_normal_(m.weight, mode='fan_out',
    +                                        nonlinearity='relu')
    +                if m.bias is not None:
    +                    nn.init.constant_(m.bias, 0)
    +            elif isinstance(m, nn.BatchNorm3d):
    +                nn.init.constant_(m.weight, 1)
    +                nn.init.constant_(m.bias, 0)
    +            elif isinstance(m, nn.Linear):
    +                nn.init.normal_(m.weight, 0, 0.01)
    +                nn.init.constant_(m.bias, 0)
    +
    +
    +def _video_resnet(arch, pretrained=False, progress=True, **kwargs):
    +    model = VideoResNet(**kwargs)
    +
    +    if pretrained:
    +        state_dict = load_state_dict_from_url(model_urls[arch],
    +                                              progress=progress)
    +        model.load_state_dict(state_dict)
    +    return model
    +
    +
    +
+[docs]def r3d_18(pretrained=False, progress=True, **kwargs):
+    """Construct 18 layer Resnet3D model as in
+    https://arxiv.org/abs/1711.11248
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on Kinetics-400
+        progress (bool): If True, displays a progress bar of the download to stderr
+
+    Returns:
+        nn.Module: R3D-18 network
+    """
+    return _video_resnet('r3d_18',
+                         pretrained, progress,
+                         block=BasicBlock,
+                         conv_makers=[Conv3DSimple] * 4,
+                         layers=[2, 2, 2, 2],
+                         stem=BasicStem, **kwargs)
+
+
+[docs]def mc3_18(pretrained=False, progress=True, **kwargs):
+    """Constructor for 18 layer Mixed Convolution network as in
+    https://arxiv.org/abs/1711.11248
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on Kinetics-400
+        progress (bool): If True, displays a progress bar of the download to stderr
+
+    Returns:
+        nn.Module: MC3 Network definition
+    """
+    return _video_resnet('mc3_18',
+                         pretrained, progress,
+                         block=BasicBlock,
+                         conv_makers=[Conv3DSimple] + [Conv3DNoTemporal] * 3,
+                         layers=[2, 2, 2, 2],
+                         stem=BasicStem, **kwargs)
+
+
+[docs]def r2plus1d_18(pretrained=False, progress=True, **kwargs):
+    """Constructor for the 18 layer deep R(2+1)D network as in
+    https://arxiv.org/abs/1711.11248
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on Kinetics-400
+        progress (bool): If True, displays a progress bar of the download to stderr
+
+    Returns:
+        nn.Module: R(2+1)D-18 network
+    """
+    return _video_resnet('r2plus1d_18',
+                         pretrained, progress,
+                         block=BasicBlock,
+                         conv_makers=[Conv2Plus1D] * 4,
+                         layers=[2, 2, 2, 2],
+                         stem=R2Plus1dStem, **kwargs)
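For reference, a minimal usage sketch of the module above (editor's illustration, not part of the patch; input follows the (N, C, T, H, W) layout these models expect):

>>> import torch, torchvision
>>> model = torchvision.models.video.r3d_18(pretrained=False)
>>> clip = torch.randn(1, 3, 16, 112, 112)   # one 16-frame RGB clip at 112x112
>>> model(clip).shape
torch.Size([1, 400])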
\ No newline at end of file
diff --git a/docs/stable/_modules/torchvision/ops/boxes.html b/docs/stable/_modules/torchvision/ops/boxes.html
index 1e047f69b311..bfddcd0ede98 100644
--- a/docs/stable/_modules/torchvision/ops/boxes.html
+++ b/docs/stable/_modules/torchvision/ops/boxes.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
+ • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
@@ -469,9 +470,6 @@

    Source code for torchvision.ops.boxes

              
              
              
          
     
       
    diff --git a/docs/stable/_modules/torchvision/ops/roi_align.html b/docs/stable/_modules/torchvision/ops/roi_align.html
    index 29b0ad576663..b29ef7315dc4 100644
    --- a/docs/stable/_modules/torchvision/ops/roi_align.html
    +++ b/docs/stable/_modules/torchvision/ops/roi_align.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
+ • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
@@ -404,9 +405,6 @@

    Source code for torchvision.ops.roi_align

              
              
              
          
     
       
    diff --git a/docs/stable/_modules/torchvision/ops/roi_pool.html b/docs/stable/_modules/torchvision/ops/roi_pool.html
    index fc307c2d72c4..e9a87e6e46f9 100644
    --- a/docs/stable/_modules/torchvision/ops/roi_pool.html
    +++ b/docs/stable/_modules/torchvision/ops/roi_pool.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
+ • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
@@ -395,9 +396,6 @@

    Source code for torchvision.ops.roi_pool

              
              
              
          
     
       
    diff --git a/docs/stable/_modules/torchvision/transforms/functional.html b/docs/stable/_modules/torchvision/transforms/functional.html
    index 14e35a37f2ca..eae2a943aea5 100644
    --- a/docs/stable/_modules/torchvision/transforms/functional.html
    +++ b/docs/stable/_modules/torchvision/transforms/functional.html
    @@ -175,6 +175,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
+ • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
@@ -1156,9 +1157,6 @@

    Source code for torchvision.transforms.functional

diff --git a/docs/stable/_modules/torchvision/transforms/transforms.html b/docs/stable/_modules/torchvision/transforms/transforms.html
index fd75d31d9dcb..25795afa7439 100644
--- a/docs/stable/_modules/torchvision/transforms/transforms.html
+++ b/docs/stable/_modules/torchvision/transforms/transforms.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
+ • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
@@ -1601,9 +1602,6 @@

    Source code for torchvision.transforms.transforms

diff --git a/docs/stable/_modules/torchvision/utils.html b/docs/stable/_modules/torchvision/utils.html
index c2876ea214af..c514b2f95232 100644
--- a/docs/stable/_modules/torchvision/utils.html
+++ b/docs/stable/_modules/torchvision/utils.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
+ • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
@@ -418,9 +419,6 @@

    Source code for torchvision.utils

              
              
              
          
     
       
    diff --git a/docs/stable/_sources/index.rst.txt b/docs/stable/_sources/index.rst.txt
    index b22cb913d816..93c118f1e418 100644
    --- a/docs/stable/_sources/index.rst.txt
    +++ b/docs/stable/_sources/index.rst.txt
    @@ -45,6 +45,7 @@ PyTorch is an optimized tensor library for deep learning using GPUs and CPUs.
        torch.hub 
        torch.jit 
        torch.multiprocessing 
    +   torch.random 
        torch.utils.bottleneck 
        torch.utils.checkpoint 
        torch.utils.cpp_extension 
    diff --git a/docs/stable/_sources/random.rst.txt b/docs/stable/_sources/random.rst.txt
    new file mode 100644
    index 000000000000..c7eaa945a72f
    --- /dev/null
    +++ b/docs/stable/_sources/random.rst.txt
    @@ -0,0 +1,21 @@
    +torch.random
    +===================================
    +
    +.. currentmodule:: torch.random
    +
    +.. automodule:: torch.random
    +   :members:
    +
    +Random Number Generator
    +-------------------------
    +.. autofunction:: get_rng_state
    +.. autofunction:: get_rng_state_all
    +.. autofunction:: set_rng_state
    +.. autofunction:: set_rng_state_all
    +.. autofunction:: manual_seed
    +.. autofunction:: manual_seed_all
    +.. autofunction:: seed
    +.. autofunction:: seed_all
    +.. autofunction:: initial_seed
    +.. autofunction:: fork_rng
    +
    diff --git a/docs/stable/_sources/tensors.rst.txt b/docs/stable/_sources/tensors.rst.txt
    index 55a911267038..36eab61b74a4 100644
    --- a/docs/stable/_sources/tensors.rst.txt
    +++ b/docs/stable/_sources/tensors.rst.txt
    @@ -469,9 +469,9 @@ view of a storage and defines numeric operations on it.
        .. automethod:: where
        .. automethod:: zero_
     
    -.. class:: ByteTensor()
    +.. class:: BoolTensor()
     
    -   The following methods are unique to :class:`torch.ByteTensor`.
    +   The following methods are unique to :class:`torch.BoolTensor`.
     
        .. automethod:: all
        .. automethod:: any
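A quick illustration of the two methods on the renamed class (editor's sketch, not part of the patch):

>>> t = torch.tensor([True, False, True])
>>> t.all()
tensor(False)
>>> t.any()
tensor(True)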
    diff --git a/docs/stable/_sources/torchvision/models.rst.txt b/docs/stable/_sources/torchvision/models.rst.txt
    index dda7adf6aaaf..e1a141092dcd 100644
    --- a/docs/stable/_sources/torchvision/models.rst.txt
    +++ b/docs/stable/_sources/torchvision/models.rst.txt
    @@ -4,8 +4,8 @@ torchvision.models
     
     The models subpackage contains definitions of models for addressing
     different tasks, including: image classification, pixelwise semantic
    -segmentation, object detection, instance segmentation and person
    -keypoint detection.
    +segmentation, object detection, instance segmentation, person
    +keypoint detection and video classification.
     
     
     Classification
    @@ -395,3 +395,51 @@ Keypoint R-CNN
     
     .. autofunction:: torchvision.models.detection.keypointrcnn_resnet50_fpn
     
    +
    +Video classification
    +====================
    +
    +We provide models for action recognition pre-trained on Kinetics-400.
    +They have all been trained with the scripts provided in ``references/video_classification``.
    +
    +All pre-trained models expect input images normalized in the same way,
+i.e. mini-batches of 3-channel RGB videos of shape (3 x T x H x W),
+where H and W are expected to be 112, and T is the number of video frames in a clip.
+The images have to be loaded into a range of [0, 1] and then normalized
+using ``mean = [0.43216, 0.394666, 0.37645]`` and ``std = [0.22803, 0.22145, 0.216989]``.
    +
    +
    +.. note::
    +  The normalization parameters are different from the image classification ones, and correspond
    +  to the mean and std from Kinetics-400.
    +
    +.. note::
    +  For now, normalization code can be found in ``references/video_classification/transforms.py``,
    +  see the ``Normalize`` function there. Note that it differs from standard normalization for
    +  images because it assumes the video is 4d.
    +
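As an editorial aside, the normalization described above amounts to the following sketch (illustrative only; the shipped helper lives in ``references/video_classification/transforms.py``):

>>> import torch
>>> mean = torch.tensor([0.43216, 0.394666, 0.37645]).view(3, 1, 1, 1)
>>> std = torch.tensor([0.22803, 0.22145, 0.216989]).view(3, 1, 1, 1)
>>> video = torch.rand(3, 16, 112, 112)   # (C, T, H, W), values already in [0, 1]
>>> normalized = (video - mean) / std     # broadcasts over T, H, W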
    +Kinetics 1-crop accuracies for clip length 16 (16x112x112)
    +
    +================================  =============   =============
    +Network                           Clip acc@1      Clip acc@5
    +================================  =============   =============
    +ResNet 3D 18                      52.75           75.45
    +ResNet MC 18                      53.90           76.29
    +ResNet (2+1)D                     57.50           78.81
    +================================  =============   =============
    +
    +
    +ResNet 3D
    +----------
    +
    +.. autofunction:: torchvision.models.video.r3d_18
    +
    +ResNet Mixed Convolution
    +------------------------
    +
    +.. autofunction:: torchvision.models.video.mc3_18
    +
    +ResNet (2+1)D
    +-------------
    +
    +.. autofunction:: torchvision.models.video.r2plus1d_18
    diff --git a/docs/stable/_static/katex_autorenderer.js b/docs/stable/_static/katex_autorenderer.js
    deleted file mode 100644
    index cce4350bb18c..000000000000
    --- a/docs/stable/_static/katex_autorenderer.js
    +++ /dev/null
    @@ -1,11 +0,0 @@
    -katex_options = {
    -delimiters : [
    -   {left: "$$", right: "$$", display: true},
    -   {left: "\\(", right: "\\)", display: false},
    -   {left: "\\[", right: "\\]", display: true}
    -],
    -
    -}
    -document.addEventListener("DOMContentLoaded", function() {
    -  renderMathInElement(document.body, katex_options);
    -});
    diff --git a/docs/stable/autograd.html b/docs/stable/autograd.html
    index 3757b37295ad..6de70dbe6473 100644
    --- a/docs/stable/autograd.html
    +++ b/docs/stable/autograd.html
    @@ -177,6 +177,7 @@
     
  • torch.hub
  • torch.jit
  • torch.multiprocessing
+ • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
@@ -1161,9 +1162,6 @@

Anomaly detection

diff --git a/docs/stable/bottleneck.html b/docs/stable/bottleneck.html
index 43f62e9360b9..228cf9a184bf 100644
--- a/docs/stable/bottleneck.html
+++ b/docs/stable/bottleneck.html
@@ -35,7 +35,7 @@
@@ -177,6 +177,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
+ • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
@@ -330,7 +331,7 @@

torch.utils.bottleneck

    @@ -382,9 +383,6 @@

torch.utils.bottleneck

diff --git a/docs/stable/checkpoint.html b/docs/stable/checkpoint.html
index f9e9639bea7e..f421d4d265e5 100644
--- a/docs/stable/checkpoint.html
+++ b/docs/stable/checkpoint.html
@@ -177,6 +177,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
+ • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
@@ -436,9 +437,6 @@

torch.utils.checkpoint

diff --git a/docs/stable/community/contribution_guide.html b/docs/stable/community/contribution_guide.html
index 875a657e4908..f5461f2c5776 100644
--- a/docs/stable/community/contribution_guide.html
+++ b/docs/stable/community/contribution_guide.html
@@ -177,6 +177,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
+ • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
@@ -705,9 +706,6 @@

Contributing a new Tutorial

diff --git a/docs/stable/community/governance.html b/docs/stable/community/governance.html
index 15c0863ae6bc..f354958cfc9e 100644
--- a/docs/stable/community/governance.html
+++ b/docs/stable/community/governance.html
@@ -177,6 +177,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
+ • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
@@ -483,9 +484,6 @@

FAQ

diff --git a/docs/stable/community/persons_of_interest.html b/docs/stable/community/persons_of_interest.html
index 69e2c9722c51..3b6930b793d8 100644
--- a/docs/stable/community/persons_of_interest.html
+++ b/docs/stable/community/persons_of_interest.html
@@ -177,6 +177,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
+ • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
@@ -498,9 +499,6 @@

PPC

diff --git a/docs/stable/cpp_extension.html b/docs/stable/cpp_extension.html
index 10ea2fae8417..75c5a714ed6e 100644
--- a/docs/stable/cpp_extension.html
+++ b/docs/stable/cpp_extension.html
@@ -177,6 +177,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
+ • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
@@ -569,9 +570,6 @@

torch.utils.cpp_extension

diff --git a/docs/stable/cuda.html b/docs/stable/cuda.html
index b391c6b65eed..3338ce517242 100644
--- a/docs/stable/cuda.html
+++ b/docs/stable/cuda.html
@@ -177,6 +177,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
+ • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
@@ -626,7 +627,7 @@

    Random Number Generator
    Parameters
-  • new_state (torch.ByteTensor) – The desired state
+  • new_state (torch.ByteTensor) – The desired state
+  • device (torch.device or int, optional) – The device to set the RNG state. Default: 'cuda' (i.e., torch.device('cuda'), the current CUDA device).

    @@ -1252,9 +1253,6 @@

NVIDIA Tools Extension (NVTX)

diff --git a/docs/stable/cuda_deterministic.html b/docs/stable/cuda_deterministic.html
index e90b7900154e..0265b0f1e4ca 100644
--- a/docs/stable/cuda_deterministic.html
+++ b/docs/stable/cuda_deterministic.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
+ • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
@@ -320,9 +321,6 @@

diff --git a/docs/stable/cuda_deterministic_backward.html b/docs/stable/cuda_deterministic_backward.html
index 7c47dc840b6d..91c378593fda 100644
--- a/docs/stable/cuda_deterministic_backward.html
+++ b/docs/stable/cuda_deterministic_backward.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
+ • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
@@ -320,9 +321,6 @@

diff --git a/docs/stable/cudnn_deterministic.html b/docs/stable/cudnn_deterministic.html
index 90891abdb3e5..fd6d2c25477f 100644
--- a/docs/stable/cudnn_deterministic.html
+++ b/docs/stable/cudnn_deterministic.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
+ • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
@@ -323,9 +324,6 @@

diff --git a/docs/stable/cudnn_persistent_rnn.html b/docs/stable/cudnn_persistent_rnn.html
index c9a9920225fd..076be9570e64 100644
--- a/docs/stable/cudnn_persistent_rnn.html
+++ b/docs/stable/cudnn_persistent_rnn.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
+ • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
@@ -324,9 +325,6 @@

diff --git a/docs/stable/data.html b/docs/stable/data.html
index 57ab98b5e25e..c73cf081dd6f 100644
--- a/docs/stable/data.html
+++ b/docs/stable/data.html
@@ -177,6 +177,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
+ • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
@@ -1104,9 +1105,6 @@

Memory Pinning

diff --git a/docs/stable/distributed.html b/docs/stable/distributed.html
index 9c3bce6a8ff0..0256912eef6b 100644
--- a/docs/stable/distributed.html
+++ b/docs/stable/distributed.html
@@ -177,6 +177,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
+ • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
@@ -1319,9 +1320,6 @@

Spawn utility

diff --git a/docs/stable/distributions.html b/docs/stable/distributions.html
index 3d7e7e2e1a52..80cf754bc8c7 100644
--- a/docs/stable/distributions.html
+++ b/docs/stable/distributions.html
@@ -177,6 +177,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
+ • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
@@ -275,8 +276,12 @@
seen as the basis for policy gradient methods in reinforcement learning, and the
pathwise derivative estimator is commonly seen in the reparameterization trick in
variational autoencoders. Whilst the score function only requires the value of
samples \(f(x)\), the pathwise derivative requires the derivative \(f'(x)\). The
next sections discuss these two in a reinforcement learning example. For more
details see Gradient Estimation Using Stochastic Computation Graphs.

where \(\theta\) are the parameters, \(\alpha\) is the learning rate, \(r\) is the reward and \(p(a|\pi^\theta(s))\) is the probability of taking action \(a\) in state \(s\) given policy \(\pi^\theta\).

In practice we would sample an action from the output of a network, apply this action in an environment, and then use log_prob to construct an equivalent loss function. Note that we use a negative because optimizers use gradient descent, whilst the rule above assumes gradient ascent.
@@ -517,9 +537,18 @@
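A compact sketch of that loss construction (editor's illustration mirroring the upstream example; policy_network, state and reward are assumed to exist):

>>> probs = policy_network(state)
>>> m = torch.distributions.Categorical(probs)
>>> action = m.sample()
>>> loss = -m.log_prob(action) * reward
>>> loss.backward()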

ExponentialFamily is the abstract base class for probability distributions belonging to an exponential family, whose probability mass/density function has the form defined below

\[p_{F}(x; \theta) = \exp(\langle t(x), \theta\rangle - F(\theta) + k(x))\]

where \(\theta\) denotes the natural parameters, \(t(x)\) denotes the sufficient statistic, \(F(\theta)\) is the log normalizer function for a given family and \(k(x)\) is the carrier measure.

Samples are integers from \(\{0, \ldots, K-1\}\) where K is probs.size(-1).

    If probs is 1D with length-K, each element is the relative probability of sampling the class at that index.

If probs is 2D, it is treated as a batch of relative probability vectors.
@@ -1318,9 +1349,15 @@

Geometric (torch.distributions.distribution.Distribution)

Creates a Geometric distribution parameterized by probs, where probs is the probability of success of Bernoulli trials. It represents the probability that in \(k + 1\) Bernoulli trials, the first \(k\) trials failed, before seeing a success.

Samples are non-negative integers [0, \(\inf\)).

    Example:

    >>> m = Geometric(torch.tensor([0.3]))
     >>> m.sample()  # underlying Bernoulli has 30% chance 1; 70% chance 0
    @@ -2071,11 +2108,19 @@ 

MultivariateNormal: Creates a multivariate normal (also called Gaussian) distribution parameterized by a mean vector and a covariance matrix.

The multivariate normal distribution can be parameterized either in terms of a positive definite covariance matrix \(\mathbf{\Sigma}\) or a positive definite precision matrix \(\mathbf{\Sigma}^{-1}\) or a lower-triangular matrix \(\mathbf{L}\) with positive-valued diagonal entries, such that \(\mathbf{\Sigma} = \mathbf{L}\mathbf{L}^\top\). This triangular matrix can be obtained via e.g. Cholesky decomposition of the covariance.
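A minimal instantiation (editor's sketch; the identity covariance gives a standard bivariate normal, so sampled values will vary):

>>> m = torch.distributions.MultivariateNormal(torch.zeros(2), torch.eye(2))
>>> m.sample()
tensor([-0.2102, -0.5429])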

    Example

    Example:

    >>> m = Poisson(torch.tensor([4]))
     >>> m.sample()
     tensor([ 3.])
    @@ -3049,10 +3095,13 @@ 

    Weibull
    torch.distributions.kl.kl_divergence(p, q)[source]
Compute Kullback-Leibler divergence \(KL(p \| q)\) between two distributions.

\[KL(p \| q) = \int p(x) \log\frac {p(x)} {q(x)} \,dx\]

    Parameters
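As a usage aside (editor's sketch; the Normal/Normal pair has a registered analytic KL):

>>> p = torch.distributions.Normal(0.0, 1.0)
>>> q = torch.distributions.Normal(1.0, 2.0)
>>> torch.distributions.kl.kl_divergence(p, q)
tensor(0.4431)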
    class torch.distributions.transforms.PowerTransform(exponent, cache_size=0)[source]
Transform via the mapping \(y = x^{\text{exponent}}\).

    class torch.distributions.transforms.SigmoidTransform(cache_size=0)[source]
Transform via the mapping \(y = \frac{1}{1 + \exp(-x)}\) and \(x = \text{logit}(y)\).

    class torch.distributions.transforms.AbsTransform(cache_size=0)[source]
Transform via the mapping \(y = |x|\).

    class torch.distributions.transforms.AffineTransform(loc, scale, event_dim=0, cache_size=0)[source]
Transform via the pointwise affine mapping \(y = \text{loc} + \text{scale} \times x\).

    Parameters
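A quick check of the transform API (editor's sketch using AffineTransform; transforms are callable and expose an inverse via .inv):

>>> t = torch.distributions.transforms.AffineTransform(loc=2.0, scale=3.0)
>>> t(torch.tensor(1.0))
tensor(5.)
>>> t.inv(torch.tensor(5.0))
tensor(1.)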
    @@ -1658,6 +1659,8 @@

    F

  • Fold (class in torch.nn)
  • fold() (in module torch.nn.functional)
+ • fork_rng() (in module torch.random), [1]
  • forward() (torch.autograd.Function static method)
@@ -1795,6 +1798,8 @@

    G

  • get_rng_state_all() (in module torch.cuda)
@@ -2033,6 +2038,8 @@

    I

  • Also known as Glorot initialization.

    Parameters

    Also known as Glorot initialization.

    Parameters

    Also known as He initialization.

    Parameters

    Also known as He initialization.

    Parameters

where p, g, v and \(\rho\) denote the parameters, gradient, velocity, and momentum respectively.

This is in contrast to Sutskever et. al. and other frameworks which employ an update of the form

\[v = \rho * v + lr * g \\
p = p - v\]

The Nesterov version is analogously modified.
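To make the contrast concrete, a small numeric sketch (editor's illustration; the PyTorch convention, stated in the full SGD docs but elided above, is v = ρ*v + g followed by p = p − lr*v):

>>> rho, lr = 0.9, 0.1
>>> v, g = 1.0, 2.0                           # a non-zero velocity so the two forms differ
>>> v1 = rho * v + g;  p1 = 1.0 - lr * v1     # PyTorch-style: v1 = 2.9, p1 = 0.71
>>> v2 = rho * v + lr * g;  p2 = 1.0 - v2     # Sutskever-style: v2 = 1.1, p2 = -0.1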

    @@ -1019,14 +1024,19 @@

How to adjust Learning Rate

class torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max, eta_min=0, last_epoch=-1)[source]

Set the learning rate of each parameter group using a cosine annealing schedule, where \(\eta_{max}\) is set to the initial lr and \(T_{cur}\) is the number of epochs since the last restart in SGDR:

\[\eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})(1 +
\cos(\frac{T_{cur}}{T_{max}}\pi))\]

When last_epoch=-1, sets initial lr as lr.

    It has been proposed in SGDR: Stochastic Gradient Descent with Warm Restarts. Note that this only implements the cosine annealing part of SGDR, and not the restarts.
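A minimal usage sketch (editor's illustration; lr = 0.1 is annealed toward eta_min = 0 over T_max = 10 steps):

>>> model = torch.nn.Linear(4, 2)
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
>>> scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)
>>> for epoch in range(10):
...     optimizer.step()      # training step elided
...     scheduler.step()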

    @@ -1285,9 +1295,6 @@

How to adjust Learning Rate

diff --git a/docs/stable/py-modindex.html b/docs/stable/py-modindex.html
index 55a046675c06..048cef2af871 100644
--- a/docs/stable/py-modindex.html
+++ b/docs/stable/py-modindex.html
@@ -178,6 +178,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
+ • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
@@ -356,6 +357,11 @@

    Python Module Index

    torch.optim
+   torch.random
@@ -430,9 +436,6 @@

    Python Module Index

diff --git a/docs/stable/random.html b/docs/stable/random.html
new file mode 100644
index 000000000000..de0002fa2cb3
--- /dev/null
+++ b/docs/stable/random.html
@@ -0,0 +1,662 @@
+  torch.random — PyTorch master documentation
+  (new page's head, navigation, and sidebar markup omitted)

    torch.random

    +
    +
    +torch.random.fork_rng(devices=None, enabled=True, _caller='fork_rng', _devices_kw='devices')[source]
    +

Forks the RNG, so that when you return, the RNG is reset to the state that it was previously in.

    +
    +
    Parameters
    +
      +
  • devices (iterable of CUDA IDs) – CUDA devices for which to fork the RNG. CPU RNG state is always forked. By default, fork_rng() operates on all devices, but will emit a warning if your machine has a lot of devices, since this function will run very slowly in that case. If you explicitly specify devices, this warning will be suppressed.

  • enabled (bool) – if False, the RNG is not forked. This is a convenience argument for easily disabling the context manager without having to delete it and unindent your Python code under it.
    +
    +
    +
    + +
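A short usage sketch (editor's illustration): whatever happens to the RNG inside the block is undone on exit.

>>> torch.manual_seed(0)
>>> with torch.random.fork_rng():
...     torch.manual_seed(123)
...     inner = torch.randn(3)   # drawn from the seed-123 stream
>>> outer = torch.randn(3)       # continues the seed-0 stream as if the block never ran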
    +
    +torch.random.get_rng_state()[source]
    +

    Returns the random number generator state as a torch.ByteTensor.

    +
    + +
    +
    +torch.random.initial_seed()[source]
    +

Returns the initial seed for generating random numbers as a Python long.

    +
    + +
    +
    +torch.random.manual_seed(seed)[source]
    +

Sets the seed for generating random numbers. Returns a torch.Generator object.

    +
    +
    Parameters
    +

    seed (int) – The desired seed.

    +
    +
    +
    + +
    +
    +torch.random.seed()[source]
    +

Sets the seed for generating random numbers to a non-deterministic random number. Returns a 64 bit number used to seed the RNG.

    +
    + +
    +
    +torch.random.set_rng_state(new_state)[source]
    +

    Sets the random number generator state.

    +
    +
    Parameters
    +

    new_state (torch.ByteTensor) – The desired state

    +
    +
    +
    + +
    +

    Random Number Generator

    +
    +
    +torch.random.get_rng_state()[source]
    +

    Returns the random number generator state as a torch.ByteTensor.

    +
    + +
    +
    +torch.random.set_rng_state(new_state)[source]
    +

    Sets the random number generator state.

    +
    +
    Parameters
    +

    new_state (torch.ByteTensor) – The desired state

    +
    +
    +
    + +
    +
    +torch.random.manual_seed(seed)[source]
    +

Sets the seed for generating random numbers. Returns a torch.Generator object.

    +
    +
    Parameters
    +

    seed (int) – The desired seed.

    +
    +
    +
    + +
    +
    +torch.random.seed()[source]
    +

Sets the seed for generating random numbers to a non-deterministic random number. Returns a 64 bit number used to seed the RNG.

    +
    + +
    +
    +torch.random.initial_seed()[source]
    +

Returns the initial seed for generating random numbers as a Python long.

    +
    + +
    +
    +torch.random.fork_rng(devices=None, enabled=True, _caller='fork_rng', _devices_kw='devices')[source]
    +

Forks the RNG, so that when you return, the RNG is reset to the state that it was previously in.

    +
    +
    Parameters
    +
      +
  • devices (iterable of CUDA IDs) – CUDA devices for which to fork the RNG. CPU RNG state is always forked. By default, fork_rng() operates on all devices, but will emit a warning if your machine has a lot of devices, since this function will run very slowly in that case. If you explicitly specify devices, this warning will be suppressed.

  • enabled (bool) – if False, the RNG is not forked. This is a convenience argument for easily disabling the context manager without having to delete it and unindent your Python code under it.
    +
    +
    +
    + +
    +

\ No newline at end of file
diff --git a/docs/stable/search.html b/docs/stable/search.html
index 469850b702b2..5aab493a286d 100644
--- a/docs/stable/search.html
+++ b/docs/stable/search.html
@@ -175,6 +175,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
+ • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
@@ -322,9 +323,6 @@

diff --git a/docs/stable/searchindex.js b/docs/stable/searchindex.js
index b0b1a585151a..8cb786844bb2 100644
--- a/docs/stable/searchindex.js
+++ b/docs/stable/searchindex.js
@@ -1 +1 @@
-Search.setIndex({...})   (regenerated minified Sphinx search index; the single-line contents are truncated in the source and omitted here)
.distributions.half_normal":{HalfNormal:[15,1,1,""]},"torch.distributions.half_normal.HalfNormal":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],scale:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.independent":{Independent:[15,1,1,""]},"torch.distributions.independent.Independent":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,2,1,""],has_rsample:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],sample:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.kl":{kl_divergence:[15,4,1,""],register_kl:[15,4,1,""]},"torch.distributions.laplace":{Laplace:[15,1,1,""]},"torch.distributions.laplace.Laplace":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],stddev:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.log_normal":{LogNormal:[15,1,1,""]},"torch.distributions.log_normal.LogNormal":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],loc:[15,2,1,""],mean:[15,2,1,""],scale:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.lowrank_multivariate_normal":{LowRankMultivariateNormal:[15,1,1,""]},"torch.distributions.lowrank_multivariate_normal.LowRankMultivariateNormal":{arg_constraints:[15,3,1,""],covariance_matrix:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],precision_matrix:[15,3,1,""],rsample:[15,2,1,""],scale_tril:[15,3,1,""],support:[15,3,1,""],variance:[15,3,1,""]},"torch.distributions.multinomial":{Multinomial:[15,1,1,""]},"torch.distributions.multinomial.Multinomial":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],logits:[15,2,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,2,1,""],sample:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.multivariate_normal":{MultivariateNormal:[15,1,1,""]},"torch.distributions.multivariate_normal.MultivariateNormal":{arg_constraints:[15,3,1,""],covariance_matrix:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],precision_matrix:[15,3,1,""],rsample:[15,2,1,""],scale_tril:[15,3,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.negative_binomial":{NegativeBinomial:[15,1,1,""]},"torch.distributions.negative_binomial.NegativeBinomial":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.normal":{Normal:[15,1,1,""]},"torch.distributions.normal.Normal":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],sample:[15,2,1,""],stddev:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.one_hot_categorical":{OneHotCategorical:[15,1,1,""]},"torch.distributions.one_hot_categorical.OneHotCategorical":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,3,1,""],log_prob:[15,2,1,""],logits:[15,2,1,""],me
an:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,2,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.pareto":{Pareto:[15,1,1,""]},"torch.distributions.pareto.Pareto":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],mean:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.poisson":{Poisson:[15,1,1,""]},"torch.distributions.poisson.Poisson":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.relaxed_bernoulli":{LogitRelaxedBernoulli:[15,1,1,""],RelaxedBernoulli:[15,1,1,""]},"torch.distributions.relaxed_bernoulli.LogitRelaxedBernoulli":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],rsample:[15,2,1,""],support:[15,3,1,""]},"torch.distributions.relaxed_bernoulli.RelaxedBernoulli":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],logits:[15,2,1,""],probs:[15,2,1,""],support:[15,3,1,""],temperature:[15,2,1,""]},"torch.distributions.relaxed_categorical":{RelaxedOneHotCategorical:[15,1,1,""]},"torch.distributions.relaxed_categorical.RelaxedOneHotCategorical":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],logits:[15,2,1,""],probs:[15,2,1,""],support:[15,3,1,""],temperature:[15,2,1,""]},"torch.distributions.studentT":{StudentT:[15,1,1,""]},"torch.distributions.studentT.StudentT":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.transformed_distribution":{TransformedDistribution:[15,1,1,""]},"torch.distributions.transformed_distribution.TransformedDistribution":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,2,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],rsample:[15,2,1,""],sample:[15,2,1,""],support:[15,2,1,""]},"torch.distributions.transforms":{AbsTransform:[15,1,1,""],AffineTransform:[15,1,1,""],CatTransform:[15,1,1,""],ComposeTransform:[15,1,1,""],ExpTransform:[15,1,1,""],LowerCholeskyTransform:[15,1,1,""],PowerTransform:[15,1,1,""],SigmoidTransform:[15,1,1,""],SoftmaxTransform:[15,1,1,""],StackTransform:[15,1,1,""],StickBreakingTransform:[15,1,1,""],Transform:[15,1,1,""]},"torch.distributions.transforms.Transform":{inv:[15,2,1,""],log_abs_det_jacobian:[15,2,1,""],sign:[15,2,1,""]},"torch.distributions.uniform":{Uniform:[15,1,1,""]},"torch.distributions.uniform.Uniform":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],stddev:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.weibull":{Weibull:[15,1,1,""]},"torch.distributions.weibull.Weibull":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],mean:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.hub":{help:[17,4,1,""],list:[17,4,1,""],load:[17,4,1,""],set_dir:[17,4,1,""]},"torch.jit":{ScriptModule:[19,1,1,""],load:[19,4,1,""],save:[19,4,1,""],script:[19,4,1,""],trace:[19,4,1,""]},"torch.multiprocessing":{SpawnContext:[21,1,1,""],get_all_sharing_strategies:[21,4,1,""],get_sharing_strategy:[21,4,1,""],set_sharing_strategy:[21,4,1,""],spawn:[21,4,1,""]},"torch.multiprocessing.SpawnContext":{join:[21,2,1,""]},"torch.nn":{AdaptiveAvgPool1d:[22,1,1,""],Adaptive
AvgPool2d:[22,1,1,""],AdaptiveAvgPool3d:[22,1,1,""],AdaptiveLogSoftmaxWithLoss:[22,1,1,""],AdaptiveMaxPool1d:[22,1,1,""],AdaptiveMaxPool2d:[22,1,1,""],AdaptiveMaxPool3d:[22,1,1,""],AlphaDropout:[22,1,1,""],AvgPool1d:[22,1,1,""],AvgPool2d:[22,1,1,""],AvgPool3d:[22,1,1,""],BCELoss:[22,1,1,""],BCEWithLogitsLoss:[22,1,1,""],BatchNorm1d:[22,1,1,""],BatchNorm2d:[22,1,1,""],BatchNorm3d:[22,1,1,""],Bilinear:[22,1,1,""],CELU:[22,1,1,""],CTCLoss:[22,1,1,""],ConstantPad1d:[22,1,1,""],ConstantPad2d:[22,1,1,""],ConstantPad3d:[22,1,1,""],Conv1d:[22,1,1,""],Conv2d:[22,1,1,""],Conv3d:[22,1,1,""],ConvTranspose1d:[22,1,1,""],ConvTranspose2d:[22,1,1,""],ConvTranspose3d:[22,1,1,""],CosineEmbeddingLoss:[22,1,1,""],CosineSimilarity:[22,1,1,""],CrossEntropyLoss:[22,1,1,""],DataParallel:[22,1,1,""],Dropout2d:[22,1,1,""],Dropout3d:[22,1,1,""],Dropout:[22,1,1,""],ELU:[22,1,1,""],Embedding:[22,1,1,""],EmbeddingBag:[22,1,1,""],Fold:[22,1,1,""],FractionalMaxPool2d:[22,1,1,""],GRU:[22,1,1,""],GRUCell:[22,1,1,""],GroupNorm:[22,1,1,""],Hardshrink:[22,1,1,""],Hardtanh:[22,1,1,""],HingeEmbeddingLoss:[22,1,1,""],Identity:[22,1,1,""],InstanceNorm1d:[22,1,1,""],InstanceNorm2d:[22,1,1,""],InstanceNorm3d:[22,1,1,""],KLDivLoss:[22,1,1,""],L1Loss:[22,1,1,""],LPPool1d:[22,1,1,""],LPPool2d:[22,1,1,""],LSTM:[22,1,1,""],LSTMCell:[22,1,1,""],LayerNorm:[22,1,1,""],LeakyReLU:[22,1,1,""],Linear:[22,1,1,""],LocalResponseNorm:[22,1,1,""],LogSigmoid:[22,1,1,""],LogSoftmax:[22,1,1,""],MSELoss:[22,1,1,""],MarginRankingLoss:[22,1,1,""],MaxPool1d:[22,1,1,""],MaxPool2d:[22,1,1,""],MaxPool3d:[22,1,1,""],MaxUnpool1d:[22,1,1,""],MaxUnpool2d:[22,1,1,""],MaxUnpool3d:[22,1,1,""],Module:[22,1,1,""],ModuleDict:[22,1,1,""],ModuleList:[22,1,1,""],MultiLabelMarginLoss:[22,1,1,""],MultiLabelSoftMarginLoss:[22,1,1,""],MultiMarginLoss:[22,1,1,""],MultiheadAttention:[22,1,1,""],NLLLoss:[22,1,1,""],PReLU:[22,1,1,""],PairwiseDistance:[22,1,1,""],Parameter:[22,1,1,""],ParameterDict:[22,1,1,""],ParameterList:[22,1,1,""],PixelShuffle:[22,1,1,""],PoissonNLLLoss:[22,1,1,""],RNN:[22,1,1,""],RNNCell:[22,1,1,""],RReLU:[22,1,1,""],ReLU6:[22,1,1,""],ReLU:[22,1,1,""],ReflectionPad1d:[22,1,1,""],ReflectionPad2d:[22,1,1,""],ReplicationPad1d:[22,1,1,""],ReplicationPad2d:[22,1,1,""],ReplicationPad3d:[22,1,1,""],SELU:[22,1,1,""],Sequential:[22,1,1,""],Sigmoid:[22,1,1,""],SmoothL1Loss:[22,1,1,""],SoftMarginLoss:[22,1,1,""],Softmax2d:[22,1,1,""],Softmax:[22,1,1,""],Softmin:[22,1,1,""],Softplus:[22,1,1,""],Softshrink:[22,1,1,""],Softsign:[22,1,1,""],SyncBatchNorm:[22,1,1,""],Tanh:[22,1,1,""],Tanhshrink:[22,1,1,""],Threshold:[22,1,1,""],Transformer:[22,1,1,""],TransformerDecoder:[22,1,1,""],TransformerDecoderLayer:[22,1,1,""],TransformerEncoder:[22,1,1,""],TransformerEncoderLayer:[22,1,1,""],TripletMarginLoss:[22,1,1,""],Unfold:[22,1,1,""],Upsample:[22,1,1,""],UpsamplingBilinear2d:[22,1,1,""],UpsamplingNearest2d:[22,1,1,""],ZeroPad2d:[22,1,1,""]},"torch.nn.AdaptiveLogSoftmaxWithLoss":{log_prob:[22,2,1,""],predict:[22,2,1,""]},"torch.nn.Embedding":{from_pretrained:[22,2,1,""]},"torch.nn.EmbeddingBag":{from_pretrained:[22,2,1,""]},"torch.nn.Module":{"double":[22,2,1,""],"float":[22,2,1,""],add_module:[22,2,1,""],apply:[22,2,1,""],buffers:[22,2,1,""],children:[22,2,1,""],cpu:[22,2,1,""],cuda:[22,2,1,""],dump_patches:[22,3,1,""],eval:[22,2,1,""],extra_repr:[22,2,1,""],forward:[22,2,1,""],half:[22,2,1,""],load_state_dict:[22,2,1,""],modules:[22,2,1,""],named_buffers:[22,2,1,""],named_children:[22,2,1,""],named_modules:[22,2,1,""],named_parameters:[22,2,1,""],parameters:[22,2,1,""],regist
er_backward_hook:[22,2,1,""],register_buffer:[22,2,1,""],register_forward_hook:[22,2,1,""],register_forward_pre_hook:[22,2,1,""],register_parameter:[22,2,1,""],requires_grad_:[22,2,1,""],state_dict:[22,2,1,""],to:[22,2,1,""],train:[22,2,1,""],type:[22,2,1,""],zero_grad:[22,2,1,""]},"torch.nn.ModuleDict":{clear:[22,2,1,""],items:[22,2,1,""],keys:[22,2,1,""],pop:[22,2,1,""],update:[22,2,1,""],values:[22,2,1,""]},"torch.nn.ModuleList":{append:[22,2,1,""],extend:[22,2,1,""],insert:[22,2,1,""]},"torch.nn.MultiheadAttention":{forward:[22,2,1,""]},"torch.nn.ParameterDict":{clear:[22,2,1,""],items:[22,2,1,""],keys:[22,2,1,""],pop:[22,2,1,""],update:[22,2,1,""],values:[22,2,1,""]},"torch.nn.ParameterList":{append:[22,2,1,""],extend:[22,2,1,""]},"torch.nn.SyncBatchNorm":{convert_sync_batchnorm:[22,2,1,""]},"torch.nn.Transformer":{forward:[22,2,1,""],generate_square_subsequent_mask:[22,2,1,""]},"torch.nn.TransformerDecoder":{forward:[22,2,1,""]},"torch.nn.TransformerDecoderLayer":{forward:[22,2,1,""]},"torch.nn.TransformerEncoder":{forward:[22,2,1,""]},"torch.nn.TransformerEncoderLayer":{forward:[22,2,1,""]},"torch.nn.functional":{adaptive_avg_pool1d:[23,4,1,""],adaptive_avg_pool2d:[23,4,1,""],adaptive_avg_pool3d:[23,4,1,""],adaptive_max_pool1d:[23,4,1,""],adaptive_max_pool2d:[23,4,1,""],adaptive_max_pool3d:[23,4,1,""],affine_grid:[23,4,1,""],alpha_dropout:[23,4,1,""],avg_pool1d:[23,4,1,""],avg_pool2d:[23,4,1,""],avg_pool3d:[23,4,1,""],batch_norm:[23,4,1,""],bilinear:[23,4,1,""],binary_cross_entropy:[23,4,1,""],binary_cross_entropy_with_logits:[23,4,1,""],celu:[23,4,1,""],conv1d:[23,4,1,""],conv2d:[23,4,1,""],conv3d:[23,4,1,""],conv_transpose1d:[23,4,1,""],conv_transpose2d:[23,4,1,""],conv_transpose3d:[23,4,1,""],cosine_embedding_loss:[23,4,1,""],cosine_similarity:[23,4,1,""],cross_entropy:[23,4,1,""],ctc_loss:[23,4,1,""],dropout2d:[23,4,1,""],dropout3d:[23,4,1,""],dropout:[23,4,1,""],elu:[23,4,1,""],elu_:[23,4,1,""],embedding:[23,4,1,""],embedding_bag:[23,4,1,""],fold:[23,4,1,""],gelu:[23,4,1,""],glu:[23,4,1,""],grid_sample:[23,4,1,""],gumbel_softmax:[23,4,1,""],hardshrink:[23,4,1,""],hardtanh:[23,4,1,""],hardtanh_:[23,4,1,""],hinge_embedding_loss:[23,4,1,""],instance_norm:[23,4,1,""],interpolate:[23,4,1,""],kl_div:[23,4,1,""],l1_loss:[23,4,1,""],layer_norm:[23,4,1,""],leaky_relu:[23,4,1,""],leaky_relu_:[23,4,1,""],linear:[23,4,1,""],local_response_norm:[23,4,1,""],log_softmax:[23,4,1,""],logsigmoid:[23,4,1,""],lp_pool1d:[23,4,1,""],lp_pool2d:[23,4,1,""],margin_ranking_loss:[23,4,1,""],max_pool1d:[23,4,1,""],max_pool2d:[23,4,1,""],max_pool3d:[23,4,1,""],max_unpool1d:[23,4,1,""],max_unpool2d:[23,4,1,""],max_unpool3d:[23,4,1,""],mse_loss:[23,4,1,""],multi_margin_loss:[23,4,1,""],multilabel_margin_loss:[23,4,1,""],multilabel_soft_margin_loss:[23,4,1,""],nll_loss:[23,4,1,""],normalize:[23,4,1,""],one_hot:[23,4,1,""],pad:[23,4,1,""],pairwise_distance:[23,4,1,""],pdist:[23,4,1,""],pixel_shuffle:[23,4,1,""],poisson_nll_loss:[23,4,1,""],prelu:[23,4,1,""],relu6:[23,4,1,""],relu:[23,4,1,""],relu_:[23,4,1,""],rrelu:[23,4,1,""],rrelu_:[23,4,1,""],selu:[23,4,1,""],sigmoid:[23,4,1,""],smooth_l1_loss:[23,4,1,""],soft_margin_loss:[23,4,1,""],softmax:[23,4,1,""],softmin:[23,4,1,""],softplus:[23,4,1,""],softshrink:[23,4,1,""],softsign:[23,4,1,""],tanh:[23,4,1,""],tanhshrink:[23,4,1,""],threshold:[23,4,1,""],threshold_:[23,4,1,""],triplet_margin_loss:[23,4,1,""],unfold:[23,4,1,""],upsample:[23,4,1,""],upsample_bilinear:[23,4,1,""],upsample_nearest:[23,4,1,""]},"torch.nn.init":{calculate_gain:[24,4,1,""],constant_:[24,4
,1,""],dirac_:[24,4,1,""],eye_:[24,4,1,""],kaiming_normal_:[24,4,1,""],kaiming_uniform_:[24,4,1,""],normal_:[24,4,1,""],ones_:[24,4,1,""],orthogonal_:[24,4,1,""],sparse_:[24,4,1,""],uniform_:[24,4,1,""],xavier_normal_:[24,4,1,""],xavier_uniform_:[24,4,1,""],zeros_:[24,4,1,""]},"torch.nn.parallel":{DistributedDataParallel:[22,1,1,""],data_parallel:[23,4,1,""]},"torch.nn.parallel.DistributedDataParallel":{no_sync:[22,2,1,""]},"torch.nn.utils":{clip_grad_norm_:[22,4,1,""],clip_grad_value_:[22,4,1,""],parameters_to_vector:[22,4,1,""],remove_spectral_norm:[22,4,1,""],remove_weight_norm:[22,4,1,""],spectral_norm:[22,4,1,""],vector_to_parameters:[22,4,1,""],weight_norm:[22,4,1,""]},"torch.nn.utils.rnn":{PackedSequence:[22,4,1,""],pack_padded_sequence:[22,4,1,""],pack_sequence:[22,4,1,""],pad_packed_sequence:[22,4,1,""],pad_sequence:[22,4,1,""]},"torch.onnx":{"export":[36,4,1,""],is_in_onnx_export:[36,4,1,""],register_custom_op_symbolic:[36,4,1,""],set_training:[36,4,1,""]},"torch.onnx.operators":{shape_as_tensor:[36,4,1,""]},"torch.optim":{ASGD:[37,1,1,""],Adadelta:[37,1,1,""],Adagrad:[37,1,1,""],Adam:[37,1,1,""],AdamW:[37,1,1,""],Adamax:[37,1,1,""],LBFGS:[37,1,1,""],Optimizer:[37,1,1,""],RMSprop:[37,1,1,""],Rprop:[37,1,1,""],SGD:[37,1,1,""],SparseAdam:[37,1,1,""]},"torch.optim.ASGD":{step:[37,2,1,""]},"torch.optim.Adadelta":{step:[37,2,1,""]},"torch.optim.Adagrad":{step:[37,2,1,""]},"torch.optim.Adam":{step:[37,2,1,""]},"torch.optim.AdamW":{step:[37,2,1,""]},"torch.optim.Adamax":{step:[37,2,1,""]},"torch.optim.LBFGS":{step:[37,2,1,""]},"torch.optim.Optimizer":{add_param_group:[37,2,1,""],load_state_dict:[37,2,1,""],state_dict:[37,2,1,""],step:[37,2,1,""],zero_grad:[37,2,1,""]},"torch.optim.RMSprop":{step:[37,2,1,""]},"torch.optim.Rprop":{step:[37,2,1,""]},"torch.optim.SGD":{step:[37,2,1,""]},"torch.optim.SparseAdam":{step:[37,2,1,""]},"torch.optim.lr_scheduler":{CosineAnnealingLR:[37,1,1,""],CyclicLR:[37,1,1,""],ExponentialLR:[37,1,1,""],LambdaLR:[37,1,1,""],MultiStepLR:[37,1,1,""],ReduceLROnPlateau:[37,1,1,""],StepLR:[37,1,1,""]},"torch.optim.lr_scheduler.CyclicLR":{get_lr:[37,2,1,""]},"torch.optim.lr_scheduler.LambdaLR":{load_state_dict:[37,2,1,""],state_dict:[37,2,1,""]},"torch.quasirandom":{SobolEngine:[43,1,1,""]},"torch.quasirandom.SobolEngine":{draw:[43,2,1,""],fast_forward:[43,2,1,""],reset:[43,2,1,""]},"torch.sparse":{FloatTensor:[38,1,1,""],addmm:[38,4,1,""],mm:[38,4,1,""],sum:[38,4,1,""]},"torch.sparse.FloatTensor":{_indices:[38,2,1,""],_nnz:[38,2,1,""],_values:[38,2,1,""],add:[38,2,1,""],add_:[38,2,1,""],clone:[38,2,1,""],coalesce:[38,2,1,""],dim:[38,2,1,""],div:[38,2,1,""],div_:[38,2,1,""],get_device:[38,2,1,""],hspmm:[38,2,1,""],is_coalesced:[38,2,1,""],mm:[38,2,1,""],mul:[38,2,1,""],mul_:[38,2,1,""],narrow_copy:[38,2,1,""],resizeAs_:[38,2,1,""],size:[38,2,1,""],spadd:[38,2,1,""],spmm:[38,2,1,""],sspaddmm:[38,2,1,""],sspmm:[38,2,1,""],sub:[38,2,1,""],sub_:[38,2,1,""],t_:[38,2,1,""],toDense:[38,2,1,""],transpose:[38,2,1,""],transpose_:[38,2,1,""],zero_:[38,2,1,""]},"torch.torch":{default_generator:[43,3,1,""],device:[40,1,1,""],dtype:[40,1,1,""],finfo:[51,1,1,""],iinfo:[51,1,1,""],layout:[40,1,1,""]},"torch.utils":{data:[13,0,0,"-"],model_zoo:[20,0,0,"-"]},"torch.utils.checkpoint":{checkpoint:[3,4,1,""],checkpoint_sequential:[3,4,1,""]},"torch.utils.cpp_extension":{BuildExtension:[7,4,1,""],CUDAExtension:[7,4,1,""],CppExtension:[7,4,1,""],check_compiler_abi_compatibility:[7,4,1,""],include_paths:[7,4,1,""],load:[7,4,1,""],load_inline:[7,4,1,""],verify_ninja_availability:[7,4,1,""]},"
torch.utils.data":{BatchSampler:[13,1,1,""],ChainDataset:[13,1,1,""],ConcatDataset:[13,1,1,""],DataLoader:[13,1,1,""],Dataset:[13,1,1,""],IterableDataset:[13,1,1,""],RandomSampler:[13,1,1,""],Sampler:[13,1,1,""],SequentialSampler:[13,1,1,""],Subset:[13,1,1,""],SubsetRandomSampler:[13,1,1,""],TensorDataset:[13,1,1,""],WeightedRandomSampler:[13,1,1,""],get_worker_info:[13,4,1,""],random_split:[13,4,1,""]},"torch.utils.data.distributed":{DistributedSampler:[13,1,1,""]},"torch.utils.dlpack":{from_dlpack:[16,4,1,""],to_dlpack:[16,4,1,""]},"torch.utils.model_zoo":{load_url:[20,4,1,""]},"torch.utils.tensorboard.writer":{SummaryWriter:[41,1,1,""]},"torch.utils.tensorboard.writer.SummaryWriter":{__init__:[41,2,1,""],add_audio:[41,2,1,""],add_custom_scalars:[41,2,1,""],add_embedding:[41,2,1,""],add_figure:[41,2,1,""],add_graph:[41,2,1,""],add_histogram:[41,2,1,""],add_image:[41,2,1,""],add_images:[41,2,1,""],add_mesh:[41,2,1,""],add_pr_curve:[41,2,1,""],add_scalar:[41,2,1,""],add_scalars:[41,2,1,""],add_text:[41,2,1,""],add_video:[41,2,1,""],close:[41,2,1,""],flush:[41,2,1,""]},"torchvision.datasets":{CIFAR100:[44,1,1,""],CIFAR10:[44,1,1,""],Cityscapes:[44,1,1,""],CocoCaptions:[44,1,1,""],CocoDetection:[44,1,1,""],DatasetFolder:[44,1,1,""],EMNIST:[44,1,1,""],FakeData:[44,1,1,""],FashionMNIST:[44,1,1,""],Flickr30k:[44,1,1,""],Flickr8k:[44,1,1,""],HMDB51:[44,1,1,""],ImageFolder:[44,1,1,""],ImageNet:[44,1,1,""],KMNIST:[44,1,1,""],Kinetics400:[44,1,1,""],LSUN:[44,1,1,""],MNIST:[44,1,1,""],PhotoTour:[44,1,1,""],QMNIST:[44,1,1,""],SBDataset:[44,1,1,""],SBU:[44,1,1,""],STL10:[44,1,1,""],SVHN:[44,1,1,""],UCF101:[44,1,1,""],USPS:[44,1,1,""],VOCDetection:[44,1,1,""],VOCSegmentation:[44,1,1,""]},"torchvision.datasets.CIFAR10":{__getitem__:[44,2,1,""]},"torchvision.datasets.Cityscapes":{__getitem__:[44,2,1,""]},"torchvision.datasets.CocoCaptions":{__getitem__:[44,2,1,""]},"torchvision.datasets.CocoDetection":{__getitem__:[44,2,1,""]},"torchvision.datasets.DatasetFolder":{__getitem__:[44,2,1,""]},"torchvision.datasets.Flickr30k":{__getitem__:[44,2,1,""]},"torchvision.datasets.Flickr8k":{__getitem__:[44,2,1,""]},"torchvision.datasets.ImageFolder":{__getitem__:[44,2,1,""]},"torchvision.datasets.LSUN":{__getitem__:[44,2,1,""]},"torchvision.datasets.PhotoTour":{__getitem__:[44,2,1,""]},"torchvision.datasets.SBU":{__getitem__:[44,2,1,""]},"torchvision.datasets.STL10":{__getitem__:[44,2,1,""]},"torchvision.datasets.SVHN":{__getitem__:[44,2,1,""]},"torchvision.datasets.USPS":{__getitem__:[44,2,1,""]},"torchvision.datasets.VOCDetection":{__getitem__:[44,2,1,""]},"torchvision.datasets.VOCSegmentation":{__getitem__:[44,2,1,""]},"torchvision.io":{read_video:[46,4,1,""],read_video_timestamps:[46,4,1,""],write_video:[46,4,1,""]},"torchvision.models":{alexnet:[47,4,1,""],densenet121:[47,4,1,""],densenet161:[47,4,1,""],densenet169:[47,4,1,""],densenet201:[47,4,1,""],googlenet:[47,4,1,""],inception_v3:[47,4,1,""],mnasnet0_5:[47,4,1,""],mnasnet0_75:[47,4,1,""],mnasnet1_0:[47,4,1,""],mnasnet1_3:[47,4,1,""],mobilenet_v2:[47,4,1,""],resnet101:[47,4,1,""],resnet152:[47,4,1,""],resnet18:[47,4,1,""],resnet34:[47,4,1,""],resnet50:[47,4,1,""],resnext101_32x8d:[47,4,1,""],resnext50_32x4d:[47,4,1,""],shufflenet_v2_x0_5:[47,4,1,""],shufflenet_v2_x1_0:[47,4,1,""],shufflenet_v2_x1_5:[47,4,1,""],shufflenet_v2_x2_0:[47,4,1,""],squeezenet1_0:[47,4,1,""],squeezenet1_1:[47,4,1,""],vgg11:[47,4,1,""],vgg11_bn:[47,4,1,""],vgg13:[47,4,1,""],vgg13_bn:[47,4,1,""],vgg16:[47,4,1,""],vgg16_bn:[47,4,1,""],vgg19:[47,4,1,""],vgg19_bn:[47,4,1,""],wide_resnet10
1_2:[47,4,1,""],wide_resnet50_2:[47,4,1,""]},"torchvision.models.detection":{fasterrcnn_resnet50_fpn:[47,4,1,""],keypointrcnn_resnet50_fpn:[47,4,1,""],maskrcnn_resnet50_fpn:[47,4,1,""]},"torchvision.models.segmentation":{deeplabv3_resnet101:[47,4,1,""],deeplabv3_resnet50:[47,4,1,""],fcn_resnet101:[47,4,1,""],fcn_resnet50:[47,4,1,""]},"torchvision.ops":{RoIAlign:[48,1,1,""],RoIPool:[48,1,1,""],nms:[48,4,1,""],roi_align:[48,4,1,""],roi_pool:[48,4,1,""]},"torchvision.transforms":{CenterCrop:[49,1,1,""],ColorJitter:[49,1,1,""],Compose:[49,1,1,""],FiveCrop:[49,1,1,""],Grayscale:[49,1,1,""],Lambda:[49,1,1,""],LinearTransformation:[49,1,1,""],Normalize:[49,1,1,""],Pad:[49,1,1,""],RandomAffine:[49,1,1,""],RandomApply:[49,1,1,""],RandomChoice:[49,1,1,""],RandomCrop:[49,1,1,""],RandomErasing:[49,1,1,""],RandomGrayscale:[49,1,1,""],RandomHorizontalFlip:[49,1,1,""],RandomOrder:[49,1,1,""],RandomPerspective:[49,1,1,""],RandomResizedCrop:[49,1,1,""],RandomRotation:[49,1,1,""],RandomSizedCrop:[49,1,1,""],RandomVerticalFlip:[49,1,1,""],Resize:[49,1,1,""],Scale:[49,1,1,""],TenCrop:[49,1,1,""],ToPILImage:[49,1,1,""],ToTensor:[49,1,1,""],functional:[49,0,0,"-"]},"torchvision.transforms.Normalize":{__call__:[49,2,1,""]},"torchvision.transforms.ToPILImage":{__call__:[49,2,1,""]},"torchvision.transforms.ToTensor":{__call__:[49,2,1,""]},"torchvision.transforms.functional":{adjust_brightness:[49,4,1,""],adjust_contrast:[49,4,1,""],adjust_gamma:[49,4,1,""],adjust_hue:[49,4,1,""],adjust_saturation:[49,4,1,""],affine:[49,4,1,""],crop:[49,4,1,""],erase:[49,4,1,""],five_crop:[49,4,1,""],hflip:[49,4,1,""],normalize:[49,4,1,""],pad:[49,4,1,""],perspective:[49,4,1,""],resize:[49,4,1,""],resized_crop:[49,4,1,""],rotate:[49,4,1,""],ten_crop:[49,4,1,""],to_grayscale:[49,4,1,""],to_pil_image:[49,4,1,""],to_tensor:[49,4,1,""],vflip:[49,4,1,""]},"torchvision.utils":{make_grid:[50,4,1,""],save_image:[50,4,1,""]},torch:{"var":[43,4,1,""],ByteTensor:[42,1,1,""],FloatStorage:[39,1,1,""],Tensor:[42,1,1,""],__config__:[0,0,0,"-"],abs:[43,4,1,""],acos:[43,4,1,""],add:[43,4,1,""],addbmm:[43,4,1,""],addcdiv:[43,4,1,""],addcmul:[43,4,1,""],addmm:[43,4,1,""],addmv:[43,4,1,""],addr:[43,4,1,""],allclose:[43,4,1,""],arange:[43,4,1,""],argmax:[43,4,1,""],argmin:[43,4,1,""],argsort:[43,4,1,""],as_strided:[43,4,1,""],as_tensor:[43,4,1,""],asin:[43,4,1,""],atan2:[43,4,1,""],atan:[43,4,1,""],autograd:[1,0,0,"-"],baddbmm:[43,4,1,""],bartlett_window:[43,4,1,""],bernoulli:[43,4,1,""],bincount:[43,4,1,""],bitwise_not:[43,4,1,""],blackman_window:[43,4,1,""],bmm:[43,4,1,""],broadcast_tensors:[43,4,1,""],cartesian_prod:[43,4,1,""],cat:[43,4,1,""],ceil:[43,4,1,""],chain_matmul:[43,4,1,""],cholesky:[43,4,1,""],cholesky_inverse:[43,4,1,""],cholesky_solve:[43,4,1,""],chunk:[43,4,1,""],clamp:[43,4,1,""],combinations:[43,4,1,""],compiled_with_cxx11_abi:[43,4,1,""],cos:[43,4,1,""],cosh:[43,4,1,""],cross:[43,4,1,""],cuda:[8,0,0,"-"],cumprod:[43,4,1,""],cumsum:[43,4,1,""],det:[43,4,1,""],diag:[43,4,1,""],diag_embed:[43,4,1,""],diagflat:[43,4,1,""],diagonal:[43,4,1,""],digamma:[43,4,1,""],dist:[43,4,1,""],distributed:[14,0,0,"-"],distributions:[15,0,0,"-"],div:[43,4,1,""],dot:[43,4,1,""],eig:[43,4,1,""],einsum:[43,4,1,""],empty:[43,4,1,""],empty_like:[43,4,1,""],empty_strided:[43,4,1,""],eq:[43,4,1,""],equal:[43,4,1,""],erf:[43,4,1,""],erfc:[43,4,1,""],erfinv:[43,4,1,""],exp:[43,4,1,""],expm1:[43,4,1,""],eye:[43,4,1,""],fft:[43,4,1,""],flatten:[43,4,1,""],flip:[43,4,1,""],floor:[43,4,1,""],fmod:[43,4,1,""],frac:[43,4,1,""],from_numpy:[43,4,1,""],full:[43,4,1,""],
full_like:[43,4,1,""],gather:[43,4,1,""],ge:[43,4,1,""],gels:[43,4,1,""],geqrf:[43,4,1,""],ger:[43,4,1,""],get_default_dtype:[43,4,1,""],get_num_interop_threads:[43,4,1,""],get_num_threads:[43,4,1,""],get_rng_state:[43,4,1,""],gt:[43,4,1,""],hamming_window:[43,4,1,""],hann_window:[43,4,1,""],histc:[43,4,1,""],hub:[17,0,0,"-"],ifft:[43,4,1,""],index_select:[43,4,1,""],initial_seed:[43,4,1,""],inverse:[43,4,1,""],irfft:[43,4,1,""],is_floating_point:[43,4,1,""],is_storage:[43,4,1,""],is_tensor:[43,4,1,""],isfinite:[43,4,1,""],isinf:[43,4,1,""],isnan:[43,4,1,""],jit:[19,0,0,"-"],kthvalue:[43,4,1,""],le:[43,4,1,""],lerp:[43,4,1,""],linspace:[43,4,1,""],load:[43,4,1,""],log10:[43,4,1,""],log1p:[43,4,1,""],log2:[43,4,1,""],log:[43,4,1,""],logdet:[43,4,1,""],logspace:[43,4,1,""],logsumexp:[43,4,1,""],lstsq:[43,4,1,""],lt:[43,4,1,""],lu:[43,4,1,""],lu_solve:[43,4,1,""],lu_unpack:[43,4,1,""],manual_seed:[43,4,1,""],masked_select:[43,4,1,""],matmul:[43,4,1,""],matrix_power:[43,4,1,""],matrix_rank:[43,4,1,""],max:[43,4,1,""],mean:[43,4,1,""],median:[43,4,1,""],meshgrid:[43,4,1,""],min:[43,4,1,""],mm:[43,4,1,""],mode:[43,4,1,""],mul:[43,4,1,""],multinomial:[43,4,1,""],multiprocessing:[21,0,0,"-"],mv:[43,4,1,""],mvlgamma:[43,4,1,""],narrow:[43,4,1,""],ne:[43,4,1,""],neg:[43,4,1,""],nn:[22,0,0,"-"],nonzero:[43,4,1,""],norm:[43,4,1,""],normal:[43,4,1,""],numel:[43,4,1,""],ones:[43,4,1,""],ones_like:[43,4,1,""],onnx:[36,0,0,"-"],optim:[37,0,0,"-"],orgqr:[43,4,1,""],ormqr:[43,4,1,""],pinverse:[43,4,1,""],pow:[43,4,1,""],prod:[43,4,1,""],qr:[43,4,1,""],rand:[43,4,1,""],rand_like:[43,4,1,""],randint:[43,4,1,""],randint_like:[43,4,1,""],randn:[43,4,1,""],randn_like:[43,4,1,""],randperm:[43,4,1,""],range:[43,4,1,""],reciprocal:[43,4,1,""],remainder:[43,4,1,""],renorm:[43,4,1,""],repeat_interleave:[43,4,1,""],reshape:[43,4,1,""],rfft:[43,4,1,""],roll:[43,4,1,""],rot90:[43,4,1,""],round:[43,4,1,""],rsqrt:[43,4,1,""],save:[43,4,1,""],seed:[43,4,1,""],set_default_dtype:[43,4,1,""],set_default_tensor_type:[43,4,1,""],set_flush_denormal:[43,4,1,""],set_num_interop_threads:[43,4,1,""],set_num_threads:[43,4,1,""],set_printoptions:[43,4,1,""],set_rng_state:[43,4,1,""],sigmoid:[43,4,1,""],sign:[43,4,1,""],sin:[43,4,1,""],sinh:[43,4,1,""],slogdet:[43,4,1,""],solve:[43,4,1,""],sort:[43,4,1,""],sparse_coo_tensor:[43,4,1,""],split:[43,4,1,""],sqrt:[43,4,1,""],squeeze:[43,4,1,""],stack:[43,4,1,""],std:[43,4,1,""],std_mean:[43,4,1,""],stft:[43,4,1,""],sum:[43,4,1,""],svd:[43,4,1,""],symeig:[43,4,1,""],t:[43,4,1,""],take:[43,4,1,""],tan:[43,4,1,""],tanh:[43,4,1,""],tensor:[43,4,1,""],tensordot:[43,4,1,""],topk:[43,4,1,""],trace:[43,4,1,""],transpose:[43,4,1,""],trapz:[43,4,1,""],triangular_solve:[43,4,1,""],tril:[43,4,1,""],tril_indices:[43,4,1,""],triu:[43,4,1,""],triu_indices:[43,4,1,""],trunc:[43,4,1,""],unbind:[43,4,1,""],unique:[43,4,1,""],unique_consecutive:[43,4,1,""],unsqueeze:[43,4,1,""],var_mean:[43,4,1,""],where:[43,4,1,""],zeros:[43,4,1,""],zeros_like:[43,4,1,""]},torchvision:{get_image_backend:[45,4,1,""],set_image_backend:[45,4,1,""]}},objnames:{"0":["py","module","Python module"],"1":["py","class","Python class"],"2":["py","method","Python method"],"3":["py","attribute","Python attribute"],"4":["py","function","Python function"],"5":["std","envvar","environment 
variable"]},objtypes:{"0":"py:module","1":"py:class","2":"py:method","3":"py:attribute","4":"py:function","5":"std:envvar"},terms:{"00000e":43,"0000e":[42,43],"041m":1,"048m":1,"0545e":42,"0705e":43,"0949e":42,"10k":44,"10x7":22,"1239e":43,"13x12":22,"1428e":43,"148m":1,"1921e":43,"1_batch_16":41,"1e6":37,"1hr":4,"1st":[15,26],"1x1":47,"20l":22,"224x224":47,"2gb":17,"2nd":[15,22,23,26,42],"2x3":[22,38],"32x4d":47,"32x8d":47,"3493e":43,"3842e":43,"3rd":[26,37,42],"3x4":22,"3xhxw":41,"4064e":43,"427l":44,"483m":1,"4842e":[42,43],"4th":[26,44],"4us":1,"50k":44,"50x":47,"54_":41,"5751e":43,"5765e":42,"5955e":43,"5c106cde":[17,20],"5mb":47,"5x2":38,"5x7":22,"5x7x9":22,"60k":44,"640l":44,"6503e":43,"6531e":43,"727m":1,"7x7":22,"7x7x7":22,"7x9x8":22,"8000e":43,"816u":1,"8182e":42,"88131e":43,"9073e":[22,43],"9683e":43,"abstract":[13,15],"boolean":[1,8,15,19,22,23,29,37,40,42,43,44,49],"break":[4,15,19,34,37,43],"byte":[8,15,19,37,39,42,43],"case":[1,2,8,13,14,17,21,22,23,25,26,27,28,30,32,34,36,37,38,41,42,43,44,49,51],"catch":19,"char":[39,42],"ciss\u00e9":22,"class":[1,8,13,14,15,19,21,22,23,29,30,32,33,34,36,37,38,39,40,41,42,43,44,47,48,49,51],"const":[31,36],"default":[1,3,7,8,14,17,20,21,22,23,24,25,27,28,29,30,35,36,37,38,39,41,42,43,44,47,48,49,50,51],"enum":[14,36],"export":[1,8,14,16,19,22,29,36],"final":[14,15,22,43,47,49,50],"float":[1,13,15,19,21,22,23,30,33,36,37,39,40,41,42,43,46,48,49,50,51],"function":[3,4,7,8,13,17,18,20,21,24,25,26,27,28,29,30,31,33,37,40,41,42,43,44,45,46],"herv\u00e9":22,"import":[1,4,5,7,8,13,14,19,21,22,25,28,29,30,31,32,33,36,37,41,42,43,44,47,49],"int":[8,13,14,15,19,21,22,23,35,36,37,38,39,40,41,42,43,44,46,48,49,50,51],"j\u00e9gou":22,"long":[4,5,13,21,22,23,26,29,30,32,36,39,40,41,42,43],"new":[1,3,5,8,13,14,15,17,19,21,22,25,28,29,31,32,35,37,39,41,42,43],"return":[0,1,3,7,8,13,14,15,16,17,19,20,21,22,23,24,27,28,29,31,35,36,37,38,39,40,42,43,44,46,47,48,49,51],"short":[19,22,23,26,39,40,42,43,49],"static":[1,19,31,36,39],"super":[13,19,22,29,36],"switch":[9,10,13,21,23,25,42,43,47],"throw":[22,42,43],"true":[1,3,7,8,11,13,14,15,17,19,20,21,22,23,25,26,28,29,30,31,33,36,37,38,39,40,41,42,43,44,47,49,50],"try":[2,4,11,14,17,19,22,23,30,32,36,37],"var":[1,22,42,43],"void":[31,43],"while":[5,13,14,15,19,22,23,25,30,32,37,41,42,43,49],Abs:36,And:[22,35,43,49],But:[1,4,19],For:[1,2,3,4,5,7,8,13,14,15,17,19,22,23,25,26,27,28,30,32,36,37,38,39,40,41,42,43,44,47,49],Going:47,Has:[22,23,43],Its:[22,37],NFS:14,NMS:48,NOT:[19,36,38,43],Not:[19,29],One:[14,19,22,23,26,27,31,33,36,37,41,43,44,47],Ops:[2,28,42],PRs:[4,5],RHS:43,Such:[7,13,43],That:[43,49],The:[1,3,5,7,8,13,14,15,16,17,19,20,21,22,23,24,26,27,28,30,31,32,34,35,36,37,39,40,41,42,43,44,45,46,47,49,50,51],Then:[1,26,34,36,37,49],There:[1,4,5,14,17,19,22,25,28,29,30,31,32,33,34,35,36,42,43],These:[7,13,14,15,19,22,29,36,38,40,43,44,47],Use:[8,13,14,22,23,32,41,42,43,49],Used:[13,43],Useful:[8,22],Uses:8,Using:[13,15,19,22,32],WITH:36,Will:[6,14,19,43,49],With:[13,15,22,23,28,36,37,41],__background__:47,__call__:49,__config__:[18,27],__constants__:19,__dict__:37,__file__:[17,35],__getitem__:[13,44],__init__:[1,13,15,19,22,29,30,36,41],__iter__:13,__len__:[13,44],__main__:[13,26,32,35],__name__:[13,32,35],__new__:19,_back:[22,23],_bottom:[22,23],_call:15,_cat:15,_channel:[22,23],_class:22,_compilation_unit:19,_cpp_modul:19,_dependentproperti:15,_dim:22,_direct:22,_ext:35,_extra_fil:[19,31],_factor:22,_featur:[22,23],_fft:43,_force_outplac:19,_fork:27,_formatt:43,_forward_cl:1,_frames_up:19,_front:[22,23],_
glibcxx_use_cxx11_abi:43,_greaterthan:15,_greaterthaneq:15,_halfopeninterv:15,_if_scalar_type_a:36,_in:24,_index:22,_indic:[38,43],_instanc:15,_integerinterv:15,_interv:15,_invers:15,_key_padding_mask:22,_layer:22,_left:[22,23],_length:[22,43],_lessthan:15,_like:42,_load_from_state_dict:22,_log_api_usage_onc:31,_mask:22,_metadata:22,_module_class:19,_nnz:38,_onnx_master_opset:36,_onnx_stable_opset:36,_out:24,_pad:22,_qualified_nam:19,_random_sampl:22,_rcb:19,_resnet18:17,_retain_param_nam:36,_right:[22,23],_sampl:43,_scalar:36,_shape:22,_size:22,_slope:[22,23,24],_stack:15,_stacklevel:23,_tensor:42,_top:[22,23],_valu:[22,38,43],_wait:27,_weight:22,a3c:32,a_big:43,a_dict:19,a_i:22,a_l:43,a_lu:43,a_tupl:19,a_u:43,aaa:41,abc:22,abi:7,abil:[5,31],abl:[4,19,22,36,43],abnorm:[21,32],about:[1,5,8,13,19,22,27,29,30,31,32,36,42,49],abov:[1,15,17,19,22,26,27,28,29,36,37,43,44,49],abridg:30,abruptli:21,abs:[15,22,23,36,37,42,43],abs_:42,absolut:[1,5,7,22,23,42,43,49],abstransform:15,acc:47,acceler:[4,22,37],accept:[1,4,5,14,19,22,29,36,37,40,41,42,43],access:[5,13,14,21,22,25,28,30,31,40,42,51],accident:4,accimag:45,accommod:22,accompani:4,accomplish:4,accord:[22,24,35,36,37,43,44,47,48],accordingli:[42,44,47],accoridng:22,account:[2,22],accumul:[1,19,22,30,42,43],accumulategrad:1,accur:[8,36,43],accuraci:[41,47],achiev:[13,15,22,23,31,36],aco:[36,42,43],acos_:42,acquaint:4,across:[1,8,13,14,19,22,23,28,30,31,32,33,39,41,42,43,44],act:[15,22,49],action:[5,15,28,44],activ:[1,3,4,8,21,28,29],actual:[1,13,17,19,22,25,27,28,29,32,35,36,37],actual_input_1:36,acycl:25,adadelta:37,adagrad:[22,37],adam:[5,6,15,37],adamax:37,adamw:37,adapt:[19,22,23,37,43,48],adaptive_avg_pool1d:36,adaptive_avg_pool2d:36,adaptive_avg_pool3d:36,adaptive_max_pool1d:36,adaptive_max_pool2d:36,adaptive_max_pool3d:36,adaptiveavgpool1d:23,adaptiveavgpool2d:23,adaptiveavgpool3d:23,adaptivelogsoftmaxwithloss:19,adaptivemaxpool1d:23,adaptivemaxpool2d:23,adaptivemaxpool3d:23,add:[1,4,8,17,19,22,23,26,29,31,36,37,38,41,42,43,47],add_:[1,26,38,42],add_argu:28,add_audio:41,add_bias_kv:22,add_custom_scalar:41,add_embed:41,add_figur:41,add_graph:41,add_histogram:41,add_imag:41,add_mesh:41,add_modul:22,add_param_group:37,add_pr_curv:41,add_scalar:41,add_text:41,add_video:41,add_zero_attn:22,addbmm:[42,43],addbmm_:42,addcdiv:[42,43],addcdiv_:42,addcmul:[42,43],addcmul_:42,added:[4,7,22,23,31,36,37,38,41,42,43],adding:[13,14,17,19,22,29,36,42,43],addit:[1,4,5,7,15,19,22,23,27,28,29,31,32,33,35,37,38,42,43],addition:[1,13,14,15,22,30,42,43,49],additionali:22,addmm:[36,38,42,43],addmm_:42,addmv:[42,43],addmv_:42,addr:[42,43],addr_:42,address:[1,13,14,21,42,47],adher:5,adjac:[22,43],adjust:[22,49],adjust_bright:49,adjust_contrast:49,adjust_gamma:49,adjust_hu:49,adjust_satur:49,admit:28,adopt:5,advanc:[3,22,25,32,36,41],advantag:[14,22,30],adventur:36,adversari:22,advic:4,advis:[32,43],advisori:4,aeroplan:47,affect:[1,4,8,22,23,39,43],affin:[15,22,23,25,42,49],affinetransform:15,aforement:32,afram:46,after:[4,7,8,13,14,17,19,21,22,24,28,30,31,32,34,37,40,41,42,43,48,49],afterward:[1,22],again:[3,13,14,43,44],against:[1,2,14,19,43,49],aggreg:[22,23,47],aggress:[1,25],ahead:4,aid:[4,25],aidan:22,ail:6,ailzhang:6,aim:4,airplan:47,aka:1,akin:31,alban:6,alband:6,alex:6,alfredo:6,algorithm:[4,11,12,15,22,23,43],alia:[15,42],alias:29,alican:6,alicanb:6,align:[22,23,48],align_corn:[22,23],aliv:[30,32],all:[1,3,4,5,7,8,13,14,15,17,19,21,22,23,25,27,28,29,30,32,33,35,36,37,38,39,40,41,42,43,44,47,48,49,50],all_gath:14,all_gather_multigpu:14,all_reduc:14,all_r
educe_multigpu:14,allclos:[1,42,43],alloc:[1,2,8,15,21,25,28,30,40,42,43],allow:[1,4,5,7,13,14,15,17,19,22,25,26,27,28,31,32,36,37,40,41,42,43,44],allow_unreach:1,allow_unus:1,almost:[35,43,44],alon:19,along:[7,8,13,14,15,17,19,22,23,26,30,31,37,42,43],alpha:[15,22,23,36,37,38,42,43],alpha_f:36,alphabet:[23,43,44],alphadropout:23,alreadi:[8,13,14,17,19,20,22,29,32,36,37,39,42,43,44],also:[1,3,4,5,7,8,13,14,15,17,19,21,22,23,24,25,27,28,29,30,31,32,33,35,36,37,38,41,42,43,44,49],altern:[13,17,19,22,23,35,43],although:[4,15,22],alwai:[1,8,13,14,19,21,22,26,27,28,29,31,36,38,40,42,43],amazonaw:[20,35],ambigu:[15,22],among:[8,13,14,15,22,36,43],amount:[1,2,4,8,22,25,28,30,49,50],amplitud:37,amsgrad:37,an_error:19,anaconda:35,analog:[37,43],analogu:19,analyt:[1,15],anchor:[22,23],angl:[22,49],ani:[1,2,3,4,5,8,13,14,15,19,21,22,23,25,27,28,29,31,32,33,36,37,41,42,43,49],anm:43,ann_fil:44,anneal:37,annfil:44,annot:[1,19,44],annotation_path:44,anoth:[4,8,13,14,19,22,27,28,29,32,35,36,42,43,48],another_input:22,anothermodel:36,answer:[4,5,22],anticip:3,anymor:[1,14,22,42],anyon:5,anyth:[3,4,14,19],aoa:35,apaszk:[5,6],api:[1,5,8,17,19,21,28,36,38,41,42,44],aplli:49,appear:[2,14,15,22,29,37,43],append:[1,14,19,22,32,35,41,42,43],appl:47,appli:[1,3,14,15,19,21,22,23,25,29,36,37,42,43,49],applic:[8,14,15,22,25,27,28,42,49],apply_:42,apprear:43,appreci:4,approach:[14,19,21,22,43],appropri:[4,14,15,19,22,43,47],approv:5,approxim:[1,22,23,29,37],arang:[13,22,23,36,41,42,43],arbitrari:[1,14,19,22,23,25,31,42,43],arccosin:43,architechtur:22,architectur:[22,43,45,47],archiv:[19,31],arcsin:43,arctang:43,area:[4,5,23,49],arg0:1,arg1:1,arg:[1,2,3,7,14,15,17,21,22,23,28,32,34,36,39,42,43,44,49],arg_constraint:15,argmax:[22,36,42,43],argmin:[36,42,43],argpars:28,argsort:[42,43],argument:[1,2,3,7,8,13,14,15,17,19,21,22,23,26,28,29,30,31,36,37,39,40,41,42,43,44,50,51],argumentpars:28,ari:36,aris:15,arithmet:43,armand:22,around:[1,4,5,8,14,19,21,28,42,49],arrai:[13,22,23,36,39,41,42,43,44],arrang:44,array_lik:[42,43],art:43,articul:5,artifact:31,artifici:1,arxiv:[22,47,49],as_strid:[42,43],as_tensor:[41,42,43],as_tupl:43,asap:21,ascend:43,ascent:15,ascii:[8,43],asd932_:44,asgd:37,ashish:22,asin:[36,42,43],asin_:42,ask:[4,14,18],aspect:[4,49],assembl:13,assert:[13,15,36],assert_allclos:19,assign:[4,13,14,19,22,29,30,36,41,44],assign_x:19,associ:[1,8,19,22,23,40,42,43],assum:[13,14,15,19,22,23,29,31,36,37,43,49],assumpt:[22,49],ast_1:[22,23],ast_2:[22,23],astyp:36,asuhan:6,async:[14,39,42],async_op:14,asynchron:[2,22,27,39,41,42],atan2:[42,43],atan2_:42,atan:[36,42,43],atan_:42,aten:[19,27,35,43],aten_thread:27,atol:[1,19,29,42,43],atom:33,atomicadd:33,attach:17,attempt:[19,28,35,43],attend:22,attent:[4,22,35],attn:22,attn_mask:22,attn_output:22,attn_output_weight:22,attr1:36,attr1_f:36,attr2:36,attr2_i:36,attr:[15,22,23,36,43],attribut:[1,13,14,18,22,25,28,29,36,42,51],audio:[41,44,46],audio_fp:46,aug_add_x:19,augment:49,auto:[14,22,41],autoencod:15,autograd:[2,3,4,15,18,22,23,30,31,36,42,43],autograd_tensor:1,autom:[19,36],automat:[7,8,14,22,25,26,28,29,32,36,41,42,43],aux_logit:47,aux_loss:47,auxiliari:[17,31,47],avaialbl:17,avail:[7,8,13,14,17,19,21,22,23,28,29,35,36,43,44],averag:[1,14,22,23,37],avg:[1,49],avg_pool1d:36,avg_pool2d:36,avg_pool3d:36,avgpool1d:23,avgpool2d:23,avgpool3d:23,avmgithub:6,avoid:[5,13,15,22,23,30,33,41,42,43,49],awai:23,awar:[4,47],axbc:22,axes:36,axi:[36,42,43,49],b_hf:22,b_hg:22,b_hh:22,b_hi:22,b_hn:22,b_ho:22,b_hr:22,b_hz:22,b_if:22,b_ig:22,b_ih:22,b_ii:22,b_in:22,b_io:22,b_ir:22,b_iz:2
2,back:[17,32,36,38,43,49],backbon:47,backcompat:26,backend:[1,9,10,11,19,22,23,27,28,33,36,42,43,45],backend_str:14,background:[9,10,11,22,23,32,42,43,44],backpack:47,backprop:43,backpropag:[1,15,30,37],backward:[1,3,5,10,15,22,23,24,29,30,32,33,37,38,42,43],bad:21,baddbmm:[42,43],baddbmm_:42,bag:[22,23],bai:6,balanc:[43,44],ball:47,balnta:22,banana:47,bar:[4,19,20,47],bare:7,barrier:14,bartlett:43,bartlett_window:43,base:[1,4,5,7,8,13,15,19,22,25,27,36,37,41,42,43,44,49],base_distribut:15,base_lr:37,base_momentum:37,base_se:13,basebal:47,basedistribut:15,basep:15,baseq:15,basi:[15,31,37],basic:[4,22,37,41],bat:47,batch1:[42,43],batch2:[42,43],batch:[15,22,23,28,30,32,33,36,37,41,43,44,47,48,49,50],batch_first:[22,30],batch_ndx:13,batch_sampl:13,batch_shap:15,batch_siz:[13,15,22,41,44],batchmean:[22,23],batchnorm1d:23,batchnorm2d:23,batchnorm3d:23,batchnorm:[22,36],batchnormnd:22,batchsampl:13,batchsiz:[22,23],batchwis:22,bbb:41,bceloss:23,bcewithlogitsloss:23,bckenstler:37,bddppq:6,beam:19,bear:47,becaus:[1,2,4,13,15,19,21,22,26,28,30,31,35,36,41,42,43],becom:[1,4,5,13,15,22,23,36,42,43],bed:47,bedroom_train:44,been:[1,8,14,15,21,22,27,32,35,37,41,43,47,48],befor:[1,4,8,13,14,15,19,21,22,23,25,27,28,29,31,35,36,37,38,41,42,43],beforehand:4,begin:[4,8,22,31,36,37,42,43],behav:[7,19,42],behavior:[4,7,14,17,19,22,23,26,28,36,37,42,43,47],behaviour:[1,9,10,23,42,43],behind:44,being:[1,5,13,15,19,22,23,29,32,36,42,43,49],belong:[3,8,14,15,28,37,49],below:[1,7,13,14,15,19,21,22,23,28,29,32,35,36,43,49],ben:22,bench:47,benchmark:33,benefit:[4,14,21,37],bengio:24,bernoulli:[22,23,42,43],bernoulli_:[42,43],besid:41,bessel:43,best:[1,4,13,14,18,19,21,30,37,43],beta:[22,23,36,37,38,42,43],better:[4,5,8,13,19,22,23,27,35,41,43],between:[1,4,8,14,15,19,21,22,23,28,32,33,37,39,41,42,43,44,47,49],bewar:4,beyond:[5,30,37,43],bfg:37,bfloat16:[39,42],bia:[5,22,23,29,41],bias:[22,43],bias_hh:22,bias_hh_l:22,bias_ih:22,bias_ih_l:22,bicub:[22,23,49],bicycl:47,bidirect:[22,36],big:[4,43],bij:43,biject:15,biject_to:15,bik:43,bilinear:[43,49],bin:[41,42,43,48],binari:[15,19,22,23,31,35,36,41,42,43,44,47],bincount:[33,42,43],bind:[7,8,36],bird:47,bit:[4,35,40,42,43,51],bitwis:[14,43],bitwise_not:[42,43],bitwise_not_:42,bjk:43,bl_flip:49,bla:27,black:49,blackman:43,blackman_window:43,blank:[22,23],blob:[31,36,41],blobnam:41,block0:[19,36],block1:19,block:[4,8,13,14,19,21,22,23,36,47],blog:4,blow:30,blue:44,bmm:[42,43],board:5,boat:47,bodi:19,boil:4,book:47,bool:[1,3,8,13,14,15,17,19,20,21,22,23,36,37,39,40,41,42,43,44,47,49,50],booltensor:[40,42],bootcamp:4,bootstrap:35,border:[23,49],both:[1,8,13,14,15,19,22,23,26,29,33,36,38,42,43,44,46,47,49],bottl:47,bottleneck:[18,47],bottom:[1,23,49],bound:[2,22,23,24,34,37,42,43],boundari:[22,23,37,44],bowl:47,box:[47,48],bozkurt:6,bptt:30,br_flip:49,branch:[4,17,19,47],brand:14,bregman:15,breviti:[1,36],brief:21,bright:[44,49],brightness_factor:49,broadcast:[8,14,15,18,22,36,42,43],broadcast_buff:22,broadcast_coalesc:8,broadcast_multigpu:14,broadcast_tensor:43,broadcast_warn:26,broader:[5,31,43],broccoli:47,broken:4,brokenpipeerror:35,brown:44,bucket:22,bucket_cap_mb:22,buf:22,buffer:[1,2,8,13,19,22,25,29,43],buffer_s:8,bug:[5,32],bugfix:4,build:[7,14,15,19,23,25,41,49],build_directori:7,build_ext:7,buildextens:7,built:[4,14,19,27,32,37,43],builtin:43,bulk:13,bump:22,bundl:31,bus:47,byclass:44,bymerg:44,bypass:28,byte_arrai:43,bytecod:13,bytesio:[19,43],bytetensor:[8,22,40,42,43],bz2:44,c10:31,c10_log_api_usage_onc:31,c99:35,c_0:22,c_1:22,c_j:22,c_n:22,c_t:22,cach:[8,15,20
,21,22,30,43,47],cache_s:15,caffe2:[36,41],cake:47,calcul:[1,3,13,22,23,26,35,37,43],calculate_gain:24,call:[1,7,8,13,14,15,17,21,22,23,27,28,29,30,31,32,35,36,37,40,41,42,43,47,50,51],callabl:[13,15,17,19,37,42,43,44],callback:31,caller:28,camera:[31,41],can:[1,2,3,4,5,7,8,11,12,13,14,15,16,17,19,21,22,23,25,26,27,28,29,30,31,32,33,34,35,36,37,38,40,41,42,43,44,46,47,49,51],candid:1,cannot:[1,13,15,17,19,22,23,35,38,39,42,43],cap:44,capabl:[8,14,31,43],capac:28,capacit:15,captur:[8,19,36],car:47,card:35,cardin:15,care:[4,7,15,21,22,28,30,32,38,43],carlo:15,carri:26,carrier:15,carrot:47,cartesian:[15,43],cartesian_prod:43,cast:[1,22,23,36,39,42,43],cat:[15,19,22,36,38,42,43,44,47],categor:[4,23],categori:[15,43,44,47],categorynam:41,cattransform:15,cauchi:[42,43],cauchy_:[42,43],caught:21,caus:[1,3,13,14,19,21,23,26,30,32,35,36,42,43],caveat:[21,28],ccc:41,cdf:15,cdot:[22,23,43],ceil:[13,22,23,36,42,43,48],ceil_:42,ceil_mod:[22,23],cell:[22,47],center:[23,37,41,42,43,49],center_flip:49,centercrop:49,central:[31,49],cerr:31,certain:[13,14,19,22,23,26,31,38,43],certainli:43,chain:[1,13,15,22,25,42,43,49],chain_matmul:43,chaindataset:13,chair:47,challeng:4,chanan:[5,6],chanc:[4,15],chang:[1,4,8,15,19,21,22,23,25,26,28,35,36,37,38,39,41,42,43,47,49],channel:[5,13,22,23,24,36,41,44,46,47,49],charact:[23,43],chart:41,chartensor:[40,42],chartnam:41,cheap:[15,22],cheaper:13,check:[2,7,8,13,14,15,17,22,29,30,31,36,37,41,42,43,44],check_compiler_abi_compat:7,check_input:19,check_model:36,check_reduct:22,check_sparse_nnz:1,check_toler:19,check_trac:19,checker:[19,36],checkout:36,checkpoint:[1,17,18,20,22,43],checkpoint_sequenti:3,child:[13,21,22,35],children:[21,22],chintala:[5,6],choic:[19,22,27,36],choleski:[15,42,43],cholesky_invers:[42,43],cholesky_solv:[42,43],choos:[1,24,41],chosen:[43,49],christian:6,chrome:1,chunk:[3,8,13,19,22,42,43],chunk_siz:8,church_train:44,chw:41,cifar100:44,cifar10:44,cifar:45,circleci:4,circular:23,circumst:[11,19,22,23],cityscap:45,claim:4,clamp:[23,36,42,43],clamp_:42,clamp_max:36,clamp_min:36,class_i:44,class_index:[13,44],class_x:44,classif:[22,23,24,44,45],classifi:[25,36,37,41],classmethod:[8,22],clean:[8,14,17,21],cleaner:25,clear:[17,22,28,37],click:43,clip:[22,44],clip_valu:22,clock:47,clockwis:49,clone:[1,13,21,23,38,39,42,43],cloned_coeffici:43,close:[8,29,41],closest:[23,43],cloud:41,clp:44,clr:[37,43],cluster:[22,41],clutter:41,cmake:35,cmake_gener:35,cmake_include_path:35,cmdclass:7,cmyk:49,cnn:[22,25,48],coalesc:[8,38,42],coars:44,coco:[45,47],coco_instance_category_nam:47,coco_person_keypoint_nam:47,coco_util:47,cococapt:44,cocodetect:44,code:[1,2,5,7,13,14,15,17,22,26,29,30,31,32,33,35,36,37,38,40,42,43],codebas:5,codec:43,codomain:15,coeffici:[37,43],cohes:5,col2im:22,col:[43,44],colesburi:[5,6],collat:13,collate_wrapp:13,collect:[1,4,13,37,41,43,44],color:[22,41,44,49],colorjitt:49,colors_tensor:41,column:[1,22,23,24,42,43,48,49],com:[4,5,20,35,36],combin:[13,19,22,23,28,36,43],combinations_with_replac:43,come:[4,13,22,31,44],comm:8,comma:[14,43],command:[1,2,35],comment:[4,19,29,41],commit:[4,5,17,33],committ:5,common:[13,22,28,30,32,43,44,45,49],commonli:[14,15,37,40],commun:[4,5,18],compani:5,compar:[1,3,13,19,22,29,35,41,43],comparison:29,compat:[7,13,15,19,21,39,42,43,44],compil:[7,19,27,31,35],compilationunit:19,compiled_with_cxx11_abi:43,complementari:[43,49],complet:[4,8,14,21,25,33,43,49],complex:[4,22,32,43,49],complic:[2,26],compon:[4,14,15,22,31,43],compos:[15,19,22,23,36,41,43,49],composetransform:15,composit:[15,19],compris:3,comput:[3,
4,8,13,14,15,19,22,23,25,27,28,29,30,33,36,37,38,42,45,47,48,49,50],compute_uv:[42,43],compute_z:27,concat:[22,36],concatdataset:13,concaten:[7,8,13,22,23,43],concentr:15,concentrarion:15,concentration0:15,concentration1:15,concept:[4,36,40],conceptu:[1,25],concern:[13,21],concret:[15,19,22,23,32],concurr:[27,28],cond:36,conda:[35,36,43],condit:[1,12,19,22,29,36,41,42,43],condition:1,conduct:[5,22],confer:5,confid:[4,41],config:35,config_dict:41,configur:[0,4,13,14,22,28,35,41,43,47],confirm:[4,19,36],conform:22,conjug:[37,43],conjunct:[13,23],connect:[14,21,22,25,47],connectionist:[22,23],conquer:43,consecut:[14,42,43],consensu:4,consid:[17,19,22,23,26,29,30,37,42,43,44],consider:[4,22],consist:[13,19,36,37,43,45],consol:41,constant:[13,22,23,29,36,37,43,49],constant_:24,constantpad2d:23,constantpadnd:36,constrain:[15,22],constraint:22,constraint_registri:15,constraintregistri:15,construct:[1,13,15,22,25,32,38,40,41,42,43,47],construct_transform:15,constructor:[7,13,22,28,38,42,47,51],consum:[13,16,21,32,36,41],consumpt:[1,41],contain:[1,3,7,8,13,14,15,19,23,25,29,30,31,36,37,39,40,41,42,43,44,46,47,48,49],content:[4,19,20,21,37,41,42,43],contenti:5,context:[1,8,21,22,28,29,31,32,36,43],contigu:[22,23,39,42,43],continu:[13,15,19,22,36,42,43],continuum:35,contract:43,contrail:44,contrain:43,contrari:[4,27],contrast:[15,37,47,49],contrast_factor:49,contribut:[1,5,18,22,23,43],contributor:[4,5],control:[13,19,22,25,27,28,32,36,43,49],conv1:[19,22,41],conv2:[19,22],conv2d:[19,36,41],conv4:22,conv5:22,conv:[19,22,24,36],conveni:[4,7,17,19,28,29,31],convent:[1,20,22,36,42,43],converg:37,convers:[4,25,36,42,45],convert:[1,13,19,22,29,36,41,42,43,49],convert_sync_batchnorm:22,convolut:[24,27],convolv:[22,23],convtranspos:22,convtranspose1d:23,convtranspose2d:23,convtranspose3d:23,coo:[38,40,42,43],cooldown:37,coordin:[4,15,38,41,42,43,48,49],cope:32,copi:[4,8,13,14,19,21,22,26,28,32,39,42,43],copy_:[1,19,22,28,39,42],core:[4,19,27,36],corner:[22,23,49],corpor:[4,5],correct:[2,4,14,15,19,22,39,42,43,49],correctli:[3,14,19,22,23,32,36],correl:[1,15,22],correspond:[1,4,8,13,15,17,19,22,23,29,31,36,37,39,41,42,43,48,49],corrupt:[22,32,44],cos:[22,36,37,41,42,43],cos_:42,cosh:[42,43],cosh_:42,cosin:[22,23,37,43],cosineannealinglr:37,cosineembeddingloss:23,cost:[1,2,11,22,23,43],couch:47,could:[2,4,8,13,15,21,35,43],couldn:[35,36],count:[1,8,15,43],count_include_pad:[22,23],counter:[1,8,21,22,25,49],counterpart:43,coupl:[31,33],cours:[2,17,37],courtesi:15,cov_diag:15,cov_factor:15,covari:[15,22,49],covariance_matrix:15,cover:[29,31,44],coverag:4,cow:47,cpp:[4,5,7,43],cpp_extens:[18,29],cpp_sourc:7,cppdoc:4,cppextens:7,cprofil:2,cpu:[1,2,8,14,18,19,21,22,23,28,32,33,35,36,39,40,42,43],cpu_model:19,cpu_tim:1,cpu_time_tot:1,cpuhrsch:6,crack:4,crash:[21,41],crcv:44,creat:[1,3,4,7,8,13,14,15,21,22,25,28,32,36,39,41,42,43,44,51],create_extens:35,create_graph:[1,42],creation:[1,8,13,19,21,22,28,42,44],creator:25,crelu:22,criterion:[22,23,30],critic:22,crop:[47,48,49],cross:[4,15,22,23,28,35,42,43],crossentropyloss:23,crossmaplrn2d:19,csrc:[35,36],ctc_loss:[22,33],ctcloss:23,ctx:[1,29],cube:[22,43],cubla:8,cublashandle_t:8,cuda0:[28,42],cuda100:35,cuda101:35,cuda1:40,cuda2:28,cuda80:35,cuda90:35,cuda92:35,cuda:[1,2,3,7,9,10,11,13,14,18,19,22,23,29,33,36,37,39,40,42,43,47],cuda_extens:7,cuda_hom:7,cuda_launch_block:28,cuda_prefix:35,cuda_runtim:7,cuda_sourc:7,cuda_tim:1,cuda_time_tot:1,cuda_visible_devic:[8,28],cudaev:1,cudaeventsynchron:8,cudaextens:7,cudart:[7,35],cudastreamsynchron:8,cudastreamwaitev:8,cudnn:
[11,12,22,23,47],cufft:43,cufft_plan_cach:28,cuh:7,cultur:5,cumprod:[42,43],cumsum:[42,43],cumul:[15,22,23,43],cup:47,cur:37,curl:35,current:[1,3,5,7,8,13,14,19,21,22,23,28,31,33,35,36,37,38,39,40,41,42,43,44,46,48],current_blas_handl:8,current_datetime_hostnam:41,current_devic:[8,40],current_stream:8,curv:41,custom:[7,13,14,21,22,31,35,37,42],custom_decod:22,custom_encod:22,custom_loop:36,custom_op:36,cut:4,cutoff:[22,43],cxx:7,cycl:37,cycle_momentum:37,cyclic:[37,49],cycliclr:37,d_1:[22,23],d_2:[22,23],d_k:[22,23],d_model:22,daemon:21,dag:1,dai:4,dampen:37,dark:49,darker:49,dart:44,data1:44,data2:44,data:[1,4,12,14,15,18,19,21,22,23,25,26,28,29,31,32,34,35,36,37,38,39,40,41,42,43,44,49],data_load:[32,37,44],data_parallel:30,data_ptr:[39,42],data_sourc:13,databas:[13,44],dataformat:41,dataload:[13,22,28,30,35,37,41,42,44],dataparallel:[14,30,32],dataset:[18,22,30,31,35,37,41,45,47,49],dataset_it:13,datasetfold:45,datatyp:[22,36,43],datetim:14,datset:44,david:[6,22],dcgan:36,ddp:22,ddp_sync_bn_network:22,deactiv:49,deadlock:[14,22],deal:[4,21,30,43,49],dealloc:[21,28,30],debug:[1,2,13,14,25,27,35,36],decai:[22,37],decemb:43,decent:13,decid:[2,4,44],decis:19,declar:[1,7,13,19,36],decod:[16,22,43,46],decoder_lay:22,decomposit:[15,43],deconvolut:[22,23],decor:[1,15,19],decoupl:[22,37],decreas:[15,22,37,43,48],decreasingli:22,deep:[4,5,18,22,24,37,47],deeper:47,deeplabv3_resnet101:47,deeplabv3_resnet50:47,def:[1,13,15,17,19,22,27,29,30,32,35,36,37,42,49],default_gener:43,default_load:44,default_stream:8,defin:[1,7,13,15,17,21,22,23,35,36,37,38,42,43,44,49],define_macro:35,definit:[4,13,15,17,19,22,23,36,41,43,47],degre:[15,22,43,49],del:[21,30],delet:[14,17,21],deliv:5,delta:[15,22,24,37],delv:24,demand:[8,31],demonstr:22,denomin:[22,37,43],denorm:43,denot:[1,15,19,22,37],dens:[22,38,40,42,43,47],dense_dim:[38,42,43],densenet121:47,densenet161:47,densenet169:47,densenet201:47,densenet:36,densiti:15,depend:[1,2,3,13,14,15,17,19,21,22,23,27,28,33,36,37,38,41,42,43,47],dependent_properti:15,deploi:[4,31],deploy:18,deprec:[14,22,23,26,36,39,42,43,49],depth:[8,22,23,47,49],depthwis:22,dequant:42,deriv:[1,5,19,22,29,42,43],derivedp:15,derivedq:15,descend:[22,42,43],descent:[15,37],describ:[3,4,8,13,19,22,23,24,30,31,36,42,43,47,48],descript:[0,4,7,19,28,29,31,36,51],descriptor:[13,22,36,44],deseri:[20,43],design:[1,4,5,13,15,17,20,47],desir:[8,13,14,15,22,23,28,36,38,39,42,43,49],desmaison:6,despit:19,destin:[8,14,22,39,42,43],destroi:22,destructor:21,det:[15,42,43],detach:[1,19,22,23,30,42,43],detach_:[1,42],detail:[0,1,4,8,13,15,19,22,23,29,30,31,37,38,41,42,43,47,49],detect:[3,7,14,21,36,45],detect_anomali:1,detector:22,determin:[1,5,7,8,13,15,22,23,28,33,41,43,49],determinist:[3,11,15,19,22,23,33,37,43],dev:5,dev_idx:14,develop:[28,31,36],deviat:[15,22,24,42,43,49],devic:[1,3,8,14,19,22,23,30,33,36,37,39,42,43],device_count:[8,14],device_id:[22,23,43],device_of:8,devito:6,df1:15,df2:15,dfrac:[22,23,42],diag:[15,42,43],diag_emb:[42,43],diagflat:[42,43],diagn:15,diagnost:19,diagon:[15,23,42,43],dict:[15,20,22,29,36,37,41,43,46,47],dictat:22,dictionari:[7,13,15,22,23,36,37,41,44,47],did:[1,4,19],didn:[25,29,35,37],dies:21,dieterich:6,diff:[4,19],differ:[1,3,7,8,13,14,15,17,19,21,22,23,26,27,28,29,32,33,35,36,37,38,40,41,42,43,44,47],differenti:[15,22,23,25,29,30,42],difficult:[1,4],difficulti:[4,24],digamma:[42,43],digamma_:42,digit:[20,31,43,44],dilat:[22,23,36],dim0:[42,43],dim1:[42,43],dim2:[42,43],dim:[8,15,19,22,23,30,36,38,42,43],dim_arang:36,dim_feedforward:22,dimems:42,dimens:[1,8,13,15,19,2
2,23,24,26,30,38,40,41,42,43],dimension:[1,15,22,23,24,26,39,40,42,43],dims_i:36,dine:47,diningt:47,dir:[17,36,44],dirac:24,dirac_:24,direct:[4,5,22,25,29,43,49],directli:[4,5,7,13,14,15,19,22,23,28,31,32,36,38,41,43],directori:[7,14,20,31,34,41,44,47],dirnam:17,dirti:25,disabl:[22,28],disable_cuda:28,disable_jit_exampl:19,disadvantag:19,discard:[17,19,48],discourag:[1,8,25],discov:14,discrep:43,discret:[15,22,23,42,43],discrimin:22,discuss:[5,15],disjoint:19,disk:[1,13,41,43],dispatch:[14,36],displai:[20,23,36,47,50],displaystyl:43,dissimilar:22,dist:[14,15,42,43],distanc:[37,43,44],distinct:43,distort:49,distortion_scal:49,distribut:[13,18,24,38,41,42,43],distributed_test:14,distributeddataparallel:[13,14],distributedsampl:13,div:[22,36,38,42,43],div_:[38,42],div_valu:22,diverg:[19,22,23,36],divid:[3,8,22,23,43],dividend:43,divis:[13,22,23,43],divisor:[22,23,42,43],divisor_overrid:[22,23],dlibenzi:6,dll:35,dlpack:18,dltensor:16,dmytro:[5,6],dnn:27,do_constant_fold:36,doc:[2,21,22,29,36,41],doc_str:36,docstr:[7,17],document:[8,13,14,17,19,21,22,29,30,36,43,50],doe:[1,2,3,4,5,8,14,15,17,19,21,22,23,26,27,28,32,36,38,42,43,45,49],doesn:[1,3,4,8,13,14,19,22,23,26,29,31,32,35,36,37,43],dog:[44,47],doing:[4,13,19,23,35],domain:[5,15],don:[1,2,4,14,17,21,22,23,25,29,30,32,35,36,37,42,43,49],done:[13,15,19,21,22,30,33,36,42,43,49],donut:47,dot:[22,42,43,49],doubl:[1,22,23,29,39,40,42,43],doubler:1,doubletensor:[40,42,43],dow:41,down:[1,4,13,15,23,32,41],download:[20,35,41,44,47],downsampl:22,doxygen:4,dp_m:30,dpotri:43,draw:[13,41,42,43],drawn:[13,24,42,43],drier:47,drive:[5,14],driven:5,drop:[13,22,32,44],drop_last:13,dropout:[3,36],dset:44,dst1:8,dst2:8,dst:14,dst_tensor:14,dst_type:22,dtype:[12,13,19,22,23,28,29,36,38,39,41,42,43,49,51],due:[2,3,4,15,19,22,28,33,43],dummi:13,dummy_input1:36,dummy_input2:36,dummy_input:36,dump:35,dump_patch:22,duplic:[13,22,30,38,42,43],dure:[1,3,7,14,19,22,23,27,28,31,36,38,42,43,47],dynam:[7,13,19,24,36,37,43],dynamic_ax:36,dynamic_threshold:37,dzhulgakov:[5,6],each:[1,3,7,8,13,14,15,16,17,19,22,23,24,25,26,27,28,29,30,31,32,36,37,38,40,41,42,43,44,46,47,48,49,50],eager:43,eagerli:8,earli:[19,22],earlier:[1,30,36],eas:[1,27],easi:[13,19,30,31,32,36],easier:[4,13,19,22,26,29,43],easili:[4,9,10,14,22,23,37,41,42,43],ecosystem:31,edg:[1,23,49],edgeitem:43,edouard:22,edu:[22,44],edward:[5,6],effect:[1,4,7,13,17,19,22,23,28,37,39,41,42],effici:[1,13,15,22,25,29,38,40,42,43,47],eig:[42,43],eigenvalu:43,eigenvector:[42,43],eight:20,einstein:43,einsum:43,either:[1,7,13,14,15,17,19,22,23,24,26,28,29,31,32,36,37,42,43,51],elaps:8,elapsed_tim:8,eleg:32,element:[1,8,13,14,15,22,23,24,26,27,38,39,40,41,42,43,44,48,49],element_s:[39,42],elementari:43,elementwis:[8,22,23,43],elementwise_affin:22,eleph:47,elf:30,elif:19,elimin:[14,42,43],ell:22,ell_c:22,ellips:43,ellipsi:43,elman:22,els:[4,7,13,15,19,21,22,23,28,29,39,42,43,44,49],elsewher:[17,43],elu:36,elu_:23,embed:[27,36,41],embed_dim:22,embedding_bag:33,embedding_dim:[22,23],embedding_matrix:23,embedding_sum:22,embeddingbag:23,emerg:35,emit:[1,7,19,44],emit_nvtx:[1,2],emnist:45,empir:22,emploi:37,employe:5,empti:[14,19,22,23,24,26,28,36,38,42,43],empty_cach:[8,28],empty_lik:43,empty_strid:43,emptydatastructur:19,enabl:[1,12,13,14,22,26,27,28,31,35,37,42,43],enable_grad:[1,43],enable_tim:8,encod:[14,19,22,29,43],encoder_lay:22,encount:[14,22,23,43],encourag:5,end:[4,5,8,13,14,19,21,22,30,35,42,43,46],end_dim:[42,43],end_ev:8,end_pt:46,endl:31,endocd:22,endpoint:49,enforc:22,enforce_sort:22,engin:[1,42,43],enhanc:49,enoug
h:[19,21,25,29,35,37,43,49],enqueu:[8,28],ensur:[1,2,4,5,13,14,19,20,21,22,25,28,32,33,42,43,47],enter:14,entir:[3,4,7,13,19,22,23,30,31,34,49],entiti:44,entranc:4,entri:[1,14,15,25,37,38,41,43,44],entropi:[15,22,23],entrypoint:21,entrypoint_nam:17,enumer:[13,15,22,28,35,41],enumerate_support:15,env:[14,15],enviro:4,environ:[4,7,15,17,19,20,27,28,35,47],environment:8,epoch:[13,37,41],eps:[1,22,23,29,37,43,51],epsilon:[22,23,43],eq_:42,equal:[8,14,15,22,23,26,41,42,43],equal_nan:[42,43],equat:[43,49],equival:[3,13,15,19,22,23,36,40,42,43],eras:49,erf:[36,42,43],erf_:42,erfc:[42,43],erfc_:42,erfinv:[42,43],erfinv_:42,errno:35,error:[1,4,13,15,19,21,22,23,25,28,29,36,42,43,47],especi:[5,13,14,23,25,36,42,43],essenti:[13,35],estim:[15,22,37,43],eta:37,eta_:37,eta_min:37,eta_t:37,etaminu:37,etapli:37,etc:[13,14,15,19,22,29,30,32,36,37,41,43],eth0:14,eth1:14,eth2:14,eth3:14,ethernet:14,euclidean:23,eval:[19,22,36,47],evalu:[2,15,22,23,25,29,37,43,44,47],even:[1,13,14,19,23,28,29,30,32,33,40,42,43],event:[1,15,19,21,41],event_dim:15,event_file_writ:41,event_nam:31,event_shap:15,eventfilewrit:41,eventlist:1,eventu:[1,4],everi:[1,13,14,15,22,23,25,28,29,36,37,39,41,42,43,44,47],everyon:4,everyth:[4,13,15,19,21],everytim:14,everywher:[23,43],evid:4,evolv:36,exact:[1,22,24,32,34,38,43],exactli:[1,7,14,15,22,23,25,28,36,43,44,48],examin:36,exampl:[1,3,4,7,8,13,14,15,17,19,20,22,23,24,25,26,27,28,29,30,31,32,35,37,38,40,41,42,43,44,47,49,50],example_forward_input:19,example_input:19,example_output:36,example_weight:19,except:[1,3,4,14,15,19,21,22,23,28,35,36,38,42,43,47,49],exchang:[14,43],exclud:[19,22,23,43,44],exclus:[13,14,15,25,43],execut:[1,2,3,4,7,8,13,14,19,22,25,26,27,30,32,33,35,36,43],exist:[1,4,5,7,13,14,15,17,19,21,22,26,31,36,42,43,44],exit:[1,2,21,22,32,36],exp1:41,exp2:41,exp:[1,15,22,23,36,42,43],exp_:42,exp_famili:15,exp_rang:37,expand:[1,5,15,17,26,36,42,43,49],expand_a:[29,36,42,43],expans:49,expect:[1,4,13,14,19,22,23,30,37,41,43,44,47,48,49],expens:[2,13,15,31],experi:[22,41],experiment:[35,36,38,40],expert:5,explain:[17,28],explan:29,explicit:[28,36,43],explicitli:[8,14,19,23,28,31,36,38,43],explod:[22,41],explor:17,expm1:[42,43],expm1_:42,expon:[15,22,23,42,43],exponenti:[22,42,43],exponential_:[42,43],exponentiallr:37,export_chrome_trac:1,export_param:36,export_raw_ir:36,expos:[1,28,31],express:[25,42,43],exptransform:15,ext:[20,44],ext_modul:7,extend:[1,15,18,22,31,32,36],extens:[7,15,43,44],extension_kernel:7,extent:27,extern:[19,27,35],extra:[4,14,19,22,29,30,31,43,44],extra_cflag:7,extra_compile_arg:[7,35],extra_cuda_cflag:7,extra_fil:19,extra_include_path:7,extra_ldflag:7,extra_repr:[22,29],extract:[22,23],extrafilesmap:[19,31],extrem:[2,41],extrud:30,eye:[15,43],eye_:24,eyes:4,ezyang:[5,6],f_t:22,face:41,facebook:5,faces_tensor:41,facil:[21,43],facilit:[17,19,43],fact:[1,19,29,42,43],factor:[15,22,23,24,37,43,48,49],factori:[1,15,28],fail:[1,4,15,19,21,32,35,36,43],failur:[1,5,14,15,19,21,22],fake:44,fakedata:45,fall:[4,22,23,36,43],fallback:14,fals:[1,3,7,8,13,14,15,17,19,21,22,23,25,30,33,36,37,39,41,42,43,44,47,49,50],famili:15,familiar:[19,25],fan:24,fan_in:24,fan_out:24,fang:6,faq:[13,18,22],far:23,fashion:[4,14,19,42,45],fashionmnist:44,fast:[4,13,22,28,40,43,48],fast_forward:43,faster:[13,22,23,28,45],fasterrcnn_resnet50_fpn:47,fastest:[22,43],fatal:[21,32],favor:[8,22,23,43,49],favour:43,fcn:47,fcn_resnet101:47,fcn_resnet50:47,fcntl:14,featur:[5,18,19,22,23,29,30,36,41],featuredropout:36,fed:41,fedyunin:6,feed:[19,30],feedback:4,feedforward:[22,24],feel:4,feng:6,fe
tch:13,few:[1,4,17,25,30,35,42,43],fewer:[15,26,38,42,43,47],ffi:35,fft:[28,42,43],field:[4,14,22,23,32,36,46,47],figur:[4,27,36,41],file:[1,4,5,7,8,13,17,19,20,31,35,36,39,41,43,44,46,50],filenam:[7,19,20,39,41,46,50],filename_suffix:41,fileno:36,filesytem:[17,20],fill:[14,22,23,24,28,42,43,49],fill_:[22,39,42,43],fill_diagonal_:42,fill_row_zero:19,fill_valu:[22,28,42,43],fillcolor:49,filter:[22,23,42,43,49],financi:5,find:[1,4,5,7,14,17,21,22,27,28,29,30,32,36,40,41,43],find_unused_paramet:22,fine:[7,14,17,19,21,22,25,37,44,49],finetun:[22,25],finish:[14,28,35],finit:[1,2,15,29,43],fire:[31,47],first:[1,2,3,4,5,7,8,13,14,15,19,20,21,22,23,28,30,32,34,35,36,37,38,41,42,43,44,48,49],fisher:15,fit:[1,37,42,43],five_crop:49,fivecrop:49,fix:[19,22,23,30,32,33,35,36,43,44],flag:[1,7,19,21,22,25,28,41,42,43,49],flat:[36,43],flatten:[24,36,42,43,49],flickr30k:44,flickr8k:44,flickr:45,flip:[42,43,49],float16:[12,22,40,42,43,51],float32:[13,22,23,36,40,42,43,51],float64:[22,40,42,43,51],floatstorag:39,floattensor:[1,14,22,38,40,42,43,47,49],floor:[22,23,36,42,43],floor_:42,flow:[19,23,25,36],flush:[1,19,41,43],flush_sec:41,fly:[13,44],fmod:[42,43],fmod_:42,focu:37,focus:19,fold:[19,36,44],folder:[4,7,13,17,41,44],folk:4,follow:[1,4,5,12,13,14,15,17,19,20,21,22,23,24,26,27,28,30,32,35,36,38,40,42,43,44,47,49,51],foo:[17,19,36],foo_forward:36,foomodel:36,foomodul:36,foral:42,forc:[1,7,8,17,19,28,41],force_reload:17,forev:21,forg:[35,36],forget:[19,22],forgotten:35,fork:[13,21,22,27,30,32,35,47],forkingpickl:35,forkserv:[21,22,32],form:[1,4,5,13,15,19,22,23,29,33,36,37,41,43],format:[1,12,17,19,22,29,36,38,40,41,42,43,44,46,47,48],former:22,formul:[22,23],formula:[1,15,22,23,29,37,43],forth:[17,37],fortun:30,forum:[4,5,30,32],forward:[1,3,7,19,21,22,23,24,25,27,28,29,30,33,36,38,43],found:[19,22,32,41,43,47],four:49,fourier:43,fp16:22,fp32:22,fpn:47,fps:[41,46],frac:[15,22,23,24,37,42,43,49],frac_:42,fraction:[13,22,24,43,49],frame:[41,43,44,46],frames_per_clip:44,framework:[4,5,15,36,37],frank:15,free:[1,4,14,15,24,25,30,32,35],freed:[1,21,28,42],freedom:15,freez:[22,25],freeze_support:35,frequenc:[22,23,37,43],frequent:[4,18,22],fresh:17,frisbe:47,fritz:6,fritzo:6,fro:[42,43],frobeniu:43,from:[1,4,5,7,8,13,14,15,19,21,22,23,24,28,29,30,31,32,37,38,40,41,42,43,44,46,47,48,49,50],from_buff:39,from_dlpack:16,from_fil:39,from_ipc_handl:8,from_numpi:[42,43],from_pretrain:22,front:[22,42],frozen:[25,35,37],full:[13,14,15,17,19,22,23,36,43,44],full_lik:[36,43],fulli:[13,14,22,25,28,29],func:[1,19,42],functioneventavg:1,functor:15,fundament:4,further:[1,5,7,14,22,32,33,41,43],furthermor:[7,22,33],fuse:49,fusion:19,fut:27,futur:[1,8,19,23,27,36,37,38,42],g_cpu:43,g_cpu_oth:43,g_cuda:43,g_t:22,gain:[5,24,49],galleri:4,gamma:[22,37,43,49],gamma_:43,gan:22,gap:43,garbag:13,gate:[22,23],gather:[8,14,30,31,36,42,43],gather_list:14,gaussian:[15,23],gchanan:[5,6],ge_:42,gel:[42,43],gemm:[27,36],gen_non_contig_grad_output:1,gener:[1,4,7,13,14,15,19,22,23,24,28,30,31,33,35,36,37,38,40,41,42,44,45,47],generate_square_subsequent_mask:22,geometr:[23,42,43],geometri:[28,43],geometric_:[42,43],geq:[22,23,24,43],geqrf:[42,43],ger:[42,43],gesdd:43,gesvd:43,get:[1,7,8,13,14,19,21,22,25,29,30,31,36,37,41,42,43,44,45],get_all_sharing_strategi:21,get_backend:14,get_context:32,get_default_dtyp:[43,51],get_devic:[38,40,42],get_device_cap:8,get_device_nam:8,get_image_backend:45,get_info:[42,43],get_input:36,get_lr:37,get_num_interop_thread:[27,43],get_num_thread:[27,43],get_rank:14,get_rng_stat:[8,43],get_rng_state_al:8,get_sharin
g_strategi:21,get_stat:43,get_worker_info:13,get_world_s:14,getenv:31,getsourc:31,gil:[13,14,28],gimelshein:6,giraff:47,girshick:22,github:[4,5,17,29,36,37],give:[1,2,4,13,17,19,21,22,25,28,29,36,37,43,44,49],given:[1,4,5,7,8,13,14,15,19,20,21,22,23,24,27,29,31,36,37,38,41,42,43,44,49,50],glass:47,global:[3,13,14,15,19,31,32,41,43,47],global_step:41,globalcontext:35,gloo:[14,22],gloo_socket_ifnam:14,glorot:24,glove:47,glu:36,goe:[22,30],going:[4,14,21,25,27,31,35,41],goldsborough:6,gomez:22,good:[4,7,17,21,22,27,31,43],govern:[4,18],gpu1:22,gpu:[1,2,4,8,12,13,18,19,28,33,35,37,39,42,43,47],gpu_model:19,gpudirect:14,grad:[1,3,15,19,22,32,38,42],grad_bia:29,grad_fn:[1,25,38,42],grad_input:[22,29,35],grad_output:[1,22,29,35],grad_tensor:1,grad_vari:1,grad_weight:29,gradcheck:[1,29,43],gradgradcheck:1,gradient:[3,13,14,15,19,22,23,25,29,30,37,38,42],graham:22,grai:49,grain:[14,25,49],grangier:22,grant:5,graph:[1,3,15,22,25,29,36,41,42,43],graphic:35,graphroot:1,grave:22,grayscal:[41,49],great:4,greater:[2,22,23,25,36,43,48],greater_than:15,greater_than_eq:15,greaterthan:15,greaterthaneq:15,greg:[5,6],gregori:6,grep:30,grid:[23,41,43,48,50],grid_i:43,grid_x:43,gross:[5,6],ground:[4,41,47],group:[1,5,17,20,21,22,23,36,37,41,42,43],group_by_input_shap:1,group_nam:14,grow:[4,38],gru:19,gt_:42,gtcoars:44,gtfine:44,guarante:[3,13,14,15,22,27,33],guard:32,guid:[1,13,18],guidanc:4,guidelin:[5,47],gumbel:23,h_0:22,h_1:22,h_i:22,h_n:22,h_t:22,hack:4,had:[4,19],hadamard:22,hair:47,half:[15,22,23,37,39,40,42,43],half_cauchi:15,half_norm:15,half_open_interv:15,halfopeninterv:15,halftensor:[40,42],ham:[23,43],hamiltonian:15,hamming_window:43,hand:[1,2,19,22,36,43],handbag:47,handi:28,handl:[1,3,8,13,14,17,21,22,23,28,30,32,36,42,43,44],handler:31,hang:22,hann:43,hann_window:43,happen:[1,4,5,14,15,21,22,29,30,32,35,42],happi:4,hard:[1,4,19,22,23,25,36],harder:22,hardshrink:42,hardtanh:36,hardtanh_:23,has:[1,3,4,5,8,12,13,14,15,19,21,22,23,25,26,27,29,31,32,35,36,37,38,39,40,41,42,43,44,47,49],has_enumerate_support:15,has_rsampl:15,hash:20,hasn:37,hat:22,have:[1,3,4,5,8,13,14,15,17,19,21,22,23,24,25,26,28,29,30,31,32,33,35,36,37,38,40,41,42,43,44,47,48,49],head:22,head_1:22,head_bia:22,head_h:22,head_i:22,header:[1,7,35],health:5,healthi:[4,5],heart:13,heavi:[1,14,25,35],heavili:[2,29,37],hei:4,height:[22,23,36,48,49],held:8,hellemn:6,hello:19,help:[1,2,4,5,13,17,22,25,26,28,36,43],helper:[3,14,17,19,22,28,36],henc:[22,28,43,44],here:[1,4,5,13,14,15,17,19,22,23,29,30,31,35,36,42,43,44,47,50],hessian:24,heurist:[7,13],hflip:49,hidden:[3,22,28,41],hidden_s:22,hierarch:41,high:[2,4,15,21,22,41,42,43],higher:[1,4,8,14,22,29,42,48],highest:[22,43],highli:[5,17,22,36],hing:22,hingeembeddingloss:23,hinton:37,his:37,histc:[42,43],histogram:[41,43],histor:27,histori:[1,29,30,37],history_s:37,hit:3,hmc:15,hmdb51:45,hmdb:44,hold:[1,22,26,29,30,32,37,40,42,43,49],holist:4,hood:[1,21,32],hook:[1,22,31,42],hop:43,hop_length:[42,43],horizont:49,horizontal:49,hors:47,host:[13,14,22,28,39,42],hot:[15,23,47],houseroad:6,how:[3,4,5,13,14,19,20,21,22,23,29,30,32,36,41,43,47,49],howev:[2,3,4,5,8,13,14,15,19,22,28,32,33,34,35,36,38,42,43,44],hspmm:38,hsv:49,html:[2,4,37,41],http:[2,4,17,20,22,35,36,37,41,44,47,49],hub:[18,20],hub_dir:17,hubconf:17,huber:22,hue:49,hue_factor:49,human:[0,24,36,44],hundr:31,hwc:41,hybrid:38,hydrant:47,hyper:19,hyperbol:43,i_0:43,i_d:43,i_n:43,i_t:22,icdf:15,icml_2006:22,idea:[4,22,31],ident:[1,7,13,14,15,23,24,33,38,42,43],identifi:[4,14,21,22,26,31,41,43],identity_transform:15,idiom:35,ids:[22,23],id
x:[13,22],iff:15,ifft:[42,43],ignor:[4,8,19,22,23,29,37,42,43],ignore_index:[22,23],ignored_cod:19,illia:22,im2col:22,imag:[13,22,23,41,44,45,47,50],image_s:44,image_set:44,imagefold:45,imagenet:[14,24,45,47],imagenet_data:44,imagenet_root:44,imaginari:43,imbalanc:22,img:[41,44,49],img_batch:41,img_height:49,img_hwc:41,img_tensor:41,img_width:49,immedi:[4,5,22],impact:33,imper:14,implement:[1,3,8,13,14,15,19,21,22,23,25,27,29,30,31,32,35,36,37,38,42,43,44,47,48],impli:21,implic:19,implicit:[22,23,36],implicit_cast:36,implicitcasttyp:36,implicitli:[4,19,22,43],importerror:35,impos:21,improb:13,improv:[5,12,14,22,29,36,37,47],in1:22,in1_featur:22,in2:22,in2_featur:22,in_channel:22,in_featur:[22,29],inaccuraci:1,inc:19,incas:42,incept:[36,49],inception_v3:47,includ:[1,2,3,4,5,7,13,14,17,19,21,22,23,27,28,30,31,33,42,43,44,47],include_path:7,inclus:[15,42,43],incom:[21,22,23],incompat:[5,7,26],incomplet:13,inconsist:43,incorrect:[2,19,23,28,42,43],increas:[1,4,8,11,15,22,23,25,28,37,49],increment:[1,19,22,25],incur:[3,32,43],inde:19,independ:[5,8,13,14,19,22,23,42],index:[8,13,15,18,19,21,22,23,25,28,36,37,38,40,41,42,44,48],index_add:42,index_add_:[33,42],index_copi:[36,42],index_copy_:42,index_fil:[36,42],index_fill_:42,index_put:42,index_put_:42,index_select:[36,42,43],indic:[1,8,13,14,15,19,22,23,29,36,37,38,41,42,43,48],individu:[4,5,13,19,22,31,33,42,43,46],induc:[9,10,23,42,43],inf:[15,22,43],infer:[1,18,19,23,36,38,42,43,47],inferencesess:36,infin:[22,37],infiniband:[14,22],infinit:[13,22,23],influenc:5,info:[8,14,18,43,46],inform:[1,2,4,13,14,19,22,27,29,31,36,40,41,42,43,44,47,49],infrastructur:5,infti:[22,23],ingredi:22,inherit:[19,29,32],init:[8,14,18,22,31],init_method:[14,22],init_process_group:[14,22],init_weight:22,initi:[2,8,13,19,22,23,24,28,29,31,35,37,42,43,47],initial_accumulator_valu:37,initial_se:[8,13,43],inject:31,inlin:[7,19,27],inline_extens:7,inner:[36,43],innermost:15,inp:[1,13,22],inp_unf:22,inplac:[22,23,36,49],inplace_assign:36,inplaceindexedassign:36,inplaceindexedassignmentonnx:36,input1:[22,23,35,36],input2:[22,23,35,36,42,43],input3:[42,43],input:[1,3,5,8,12,13,14,15,19,22,23,24,25,27,28,29,30,31,36,37,38,42,43,44,47,48,49],input_1:36,input_2:36,input_3x3:22,input_data:36,input_featur:29,input_length:[22,23,30],input_nam:36,input_s:22,input_tensor_list:14,input_to_model:41,input_tupl:19,input_var:[3,22],insensit:22,insert:[15,19,22,43],insid:[1,4,19,28,31],insight:4,inspect:[1,31],inspir:37,inst:44,instal:[1,7,14,17,36,41,43,44],instanc:[13,15,19,21,22,23,30,43,44,45],instance_norm:36,instancenorm1d:23,instancenorm2d:23,instancenorm3d:23,instancenorm:22,instantan:8,instanti:[17,19,22,23,29],instead:[1,3,5,13,14,15,19,22,23,30,32,35,36,37,38,42,43,49],instruct:[2,19,36],instrument:31,insuffici:8,int16:[40,42,43,51],int32:[22,40,42,43,51],int64:[22,23,28,36,40,42,43,48,51],int64tensor:47,int8:[40,42,43,51],int_:43,int_a:43,int_b:43,int_repr:42,integ:[8,13,14,15,19,22,23,36,37,40,42,43,49,51],integer_interv:15,integergreaterthan:15,integr:[13,22,31,36,37,42,43],intel:[35,45],intel_openmp:35,intens:[37,49],intent:[4,19],inter:[27,43],interact:[1,5,8,36,41],interchang:[15,19],interconnect:14,interest:[4,5,18,44,48],interfac:[29,36,37,41],intermedi:[3,19,22,23,25,30,36],intermediari:15,intern:[5,13,15,22,25,27,28,36,38,43,44,47],internet:[4,44],interop:43,interoper:27,interpol:[22,43,48,49],interpret:[13,14,21,23,27,38,43],interprocess:8,interrupt:21,intersect:48,interv:[15,43,49],intra:27,introduc:[15,17,22,26,42,43],introduct:[26,41],inttensor:[40,42,43],intuit:3
6,inv:[15,43],invalid:43,invari:[15,22,38,43,49],invers:[15,22,23,37,42,43],inverse_indic:43,invert:[15,22,43,47],invest:5,investig:4,invis:28,invoc:[3,19,29,31,36],invok:[19,22,27,31,37],involv:[4,5,13,28,30,33],iotamudelta:6,iou:[47,48],iou_threshold:48,ipc:8,ipc_collect:8,ipc_handl:8,ipp:45,irecv:14,irfft:[42,43],irrelev:1,irrespect:[28,43],is_avail:[8,28,43],is_coalesc:38,is_complet:14,is_contigu:42,is_cuda:[39,42],is_floating_point:[40,42,43],is_in_onnx_export:36,is_initi:14,is_leaf:[1,42],is_mpi_avail:14,is_nccl_avail:14,is_pin:[13,39,42],is_python_modul:7,is_set_to:42,is_shar:[39,42],is_sign:42,is_spars:[39,42],is_storag:43,is_tensor:43,is_train:[1,43],is_valid_fil:44,isend:14,isfinit:43,isinf:43,isinst:15,isn:[4,13,28],isnan:[36,43],isol:21,issu:[5,21,22,28,32,35,36],itch:4,item:[13,17,22,33,41,42,43,44],iter:[4,8,14,15,21,22,25,26,37,41,48],iter_end:13,iter_start:13,iterabledataset:[13,31],itertool:[15,43],its:[1,2,4,5,7,8,13,14,15,19,21,22,25,26,28,29,30,35,36,37,38,40,42,43,44,47,49],itself:[3,4,19,21,22,23],ivalu:31,jacobian:[1,15,43],jakob:22,jang:15,jess:6,jit:[7,18,27,31,36,43],jitter:49,job:[14,22,31,37,41],johann:6,johnson:6,join:[4,14,17,21,32],jointli:[15,22],jone:22,joulin:22,journal:43,jpeg:31,json:[19,31,44],juggl:3,jump:[40,42],junji:6,just:[1,4,7,15,17,19,21,22,28,31,36,42,43],k_0:43,kaiming_normal_:24,kaiming_uniform_:24,kaiser:22,kdim:22,keep:[1,4,13,17,19,21,22,23,25,28,30,32,37,41,43,48,49],keep_var:22,keepdim:[22,23,42,43],kei:[1,13,14,19,22,31,36,37,39,41,42,43],kept:[21,22,23,48],kernel:[2,7,8,22,23,29,36],kernel_s:[22,23,41],kernel_shap:36,key_averag:1,key_padding_mask:22,keyboard:47,keypoint:45,keypointrcnn_resnet50_fpn:47,keyword:[1,17,19,22,36,37,41,43],kill:[21,30],kind:[14,22,29,32,36],kinet:45,kinetics400:44,kite:47,kl_diverg:15,kl_normal_norm:15,kl_version1:15,kl_version2:15,kldivloss:23,kmnist:45,knife:47,know:[3,4,19,25,36],known:[4,14,19,21,22,24,27,28,36,43,49],knuth:4,kth:43,kthvalu:[42,43],kullback:[15,22,23],kuzushiji:44,kw_i:22,kwarg:[1,3,7,17,22,23,34,36,39,42,43,44,47,49,50],kwlist:41,l1loss:23,l_1:22,l_c:22,l_n:22,l_p:23,lab:44,label:[4,13,22,23,32,41,44,47],label_img:41,lambd:[22,23,37,42,49],lambda1:37,lambda2:37,lambda:[1,13,15,22,23,37,42,43,49],lambdalr:37,langl:15,languag:[7,22,23,30,36],laptop:47,larg:[4,13,18,21,22,23,27,28,30,38,42,43,44,47,49],larger:[1,5,22,23,30,31,41,42,43,47,49],largest:[19,23,42,43,51],last:[1,3,13,19,22,23,25,37,43,47,49],last_epoch:37,later:[1,4,19,22,27,28,34,43],latest:[4,14,15,17,36],latin1:43,latter:[22,32],launch:[2,13,22,25,27,28],law:[22,49],layer:[14,23,24,25,29,30,37,47],layer_count:36,layer_norm:36,layernorm:23,layout:[17,19,20,38,41,42,43],lazi:37,lazili:8,lbfg:37,lbrace:43,lceil:43,ldot:[15,22,43],le_:42,lead:[1,4,35,42,43],leadership:5,leaf:[1,42,43],leak:21,leaki:[22,23,24],leaky_relu:[24,36],leaky_relu_:23,leakyrelu:23,learn:[4,15,18,22,24,44,47],learnabl:[22,23],learned_0:36,learned_14:36,learned_15:36,learned_1:36,learned_2:36,learned_3:36,learned_:36,least:[15,17,22,24,26,30,39,42,43,47],leav:[1,19,25,42,43,44],left:[19,22,23,36,42,43,49],left_ankl:47,left_ear:47,left_elbow:47,left_ey:47,left_hip:47,left_kne:47,left_should:47,left_wrist:47,leftimg8bit:44,legaci:[23,40],legitim:22,leibler:[15,22,23],lemma:15,len:[13,14,19,22,23,38,41,43,44],length:[1,8,13,14,15,19,22,23,26,30,36,42,43,49],leq:[22,23,43],lerp:[42,43],lerp_:42,less:[1,4,8,13,15,17,22,29,32,43,47],less_than:15,lesser:27,let:[1,4,13,15,28,29,32,35,41,42],letter:[43,44],level:[1,4,13,19,21,22,24,27,41,42,43,47],lexicograph:43,lfl
oor:[22,23,43],lib64:7,lib:[35,43],libari:35,libenzi:6,librari:[2,5,7,13,18,27,29,30,31,32,33,35,36,43,45],libx264:46,lie:[22,23,41],lies:44,lifetim:4,light:[41,47],lighter:49,like:[1,2,3,4,5,7,8,13,14,15,17,19,21,22,23,27,28,29,30,31,32,35,36,42,43,49],likelihood:[15,22,23],likewis:48,limit:[13,21,22,25],line:[1,2,22,26,35,36,43],line_search_fn:37,linear:[8,19,24,25,28,29,30,36,42,43,47],linearfunct:29,linearli:[22,23,30],lineartransform:49,liner:22,linewidth:43,link:[7,15,22,23,31],linker:7,linspac:43,linux:[14,17,20],list:[1,3,4,5,7,13,14,15,17,22,23,29,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50],listconstruct:19,listofproperti:41,literatur:22,littl:29,live:[19,22,30,37],llion:22,load:[1,7,19,20,22,31,34,35,36,37,43,44,45,47],load_inlin:7,load_nvprof:1,load_state_dict:[17,22,34,37,43],load_state_dict_from_url:[17,20],load_url:[20,47],loadabl:17,loadann:44,loaded_weight:42,loader:[13,44],loc:[15,43],local:[14,17,19,21,22,23,30,41,44],local_rank:22,locallr_0:41,localresponsenorm:23,locat:[1,7,8,15,17,19,20,22,23,37,38,41,42,43,44,47,49],lock:[4,13,14,15,28,32],log10:[42,43],log10_:42,log1p:[42,43],log1p_:42,log2:[36,42,43],log2_:42,log:[7,13,15,22,23,36,41,42,43],log_:[42,43],log_abs_det_jacobian:15,log_dir:41,log_input:[22,23],log_norm:15,log_normal_:[42,43],log_pob:22,log_prob:[15,22,23],log_sigmoid:36,log_softmax:[22,36],logabsdet:43,logarithm:[22,23,43],logdet:[42,43],logdir:41,logic:[3,13,29],logist:[15,22],logit:[15,22,23],logsoftmax:23,logspac:43,logsumexp:[36,42,43],longer:1,longest:[22,30],longtensor:[22,23,38,40,42,43],look:[2,4,5,15,19,22,23,31,32,35,36,43],lookup:[15,22,23,27],loop:[8,19,27,30,36,41,49],loop_count:36,loop_in_traced_fn:19,loop_rang:36,loopmodel2:36,loopmodel:36,loos:31,lorentz:15,loss:[15,30,37,41,44,47],loss_fn:[32,37],lost:[22,43],lot:[4,21,32,41],low:[4,15,21,22,42,43],lower:[1,8,14,15,19,22,23,24,25,33,37,43,48],lower_bound:15,lower_choleski:15,lower_triangular:15,lowercas:14,lowercholeski:15,lowercholeskytransform:15,lowest:43,lowrank_multivariate_norm:15,lppool1d:23,lppool2d:23,lr_0:41,lr_decai:37,lr_lambda:37,lr_schedul:37,lrelu:22,lrn:22,lru:[28,43],lstm:[3,36,41],lstsq:[42,43],lsun:45,lt_:42,lu_data:[42,43],lu_pivot:[42,43],lu_solv:[42,43],lu_unpack:43,lukasz:22,lvert:[22,23,43],macbook:41,machin:[14,22,31],maco:21,maddison:15,made:[1,5,19,22,35,37,41,49],mae:22,magma:[35,43],magma_2:35,magma_hom:35,magnitud:[22,24,43],mai:[1,2,4,7,8,9,10,11,13,14,15,19,22,23,26,28,30,35,36,37,38,39,42,43,49],main:[13,14,15,21,23,25,34,35,41,42,43],main_tag:41,mainli:[15,22,23],mainta:49,maintain:[4,14,15,22],major:[4,8,22,23,36,38],make:[1,2,3,7,8,11,13,14,15,17,19,21,22,23,25,26,28,29,30,32,33,35,36,37,40,41,42,43,44,49,50],make_grid:[41,50],manag:[1,4,22,30,31,36,43],mandatorili:13,mani:[1,4,5,13,14,19,22,24,25,26,27,29,31,33,40,41,42,43,45],manipul:30,manner:[3,26,42],mantissa:42,manual:[13,14,19,21,22,23,28,30,33,35,36,41],manual_se:[8,33,43],manual_seed_al:8,map:[7,15,19,22,23,35,36,39,43,48],map_:42,map_loc:[19,20,22,43],margin:[22,23,41],marginrankingloss:23,mark:[8,19,22,25,42],marker:8,market:[4,5],marten:24,mask:[22,42,43,48],masked_fil:[36,42],masked_fill_:42,masked_scatt:42,masked_scatter_:42,masked_select:[42,43],maskrcnn_resnet50_fpn:47,mass:15,master:[4,17,36],master_addr:14,master_port:14,mat1:[38,42,43],mat2:[38,42,43],mat:[38,41,42,43,44],match:[1,8,14,15,19,22,23,26,36,37,40,42,43,44,49],math:[13,19,22,23],mathbb:22,mathbf:[15,22],mathbin:43,mathcal:[22,24,43],mathemat:[22,23,43],mathrm:[15,22,43],matmul:[22,42,43],matplotlib:41,matric:[15,23,38,42
,43],matrix:[15,22,23,24,38,41,42,43,49],matrix_pow:[42,43],matrix_rank:43,matter:[1,2,5,19,25,43],max:[13,14,19,22,23,26,30,36,37,42,43,49,50,51],max_:22,max_bin:41,max_ev:37,max_indic:43,max_it:37,max_lr:37,max_memory_alloc:[8,28],max_memory_cach:[8,28],max_momentum:37,max_norm:[22,23],max_queu:41,max_siz:28,max_val:[22,23],max_valu:22,maxim:[22,37,43],maximum:[8,15,22,23,28,37,43,48,49],maxnorm:[42,43],maxpool1d:[23,36],maxpool2d:[23,36],maxpool3d:[23,36],maxpool:[22,36],maxunpool1d:23,maxunpool2d:23,maxunpool3d:23,may04_22:41,mayb:4,mean:[1,3,4,8,13,14,15,19,21,22,23,24,30,33,35,36,37,42,43,47,49],mean_vector:49,meant:[1,22],meantim:[22,23],measur:[8,15,22,23,31,37],mechan:[18,21,31],median:[15,42,43],medium:4,meet:28,megabyt:22,member:[4,5,13,14,19,22,30],membership:5,memo:22,memoiz:15,memori:[1,3,15,16,19,21,22,23,25,32,37,39,40,41,42,43,47],memory_alloc:[8,28],memory_cach:[8,28],memory_effici:47,memory_key_padding_mask:22,memory_mask:22,mendoza:6,mention:[17,19,28],mere:4,merg:[4,5,13,22],mesh:41,meshgrid:43,messag:[4,8,17,19,30,36,37],messmer:6,meta:41,metadata:[19,41,43,46],metadata_head:41,meter:47,meth:43,method:[1,4,7,8,13,14,15,17,21,22,23,24,28,29,30,32,36,37,38,40,41,42,43,44,47],metric:[8,37,41],michael:6,microwav:47,middl:36,might:[1,2,5,17,19,22,25,27,28,31,42,43,44],mileston:37,millisecond:8,min:[13,14,22,23,36,37,42,43,49,50,51],min_indic:43,min_lr:37,min_siz:47,min_val:[22,23],min_valu:22,min_x:43,mind:22,minfunc:37,mini:[13,22,23,47,50],minibatch:[13,22,23,43],minim:[1,4,17,32,37,43],minimum:[7,22,37,43,47],minkowski:23,minlength:[42,43],minor:[5,8],minu:43,minut:[4,14,41],mismatch:[19,30,43,49],miss:[22,35,36],missing_kei:22,mistak:30,mix:[7,15,22,27,36],mkl:[27,35,43],mkl_2018:35,mkl_fft:35,mkl_num_thread:27,mkl_thread:27,mkldnn:42,mkldnn_thread:27,mmap:21,mnasnet0_5:47,mnasnet0_75:47,mnasnet1_0:47,mnasnet1_3:47,mnist:[41,45],mnist_train:41,mnt:14,mobil:47,mobilenet_v2:47,mobilenetv2:47,mod:19,mode:[1,2,13,14,15,19,22,23,24,30,33,36,37,42,43,44,47,49],model:[1,2,3,8,14,18,19,20,22,23,25,27,28,32,33,36,37,41,43,45,49],model_dir:20,model_zoo:[18,47],moder:3,modif:[1,42,43],modifi:[1,13,19,22,23,25,28,36,37,42],modul:[1,3,7,17,18,21,23,25,27,28,30,31,32,35,36,41,42,43,47,49],module_kwarg:23,modulelist:19,modulu:43,momemtum:22,moment:[1,21,36,37],momentum:[22,23,25,37],monitor:[8,28,37,43],monoton:15,mont:15,moor:43,more:[1,2,5,7,8,13,14,15,19,20,21,22,23,24,25,27,28,29,30,31,36,37,38,40,41,42,43,44,47,49],moreov:[42,43],most:[1,2,4,8,13,14,15,17,19,21,22,23,25,28,32,37,38,40,42,43],mostli:[4,15],motion:44,motiv:4,motorbik:47,motorcycl:47,mountain:44,mous:47,moustapha:22,move:[3,19,20,21,22,23,28,32,37,39,41,42,43],moviepi:41,mpi:14,mrshenli:6,mseloss:23,msg:8,msys2:35,much:[1,2,4,5,13,22,28,42,49],mul:[1,19,36,38,42,43],mul_:[38,42],mulconst:29,mult:13,multi:[2,8,19,36,40,42,43],multicast:14,multidimension:22,multihead:22,multihead_attn:22,multilabelmarginloss:23,multilabelsoftmarginloss:23,multilay:22,multilin:41,multilinear:43,multimarginloss:23,multinomi:[42,43],multipl:[8,13,14,15,17,19,21,22,23,27,28,29,31,32,35,37,38,43,44,49],multipli:[22,23,38,43,47,49],multiplicand:43,multiprocess:[13,14,18,22,44],multiprocessing_context:13,multisteplr:37,multivari:[15,43],multivariate_norm:15,must:[1,7,8,13,14,15,19,21,22,23,24,26,29,32,33,36,37,39,42,43,49],mutabl:19,mutat:[19,42,49],mutual:[13,14],mvlgamma:[42,43],mvlgamma_:42,mvn:15,my_api:31,my_constraint:15,my_dict:19,my_experi:41,my_factori:15,my_imag:41,my_image_batch:41,my_image_hwc:41,my_lib:35,my_lib_add_backward_cud
a:35,my_lib_add_forward_cuda:35,my_list:19,my_lstm:30,my_mesh:41,my_modul:19,my_module_inst:19,my_paramet:19,my_registri:15,my_script_modul:19,my_segmentation_transform:49,my_submodul:19,my_transform:15,my_variable_nam:19,myconstraint:15,myconstraintclass:15,myfunc:1,myiterabledataset:13,mymodel:32,mymodul:[19,22,30],mypi:19,myscriptmodul:19,mytransform:15,n_0:23,n_1:43,n_2:43,n_class:22,n_d:43,n_fft:[42,43],n_i:[22,43],n_iter:41,n_k:[23,43],n_power_iter:22,n_t:22,naiv:13,name:[1,7,8,14,15,17,19,20,21,22,24,31,36,39,41,43,44,45,51],named_buff:22,named_children:22,named_modul:22,named_paramet:22,namedtupl:[13,22,43],namespac:19,nan:[1,43],narrow:[36,42,43],narrow_copi:[38,42],nasdaq:41,natalia:6,nativ:[19,21],natur:[1,2,4,15,22,33,43],nbatch:22,nccl2:22,nccl:22,nccl_debug:14,nccl_debug_subsi:14,nccl_socket_ifnam:14,nchannel:22,nchw:41,ncrop:49,ndarrai:[36,42,43,49],ndim:42,ndimens:42,ne_:42,nearest:[22,23,49],nearli:[1,32,42],necessari:[1,7,13,19,22,25,26,28,35,40,42,43],necessarili:[14,15,22,28,36,43],need:[1,4,5,8,13,14,15,19,21,22,23,25,28,29,30,31,32,33,35,36,37,38,39,42,43,44],need_weight:22,needs_input:31,needs_input_grad:[1,29],neeraj:6,neerajprad:6,neg:[8,13,15,19,22,23,24,36,42,43,49],neg_:42,negative_binomi:15,negative_slop:[22,23,24],neglig:[1,36],negoti:5,neighbor:[22,43],neighborhood:22,neighbour:[22,23],neither:[13,14],nelement:[22,42],neq:[22,43],nest:[1,8,19,22,42],nesterov:37,net:[19,22,28,41],netlifi:4,network:[4,15,19,22,23,24,25,28,36,37,49],neural:[4,19,22,24,28,37,47],neuron:22,never:[1,3,4,14,22,25,42],new_:[28,42],new_data:36,new_empti:42,new_ful:[28,42],new_group:[14,22],new_lr:37,new_on:42,new_stat:[8,43],new_strategi:21,new_tensor:[28,42],new_zero:42,newer:[27,28],newli:[4,25],next:[1,13,14,15,21,22,23,27,32,36,40,41,42,43,44],next_stat:15,nfs:14,ngimel:6,nhead:22,nhwc:41,nice:[1,22],niederreit:43,nielsen:15,nightli:41,niki:22,nine:[40,42],ninja:[7,35],nist:44,nll:22,nllloss:23,nlp:22,nms:48,nnz:[1,38,42,43],no_grad:[1,3,36,43],no_sync:22,noam:22,noarch:35,nock:15,node:[14,22,36],non:[1,3,7,14,15,19,21,24,26,28,29,30,33,41,42,43,48,49],non_block:[22,28,39,42],noncontigu:1,nondet_tol:1,nondetermin:1,nondeterminist:[9,10,11,22,23,42,43],none:[1,7,8,13,14,15,19,20,21,22,23,24,28,29,32,36,37,38,39,41,42,43,44,46,47,49,50],nonexist:19,nonlinear:24,nonlinearli:4,nonneg:15,nonnegative_integ:15,nonzero:[1,36,42,43],noordhui:6,noplot:4,nor:[13,14,22],norm:[22,23,36,37,42,43],norm_typ:[22,23],normal:[1,17,19,24,28,37,41,42,43,47,49,50],normal_:[24,28,42,43],normalized_shap:[22,23],nose:47,notabl:49,notat:[42,43],note:[1,7,8,9,10,11,13,14,15,16,17,18,19,21,22,23,25,26,27,29,31,32,36,37,38,41,42,43,44,46,49],notebook:[4,50],noth:[4,7,8],notic:[19,22,43],notifi:5,notimplementederror:15,notion:[13,22],now:[1,3,22,26,28,29,36,37,42,43],nproc:21,nrow:50,nsdf3:44,nthread:44,nuanc:4,nuc:43,nuclear:43,num:[22,43],num_channel:22,num_class:[23,44,47],num_decoder_lay:22,num_direct:22,num_embed:[22,23],num_encoder_lay:22,num_featur:[22,23],num_group:22,num_head:22,num_keypoint:47,num_lay:[22,36],num_lin:44,num_output_channel:49,num_paramet:22,num_process:32,num_replica:13,num_sampl:[13,42,43],num_threshold:41,num_work:[13,35,44],number:[1,2,3,4,13,14,15,19,21,22,23,26,27,28,29,33,36,37,38,39,41,42,43,44,46,47,48,49,50,51],number_of_vertic:41,numel:[42,43],numer:[13,15,19,22,23,29,36,37,42,43,51],numpi:[13,26,30,35,36,41,42,43,44,49,51],nvcc:7,nvidia:[1,14,28,30,35,43],nvprof:[1,2],nvtx:[1,2],nvvp:1,o_t:22,obermey:6,obj:[8,19,35,43],object:[1,8,13,14,15,16,19,20,21,22,25,27,28,29,30,32,35
,36,37,39,40,41,42,43,44,45,49,51],observ:[22,23],obtain:[1,13,14,15,21,22,23,27,42,43,47],obviou:[30,38],obvious:4,occas:[1,4,25],occasion:38,occupi:[8,22,23,28,51],occur:[8,13,19,22,23,28,30,36,42],occurr:43,odd:15,off:[1,4,8,9,10,22,23,27,31,42,43],offici:[5,14,22,35,47],offlin:[19,49],offset:[22,23,42,43,44,49],often:[1,2,4,7,13,14,15,19,22,23,30,31,36,37,41,42,43],old:[25,35,37,43],older:28,omagma:35,omega:43,omega_1:43,omega_d:43,omega_i:43,omit:[3,7,14,22,35,36,43,49],omkl:35,omp:27,omp_num_thread:27,onc:[1,4,13,14,16,21,22,25,27,28,29,31,36,37,41,43],one:[1,2,3,4,7,8,13,14,15,19,21,22,23,26,27,28,29,31,32,33,35,36,37,39,40,41,42,43,44,45,46,47,48,49],one_hot_categor:15,ones:[1,14,15,19,22,23,26,28,29,36,37,42,43],ones_:24,ones_lik:[28,36,43],onesid:[42,43],onfunctionent:31,onfunctionexit:31,onli:[1,2,3,4,5,8,13,14,15,16,19,21,22,23,24,25,27,29,30,31,32,34,35,36,37,38,41,42,43,47,49],onlin:37,only_input:1,onnx:[18,22],onnx_aten:36,onnx_aten_fallback:36,onnx_model:36,onnxruntim:36,onto:[8,19,21,30,43],opaqu:14,open:[1,5,15,19,21,35,43],openbla:35,openmp:[27,35],oper:[2,3,4,5,8,9,10,11,13,15,22,23,26,27,28,29,30,32,33,37,38,40,42,45,46,48,49],operand:43,operator_export_typ:36,operatorexporttyp:36,operatornam:[22,43],opinion:4,opnam:36,oppos:49,ops:[1,14,18,19,27,28,29,36,42,43,45],opset:36,opset_vers:36,opt:43,optim:[1,4,7,14,15,18,19,22,24,25,27,30,32,36],optimiz:[19,36],optimum:37,option:[1,3,7,8,13,14,15,17,20,22,23,24,29,30,33,36,38,40,41,42,43,44,46,47,49,50],optional_unwrap:19,orang:47,ord:43,order:[1,3,4,14,15,17,21,22,26,28,29,33,36,37,42,43,47,48,49],ordereddict:22,ordin:[40,42],ordinari:8,org:[2,4,17,22,35,41,47,49],organ:[4,5,31],orgqr:[42,43],orient:36,origin:[1,13,19,21,22,28,31,32,36,37,39,42,43,49],orign:49,orion:6,orionr:6,ormqr:[42,43],ort:36,ort_sess:36,orthogon:[24,43],orthogonal_:24,orthonorm:43,ossci:35,other:[1,2,4,5,7,8,13,15,19,21,23,25,26,27,28,29,30,32,33,34,36,37,41,42,47,49,50],otherwis:[1,4,5,7,14,22,23,32,39,42,43,44,47],otim:[23,43],our:[4,19,29,32,36,38],out:[1,4,5,17,19,21,22,23,25,26,32,36,38,40,41,42,43,49],out_caffe2:36,out_channel:22,out_featur:[22,29],out_j:22,out_ort:36,out_padh:23,out_padt:23,out_padw:23,out_unf:22,outer:[43,47],outlier:22,output1:[22,36],output2:22,output:[1,2,3,4,8,13,14,15,19,22,23,25,27,29,30,35,36,37,38,41,42,43,44,47,48,49],output_2d:22,output_4d:22,output_devic:[22,23],output_featur:29,output_nam:36,output_pad:[22,23],output_ratio:22,output_s:[22,23,48],output_tensor_list:14,outsid:[5,13,19,23,28,49],oven:47,over:[1,5,13,14,15,19,21,22,23,26,27,32,36,37,38,42,43,44,48,49,50],overal:[5,14,25,32,49],overall_end:13,overall_start:13,overflow:[23,43],overhead:[1,2,14,31,42],overheard:44,overlap:[1,13,22,28,48],overparameter:15,overrid:[7,14,15,22,23,36,37,41,43],overridden:[1,7,22],overrit:13,overshoot:23,overview:[21,25],overwhelm:4,overwrit:[13,22,25],owen:43,own:[4,5,14,15,22,28,36,43],owner:17,ownership:[4,5],p1d:23,p2d:23,p3d:23,p_c:22,p_i:22,p_tensor:42,pace:4,pack:[22,30,35,43],pack_padded_sequ:30,packag:[4,8,15,17,18,22,37,41,43,45,46],packagesnotfounderror:35,packed_input:30,packed_output:30,packedsequ:12,pad:[13,30,33,36,41,43,49,50],pad_if_need:49,pad_mod:[42,43],pad_packed_sequ:30,pad_valu:50,padded_input:30,padding_idx:[22,23],padding_input:30,padding_mod:[22,23,49],padding_valu:22,padh:23,padt:23,padw:23,page:[4,13,22,28],pai:35,pair:[19,22,23,37,38,41,43],pairwis:[15,22],pairwisedist:23,paper:[4,22,37,47],parallel:[0,13,14,22,23,27,28,33,35,49],parallel_info:[0,27],parallelli:44,param1:15,param2:15,param:[1,15,22
,24,25,36,37,47],param_byt:37,param_group:37,param_shap:15,paramet:[1,3,7,8,13,14,15,16,17,20,21,23,24,25,29,31,32,34,36,38,39,41,42,43,44,45,46,47,48,49,50],parameter:[15,42],parameteriz:15,parametr:[15,29],parent:[21,35,41],park:47,parmar:22,pars:[1,14],parse_arg:[28,36],parser:28,part:[2,3,4,5,7,14,15,19,20,22,25,30,36,37,38,43,44],parti:[5,17],partial:[15,22,23,36,43],particip:[13,14,22],particular:[4,13,19,22,27,28,30,31,33,36,42,43,44],particularli:[13,19,22],partit:22,partli:5,partner:4,pascal:[44,47],pass:[1,3,4,7,13,14,15,17,19,21,22,24,25,27,28,31,36,37,38,41,42,43,44,47,48,49],past:[14,30,47],paszk:[5,6],patch:[4,22],path:[1,2,7,14,17,19,25,34,44,46],path_importer_cach:17,path_to_hub_dir:17,pathwai:19,patienc:37,pattern:[14,19,22,28,29,30],pdb:19,pdf:[22,47,49],pdist:22,peak:[8,37],peer:[4,14,28],penalti:37,pend:41,penros:43,peopl:4,per:[7,8,13,14,22,23,27,31,33,41,43,46,47],per_index_weight:22,per_sample_weight:[22,23],per_work:13,perform:[1,3,11,12,13,14,15,19,21,22,23,24,25,28,29,33,37,38,39,40,41,42,43,46,48,49],period:[32,37,43],permit:38,permut:[13,36,42,43],perplex:15,persist:[3,4,12,22,35],person:[4,5,18,45],perspect:49,perturb:[1,43],peter:6,peterjc123:[6,35],phase:35,phenomenon:30,phi:23,phone:47,photo:44,phototour:45,php:44,phy:43,pic:49,pick:49,pickl:[13,21,22,43],pickle_load_arg:43,pickle_modul:43,pickle_protocol:43,pid:30,piec:4,pieter:6,pietern:6,pil:[44,45],pillow:[41,49],pin:[22,39,42,43],pin_memori:[13,28,39,42,43],pinvers:[42,43],pip:[35,41],pipelin:49,pivot:[42,43],pixel:[22,23,44,49,50],pixel_shuffl:[22,36],pixelshuffl:23,pixelwis:47,pizza:47,pjh5:6,pkg:35,place:[4,8,13,14,19,22,23,28,31,36,39,42,49],placehold:22,plai:14,plain:[7,22],plan:[4,14,22,43],plane:[22,23,43,44],plant:47,platform:[7,33,43,47],pleas:[1,2,4,5,9,10,11,14,15,19,22,23,29,35,36,37,41,42,43,44],plenti:30,plot:41,plu:49,plume:44,pmf:15,png:44,point:[1,4,5,8,13,19,22,23,25,33,37,40,41,42,43,44,46,48,51],pointer:8,pointwis:[15,26],poisson:[22,23],poissonnllloss:23,poli:44,polici:[15,37],policy_network:15,polosukhin:22,polygon:44,polymorph:19,pool:[27,29,32,33,48],pooled_w:48,pop:[8,22],popul:[1,15,42],popular:45,popularli:49,port:14,portion:[22,23,37,43],pos_weight:[22,23],posit:[1,13,15,17,22,23,36,42,43,49,51],positive_definit:15,positive_integ:15,positivedefinit:15,possess:5,possibl:[5,7,13,15,19,21,22,23,24,25,29,32,35,40,42,43,44],post:[4,30,35,47,49],postprocess:47,pot:47,potenti:[11,14,21,22,23,25,36],potential_energi:15,pottedpl:47,pow:[1,36,42,43],pow_:42,powbackward0:1,power:[22,23,37,43,49],powertransform:15,pr_curv:41,practic:[13,15,18,19,21,47],pradhan:6,pre:[1,17,22,36,37,42,44,47],preced:27,precis:[1,7,15,22,36,41,43,47],precision_matrix:15,precompil:31,predict:[22,41,47],predict_net:36,preemptiv:22,prefer:[5,13,22],preferr:43,prefix:[17,22,38],prelu:36,prepar:36,prepend:[7,13,22,26,43],preprocess:[42,47],presenc:5,present:[5,14,20,21,22,25,40,43,44,46,47],preserv:[13,19,22,23,24,28,42,49],preserve_rng_st:3,pressur:[1,25],pretrain:[17,22,25,36,47],pretrained_backbon:47,pretti:[19,43],prevent:[4,8,13,14,21,22,23,38,43],previou:[14,22,35,42,43],previous:[1,19,26,28,42],prim:19,primari:5,primarili:[15,42],primit:[14,19,27],print:[1,13,17,19,22,23,27,28,29,36,37,41,42,43,44],printable_graph:36,printer:19,prior:[4,5,26,37],prioriti:[4,8],pro:[35,41],prob:15,probabl:[13,21,22,23,29,35,36,41,42,43,49],problem:[4,14,21,22,30,32,33,35,43],problemat:[4,19],proce:28,procedur:[19,44],proceed:8,process:[1,7,8,14,15,17,19,21,22,23,27,28,31,32,33,35,38,39,44,47],process_group:22,process_id:22,
processgroup:14,prod:[22,36,42,43],prod_:[22,43],prod_d:22,produc:[4,7,8,13,19,21,22,23,26,28,31,35,36,38,43],producer_info:31,product:[1,14,15,19,22,23,31,42,43,49],prof:1,profil:[2,43],program:[1,2,8,13,14,19,25,28,30,31,32,35,41],programm:19,progress:[8,17,20,37,47],project:[4,17,34],projector:41,promot:22,prompt:35,prone:[21,32],propag:[15,21,36,38,42],proper:[22,28,35],properli:[4,22,32,40,43],properti:[1,13,15,19,22,23,28,37,40,51],proport:[22,49],proportion:[22,23],propos:[5,6,37],protobuf:36,protocol:[13,35,43],prototyp:40,prove:21,proven:[4,22],provid:[1,4,5,7,13,14,15,17,19,21,22,23,28,36,37,38,39,40,41,42,43,46,47,49,51],pseudo:43,pseudoinvers:15,pseudorandom:33,psi:43,pth:[17,19,20],pts:46,publish:4,puhrsch:6,pull:5,purchas:5,pure:19,purg:41,purge_step:41,purpos:[14,22,42,43],push:[4,5,8],pushcallback:31,put:[4,13,17,21,22,28,32,37,42,43,44],put_:42,pybind11:[7,19],pyc:31,pycapsul:16,pyplot:41,python2:[14,43],python3:[14,43],python:[1,2,5,7,8,13,14,17,21,22,23,25,26,27,28,29,30,31,32,36,38,42,43,49],pytorch:[0,1,2,7,8,13,15,17,19,20,22,26,27,28,30,31,32,35,37,40,41,42,43,44,47,51],pytorch_jit:19,q_scale:42,q_zero_point:42,qmnist:45,qscheme:42,qtensor:42,quad:22,quadrat:30,qualiti:[4,44],quantiti:37,quantiz:[1,42],quasirandom:43,queri:[8,13,22,28],question:[4,18],queu:[8,28],queue:[4,21,41],queue_2:21,quick:[1,4],quickli:4,quit:[4,30],qw_i:22,r_t:22,racket:47,rais:[1,4,15,19,21,25,28,42,43,49],raise_except:1,ram:[41,43],rand:[1,19,23,36,41,42,43,47],rand_lik:43,randint:[22,23,38,41,42,43,49],randint_lik:43,randn:[1,19,22,23,25,26,28,29,36,38,40,41,42,43],randn_lik:[36,43],random:[15,17,22,23,33,36,41,42,44,47,49],random_:[22,23,42,43],random_devic:43,random_offset:44,random_split:13,randomaffin:49,randomappli:49,randomchoic:49,randomcrop:[44,49],randomeras:49,randomgrayscal:49,randomhorizontalflip:49,randomli:[1,13,22,23,31,44,49],randomord:49,randomperspect:49,randomresizedcrop:49,randomrot:49,randomsampl:13,randomsizedcrop:49,randomverticalflip:49,randperm:43,rang:[1,8,13,14,15,19,22,23,30,32,36,37,41,42,43,44,47,49,50],range_pop:8,range_push:8,rangl:15,rank:[13,14,15,22,32,43],rapidli:30,rare:4,rate:[15,22,31,41,46,47],rather:[1,3,7,19,23,26,36,41,42,43,50],ratio:[15,22,49],raw:[19,22,36],rbrace:43,rceil:43,rcond:43,rdinat:[38,43],reach:[4,5,13,32,37],reachabl:14,read:[13,14,19,26,28,31,36,37,42,43,46],read_video:46,read_video_timestamp:46,readabl:[0,13,36],readi:[4,7,22,43],readlin:[19,43],real:[13,15,17,22,43,49],real_vector:15,realiti:2,realli:[1,4,25,43],realloc:43,realvector:15,rearrang:[22,23],reason:[13,14,19,23,25,36,40],rebas:4,reblitz:6,rebuild:4,rebuilt:4,recal:[22,29,41,47],receiv:[1,4,13,14,15,21,22,32],recent:[1,4],recip:22,reciproc:[36,42,43],reciprocal_:42,recogn:13,recognit:[44,47],recommend:[1,13,14,17,19,21,22,24,25,27,28,29,32,36,42,43],recomput:[3,22,37],reconstruct:[8,22],record:[1,8,19,22,25,36,41,42,43],record_ev:8,record_shap:1,recordfunct:31,recov:[22,43],recreat:25,rectangl:49,rectifi:[22,23,24],recurr:[14,19,28,37],recurs:[15,19,22],recv:14,redistribut:35,reduc:[1,8,14,21,22,23,35,37,38,42,43],reduce_add:8,reduce_multigpu:14,reduce_op:14,reducelronplateau:37,reduceop:14,reduct:[14,22,23],redund:[14,43],reevalu:37,refactor:[4,34,35],refcount:[21,32],refer:[8,13,14,15,17,18,21,22,23,25,29,30,32,41,42,43,45,47],referenc:[19,25,43],reflect:[19,22,23,30,42,43,49],reflection_pad:36,reflectionpad2d:23,reflector:43,refriger:47,regard:[19,22,23,43],region:[15,19,21,22,23,28,43,48,49],regist:[1,15,21,22,29,31,36,42,43],register_backward_hook:22,register_buff:
[19,22,29],register_custom_op_symbol:36,register_forward_hook:22,register_forward_pre_hook:22,register_hook:[1,22,42],register_kl:15,register_packag:43,register_paramet:[22,29],registr:22,regress:[4,22,47],regular:[1,2,22,31,36,37],regularli:4,reimplement:22,reinforc:15,reiniti:17,reinterpret:[15,42],reinterpreted_batch_ndim:15,rel:[1,5,7,15,22,27,28,31,37,43],relat:[4,5,13],relationship:1,relative_path_to_checkpoint:17,relative_to:35,relax:[15,19],relaxed_bernoulli:15,relaxed_categor:15,releas:[4,8,14,17,21,22,23,28,33,35,36,41,43],relev:[5,42],reli:[5,13,22,33],reload:17,relu1:22,relu2:22,relu:[19,24,36],relu_:23,rem:35,remain:[1,15,21,30,43,44],remaind:[42,43],remainder_:42,remap:[19,20,43],rememb:[30,32],remot:[13,14,47],remov:[1,5,14,19,22,23,42,43,48],removablehandl:22,render:[4,41],renorm:[22,23,42,43],renorm_:42,rep:36,repackag:30,reparameter:[15,22],reparametr:[15,23],reparametriz:15,repeat:[15,23,36,42,43,49],repeat_interleav:[42,43],repeatedli:[28,38,43],repetit:43,repl:1,replac:[7,13,19,22,25,31,32,35,36,42,43],replic:[13,22,23],replica:[13,14,22],replication_pad:36,replicationpad2d:23,repo:[4,17,35,37,47],repo_nam:17,repo_own:17,report:[1,2,5,28,47],repositori:[5,17,29,32],repr:43,repres:[1,8,13,15,16,19,22,25,29,31,36,37,38,40,43,51],represent:[19,22,36,38,42,51],reproduc:[4,9,10,11,17,18,22,23,37,42,43],request:[5,14,25,28],requir:[1,3,5,7,13,14,15,17,19,21,22,23,25,27,28,29,30,31,32,36,37,41,42,43,44,47],require_grad:1,require_grad_:42,requires_grad:[1,15,22,23,29,38,42,43],requires_grad_:[1,22,23,38,42,43],rerun:3,res:43,resampl:49,rescal:[22,23,49],research:[4,17],reset:[8,22,36,43],reset_max_memory_alloc:8,reset_max_memory_cach:8,reshap:[22,36,38,41,42,43,49],reshape_a:[36,42],reshuffl:13,resid:[14,22,28,42,43],residu:[43,47],resili:37,resiz:[22,23,39,42,43,47,49],resize_:[1,19,39,42,43],resize_as_:[1,42],resizeas_:38,resized_crop:49,resnet101:47,resnet152:47,resnet18:[17,19,20,25,47],resnet34:47,resnet50:[17,41,47],resnet:[17,19,36,41],resnext101_32x8d:47,resnext50_32x4d:47,resolut:[22,23],resolv:[4,5,15,19,22,35,36],resourc:[13,21,44],respect:[1,14,15,22,37,39,42,43,44,49],respond:4,respons:[2,4,5,14,15,22,23,28],rest:[4,13,38],restart:[21,37,41],restor:[3,34,43],restrict:[13,19,22],restructur:4,result:[1,2,4,7,8,13,14,15,19,22,23,24,25,26,27,28,29,30,33,36,37,38,40,41,42,43,47,49],result_avg:49,resum:[37,41],retain:[1,21,32,42,43],retain_grad:[1,42],retain_graph:[1,42],rethink:47,retreiv:3,retriev:[1,13,22,23,31],return_count:[42,43],return_indic:[22,23],return_invers:[42,43],return_typ:43,reus:[1,14,25,49],reveal:38,revers:[15,19,22,25,42,43,49],revert:[5,22],review:5,reward:15,rewrit:25,rfft:[42,43],rfloor:[22,23,43],rgb:[22,41,47,49],rgba:49,rho:37,riba:22,richard:[6,15],richardson:6,right:[4,14,17,21,22,23,36,37,43,49],right_ankl:47,right_ear:47,right_elbow:47,right_ey:47,right_hip:47,right_kne:47,right_should:47,right_wrist:47,risk:4,riski:4,rmsprop:37,rng:[3,8,13,30,33,43],rnn:[19,30,36,41],robin:14,robust:21,roi:48,roi_align:48,roi_pool:48,roi_width:48,roialign:48,roipool:48,roll:[42,43],roof:1,root:[25,38,43,44],ross:22,rot90:[42,43],rotat:[15,43,49],rough:4,roughli:[13,43],round:[14,36,42,43],round_:42,roundtrip:4,routin:43,row:[13,23,38,41,42,43,50],row_limit:1,rpn:47,rprop:37,rrelu:36,rrelu_:23,rsampl:15,rsqrt:[42,43],rsqrt_:42,rst:4,rsub:36,rtol:[1,19,42,43],rule:[1,14,15,19,22,25,26,42,43],run:[1,2,3,4,13,14,19,21,22,25,27,28,30,31,32,33,35,36,37,41,43],run_14h:41,run_fn:[1,3],runnabl:41,running_mean:[22,23],running_var:[22,23],runtim:[1,3,7,14,21,32],runt
imeerror:[1,19,26,35,36,42,43],runtimewarn:15,rv0:19,rv1:19,rvert:43,rvert_p:[22,23],s_min:22,s_n:22,sacrif:47,safe:[8,19,22,31],safest:[7,38],sai:[4,19,30,36,42,44],sam:[5,6],same:[1,4,7,8,13,14,15,17,19,21,22,23,25,26,27,28,30,31,32,33,36,38,39,41,42,43,47,49,50],sampl:[13,15,22,23,24,31,33,41,42,44,48,49],sample_input_cpu:19,sample_input_gpu:19,sample_n:15,sample_r:41,sample_shap:15,sampling_ratio:48,sandwich:47,sane:43,satisfi:[1,12,15,22,37,42,43],satur:[28,49],saturation_factor:49,save:[1,3,4,14,19,20,22,25,32,36,37,41,42,43,44,46,50],save_for_backward:[1,29],save_imag:50,saved_tensor:[1,25,29],saved_weight:42,sax:24,sbd:45,sbdataset:44,sbu:45,sbucaptionedphotodataset:44,scalar:[1,19,22,23,24,36,37,38,41,42,43],scalar_valu:41,scale:[4,13,15,18,22,23,24,30,37,42,43,47,48,49,50],scale_each:50,scale_factor:[22,23],scale_fn:37,scale_grad_by_freq:[22,23],scale_mod:37,scale_tril:15,scatter:[8,14,22,30,36,42],scatter_:[36,42],scatter_add:[36,42],scatter_add_:[33,42],scatter_list:14,scenario:[13,28,36],scene:41,schedul:[31,37],schema:19,scheme:42,schmidtm:37,sci_mod:43,scientif:43,scipi:[23,41,44],scissor:47,scope:[4,19,22,30,36],score:[22,47,48],scrambl:43,scratch:[4,25],script:[2,13,14,17,22,27,31],script_method:[19,36],scripted_fn:19,scripted_modul:19,scriptmodul:[19,36],scrutini:4,search:[4,19,47],seat:5,sebastian:6,second:[1,3,7,19,22,23,30,33,34,35,38,41,43,46,49],section:[4,13,15,19,21,22,29,32,41,42],see:[1,2,3,4,5,7,8,9,10,11,13,15,17,19,20,21,22,23,25,28,29,30,31,32,35,36,38,40,42,43,47,48,49,50],seed:[8,13,30,33,43,44],seed_al:8,seek:[19,43],seem:[4,36,49],seen:[1,15,22,37,42,43],segfault:21,segment:[3,44,45,49],select:[8,11,12,13,19,21,22,23,27,28,36,42,43,44,47,49],self:[1,13,19,22,25,26,27,29,30,36,37,39,42,43],self_cpu_time_tot:1,selu:36,semant:[5,8,18,19,36,43,44,45,49],semi:[22,24],semidefinit:43,send:[4,8,13,14,21,32,35,43],sender:14,sens:[2,15,43],sensit:[22,36],sent:[8,14,21,32,43],separ:[1,7,13,14,17,19,22,23,27,37,41,43,50],seq:[1,22,42,43],seq_len:22,sequenc:[1,8,13,15,19,22,23,28,30,36,37,42,43,49],sequenti:[3,13,21,36],sequentialsampl:13,sequnc:22,seri:22,serial:[13,18,19,20,28,31,32],serializ:[19,36],seriou:[21,34],serr:44,serv:[4,14],server:[4,13],sess:36,set:[0,1,3,5,7,8,11,13,14,15,17,19,20,21,22,23,24,26,27,28,29,30,31,32,33,35,36,37,41,42,43,44,47,49],set_:[1,42],set_default_dtyp:43,set_default_tensor_typ:43,set_detect_anomali:1,set_devic:[8,22,40],set_dir:17,set_flush_denorm:43,set_grad_en:[1,43],set_image_backend:45,set_num_interop_thread:[27,43],set_num_thread:[27,43],set_printopt:43,set_rng_stat:[8,43],set_rng_state_al:8,set_sharing_strategi:21,set_start_method:32,set_stat:43,set_trac:19,set_train:36,setapiusagehandl:31,setapiusagelogg:31,setexportmoduleextrafileshook:31,setsamplingprob:31,setup:7,setuptool:7,sever:[14,19,22,23,27,28,31,37,43,49],sgd:[13,22,25,37],sgdr:37,sgn:43,sha256:20,shadow:49,shall:22,shallow:22,shamelessli:43,shape:[1,8,15,19,22,23,25,26,30,36,38,41,42,43,44,47,49,50],shape_as_tensor:36,shard:[4,13],share:[1,4,7,8,13,15,16,22,27,32,35,36,39,42,43],share_memori:32,share_memory_:[21,39,42],shared_memori:21,sharedfil:14,shazeer:22,shear:49,sheep:47,shell:7,shen:6,shi:22,shift:[22,42,43,49,50],ship:27,shippabl:4,shm_open:21,shorter:36,shortest:22,shorttensor:[40,42],should:[1,2,3,4,5,7,8,13,14,15,17,19,20,21,22,23,28,29,30,32,33,35,36,37,38,41,42,43,44,47,48,49],shouldn:38,shout:36,show:[0,2,4,13,14,17,27,28,37,41],showcas:[5,22,28,32],shown:[8,19,29,30],shrinkag:[22,23],shuffl:[13,41,44],shufflenet_v2_x0_5:47,shufflenet_v2_x1_0:47,shuffl
enet_v2_x1_5:47,shufflenet_v2_x2_0:47,shufflenetv2:47,shut:13,side:[1,7,17,19,22,23,36,37,43,49],sigma:[15,22,23,42],sigmoid:[15,24,36,42,43],sigmoid_:42,sigmoidtransform:15,sign:[4,15,36,40,42,43,47],sign_:42,signal:[21,22,23,32,43],signal_2d:22,signal_4d:22,signal_ndim:[42,43],signal_s:[42,43],signatur:[1,13,22,42,43],signific:[1,25,28,37],significantli:22,silent:[8,19,22,43],sim:[22,23,43],similar:[4,13,19,21,22,23,27,29,38,42,43,44,51],similarli:[4,19,22,30,36,43],simon:6,simpl:[17,19,22,23,29,30,31,33,36],simplecustombatch:13,simplequeu:32,simpler:29,simplest:22,simplex:15,simpli:[1,7,13,15,19,22,25,38],simplifi:[19,22,37],simultan:25,sin:[7,36,41,42,43],sin_:42,sin_add:7,sinc:[4,8,13,14,15,19,22,23,29,30,31,35,36,37,38,43,49],sine:43,singl:[7,14,15,17,19,21,22,23,25,27,28,29,32,36,37,39,40,42,43,48,49],singleton:[15,22,26,42,43],singular:43,sinh:[42,43],sinh_:42,sink:47,site:4,situat:[15,21,32],size:[1,4,8,13,14,15,19,22,23,25,26,28,29,30,31,36,37,38,39,41,42,43,44,47,48,49,50],size_averag:[22,23],sizedim:42,sizeof:39,skateboard:47,skew:[1,2],ski:47,skip:[29,37],sky:44,slack:4,slice:[19,22,23,36,42],slide:[22,23,43],slightli:[5,13,17,47],slogdet:[42,43],slope:[22,24],slow:[32,41],slower:[2,14,23,47],small:[1,4,5,8,13,14,15,19,22,23,28,29,30,43],smaller:[13,37,42,43,49],smallest:[38,43,51],smart:29,smessmer:6,smi:[8,28,30],smnt:44,smoke:44,smooth:[22,36,37],smoother:17,smoothl1loss:23,snd_tensor:41,snedecor:15,snippet:17,snow:44,snowboard:47,snowi:44,sobol:43,soboleng:43,sobolengin:43,socket:21,sofa:47,soft:[22,23,47],softmarginloss:23,softmax:[15,36],softmaxtransform:15,softplu:36,softshrinkag:22,softwar:37,solid:49,solut:[4,24,32,43],solv:[4,35,42,43],solver:43,some:[1,3,4,5,8,11,14,15,17,19,21,22,23,25,28,29,30,31,32,33,34,35,36,37,38,41,42,43,44,47,49],some_dict:19,someon:4,someth:[4,19,21,35,43],sometim:[4,19,21,22,23,30,32,43],somewher:31,sophist:37,sort:[1,22,30,42,43,48],sort_bi:1,sorted_indic:22,soumith:[5,6],sound:41,sourc:[0,1,2,3,7,8,13,14,15,17,19,21,22,23,24,30,31,33,36,37,38,39,41,42,43,44,45,46,47,48,49,50],space:[13,15,19,22,23,43,49],spadd:38,span:[8,22,42],spars:[1,18,24,37,40,42,43],sparse_:24,sparse_coo:[38,40,42,43],sparse_coo_tensor:[38,42,43],sparse_dim:[38,42,43],sparse_grad:43,sparse_mask:[38,42],sparseadam:[22,37],sparseaddmmbackward:38,sparsedim:42,sparsefloattensor:38,sparsetensor:[1,38,42,43],sparsiti:24,spatia:23,spatial:[22,23],spatial_scal:48,spatio:22,spawn:[13,22,32,35],spawncontext:21,speak:[38,43],special:[4,22,29,31,41,43],specif:[1,3,4,5,7,8,14,15,17,19,20,22,28,32,33,34,36,37,41,42,43,46,48],specifi:[1,7,8,13,14,15,17,19,20,22,23,28,29,31,35,36,37,38,39,41,42,43,44,45,49,50],specifii:36,spectral:22,speed:[4,22,27,28,30,33,43],spend:[2,4],spent:[1,2,14],sphinx:4,split:[4,13,19,22,23,36,42,43,44],split_siz:[42,43],split_size_or_sect:43,spmm:38,sponsorship:5,spoon:47,sport:47,spotri:43,spread:[8,28],sqrt:[22,24,36,38,42,43],sqrt_:42,squar:[22,23,37,38,43,49],squeez:[29,36,38,42,43],squeeze_:42,squeezenet1_0:47,squeezenet1_1:47,src:[8,14,22,42,43],src_key_padding_mask:22,src_mask:22,src_tensor:14,src_vocab:22,srinivasan:6,ssa:19,sse3:43,ssnl:6,sspaddmm:38,sspmm:38,stabil:[22,37,43],stabl:[15,22,35,36,43],stack:[8,13,15,22,28,36,43,49],stacktransform:15,stage:4,stagnat:37,stai:[4,22,32],stand:19,standalon:19,standard:[4,15,19,22,24,27,32,36,42,43,44,49],star:22,start:[1,2,5,8,13,14,21,22,23,26,28,30,32,35,36,37,42,43,46],start_dim:[42,43],start_pt:46,startpoint:49,startup:[2,27],stash:[1,3,29],stat:22,state:[1,3,8,13,15,19,22,28,32,35,37,43],state_dic
t:[17,20,22,32,34,36,37],statement:[25,29,32,36],staticmethod:[1,29],statist:[8,15,22,30],statu:[21,43],std:[7,24,31,35,42,43,47,49],std_mean:43,stddev:15,stderr:[20,47],stdin:1,stdout:37,step:[2,5,7,13,14,15,19,22,23,28,30,32,33,35,41,42,43,44],step_between_clip:44,step_siz:37,step_size_down:37,step_size_up:37,steplr:37,stft:[42,43],stick:15,stickbreakingtransform:15,still:[1,14,15,19,21,22,28,30,35,37,43],stirl:[22,23],stl10:45,stl10_binari:44,stochast:[13,15,22,37],stop:[8,15,22,37,43,47],storag:[1,8,18,19,20,21,22,25,28,32,40,42,43],storage_offset:[42,43],storage_typ:42,storageshar:35,store:[1,3,7,14,17,19,22,30,31,38,41,42,43,44],store_tru:28,str:[1,7,14,19,21,22,23,37,39,41,42,44,46,49],straight:23,strategi:[4,13,14,19,22],stream:[13,44],strict:[19,22],strictli:[5,13,22,25],stride:[1,22,23,36,40,41,42,43],strike:4,string:[0,1,7,8,14,17,19,20,22,23,31,36,39,40,41,42,43,44,45],stringio:[19,43],strip:[23,36],strip_doc_str:36,strive:4,strong:5,strong_wolf:37,strongli:[5,17,22,27],struct:31,structur:[4,5,13,19,22,28,29,32,34,35,36,41,42,43],student:15,studio:35,style:[19,43],styliz:22,sub:[19,22,36,38,42,43],sub_:[38,42],subclass:[1,7,13,15,19,22,29,42,44],subdir:44,subfold:7,subgradi:37,subgraph:22,subject:43,submatrix:15,submit:8,submodul:[19,22,36],subpackag:47,subprocess:[13,30,32],subsequ:[4,7,19,22],subset:[13,14,19,22,36,47],subsetrandomsampl:13,subspac:[22,42,43],substanti:5,substitut:40,subsystem:4,subtl:[4,22],subtleti:[13,22,30],subtli:37,subtract:[23,42,49],subtyp:19,succe:[14,35],succeed:43,success:[5,15,43],successfulli:[21,22,43],succinct:17,suffici:[7,15,17,36,43],suffix:[41,42],sugar:19,suggest:[5,17,19,22,30],suhan:6,suit:[19,36],suitabl:[13,15,37],suitcas:47,suitibl:41,sum:[1,8,13,14,15,23,28,29,36,37,38,42,43],sum_:[22,43],sum_i:22,sum_j:[22,23,43],sum_pair:19,sum_to_s:42,summar:[2,43],summari:[1,41,43,47],summarywrit:41,summat:43,sunset:[5,6],suo:6,superresolut:36,supervis:22,suppli:[3,4,7],support:[1,4,5,7,8,13,14,15,17,19,21,22,23,25,26,27,32,35,37,38,40,41,42,43,44,45,48],suppos:[13,38,43,49],suppress:48,sure:[1,4,13,14,19,22,25,30,35,36,37,41,43],surfboard:47,surg:43,surpass:24,surpris:17,surrog:15,surround:19,sutskev:37,svd:[42,43,49],svhn:45,svi:15,swap:[22,23,42,43],symbol:[35,36],symbolic_fn:36,symbolic_foo_forward:36,symbolic_help:36,symbolic_nam:36,symbolic_opset10:36,symbolic_opset9:36,symbolic_opset:36,symeig:[42,43],symmetr:[43,49],symmetri:43,sync:22,sync_bn_modul:22,sync_bn_network:22,synchron:[2,8,22,27,28,32],syntact:5,syntax:19,sys:17,system:[4,7,8,19,22,25,28,31,35,43],t4d:23,t_max:37,tabl:[1,14,19,22,23,47],tag:[1,4,14,17,31,41,43],tag_nam:17,tag_scalar_dict:41,taiwan:41,take:[1,2,4,5,7,8,13,15,17,19,21,22,23,27,29,30,33,35,36,40,41,42,43,44],taken:[15,22,23,28,30,31,36,43,44,48],talk:31,tall:42,tan:[36,41,42,43],tan_:42,tangent:43,tanh:[24,36,42,43],tanh_:42,tanx:41,tape:4,tar:44,tarbal:44,target:[22,23,32,37,41,42,44,47,49],target_length:[22,23],target_n:22,target_transform:44,target_typ:44,task:[1,4,22,27,35,47,49],tau:[23,43],tbb:27,tdr:35,team:[4,5],technic:[4,5,30],techniqu:22,teddi:47,tell:[1,4,19,42,43],temperatur:[15,23],tempor:[22,23],temporari:[7,22,30],temporarili:36,ten:[19,41],ten_crop:49,tencrop:49,tend:4,teng:6,tenni:47,tensor1:[42,43],tensor2:[42,43],tensor:[3,4,7,8,13,14,15,16,18,19,22,23,24,25,26,27,28,29,30,32,33,35,36,37,38,39,41,44,45,46,47,48,50],tensor_a:43,tensor_b:43,tensor_list:14,tensorboard:18,tensordataset:13,tensordot:43,tensorflow:[15,41],term:[5,15,22,23,30,37,42,43],termin:[21,37],terminolog:22,test10k:44,test50k:44
,test:[7,19,21,28,29,41,43,44,47,49],text:[4,15,22,23,24,41,42,43,49],text_str:41,texttt:[42,43],tgt:[13,22],tgt_key_padding_mask:22,tgt_mask:22,tgt_vocab:22,thalloc:35,than:[1,2,3,5,7,8,13,14,17,19,22,23,24,25,26,28,29,30,32,33,36,37,38,41,42,43,44,45,47,48,49,50],thank:[15,29],thc:35,thc_state:35,thcstate:35,thcudacheck:35,thcudatensor:35,thcudatensor_cadd:35,thcudatensor_fil:35,thcudatensor_issamesizea:35,thcudatensor_resizea:35,the_model:34,thei:[1,3,4,5,8,13,14,15,19,21,22,23,28,29,32,35,36,37,38,42,43,44,46,47,48,49],them:[1,3,4,13,14,17,19,21,22,23,25,26,29,30,31,35,37,38,41,42,43,44],themodelclass:34,themselv:[1,43],therebi:13,therefor:[1,3,13,14,15,19,22,23,30,31,37,38,42,43,49],theta:[15,23],thi:[1,2,3,4,5,7,8,9,10,11,13,14,15,17,19,21,22,23,24,25,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,47,49,50,51],thin:43,thing:[1,4,23,25,30,32,38],think:4,third:[15,22,43],thoma:6,those:[1,2,8,13,19,22,23,28,37,43,48],though:[14,19,32],thrash:14,thread:[1,8,13,14,18,19,22,31,32,36,43],three:[14,19,22,36,37,41,44],threej:41,threshold:[36,37,41,43,47],threshold_:23,threshold_mod:37,through:[1,3,4,5,13,15,17,19,21,22,23,30,35,42,43,51],throughout:22,thrown:[42,43],thtensor:42,thtensorrandom:43,thu:[1,13,14,15,19,22,23,30,31,36,42,43],thumb:14,tie:[15,47],tile:42,time:[1,2,4,7,8,13,14,15,19,21,22,23,24,25,27,30,31,32,35,36,37,38,41,42,43,46,47,49],timedelta:14,timelin:[1,2],timeout:[13,14,21],timestamp:46,tini:[42,51],tip:4,tl_flip:49,tmp:[1,7],to_dens:38,to_dlpack:16,to_grayscal:49,to_mkldnn:42,to_pil_imag:49,to_spars:[38,42],to_tensor:49,toaster:47,todens:38,togeth:[13,14,15,22,30,31,41,43,49],toilet:47,token:17,tol:43,toler:[1,19,37,43],tolerance_chang:37,tolerance_grad:37,tolist:[39,42],too:[4,22,23,30,32,35,38],tool:[1,2,5,17,19,35],toothbrush:47,top:[1,13,15,21,22,23,29,43,44,47,49],topic:[5,31],topilimag:49,topk:[36,42,43],topolog:1,torch:[5,11,12,18,19,25,26,27,28,30,31,32,33,34,35,44,45,47],torch_14808_1591070686:35,torch_extens:7,torch_extensions_dir:7,torch_hom:[17,20],torch_model_zoo:47,torch_shm_manag:21,torchscript:[18,36,48],torchvis:[17,18,19,25,36,41],toronto:22,total:[1,2,4,13,17,22,23,37,43],total_averag:1,total_count:15,total_length:[22,30],total_loss:30,totensor:[41,44,49],touch:[4,36],toward:[5,36,43],tr_flip:49,trace:[1,4,13,25,28,42,43],trace_modul:19,trace_nam:1,traceabl:19,traceback:[1,21],traced_bar:19,traced_cpu:19,traced_fn:19,traced_foo:19,traced_gpu:19,tracer:36,tracerwarn:19,track:[1,3,8,21,22,28,29,30,31,42],track_running_stat:22,tracker:[4,5],trade:[3,22],traffic:47,trail:[22,24,26,29],train2017:47,train:[8,13,14,17,19,22,23,24,25,30,36,37,41,44,47,49],train_batch:37,train_extra:44,train_load:28,train_nov:44,trainabl:37,trainload:41,trainset:41,trainval:44,tranform:15,transb:36,transfer:[13,14,21,28],transform:[13,18,23,25,41,43,44,45,47],transform_input:47,transform_to:15,transformation_matrix:49,transformed_distribut:15,transformer_decod:22,transformer_encod:22,transformer_model:22,transit:19,translat:49,transpos:[22,23,25,36,38,42,43],transpose_:[1,38,42],transposed_data:13,trapezoid:43,trapz:43,travers:[22,29],treat:[15,19,22,23,37,40,42,43],tree:[4,19,22,44],tri:[4,19,21,22,42],triag:5,trial:15,triangl:41,triangular2:37,triangular:[15,23,37,43],triangular_solv:[42,43],trick:[15,22,23,31,47],tricki:25,trigger:[1,4,5,31,42,43],tril:[42,43],tril_:42,tril_indic:43,trilinear:[22,23],trim:43,tripl:23,triplet:22,triplet_loss:22,tripletmarginloss:23,triu:[42,43],triu_:42,triu_indic:43,trivial:43,trou:22,troubleshoot:4,truck:47,truli:36,trunc:[42,43],trunc
_:42,truncat:[30,43],truth:[41,47],tseq:15,tune:[14,37],tup:19,tupl:[1,3,8,13,21,22,23,29,36,37,38,41,42,43,44,48,49,50],tuple_or_list:19,turn:[7,13,19,22,36],tutori:[5,29,31,36],tvmonitor:47,twice:[30,47],two:[1,2,7,8,13,14,15,17,19,22,23,25,26,27,28,29,33,34,35,37,38,41,42,43,44,47],twse:41,txhxwxc:44,txt:19,type:[1,7,8,14,15,17,18,22,23,27,28,31,35,36,38,39,40,41,42,43,44,47,48,49],type_a:[36,42],type_p:15,type_q:15,typic:[7,13,15,19,22,27,51],typo:4,ubc:37,ucf101:45,ucf:44,uint8:[40,41,42,43,46,49,51],uint8_t:42,uint8tensor:47,ultim:[5,7],umbrella:47,unabl:[4,37],unbalanc:22,unbatch:43,unbias:[42,43],unbind:[36,42,43],unchang:[22,42,43,49],uncoalesc:[38,43],uncondition:17,unconstrain:15,undefin:[14,19,28,42,43],under:[1,2,13,21,22,23,25,28,32,35,41,43],underli:[8,15,19,23,30,42,43],underscor:[17,38,42],understand:[4,5,22,24,25,41],understood:43,undertak:4,underwai:1,undesir:[11,22,23],undetermin:33,unequ:22,unexpect:[14,19,22],unexpected_kei:22,unexpectedli:42,unfold:[19,36,42],unfortun:[1,3,5,22],unicodedecodeerror:43,uniform:[22,24,42,43],uniform_:[24,29,42,43],uniformli:[15,43,49],uniniti:[42,43],union:48,uniqu:[14,19,20,42,43],unique_consecut:[42,43],unit:[22,23,43],unit_interv:15,unitari:43,unitriangular:[42,43],univari:15,univers:19,unix:[13,21],unlabel:44,unless:[1,2,4,13,14,22,25,28,42,43],unlik:[4,19,21,22,32,42],unmask:22,unnecessari:28,unnorm:[22,23],unnot:21,unoccupi:8,unord:22,unpack:[22,29,30,43],unpack_data:43,unpack_pivot:43,unpickl:[13,43],unpool:22,unpooled_output:22,unreduc:22,unrel:4,unresolv:35,unrol:[19,36],unseg:22,unsign:[40,42],unsort:22,unsorted_indic:22,unspecifi:[14,42,43],unsqueez:[22,29,36,41,42,43],unsqueeze_:42,unstabl:[15,23,43],unsuccess:14,unsupport:19,until:[4,8,14,21,23,25,28,30,41],untouch:13,untrack:19,untrain:36,unus:[8,22,28],unused_argument1:22,unused_argument2:22,unusu:4,upcal:19,upcom:14,updat:[1,5,17,19,22,32,35,36,37,41,42],upgrad:37,upon:[13,21],upper:[15,22,23,24,37,42,43,49],upper_bound:15,uppercas:14,ups:4,upsample_nearest1d:36,upsample_nearest2d:36,upsample_nearest3d:36,upsample_trilinear:23,upscal:22,upscale_factor:[22,23],upstream:35,url:[1,14,17,20],usa:41,usag:[1,2,4,8,13,15,25,30,36,41,42,43],use:[1,3,4,5,7,8,13,15,17,19,21,22,23,24,25,27,28,29,30,31,32,33,35,36,41,42,43,44,47],use_cuda:1,use_gpu:19,use_input_stat:23,use_mkldnn:27,use_openmp:27,use_tbb:27,used:[1,2,4,7,8,12,13,14,15,17,19,20,22,23,24,27,28,29,31,32,34,35,36,37,38,40,41,42,43,44,45,47,48,49,50],useful:[1,4,13,15,17,19,22,23,25,27,29,31,37,43,49],user:[1,3,8,13,14,15,17,21,22,28,31,32,36,41,42,43,49],userwarn:[26,36],uses:[1,2,8,13,14,17,19,22,23,27,28,29,30,31,35,37,43,44,45],using:[1,3,4,5,7,9,10,11,13,14,15,17,18,19,21,22,23,24,25,27,28,29,30,31,32,33,35,36,37,41,42,43,44,47,49],usp:45,usual:[1,4,7,13,19,22,27,30,31,36,41,42,43],uszkoreit:22,util:[4,8,18,26,27,28,29,30,31,36,37,44,45,47],v100:[12,22,47],v_1:22,v_2:22,val2017:47,val:[24,42,44],val_loss:37,valid:[1,14,15,19,22,36,37,43,44],valid_fil:44,validate_arg:15,valu:[1,3,4,5,7,13,14,15,17,20,21,22,23,24,25,27,28,29,30,33,36,37,38,41,42,43,44,47,48,49,50],valueerror:22,var1:37,var2:37,var_mean:43,vari:[22,37],variabl:[3,7,8,15,17,20,22,27,28,30,33,35,36,37,41,42,43,47],variabletyp:36,varianc:[15,22,24,33,37,43],variant:[31,37,43],variat:15,variou:[3,5,7,13,21,32,34,37],vase:47,vaswani:22,vc2017:35,vdim:22,vec1:[42,43],vec2:[42,43],vec:[22,42,43],vector:[1,15,22,23,41,42,43,44,49],vehicl:5,vein:19,veloc:37,verbos:[7,36,37,41],veri:[1,2,4,19,21,22,25,29,30,32,35,36,37,47],verifi:[7,19,20,29,36],verify_ninja_avai
l:7,versa:[22,39,42,43],version:[3,15,17,19,22,23,25,26,28,29,31,35,36,37,42,43,44,49],versu:[4,22],vert:[22,23],vertex:41,vertic:[41,49],vertical_flip:49,vertices_tensor:41,vflip:49,vframe:46,vgg11:47,vgg11_bn:47,vgg13:47,vgg13_bn:47,vgg16:47,vgg16_bn:47,vgg19:47,vgg19_bn:47,vgg:36,via:[1,4,7,8,13,14,15,19,21,22,24,30,32,36,37,40,43],vice:[22,39,42,43],vid_tensor:41,video:[22,41,44,45],video_arrai:46,video_codec:46,video_fp:46,videoclip:44,viehmann:6,view:[1,4,5,13,19,21,22,23,26,36,40,42,43,44,49],view_a:42,violat:5,virtual:36,vishwak:6,vishwakftw:6,visibl:[8,14,22,47],vision:[5,17,45,47,48],visual:[1,22,35,41],vitali:6,vitalyfedyunin:6,voc2012:44,voc:[45,47],vocdetect:44,vocsegment:44,volumetr:[22,23],vs2017:35,vs2017_runtim:35,vw_i:22,vychisl:43,w_hf:22,w_hg:22,w_hi:22,w_hn:22,w_ho:22,w_hr:22,w_hz:22,w_if:22,w_ig:22,w_ii:22,w_in:22,w_io:22,w_ir:22,w_iz:22,w_j:22,w_n:22,w_y:27,w_z:27,wai:[1,3,4,5,7,13,14,15,19,21,22,23,29,30,32,33,34,35,36,37,38,42,43,44,47],wait:[1,8,14,21,22,27,37],wait_ev:8,wait_stream:[8,28],walltim:41,wang:6,want:[4,8,13,14,17,19,22,23,25,28,29,36,37,38,42,43],warm:37,warmup:1,warn:[26,36,43],wasn:43,wast:4,weaker:15,weight:[13,17,19,23,24,25,29,30,36,37,41,42,43,47],weight_decai:37,weight_g:22,weight_hh:22,weight_hh_l:22,weight_ih:22,weight_ih_l:22,weight_u:22,weight_v:22,weighted_kernel_sum:19,weightedrandomsampl:13,weird:[22,47],welcom:4,well:[1,4,7,13,14,19,22,23,25,32,33,36,38,41,43,46,47],were:[1,14,19,22,29,36,38,42,43],what:[1,3,4,5,15,17,19,22,25,29,36,37,38,44],whatev:[42,43],wheel:35,when:[1,2,4,5,7,8,9,10,11,13,14,15,17,19,21,22,23,25,26,28,29,30,31,32,33,34,35,36,37,38,41,42,43,44,47,49],whenev:[5,21,22,23],where:[1,2,4,5,7,13,14,15,19,20,21,22,23,24,25,26,28,33,36,37,38,40,41,42,43,44,46,47,48,50],wherev:5,whether:[1,4,5,7,13,14,15,17,20,22,23,28,29,36,37,38,39,41,42,43,44],which:[1,2,4,5,7,8,13,15,17,19,20,21,22,23,25,26,27,28,30,31,33,35,36,37,38,39,40,41,42,43,44,47,48,49,51],whilst:[15,28],white:49,whiten:49,who:4,whole:[13,14,22,32,46],whose:[13,15,22,25,36,41,43,44],why:[4,36],wide:27,wide_resnet101_2:47,wide_resnet50_2:47,width:[15,22,23,36,43,48,49],wikipedia:23,willing:5,win:43,win_length:[42,43],window:[13,18,22,23,42,43],window_length:43,wine:47,wip:4,wise:[14,15,22,23,27,43],wish:19,wit:19,with_cuda:[7,35],with_replac:43,within:[1,3,5,8,13,14,15,19,22,27,28,31,36,41,43],without:[1,3,4,5,8,13,15,17,19,21,22,23,26,28,32,36,41,42,43,47,49,51],won:[3,17,22,23,25,29,36,43],woodburi:15,word:[1,14,19,22,23,30],word_language_model:36,work:[1,3,4,5,7,8,14,15,17,19,21,22,23,25,27,28,29,32,35,38,42,43],worker:[4,13,14,22,44],worker_id:13,worker_info:13,worker_init_fn:[13,30],workflow:17,workground:35,workload:[13,14,31],workspac:[7,36],world:[14,22],world_siz:[14,22],worth:17,would:[1,3,5,13,14,15,19,22,26,27,28,36,38,42,43],wrap:[1,13,19,22,35,37,42],wrapper:[8,14,19,21,22,29],write:[4,5,19,23,25,28,30,36,37,38,41,42,43,46],write_video:46,writer:41,written:[1,19,22,36,37,39,41,43],wrong:[32,35,37],wrote:4,www:[22,37,41,44],x86:43,x86_x64:35,x_0:43,x_1:[22,23,43],x_2:[22,23,43],x_3:43,x_clone:21,x_cpu:28,x_cpu_long:28,x_gpu:28,x_i:[22,23,43],x_j:[22,23],x_n:22,x_t:22,xavier_normal_:24,xavier_uniform_:24,xcosx:41,xdg_cache_hom:[17,20],xing:43,xml:44,xsinx:41,xxx:44,xxy:44,xxz:44,y_cpu:28,y_cpu_long:28,y_gpu:28,y_hard:23,y_i:[22,43],y_n:22,y_soft:23,yang:[5,6],ycbcr:49,year:44,yes:4,yet:[8,43],yf225:6,yield:[13,22,43],yinghai:6,you:[1,2,3,4,5,7,8,11,13,14,15,17,19,21,22,23,25,26,28,29,30,31,32,33,35,36,37,38,40,41,42,43,44,47,49],your:[1,2,4,7,8,13,14,1
5,19,21,22,25,26,28,29,30,31,33,35,36,37,38,41,42,43,47,49],yourself:[32,35],z_t:22,zach:6,zdevito:6,zebra:47,zero:[1,8,15,19,21,22,23,24,28,35,36,38,41,42,43,49],zero_:[1,23,38,42],zero_grad:[22,30,32,37],zero_infin:[22,23],zero_point:42,zeros_:24,zeros_lik:[28,36,43],zhang:6,zhong:49,zip:[13,31],zipf:22,zou3519:6,zou:6},titles:["torch.__config__","Automatic differentiation package - torch.autograd","torch.utils.bottleneck","torch.utils.checkpoint","PyTorch Contribution Guide","PyTorch Governance","PyTorch Governance | Persons of Interest","torch.utils.cpp_extension","torch.cuda","<no title>","<no title>","<no title>","<no title>","torch.utils.data","Distributed communication package - torch.distributed","Probability distributions - torch.distributions","torch.utils.dlpack","torch.hub","PyTorch documentation","TorchScript","torch.utils.model_zoo","Multiprocessing package - torch.multiprocessing","torch.nn","torch.nn.functional","torch.nn.init","Autograd mechanics","Broadcasting semantics","CPU threading and TorchScript inference","CUDA semantics","Extending PyTorch","Frequently Asked Questions","Features for large-scale deployments","Multiprocessing best practices","Reproducibility","Serialization semantics","Windows FAQ","torch.onnx","torch.optim","torch.sparse","torch.Storage","Tensor Attributes","torch.utils.tensorboard","torch.Tensor","torch","torchvision.datasets","torchvision","torchvision.io","torchvision.models","torchvision.ops","torchvision.transforms","torchvision.utils","Type Info"],titleterms:{"case":[4,19],"default":[13,19],"function":[1,5,14,15,19,22,23,36,38,49],"import":[17,35],"new":4,"return":30,Adding:[4,29,36],One:35,Ops:43,The:4,Use:[19,28],__config__:0,about:4,access:19,activ:[22,23],adaptive_avg_pool1d:23,adaptive_avg_pool2d:23,adaptive_avg_pool3d:23,adaptive_max_pool1d:23,adaptive_max_pool2d:23,adaptive_max_pool3d:23,adaptiveavgpool1d:22,adaptiveavgpool2d:22,adaptiveavgpool3d:22,adaptivelogsoftmaxwithloss:22,adaptivemaxpool1d:22,adaptivemaxpool2d:22,adaptivemaxpool3d:22,adjust:37,affine_grid:23,agnost:28,alexnet:[36,47],algorithm:37,alpha_dropout:23,alphadropout:22,amd:6,anomali:1,api:[27,31],approach:34,arithmet:19,ask:[19,30,36],asynchron:[14,28,32],aten:36,attach:31,attribut:[19,40],autograd:[1,6,25,29],automat:[1,13,19],avg_pool1d:23,avg_pool2d:23,avg_pool3d:23,avgpool1d:22,avgpool2d:22,avgpool3d:22,avoid:[4,32],backend:14,backward:[25,26],basic:14,batch:13,batch_norm:23,batchnorm1d:22,batchnorm2d:22,batchnorm3d:22,bceloss:22,bcewithlogitsloss:22,behavior:13,bernoulli:15,best:[28,32,34],beta:15,bilinear:[22,23],binary_cross_entropi:23,binary_cross_entropy_with_logit:23,binomi:15,bla:43,bottleneck:2,broadcast:26,broken:35,buffer:[28,32],bug:4,build:[4,6,27,31,35],builtin:19,c10:6,cach:[17,28],call:19,caption:44,cast:19,categor:15,cauchi:15,celu:[22,23],cffi:35,chang:5,channel:35,characterist:47,check:[1,19,25],checkpoint:3,chi2:15,choos:14,cifar:44,cityscap:44,classif:47,claus:35,clip_grad_norm_:22,clip_grad_value_:22,closur:37,cnn:47,coco:44,code:[4,19,28],codebas:4,collate_fn:13,collect:[8,14],come:14,common:[4,14,31],commun:[8,14],comparison:[19,43],compat:26,compon:35,comput:[1,43],consider:31,constant:19,constantpad1d:22,constantpad2d:22,constantpad3d:22,constraint:15,construct:[19,37],contain:22,contribut:4,controversi:5,conv1d:[22,23],conv2d:[22,23],conv3d:[22,23],conv_transpose1d:23,conv_transpose2d:23,conv_transpose3d:23,convers:49,convolut:[22,23,47],convtranspose1d:22,convtranspose2d:22,convtranspose3d:22,core:5,correct:[1,25],cosine_embedding_loss:23
,cosine_similar:23,cosineembeddingloss:22,cosinesimilar:22,cpp:35,cpp_extens:7,cpu:[6,27],creat:19,creation:43,cross_entropi:23,crossentropyloss:22,ctc_loss:23,ctcloss:22,cuda:[6,8,21,28,30,32,35],cudnn:33,cufft:28,custom:[29,36],data:[13,30],data_parallel:23,dataload:6,dataparallel:[22,23,28],dataset:[13,44],datasetfold:44,deadlock:32,debug:19,decis:5,deeplabv3:47,defin:19,densenet:47,deploy:31,deprec:1,deriv:15,descriptor:21,detect:[1,44,47],develop:[4,5],devic:[28,40],dict:19,differenti:1,dirichlet:15,disabl:[1,13,19,43],discuss:4,distanc:[22,23],distribut:[6,14,15,22,23],distributeddataparallel:22,diverg:15,dlpack:16,doc:4,document:[4,18],doesn:30,down:35,download:17,driver:35,dropout2d:[22,23],dropout3d:[22,23],dropout:[22,23],dtype:40,edg:19,elu:[22,23],embed:[22,23],embedding_bag:23,embeddingbag:22,emnist:44,encod:25,end:36,engin:6,entrypoint:17,environ:[14,31],error:[30,35],event:8,exampl:36,exclud:25,execut:28,exponenti:15,exponentialfamili:15,express:19,extend:29,extens:[8,29,31,35],fakedata:44,faq:[5,35],fashion:44,faster:47,featur:[4,31],fight:32,file:[14,21],file_descriptor:21,file_system:21,finfo:51,fishersnedecor:15,fix:4,flatten:22,fleet:31,flickr:44,fold:[22,23],found:35,fractionalmaxpool2d:22,freed:30,frequent:[19,30,36],from:[17,25,35,36],fulli:47,gamma:15,gelu:23,gener:[6,8,26,43,49],geometr:15,get:4,glu:23,googlenet:47,govern:[5,6],gpu:[14,22,23,30],gradient:[1,43],graph:19,grid_sampl:23,group:14,groupnorm:22,gru:22,grucel:22,guid:[4,5],gumbel:15,gumbel_softmax:23,halfcauchi:15,halfnorm:15,hardshrink:[22,23],hardtanh:[22,23],hinge_embedding_loss:23,hingeembeddingloss:22,hip:6,histori:25,hmdb51:44,hogwild:32,how:[17,25,37],hub:17,ident:[22,30],iinfo:51,imag:49,imagefold:44,imagenet:44,implement:[4,17],improv:4,incept:47,includ:35,independ:15,index:43,indic:18,infer:27,info:51,init:24,initi:14,inspect:19,instal:35,instanc:47,instance_norm:23,instancenorm1d:22,instancenorm2d:22,instancenorm3d:22,instead:28,interest:6,interfac:14,interpol:23,interpret:19,ipc:35,isn:30,issu:4,iter:13,jit:[6,19],join:43,kei:[5,35],keypoint:47,kinet:44,kl_div:23,kldivloss:22,kmnist:44,known:17,l1_loss:23,l1loss:22,languag:19,lapack:43,laplac:15,larg:31,launch:14,layer:22,layer_norm:23,layernorm:22,layout:40,leaky_relu:23,leakyrelu:22,learn:37,level:6,limit:[17,36],linear:[22,23],list:19,liter:19,load:[13,17],loader:30,local:[1,43],local_response_norm:23,localresponsenorm:22,log:31,log_softmax:23,logic:[17,19],logitrelaxedbernoulli:15,lognorm:15,logsigmoid:[22,23],logsoftmax:22,lookup:19,loss:[22,23],lowrankmultivariatenorm:15,lp_pool1d:23,lp_pool2d:23,lppool1d:22,lppool2d:22,lstm:22,lstmcell:22,lsun:44,maintain:[5,6],make:[4,5],manag:[8,21,28],map:13,margin_ranking_loss:23,marginrankingloss:22,mask:47,math:43,max_pool1d:23,max_pool2d:23,max_pool3d:23,max_unpool1d:23,max_unpool2d:23,max_unpool3d:23,maxpool1d:22,maxpool2d:22,maxpool3d:22,maxunpool1d:22,maxunpool2d:22,maxunpool3d:22,mechan:25,memori:[8,13,28,30],metadata:31,method:19,mistak:4,mix:19,mkldnn:6,mnasnet:47,mnist:44,mobilenet:47,model:[17,30,31,34,47],model_zoo:20,moder:5,modul:[6,19,22,29],moduledict:22,modulelist:22,more:4,mse_loss:23,mseloss:22,multi:[13,14,22,23],multi_margin_loss:23,multiheadattent:22,multilabel_margin_loss:23,multilabel_soft_margin_loss:23,multilabelmarginloss:22,multilabelsoftmarginloss:22,multimarginloss:22,multinomi:15,multiprocess:[6,21,28,32,35],multivariatenorm:15,mutat:43,nccl:14,negativebinomi:15,network:[14,30,47],nll_loss:23,nllloss:22,non:[13,22,23,36],nonlinear:22,normal:[15,22,23],notic:17,number:[8
,30],numer:1,numpi:33,nvidia:8,nvtx:8,object:47,one_hot:23,onehotcategor:15,onlin:4,onnx:[6,36],open:4,oper:[1,14,19,25,31,35,36,43],ops:48,optim:37,option:[19,27,35,37],order:13,other:[14,22,43],out:30,overview:4,pack_padded_sequ:22,pack_sequ:22,packag:[1,14,21,35],packedsequ:22,pad:[22,23],pad_packed_sequ:22,pad_sequ:22,pairwise_dist:23,pairwisedist:22,parallel:[30,43],paramet:[19,22,37],parameterdict:22,parameterlist:22,parameters_to_vector:22,pareto:15,particip:4,pass:32,pathwis:15,pdist:23,peopl:5,per:37,perform:6,person:[6,47],philosophi:5,phototour:44,pil:49,pin:[13,28],pipe:35,pixel_shuffl:23,pixelshuffl:22,place:[1,25,26,43],plan:28,platform:13,point:[14,31],pointwis:43,poisson:15,poisson_nll_loss:23,poissonnllloss:22,pool:[22,23],ppc:6,practic:[28,32,34],prelu:[22,23],probabl:15,process:[4,5,13],profil:[1,31],project:5,promot:4,properli:30,propos:4,protect:35,publish:17,pull:4,python:[4,19,35],pytorch:[4,5,6,14,18,29,33,36],qmnist:44,quasi:43,question:[19,30,36],queue:32,random:[8,13,30,43],rate:37,readabl:4,recommend:34,recurr:[22,30],reduct:43,refer:19,refin:19,reflectionpad1d:22,reflectionpad2d:22,registri:15,relaxedbernoulli:15,relaxedonehotcategor:15,relu6:[22,23],relu:[22,23],remove_spectral_norm:22,remove_weight_norm:22,replicationpad1d:22,replicationpad2d:22,replicationpad3d:22,report:[4,30],reproduc:33,request:4,requires_grad:25,resnet:47,resnext:47,resolut:19,reus:32,review:4,rng:6,rnn:22,rnncell:22,robust:4,rocm:6,rrelu:[22,23],run:17,runtim:[27,30,47],sampl:43,sampler:13,save:[17,31,34],sbd:44,sbu:44,scale:31,score:15,script:[19,35,36],segment:47,selu:[22,23],semant:[26,28,34,47],sequenti:22,serial:[34,43],share:[14,21],shufflenet:47,shut:35,sigmoid:[22,23],simd:6,singl:13,slice:43,smooth_l1_loss:23,smoothl1loss:22,soft_margin_loss:23,softmarginloss:22,softmax2d:22,softmax:[22,23],softmin:[22,23],softplu:[22,23],softshrink:[22,23],softsign:[22,23],sourc:[4,35],spars:[22,23,38],spawn:[14,21],specif:13,spectral:43,spectral_norm:22,speed:35,squeezenet:47,start:4,statement:19,step:37,stl10:44,storag:39,strategi:21,stream:[8,28],studentt:15,style:13,subgraph:25,submit:4,subprocess:21,subscript:19,sum:22,support:36,svhn:44,syncbatchnorm:22,synchron:14,system:[14,21],tabl:18,take:37,tanh:[22,23],tanhshrink:[22,23],tcp:14,tenet:5,tensor:[1,21,40,42,43,49],tensorboard:41,ternari:19,test:4,thread:27,threshold:[22,23],through:32,tip:32,tool:8,torch:[0,1,2,3,6,7,8,13,14,15,16,17,20,21,22,23,24,29,36,37,38,39,40,41,42,43,49,51],torchscript:[19,27,31],torchvis:[44,45,46,47,48,49,50],trace:[19,36],tracer:19,train:32,transform:[15,22,49],transformeddistribut:15,transformerdecod:22,transformerdecoderlay:22,transformerencod:22,transformerencoderlay:22,triag:4,triplet_margin_loss:23,tripletmarginloss:22,tupl:19,tutori:4,type:[13,19,51],ucf101:44,uncontroversi:5,unfold:[22,23],uniform:15,upsampl:[22,23],upsample_bilinear:23,upsample_nearest:23,upsamplingbilinear2d:22,upsamplingnearest2d:22,usag:[31,35],use:[14,37],user:19,usp:44,util:[2,3,7,13,14,16,20,22,41,43,50],valu:19,variabl:[1,14,19],vector_to_paramet:22,vgg:47,video:46,vision:[22,23],voc:44,warn:19,weibul:15,weight:22,weight_norm:22,where:17,which:14,why:35,wide:[31,47],win:35,window:[6,35],without:35,work:[13,30],worker:30,write:29,xla:6,zeropad2d:22}}) \ No newline at end of file 
+Search.setIndex({docnames:["__config__","autograd","bottleneck","checkpoint","community/contribution_guide","community/governance","community/persons_of_interest","cpp_extension","cuda","cuda_deterministic","cuda_deterministic_backward","cudnn_deterministic","cudnn_persistent_rnn","data","distributed","distributions","dlpack","hub","index","jit","model_zoo","multiprocessing","nn","nn.functional","nn.init","notes/autograd","notes/broadcasting","notes/cpu_threading_torchscript_inference","notes/cuda","notes/extending","notes/faq","notes/large_scale_deployments","notes/multiprocessing","notes/randomness","notes/serialization","notes/windows","onnx","optim","random","sparse","storage","tensor_attributes","tensorboard","tensors","torch","torchvision/datasets","torchvision/index","torchvision/io","torchvision/models","torchvision/ops","torchvision/transforms","torchvision/utils","type_info"],envversion:{"sphinx.domains.c":1,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":1,"sphinx.domains.javascript":1,"sphinx.domains.math":2,"sphinx.domains.python":1,"sphinx.domains.rst":1,"sphinx.domains.std":1,"sphinx.ext.intersphinx":1,"sphinx.ext.todo":1,"sphinx.ext.viewcode":1,sphinx:56},filenames:["__config__.rst","autograd.rst","bottleneck.rst","checkpoint.rst","community/contribution_guide.rst","community/governance.rst","community/persons_of_interest.rst","cpp_extension.rst","cuda.rst","cuda_deterministic.rst","cuda_deterministic_backward.rst","cudnn_deterministic.rst","cudnn_persistent_rnn.rst","data.rst","distributed.rst","distributions.rst","dlpack.rst","hub.rst","index.rst","jit.rst","model_zoo.rst","multiprocessing.rst","nn.rst","nn.functional.rst","nn.init.rst","notes/autograd.rst","notes/broadcasting.rst","notes/cpu_threading_torchscript_inference.rst","notes/cuda.rst","notes/extending.rst","notes/faq.rst","notes/large_scale_deployments.rst","notes/multiprocessing.rst","notes/randomness.rst","notes/serialization.rst","notes/windows.rst","onnx.rst","optim.rst","random.rst","sparse.rst","storage.rst","tensor_attributes.rst","tensorboard.rst","tensors.rst","torch.rst","torchvision/datasets.rst","torchvision/index.rst","torchvision/io.rst","torchvision/models.rst","torchvision/ops.rst","torchvision/transforms.rst","torchvision/utils.rst","type_info.rst"],objects:{"":{"PYTORCH_JIT=1":[19,5,1,"-"],torch:[44,0,0,"-"],torchvision:[46,0,0,"-"]},"torch.BoolTensor":{all:[43,2,1,""],any:[43,2,1,""]},"torch.FloatStorage":{"byte":[40,2,1,""],"char":[40,2,1,""],"double":[40,2,1,""],"float":[40,2,1,""],"int":[40,2,1,""],"long":[40,2,1,""],"new":[40,2,1,""],"short":[40,2,1,""],bfloat16:[40,2,1,""],bool:[40,2,1,""],clone:[40,2,1,""],copy_:[40,2,1,""],cpu:[40,2,1,""],cuda:[40,2,1,""],data_ptr:[40,2,1,""],device:[40,3,1,""],dtype:[40,3,1,""],element_size:[40,2,1,""],fill_:[40,2,1,""],from_buffer:[40,2,1,""],from_file:[40,2,1,""],half:[40,2,1,""],is_cuda:[40,3,1,""],is_pinned:[40,2,1,""],is_shared:[40,2,1,""],is_sparse:[40,3,1,""],pin_memory:[40,2,1,""],resize_:[40,2,1,""],share_memory_:[40,2,1,""],size:[40,2,1,""],tolist:[40,2,1,""],type:[40,2,1,""]},"torch.Tensor":{"byte":[43,2,1,""],"char":[43,2,1,""],"double":[43,2,1,""],"float":[43,2,1,""],"int":[43,2,1,""],"long":[43,2,1,""],"short":[43,2,1,""],"var":[43,2,1,""],T:[43,3,1,""],abs:[43,2,1,""],abs_:[43,2,1,""],acos:[43,2,1,""],acos_:[43,2,1,""],add:[43,2,1,""],add_:[43,2,1,""],addbmm:[43,2,1,""],addbmm_:[43,2,1,""],addcdiv:[43,2,1,""],addcdiv_:[43,2,1,""],addcmul:[43,2,1,""],addcmul_:[43,2,1,""],addmm:[43,2,1,""],addmm_:[43,2,1,""],
addmv:[43,2,1,""],addmv_:[43,2,1,""],addr:[43,2,1,""],addr_:[43,2,1,""],allclose:[43,2,1,""],apply_:[43,2,1,""],argmax:[43,2,1,""],argmin:[43,2,1,""],argsort:[43,2,1,""],as_strided:[43,2,1,""],asin:[43,2,1,""],asin_:[43,2,1,""],atan2:[43,2,1,""],atan2_:[43,2,1,""],atan:[43,2,1,""],atan_:[43,2,1,""],backward:[43,2,1,""],baddbmm:[43,2,1,""],baddbmm_:[43,2,1,""],bernoulli:[43,2,1,""],bernoulli_:[43,2,1,""],bfloat16:[43,2,1,""],bincount:[43,2,1,""],bitwise_not:[43,2,1,""],bitwise_not_:[43,2,1,""],bmm:[43,2,1,""],bool:[43,2,1,""],cauchy_:[43,2,1,""],ceil:[43,2,1,""],ceil_:[43,2,1,""],cholesky:[43,2,1,""],cholesky_inverse:[43,2,1,""],cholesky_solve:[43,2,1,""],chunk:[43,2,1,""],clamp:[43,2,1,""],clamp_:[43,2,1,""],clone:[43,2,1,""],contiguous:[43,2,1,""],copy_:[43,2,1,""],cos:[43,2,1,""],cos_:[43,2,1,""],cosh:[43,2,1,""],cosh_:[43,2,1,""],cpu:[43,2,1,""],cross:[43,2,1,""],cuda:[43,2,1,""],cumprod:[43,2,1,""],cumsum:[43,2,1,""],data_ptr:[43,2,1,""],dense_dim:[43,2,1,""],dequantize:[43,2,1,""],det:[43,2,1,""],detach:[43,2,1,""],detach_:[43,2,1,""],device:[43,3,1,""],diag:[43,2,1,""],diag_embed:[43,2,1,""],diagflat:[43,2,1,""],diagonal:[43,2,1,""],digamma:[43,2,1,""],digamma_:[43,2,1,""],dim:[43,2,1,""],dist:[43,2,1,""],div:[43,2,1,""],div_:[43,2,1,""],dot:[43,2,1,""],eig:[43,2,1,""],element_size:[43,2,1,""],eq:[43,2,1,""],eq_:[43,2,1,""],equal:[43,2,1,""],erf:[43,2,1,""],erf_:[43,2,1,""],erfc:[43,2,1,""],erfc_:[43,2,1,""],erfinv:[43,2,1,""],erfinv_:[43,2,1,""],exp:[43,2,1,""],exp_:[43,2,1,""],expand:[43,2,1,""],expand_as:[43,2,1,""],expm1:[43,2,1,""],expm1_:[43,2,1,""],exponential_:[43,2,1,""],fft:[43,2,1,""],fill_:[43,2,1,""],fill_diagonal_:[43,2,1,""],flatten:[43,2,1,""],flip:[43,2,1,""],floor:[43,2,1,""],floor_:[43,2,1,""],fmod:[43,2,1,""],fmod_:[43,2,1,""],frac:[43,2,1,""],frac_:[43,2,1,""],gather:[43,2,1,""],ge:[43,2,1,""],ge_:[43,2,1,""],gels:[43,2,1,""],geometric_:[43,2,1,""],geqrf:[43,2,1,""],ger:[43,2,1,""],get_device:[43,2,1,""],grad:[43,3,1,""],gt:[43,2,1,""],gt_:[43,2,1,""],half:[43,2,1,""],hardshrink:[43,2,1,""],histc:[43,2,1,""],ifft:[43,2,1,""],index_add:[43,2,1,""],index_add_:[43,2,1,""],index_copy:[43,2,1,""],index_copy_:[43,2,1,""],index_fill:[43,2,1,""],index_fill_:[43,2,1,""],index_put:[43,2,1,""],index_put_:[43,2,1,""],index_select:[43,2,1,""],indices:[43,2,1,""],int_repr:[43,2,1,""],inverse:[43,2,1,""],irfft:[43,2,1,""],is_contiguous:[43,2,1,""],is_cuda:[43,3,1,""],is_floating_point:[43,2,1,""],is_leaf:[43,2,1,""],is_pinned:[43,2,1,""],is_set_to:[43,2,1,""],is_shared:[43,2,1,""],is_signed:[43,2,1,""],is_sparse:[43,2,1,""],item:[43,2,1,""],kthvalue:[43,2,1,""],le:[43,2,1,""],le_:[43,2,1,""],lerp:[43,2,1,""],lerp_:[43,2,1,""],log10:[43,2,1,""],log10_:[43,2,1,""],log1p:[43,2,1,""],log1p_:[43,2,1,""],log2:[43,2,1,""],log2_:[43,2,1,""],log:[43,2,1,""],log_:[43,2,1,""],log_normal_:[43,2,1,""],logdet:[43,2,1,""],logsumexp:[43,2,1,""],lstsq:[43,2,1,""],lt:[43,2,1,""],lt_:[43,2,1,""],lu:[43,2,1,""],lu_solve:[43,2,1,""],map_:[43,2,1,""],masked_fill:[43,2,1,""],masked_fill_:[43,2,1,""],masked_scatter:[43,2,1,""],masked_scatter_:[43,2,1,""],masked_select:[43,2,1,""],matmul:[43,2,1,""],matrix_power:[43,2,1,""],max:[43,2,1,""],mean:[43,2,1,""],median:[43,2,1,""],min:[43,2,1,""],mm:[43,2,1,""],mode:[43,2,1,""],mul:[43,2,1,""],mul_:[43,2,1,""],multinomial:[43,2,1,""],mv:[43,2,1,""],mvlgamma:[43,2,1,""],mvlgamma_:[43,2,1,""],narrow:[43,2,1,""],narrow_copy:[43,2,1,""],ndim:[43,3,1,""],ndimension:[43,2,1,""],ne:[43,2,1,""],ne_:[43,2,1,""],neg:[43,2,1,""],neg_:[43,2,1,""],nelement:[43,2,1,""],new
_empty:[43,2,1,""],new_full:[43,2,1,""],new_ones:[43,2,1,""],new_tensor:[43,2,1,""],new_zeros:[43,2,1,""],nonzero:[43,2,1,""],norm:[43,2,1,""],normal_:[43,2,1,""],numel:[43,2,1,""],numpy:[43,2,1,""],orgqr:[43,2,1,""],ormqr:[43,2,1,""],permute:[43,2,1,""],pin_memory:[43,2,1,""],pinverse:[43,2,1,""],pow:[43,2,1,""],pow_:[43,2,1,""],prod:[43,2,1,""],put_:[43,2,1,""],q_scale:[43,2,1,""],q_zero_point:[43,2,1,""],qr:[43,2,1,""],qscheme:[43,2,1,""],random_:[43,2,1,""],reciprocal:[43,2,1,""],reciprocal_:[43,2,1,""],register_hook:[43,2,1,""],remainder:[43,2,1,""],remainder_:[43,2,1,""],renorm:[43,2,1,""],renorm_:[43,2,1,""],repeat:[43,2,1,""],repeat_interleave:[43,2,1,""],requires_grad:[43,2,1,""],requires_grad_:[43,2,1,""],reshape:[43,2,1,""],reshape_as:[43,2,1,""],resize_:[43,2,1,""],resize_as_:[43,2,1,""],retain_grad:[43,2,1,""],rfft:[43,2,1,""],roll:[43,2,1,""],rot90:[43,2,1,""],round:[43,2,1,""],round_:[43,2,1,""],rsqrt:[43,2,1,""],rsqrt_:[43,2,1,""],scatter:[43,2,1,""],scatter_:[43,2,1,""],scatter_add:[43,2,1,""],scatter_add_:[43,2,1,""],select:[43,2,1,""],set_:[43,2,1,""],share_memory_:[43,2,1,""],sigmoid:[43,2,1,""],sigmoid_:[43,2,1,""],sign:[43,2,1,""],sign_:[43,2,1,""],sin:[43,2,1,""],sin_:[43,2,1,""],sinh:[43,2,1,""],sinh_:[43,2,1,""],size:[43,2,1,""],slogdet:[43,2,1,""],solve:[43,2,1,""],sort:[43,2,1,""],sparse_dim:[43,2,1,""],sparse_mask:[43,2,1,""],split:[43,2,1,""],sqrt:[43,2,1,""],sqrt_:[43,2,1,""],squeeze:[43,2,1,""],squeeze_:[43,2,1,""],std:[43,2,1,""],stft:[43,2,1,""],storage:[43,2,1,""],storage_offset:[43,2,1,""],storage_type:[43,2,1,""],stride:[43,2,1,""],sub:[43,2,1,""],sub_:[43,2,1,""],sum:[43,2,1,""],sum_to_size:[43,2,1,""],svd:[43,2,1,""],symeig:[43,2,1,""],t:[43,2,1,""],t_:[43,2,1,""],take:[43,2,1,""],tan:[43,2,1,""],tan_:[43,2,1,""],tanh:[43,2,1,""],tanh_:[43,2,1,""],to:[43,2,1,""],to_mkldnn:[43,2,1,""],to_sparse:[43,2,1,""],tolist:[43,2,1,""],topk:[43,2,1,""],trace:[43,2,1,""],transpose:[43,2,1,""],transpose_:[43,2,1,""],triangular_solve:[43,2,1,""],tril:[43,2,1,""],tril_:[43,2,1,""],triu:[43,2,1,""],triu_:[43,2,1,""],trunc:[43,2,1,""],trunc_:[43,2,1,""],type:[43,2,1,""],type_as:[43,2,1,""],unbind:[43,2,1,""],unfold:[43,2,1,""],uniform_:[43,2,1,""],unique:[43,2,1,""],unique_consecutive:[43,2,1,""],unsqueeze:[43,2,1,""],unsqueeze_:[43,2,1,""],values:[43,2,1,""],view:[43,2,1,""],view_as:[43,2,1,""],where:[43,2,1,""],zero_:[43,2,1,""]},"torch._C":{Generator:[44,1,1,""]},"torch._C.Generator":{device:[44,3,1,""],get_state:[44,2,1,""],initial_seed:[44,2,1,""],manual_seed:[44,2,1,""],seed:[44,2,1,""],set_state:[44,2,1,""]},"torch.__config__":{parallel_info:[0,4,1,""],show:[0,4,1,""]},"torch.autograd":{Function:[1,1,1,""],backward:[1,4,1,""],detect_anomaly:[1,1,1,""],enable_grad:[1,1,1,""],grad:[1,4,1,""],gradcheck:[1,4,1,""],gradgradcheck:[1,4,1,""],no_grad:[1,1,1,""],set_detect_anomaly:[1,1,1,""],set_grad_enabled:[1,1,1,""]},"torch.autograd.Function":{backward:[1,2,1,""],forward:[1,2,1,""]},"torch.autograd.profiler":{emit_nvtx:[1,1,1,""],load_nvprof:[1,4,1,""],profile:[1,1,1,""]},"torch.autograd.profiler.profile":{export_chrome_trace:[1,2,1,""],key_averages:[1,2,1,""],self_cpu_time_total:[1,2,1,""],table:[1,2,1,""],total_average:[1,2,1,""]},"torch.cuda":{Event:[8,1,1,""],Stream:[8,1,1,""],current_blas_handle:[8,4,1,""],current_device:[8,4,1,""],current_stream:[8,4,1,""],default_stream:[8,4,1,""],device:[8,1,1,""],device_count:[8,4,1,""],device_of:[8,1,1,""],empty_cache:[8,4,1,""],get_device_capability:[8,4,1,""],get_device_name:[8,4,1,""],get_rng_state:[8,4,1,""],get_rng_state_
all:[8,4,1,""],init:[8,4,1,""],initial_seed:[8,4,1,""],ipc_collect:[8,4,1,""],is_available:[8,4,1,""],manual_seed:[8,4,1,""],manual_seed_all:[8,4,1,""],max_memory_allocated:[8,4,1,""],max_memory_cached:[8,4,1,""],memory_allocated:[8,4,1,""],memory_cached:[8,4,1,""],reset_max_memory_allocated:[8,4,1,""],reset_max_memory_cached:[8,4,1,""],seed:[8,4,1,""],seed_all:[8,4,1,""],set_device:[8,4,1,""],set_rng_state:[8,4,1,""],set_rng_state_all:[8,4,1,""],stream:[8,4,1,""],synchronize:[8,4,1,""]},"torch.cuda.Event":{elapsed_time:[8,2,1,""],from_ipc_handle:[8,2,1,""],ipc_handle:[8,2,1,""],query:[8,2,1,""],record:[8,2,1,""],synchronize:[8,2,1,""],wait:[8,2,1,""]},"torch.cuda.Stream":{query:[8,2,1,""],record_event:[8,2,1,""],synchronize:[8,2,1,""],wait_event:[8,2,1,""],wait_stream:[8,2,1,""]},"torch.cuda.comm":{broadcast:[8,4,1,""],broadcast_coalesced:[8,4,1,""],gather:[8,4,1,""],reduce_add:[8,4,1,""],scatter:[8,4,1,""]},"torch.cuda.nvtx":{mark:[8,4,1,""],range_pop:[8,4,1,""],range_push:[8,4,1,""]},"torch.distributed":{Backend:[14,1,1,""],ReduceOp:[14,1,1,""],all_gather:[14,4,1,""],all_gather_multigpu:[14,4,1,""],all_reduce:[14,4,1,""],all_reduce_multigpu:[14,4,1,""],barrier:[14,4,1,""],broadcast:[14,4,1,""],broadcast_multigpu:[14,4,1,""],gather:[14,4,1,""],get_backend:[14,4,1,""],get_rank:[14,4,1,""],get_world_size:[14,4,1,""],init_process_group:[14,4,1,""],irecv:[14,4,1,""],is_initialized:[14,4,1,""],is_mpi_available:[14,4,1,""],is_nccl_available:[14,4,1,""],isend:[14,4,1,""],launch:[14,0,0,"-"],new_group:[14,4,1,""],recv:[14,4,1,""],reduce:[14,4,1,""],reduce_multigpu:[14,4,1,""],reduce_op:[14,1,1,""],scatter:[14,4,1,""],send:[14,4,1,""]},"torch.distributions":{constraint_registry:[15,0,0,"-"],constraints:[15,0,0,"-"],kl:[15,0,0,"-"],transforms:[15,0,0,"-"]},"torch.distributions.bernoulli":{Bernoulli:[15,1,1,""]},"torch.distributions.bernoulli.Bernoulli":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,3,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.beta":{Beta:[15,1,1,""]},"torch.distributions.beta.Beta":{arg_constraints:[15,3,1,""],concentration0:[15,2,1,""],concentration1:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.binomial":{Binomial:[15,1,1,""]},"torch.distributions.binomial.Binomial":{arg_constraints:[15,3,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,3,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.categorical":{Categorical:[15,1,1,""]},"torch.distributions.categorical.Categorical":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,3,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.cauchy":{Cauchy:[15,1,1,""]},"torch.distributions.cauchy.Cauchy":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:
[15,2,1,""]},"torch.distributions.chi2":{Chi2:[15,1,1,""]},"torch.distributions.chi2.Chi2":{arg_constraints:[15,3,1,""],df:[15,2,1,""],expand:[15,2,1,""]},"torch.distributions.constraint_registry":{ConstraintRegistry:[15,1,1,""]},"torch.distributions.constraint_registry.ConstraintRegistry":{register:[15,2,1,""]},"torch.distributions.constraints":{Constraint:[15,1,1,""],cat:[15,3,1,""],dependent_property:[15,3,1,""],greater_than:[15,3,1,""],greater_than_eq:[15,3,1,""],half_open_interval:[15,3,1,""],integer_interval:[15,3,1,""],interval:[15,3,1,""],less_than:[15,3,1,""],stack:[15,3,1,""]},"torch.distributions.constraints.Constraint":{check:[15,2,1,""]},"torch.distributions.dirichlet":{Dirichlet:[15,1,1,""]},"torch.distributions.dirichlet.Dirichlet":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.distribution":{Distribution:[15,1,1,""]},"torch.distributions.distribution.Distribution":{arg_constraints:[15,2,1,""],batch_shape:[15,2,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],event_shape:[15,2,1,""],expand:[15,2,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],perplexity:[15,2,1,""],rsample:[15,2,1,""],sample:[15,2,1,""],sample_n:[15,2,1,""],stddev:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.exp_family":{ExponentialFamily:[15,1,1,""]},"torch.distributions.exp_family.ExponentialFamily":{entropy:[15,2,1,""]},"torch.distributions.exponential":{Exponential:[15,1,1,""]},"torch.distributions.exponential.Exponential":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],stddev:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.fishersnedecor":{FisherSnedecor:[15,1,1,""]},"torch.distributions.fishersnedecor.FisherSnedecor":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.gamma":{Gamma:[15,1,1,""]},"torch.distributions.gamma.Gamma":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.geometric":{Geometric:[15,1,1,""]},"torch.distributions.geometric.Geometric":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.gumbel":{Gumbel:[15,1,1,""]},"torch.distributions.gumbel.Gumbel":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],stddev:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.half_cauchy":{HalfCauchy:[15,1,1,""]},"torch.distributions.half_cauchy.HalfCauchy":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],scale:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.half_normal":{HalfNormal:[15,1,1,""]},"torch.distributions.half_normal.HalfNormal":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[
15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],scale:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.independent":{Independent:[15,1,1,""]},"torch.distributions.independent.Independent":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,2,1,""],has_rsample:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],sample:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.kl":{kl_divergence:[15,4,1,""],register_kl:[15,4,1,""]},"torch.distributions.laplace":{Laplace:[15,1,1,""]},"torch.distributions.laplace.Laplace":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],stddev:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.log_normal":{LogNormal:[15,1,1,""]},"torch.distributions.log_normal.LogNormal":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],loc:[15,2,1,""],mean:[15,2,1,""],scale:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.lowrank_multivariate_normal":{LowRankMultivariateNormal:[15,1,1,""]},"torch.distributions.lowrank_multivariate_normal.LowRankMultivariateNormal":{arg_constraints:[15,3,1,""],covariance_matrix:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],precision_matrix:[15,3,1,""],rsample:[15,2,1,""],scale_tril:[15,3,1,""],support:[15,3,1,""],variance:[15,3,1,""]},"torch.distributions.multinomial":{Multinomial:[15,1,1,""]},"torch.distributions.multinomial.Multinomial":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],logits:[15,2,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,2,1,""],sample:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.multivariate_normal":{MultivariateNormal:[15,1,1,""]},"torch.distributions.multivariate_normal.MultivariateNormal":{arg_constraints:[15,3,1,""],covariance_matrix:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],precision_matrix:[15,3,1,""],rsample:[15,2,1,""],scale_tril:[15,3,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.negative_binomial":{NegativeBinomial:[15,1,1,""]},"torch.distributions.negative_binomial.NegativeBinomial":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.normal":{Normal:[15,1,1,""]},"torch.distributions.normal.Normal":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],sample:[15,2,1,""],stddev:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.one_hot_categorical":{OneHotCategorical:[15,1,1,""]},"torch.distributions.one_hot_categorical.OneHotCategorical":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],enumerate_support:[15,2,1,""],expand:[15,2,1,""],has_enumerate_support:[15,3,1,""],log_prob:[15,2,1,""],logits:[15,2,1,""],mean:[15,2,1,""],param_shape:[15,2,1,""],probs:[15,2,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.pareto":{Pareto:[15,1,1,""]},"torch.distributions.pareto
.Pareto":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],mean:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.poisson":{Poisson:[15,1,1,""]},"torch.distributions.poisson.Poisson":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],sample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.relaxed_bernoulli":{LogitRelaxedBernoulli:[15,1,1,""],RelaxedBernoulli:[15,1,1,""]},"torch.distributions.relaxed_bernoulli.LogitRelaxedBernoulli":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],log_prob:[15,2,1,""],logits:[15,3,1,""],param_shape:[15,2,1,""],probs:[15,3,1,""],rsample:[15,2,1,""],support:[15,3,1,""]},"torch.distributions.relaxed_bernoulli.RelaxedBernoulli":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],logits:[15,2,1,""],probs:[15,2,1,""],support:[15,3,1,""],temperature:[15,2,1,""]},"torch.distributions.relaxed_categorical":{RelaxedOneHotCategorical:[15,1,1,""]},"torch.distributions.relaxed_categorical.RelaxedOneHotCategorical":{arg_constraints:[15,3,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],logits:[15,2,1,""],probs:[15,2,1,""],support:[15,3,1,""],temperature:[15,2,1,""]},"torch.distributions.studentT":{StudentT:[15,1,1,""]},"torch.distributions.studentT.StudentT":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.distributions.transformed_distribution":{TransformedDistribution:[15,1,1,""]},"torch.distributions.transformed_distribution.TransformedDistribution":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,2,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],rsample:[15,2,1,""],sample:[15,2,1,""],support:[15,2,1,""]},"torch.distributions.transforms":{AbsTransform:[15,1,1,""],AffineTransform:[15,1,1,""],CatTransform:[15,1,1,""],ComposeTransform:[15,1,1,""],ExpTransform:[15,1,1,""],LowerCholeskyTransform:[15,1,1,""],PowerTransform:[15,1,1,""],SigmoidTransform:[15,1,1,""],SoftmaxTransform:[15,1,1,""],StackTransform:[15,1,1,""],StickBreakingTransform:[15,1,1,""],Transform:[15,1,1,""]},"torch.distributions.transforms.Transform":{inv:[15,2,1,""],log_abs_det_jacobian:[15,2,1,""],sign:[15,2,1,""]},"torch.distributions.uniform":{Uniform:[15,1,1,""]},"torch.distributions.uniform.Uniform":{arg_constraints:[15,3,1,""],cdf:[15,2,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],has_rsample:[15,3,1,""],icdf:[15,2,1,""],log_prob:[15,2,1,""],mean:[15,2,1,""],rsample:[15,2,1,""],stddev:[15,2,1,""],support:[15,2,1,""],variance:[15,2,1,""]},"torch.distributions.weibull":{Weibull:[15,1,1,""]},"torch.distributions.weibull.Weibull":{arg_constraints:[15,3,1,""],entropy:[15,2,1,""],expand:[15,2,1,""],mean:[15,2,1,""],support:[15,3,1,""],variance:[15,2,1,""]},"torch.hub":{help:[17,4,1,""],list:[17,4,1,""],load:[17,4,1,""],set_dir:[17,4,1,""]},"torch.jit":{ScriptModule:[19,1,1,""],load:[19,4,1,""],save:[19,4,1,""],script:[19,4,1,""],trace:[19,4,1,""]},"torch.multiprocessing":{SpawnContext:[21,1,1,""],get_all_sharing_strategies:[21,4,1,""],get_sharing_strategy:[21,4,1,""],set_sharing_strategy:[21,4,1,""],spawn:[21,4,1,""]},"torch.multiprocessing.SpawnContext":{join:[21,2,1,""]},"torch.nn":{AdaptiveAvgPool1d:[22,1,1,""],AdaptiveAvgPool2d:[22,1,1,""],AdaptiveAvgPool3d:[22,1,1,""],AdaptiveLogSoftmaxWithLoss:[22,1,1,""],AdaptiveMaxPool1d:[22,1,1,""],AdaptiveMaxPool2d:[22,1,1,""],AdaptiveMaxPool3d:[22,1,1,""],AlphaDropout:[
22,1,1,""],AvgPool1d:[22,1,1,""],AvgPool2d:[22,1,1,""],AvgPool3d:[22,1,1,""],BCELoss:[22,1,1,""],BCEWithLogitsLoss:[22,1,1,""],BatchNorm1d:[22,1,1,""],BatchNorm2d:[22,1,1,""],BatchNorm3d:[22,1,1,""],Bilinear:[22,1,1,""],CELU:[22,1,1,""],CTCLoss:[22,1,1,""],ConstantPad1d:[22,1,1,""],ConstantPad2d:[22,1,1,""],ConstantPad3d:[22,1,1,""],Conv1d:[22,1,1,""],Conv2d:[22,1,1,""],Conv3d:[22,1,1,""],ConvTranspose1d:[22,1,1,""],ConvTranspose2d:[22,1,1,""],ConvTranspose3d:[22,1,1,""],CosineEmbeddingLoss:[22,1,1,""],CosineSimilarity:[22,1,1,""],CrossEntropyLoss:[22,1,1,""],DataParallel:[22,1,1,""],Dropout2d:[22,1,1,""],Dropout3d:[22,1,1,""],Dropout:[22,1,1,""],ELU:[22,1,1,""],Embedding:[22,1,1,""],EmbeddingBag:[22,1,1,""],Fold:[22,1,1,""],FractionalMaxPool2d:[22,1,1,""],GRU:[22,1,1,""],GRUCell:[22,1,1,""],GroupNorm:[22,1,1,""],Hardshrink:[22,1,1,""],Hardtanh:[22,1,1,""],HingeEmbeddingLoss:[22,1,1,""],Identity:[22,1,1,""],InstanceNorm1d:[22,1,1,""],InstanceNorm2d:[22,1,1,""],InstanceNorm3d:[22,1,1,""],KLDivLoss:[22,1,1,""],L1Loss:[22,1,1,""],LPPool1d:[22,1,1,""],LPPool2d:[22,1,1,""],LSTM:[22,1,1,""],LSTMCell:[22,1,1,""],LayerNorm:[22,1,1,""],LeakyReLU:[22,1,1,""],Linear:[22,1,1,""],LocalResponseNorm:[22,1,1,""],LogSigmoid:[22,1,1,""],LogSoftmax:[22,1,1,""],MSELoss:[22,1,1,""],MarginRankingLoss:[22,1,1,""],MaxPool1d:[22,1,1,""],MaxPool2d:[22,1,1,""],MaxPool3d:[22,1,1,""],MaxUnpool1d:[22,1,1,""],MaxUnpool2d:[22,1,1,""],MaxUnpool3d:[22,1,1,""],Module:[22,1,1,""],ModuleDict:[22,1,1,""],ModuleList:[22,1,1,""],MultiLabelMarginLoss:[22,1,1,""],MultiLabelSoftMarginLoss:[22,1,1,""],MultiMarginLoss:[22,1,1,""],MultiheadAttention:[22,1,1,""],NLLLoss:[22,1,1,""],PReLU:[22,1,1,""],PairwiseDistance:[22,1,1,""],Parameter:[22,1,1,""],ParameterDict:[22,1,1,""],ParameterList:[22,1,1,""],PixelShuffle:[22,1,1,""],PoissonNLLLoss:[22,1,1,""],RNN:[22,1,1,""],RNNCell:[22,1,1,""],RReLU:[22,1,1,""],ReLU6:[22,1,1,""],ReLU:[22,1,1,""],ReflectionPad1d:[22,1,1,""],ReflectionPad2d:[22,1,1,""],ReplicationPad1d:[22,1,1,""],ReplicationPad2d:[22,1,1,""],ReplicationPad3d:[22,1,1,""],SELU:[22,1,1,""],Sequential:[22,1,1,""],Sigmoid:[22,1,1,""],SmoothL1Loss:[22,1,1,""],SoftMarginLoss:[22,1,1,""],Softmax2d:[22,1,1,""],Softmax:[22,1,1,""],Softmin:[22,1,1,""],Softplus:[22,1,1,""],Softshrink:[22,1,1,""],Softsign:[22,1,1,""],SyncBatchNorm:[22,1,1,""],Tanh:[22,1,1,""],Tanhshrink:[22,1,1,""],Threshold:[22,1,1,""],Transformer:[22,1,1,""],TransformerDecoder:[22,1,1,""],TransformerDecoderLayer:[22,1,1,""],TransformerEncoder:[22,1,1,""],TransformerEncoderLayer:[22,1,1,""],TripletMarginLoss:[22,1,1,""],Unfold:[22,1,1,""],Upsample:[22,1,1,""],UpsamplingBilinear2d:[22,1,1,""],UpsamplingNearest2d:[22,1,1,""],ZeroPad2d:[22,1,1,""]},"torch.nn.AdaptiveLogSoftmaxWithLoss":{log_prob:[22,2,1,""],predict:[22,2,1,""]},"torch.nn.Embedding":{from_pretrained:[22,2,1,""]},"torch.nn.EmbeddingBag":{from_pretrained:[22,2,1,""]},"torch.nn.Module":{"double":[22,2,1,""],"float":[22,2,1,""],add_module:[22,2,1,""],apply:[22,2,1,""],buffers:[22,2,1,""],children:[22,2,1,""],cpu:[22,2,1,""],cuda:[22,2,1,""],dump_patches:[22,3,1,""],eval:[22,2,1,""],extra_repr:[22,2,1,""],forward:[22,2,1,""],half:[22,2,1,""],load_state_dict:[22,2,1,""],modules:[22,2,1,""],named_buffers:[22,2,1,""],named_children:[22,2,1,""],named_modules:[22,2,1,""],named_parameters:[22,2,1,""],parameters:[22,2,1,""],register_backward_hook:[22,2,1,""],register_buffer:[22,2,1,""],register_forward_hook:[22,2,1,""],register_forward_pre_hook:[22,2,1,""],register_parameter:[22,2,1,""],requires_grad_:[22,2,1,""],state_di
ct:[22,2,1,""],to:[22,2,1,""],train:[22,2,1,""],type:[22,2,1,""],zero_grad:[22,2,1,""]},"torch.nn.ModuleDict":{clear:[22,2,1,""],items:[22,2,1,""],keys:[22,2,1,""],pop:[22,2,1,""],update:[22,2,1,""],values:[22,2,1,""]},"torch.nn.ModuleList":{append:[22,2,1,""],extend:[22,2,1,""],insert:[22,2,1,""]},"torch.nn.MultiheadAttention":{forward:[22,2,1,""]},"torch.nn.ParameterDict":{clear:[22,2,1,""],items:[22,2,1,""],keys:[22,2,1,""],pop:[22,2,1,""],update:[22,2,1,""],values:[22,2,1,""]},"torch.nn.ParameterList":{append:[22,2,1,""],extend:[22,2,1,""]},"torch.nn.SyncBatchNorm":{convert_sync_batchnorm:[22,2,1,""]},"torch.nn.Transformer":{forward:[22,2,1,""],generate_square_subsequent_mask:[22,2,1,""]},"torch.nn.TransformerDecoder":{forward:[22,2,1,""]},"torch.nn.TransformerDecoderLayer":{forward:[22,2,1,""]},"torch.nn.TransformerEncoder":{forward:[22,2,1,""]},"torch.nn.TransformerEncoderLayer":{forward:[22,2,1,""]},"torch.nn.functional":{adaptive_avg_pool1d:[23,4,1,""],adaptive_avg_pool2d:[23,4,1,""],adaptive_avg_pool3d:[23,4,1,""],adaptive_max_pool1d:[23,4,1,""],adaptive_max_pool2d:[23,4,1,""],adaptive_max_pool3d:[23,4,1,""],affine_grid:[23,4,1,""],alpha_dropout:[23,4,1,""],avg_pool1d:[23,4,1,""],avg_pool2d:[23,4,1,""],avg_pool3d:[23,4,1,""],batch_norm:[23,4,1,""],bilinear:[23,4,1,""],binary_cross_entropy:[23,4,1,""],binary_cross_entropy_with_logits:[23,4,1,""],celu:[23,4,1,""],conv1d:[23,4,1,""],conv2d:[23,4,1,""],conv3d:[23,4,1,""],conv_transpose1d:[23,4,1,""],conv_transpose2d:[23,4,1,""],conv_transpose3d:[23,4,1,""],cosine_embedding_loss:[23,4,1,""],cosine_similarity:[23,4,1,""],cross_entropy:[23,4,1,""],ctc_loss:[23,4,1,""],dropout2d:[23,4,1,""],dropout3d:[23,4,1,""],dropout:[23,4,1,""],elu:[23,4,1,""],elu_:[23,4,1,""],embedding:[23,4,1,""],embedding_bag:[23,4,1,""],fold:[23,4,1,""],gelu:[23,4,1,""],glu:[23,4,1,""],grid_sample:[23,4,1,""],gumbel_softmax:[23,4,1,""],hardshrink:[23,4,1,""],hardtanh:[23,4,1,""],hardtanh_:[23,4,1,""],hinge_embedding_loss:[23,4,1,""],instance_norm:[23,4,1,""],interpolate:[23,4,1,""],kl_div:[23,4,1,""],l1_loss:[23,4,1,""],layer_norm:[23,4,1,""],leaky_relu:[23,4,1,""],leaky_relu_:[23,4,1,""],linear:[23,4,1,""],local_response_norm:[23,4,1,""],log_softmax:[23,4,1,""],logsigmoid:[23,4,1,""],lp_pool1d:[23,4,1,""],lp_pool2d:[23,4,1,""],margin_ranking_loss:[23,4,1,""],max_pool1d:[23,4,1,""],max_pool2d:[23,4,1,""],max_pool3d:[23,4,1,""],max_unpool1d:[23,4,1,""],max_unpool2d:[23,4,1,""],max_unpool3d:[23,4,1,""],mse_loss:[23,4,1,""],multi_margin_loss:[23,4,1,""],multilabel_margin_loss:[23,4,1,""],multilabel_soft_margin_loss:[23,4,1,""],nll_loss:[23,4,1,""],normalize:[23,4,1,""],one_hot:[23,4,1,""],pad:[23,4,1,""],pairwise_distance:[23,4,1,""],pdist:[23,4,1,""],pixel_shuffle:[23,4,1,""],poisson_nll_loss:[23,4,1,""],prelu:[23,4,1,""],relu6:[23,4,1,""],relu:[23,4,1,""],relu_:[23,4,1,""],rrelu:[23,4,1,""],rrelu_:[23,4,1,""],selu:[23,4,1,""],sigmoid:[23,4,1,""],smooth_l1_loss:[23,4,1,""],soft_margin_loss:[23,4,1,""],softmax:[23,4,1,""],softmin:[23,4,1,""],softplus:[23,4,1,""],softshrink:[23,4,1,""],softsign:[23,4,1,""],tanh:[23,4,1,""],tanhshrink:[23,4,1,""],threshold:[23,4,1,""],threshold_:[23,4,1,""],triplet_margin_loss:[23,4,1,""],unfold:[23,4,1,""],upsample:[23,4,1,""],upsample_bilinear:[23,4,1,""],upsample_nearest:[23,4,1,""]},"torch.nn.init":{calculate_gain:[24,4,1,""],constant_:[24,4,1,""],dirac_:[24,4,1,""],eye_:[24,4,1,""],kaiming_normal_:[24,4,1,""],kaiming_uniform_:[24,4,1,""],normal_:[24,4,1,""],ones_:[24,4,1,""],orthogonal_:[24,4,1,""],sparse_:[24,4,1,""],uniform_:[24,
4,1,""],xavier_normal_:[24,4,1,""],xavier_uniform_:[24,4,1,""],zeros_:[24,4,1,""]},"torch.nn.parallel":{DistributedDataParallel:[22,1,1,""],data_parallel:[23,4,1,""]},"torch.nn.parallel.DistributedDataParallel":{no_sync:[22,2,1,""]},"torch.nn.utils":{clip_grad_norm_:[22,4,1,""],clip_grad_value_:[22,4,1,""],parameters_to_vector:[22,4,1,""],remove_spectral_norm:[22,4,1,""],remove_weight_norm:[22,4,1,""],spectral_norm:[22,4,1,""],vector_to_parameters:[22,4,1,""],weight_norm:[22,4,1,""]},"torch.nn.utils.rnn":{PackedSequence:[22,4,1,""],pack_padded_sequence:[22,4,1,""],pack_sequence:[22,4,1,""],pad_packed_sequence:[22,4,1,""],pad_sequence:[22,4,1,""]},"torch.onnx":{"export":[36,4,1,""],is_in_onnx_export:[36,4,1,""],register_custom_op_symbolic:[36,4,1,""],set_training:[36,4,1,""]},"torch.onnx.operators":{shape_as_tensor:[36,4,1,""]},"torch.optim":{ASGD:[37,1,1,""],Adadelta:[37,1,1,""],Adagrad:[37,1,1,""],Adam:[37,1,1,""],AdamW:[37,1,1,""],Adamax:[37,1,1,""],LBFGS:[37,1,1,""],Optimizer:[37,1,1,""],RMSprop:[37,1,1,""],Rprop:[37,1,1,""],SGD:[37,1,1,""],SparseAdam:[37,1,1,""]},"torch.optim.ASGD":{step:[37,2,1,""]},"torch.optim.Adadelta":{step:[37,2,1,""]},"torch.optim.Adagrad":{step:[37,2,1,""]},"torch.optim.Adam":{step:[37,2,1,""]},"torch.optim.AdamW":{step:[37,2,1,""]},"torch.optim.Adamax":{step:[37,2,1,""]},"torch.optim.LBFGS":{step:[37,2,1,""]},"torch.optim.Optimizer":{add_param_group:[37,2,1,""],load_state_dict:[37,2,1,""],state_dict:[37,2,1,""],step:[37,2,1,""],zero_grad:[37,2,1,""]},"torch.optim.RMSprop":{step:[37,2,1,""]},"torch.optim.Rprop":{step:[37,2,1,""]},"torch.optim.SGD":{step:[37,2,1,""]},"torch.optim.SparseAdam":{step:[37,2,1,""]},"torch.optim.lr_scheduler":{CosineAnnealingLR:[37,1,1,""],CyclicLR:[37,1,1,""],ExponentialLR:[37,1,1,""],LambdaLR:[37,1,1,""],MultiStepLR:[37,1,1,""],ReduceLROnPlateau:[37,1,1,""],StepLR:[37,1,1,""]},"torch.optim.lr_scheduler.CyclicLR":{get_lr:[37,2,1,""]},"torch.optim.lr_scheduler.LambdaLR":{load_state_dict:[37,2,1,""],state_dict:[37,2,1,""]},"torch.quasirandom":{SobolEngine:[44,1,1,""]},"torch.quasirandom.SobolEngine":{draw:[44,2,1,""],fast_forward:[44,2,1,""],reset:[44,2,1,""]},"torch.random":{fork_rng:[38,4,1,""],get_rng_state:[38,4,1,""],initial_seed:[38,4,1,""],manual_seed:[38,4,1,""],seed:[38,4,1,""],set_rng_state:[38,4,1,""]},"torch.sparse":{FloatTensor:[39,1,1,""],addmm:[39,4,1,""],mm:[39,4,1,""],sum:[39,4,1,""]},"torch.sparse.FloatTensor":{_indices:[39,2,1,""],_nnz:[39,2,1,""],_values:[39,2,1,""],add:[39,2,1,""],add_:[39,2,1,""],clone:[39,2,1,""],coalesce:[39,2,1,""],dim:[39,2,1,""],div:[39,2,1,""],div_:[39,2,1,""],get_device:[39,2,1,""],hspmm:[39,2,1,""],is_coalesced:[39,2,1,""],mm:[39,2,1,""],mul:[39,2,1,""],mul_:[39,2,1,""],narrow_copy:[39,2,1,""],resizeAs_:[39,2,1,""],size:[39,2,1,""],spadd:[39,2,1,""],spmm:[39,2,1,""],sspaddmm:[39,2,1,""],sspmm:[39,2,1,""],sub:[39,2,1,""],sub_:[39,2,1,""],t_:[39,2,1,""],toDense:[39,2,1,""],transpose:[39,2,1,""],transpose_:[39,2,1,""],zero_:[39,2,1,""]},"torch.torch":{default_generator:[44,3,1,""],device:[41,1,1,""],dtype:[41,1,1,""],finfo:[52,1,1,""],iinfo:[52,1,1,""],layout:[41,1,1,""]},"torch.utils":{data:[13,0,0,"-"],model_zoo:[20,0,0,"-"]},"torch.utils.checkpoint":{checkpoint:[3,4,1,""],checkpoint_sequential:[3,4,1,""]},"torch.utils.cpp_extension":{BuildExtension:[7,4,1,""],CUDAExtension:[7,4,1,""],CppExtension:[7,4,1,""],check_compiler_abi_compatibility:[7,4,1,""],include_paths:[7,4,1,""],load:[7,4,1,""],load_inline:[7,4,1,""],verify_ninja_availability:[7,4,1,""]},"torch.utils.data":{BatchSampler:[13,1,1
,""],ChainDataset:[13,1,1,""],ConcatDataset:[13,1,1,""],DataLoader:[13,1,1,""],Dataset:[13,1,1,""],IterableDataset:[13,1,1,""],RandomSampler:[13,1,1,""],Sampler:[13,1,1,""],SequentialSampler:[13,1,1,""],Subset:[13,1,1,""],SubsetRandomSampler:[13,1,1,""],TensorDataset:[13,1,1,""],WeightedRandomSampler:[13,1,1,""],get_worker_info:[13,4,1,""],random_split:[13,4,1,""]},"torch.utils.data.distributed":{DistributedSampler:[13,1,1,""]},"torch.utils.dlpack":{from_dlpack:[16,4,1,""],to_dlpack:[16,4,1,""]},"torch.utils.model_zoo":{load_url:[20,4,1,""]},"torch.utils.tensorboard.writer":{SummaryWriter:[42,1,1,""]},"torch.utils.tensorboard.writer.SummaryWriter":{__init__:[42,2,1,""],add_audio:[42,2,1,""],add_custom_scalars:[42,2,1,""],add_embedding:[42,2,1,""],add_figure:[42,2,1,""],add_graph:[42,2,1,""],add_histogram:[42,2,1,""],add_image:[42,2,1,""],add_images:[42,2,1,""],add_mesh:[42,2,1,""],add_pr_curve:[42,2,1,""],add_scalar:[42,2,1,""],add_scalars:[42,2,1,""],add_text:[42,2,1,""],add_video:[42,2,1,""],close:[42,2,1,""],flush:[42,2,1,""]},"torchvision.datasets":{CIFAR100:[45,1,1,""],CIFAR10:[45,1,1,""],Cityscapes:[45,1,1,""],CocoCaptions:[45,1,1,""],CocoDetection:[45,1,1,""],DatasetFolder:[45,1,1,""],EMNIST:[45,1,1,""],FakeData:[45,1,1,""],FashionMNIST:[45,1,1,""],Flickr30k:[45,1,1,""],Flickr8k:[45,1,1,""],HMDB51:[45,1,1,""],ImageFolder:[45,1,1,""],ImageNet:[45,1,1,""],KMNIST:[45,1,1,""],Kinetics400:[45,1,1,""],LSUN:[45,1,1,""],MNIST:[45,1,1,""],PhotoTour:[45,1,1,""],QMNIST:[45,1,1,""],SBDataset:[45,1,1,""],SBU:[45,1,1,""],STL10:[45,1,1,""],SVHN:[45,1,1,""],UCF101:[45,1,1,""],USPS:[45,1,1,""],VOCDetection:[45,1,1,""],VOCSegmentation:[45,1,1,""]},"torchvision.datasets.CIFAR10":{__getitem__:[45,2,1,""]},"torchvision.datasets.Cityscapes":{__getitem__:[45,2,1,""]},"torchvision.datasets.CocoCaptions":{__getitem__:[45,2,1,""]},"torchvision.datasets.CocoDetection":{__getitem__:[45,2,1,""]},"torchvision.datasets.DatasetFolder":{__getitem__:[45,2,1,""]},"torchvision.datasets.Flickr30k":{__getitem__:[45,2,1,""]},"torchvision.datasets.Flickr8k":{__getitem__:[45,2,1,""]},"torchvision.datasets.ImageFolder":{__getitem__:[45,2,1,""]},"torchvision.datasets.LSUN":{__getitem__:[45,2,1,""]},"torchvision.datasets.PhotoTour":{__getitem__:[45,2,1,""]},"torchvision.datasets.SBU":{__getitem__:[45,2,1,""]},"torchvision.datasets.STL10":{__getitem__:[45,2,1,""]},"torchvision.datasets.SVHN":{__getitem__:[45,2,1,""]},"torchvision.datasets.USPS":{__getitem__:[45,2,1,""]},"torchvision.datasets.VOCDetection":{__getitem__:[45,2,1,""]},"torchvision.datasets.VOCSegmentation":{__getitem__:[45,2,1,""]},"torchvision.io":{read_video:[47,4,1,""],read_video_timestamps:[47,4,1,""],write_video:[47,4,1,""]},"torchvision.models":{alexnet:[48,4,1,""],densenet121:[48,4,1,""],densenet161:[48,4,1,""],densenet169:[48,4,1,""],densenet201:[48,4,1,""],googlenet:[48,4,1,""],inception_v3:[48,4,1,""],mnasnet0_5:[48,4,1,""],mnasnet0_75:[48,4,1,""],mnasnet1_0:[48,4,1,""],mnasnet1_3:[48,4,1,""],mobilenet_v2:[48,4,1,""],resnet101:[48,4,1,""],resnet152:[48,4,1,""],resnet18:[48,4,1,""],resnet34:[48,4,1,""],resnet50:[48,4,1,""],resnext101_32x8d:[48,4,1,""],resnext50_32x4d:[48,4,1,""],shufflenet_v2_x0_5:[48,4,1,""],shufflenet_v2_x1_0:[48,4,1,""],shufflenet_v2_x1_5:[48,4,1,""],shufflenet_v2_x2_0:[48,4,1,""],squeezenet1_0:[48,4,1,""],squeezenet1_1:[48,4,1,""],vgg11:[48,4,1,""],vgg11_bn:[48,4,1,""],vgg13:[48,4,1,""],vgg13_bn:[48,4,1,""],vgg16:[48,4,1,""],vgg16_bn:[48,4,1,""],vgg19:[48,4,1,""],vgg19_bn:[48,4,1,""],wide_resnet101_2:[48,4,1,""],wide_resnet50_2:[48,4,1
,""]},"torchvision.models.detection":{fasterrcnn_resnet50_fpn:[48,4,1,""],keypointrcnn_resnet50_fpn:[48,4,1,""],maskrcnn_resnet50_fpn:[48,4,1,""]},"torchvision.models.segmentation":{deeplabv3_resnet101:[48,4,1,""],deeplabv3_resnet50:[48,4,1,""],fcn_resnet101:[48,4,1,""],fcn_resnet50:[48,4,1,""]},"torchvision.models.video":{mc3_18:[48,4,1,""],r2plus1d_18:[48,4,1,""],r3d_18:[48,4,1,""]},"torchvision.ops":{RoIAlign:[49,1,1,""],RoIPool:[49,1,1,""],nms:[49,4,1,""],roi_align:[49,4,1,""],roi_pool:[49,4,1,""]},"torchvision.transforms":{CenterCrop:[50,1,1,""],ColorJitter:[50,1,1,""],Compose:[50,1,1,""],FiveCrop:[50,1,1,""],Grayscale:[50,1,1,""],Lambda:[50,1,1,""],LinearTransformation:[50,1,1,""],Normalize:[50,1,1,""],Pad:[50,1,1,""],RandomAffine:[50,1,1,""],RandomApply:[50,1,1,""],RandomChoice:[50,1,1,""],RandomCrop:[50,1,1,""],RandomErasing:[50,1,1,""],RandomGrayscale:[50,1,1,""],RandomHorizontalFlip:[50,1,1,""],RandomOrder:[50,1,1,""],RandomPerspective:[50,1,1,""],RandomResizedCrop:[50,1,1,""],RandomRotation:[50,1,1,""],RandomSizedCrop:[50,1,1,""],RandomVerticalFlip:[50,1,1,""],Resize:[50,1,1,""],Scale:[50,1,1,""],TenCrop:[50,1,1,""],ToPILImage:[50,1,1,""],ToTensor:[50,1,1,""],functional:[50,0,0,"-"]},"torchvision.transforms.Normalize":{__call__:[50,2,1,""]},"torchvision.transforms.ToPILImage":{__call__:[50,2,1,""]},"torchvision.transforms.ToTensor":{__call__:[50,2,1,""]},"torchvision.transforms.functional":{adjust_brightness:[50,4,1,""],adjust_contrast:[50,4,1,""],adjust_gamma:[50,4,1,""],adjust_hue:[50,4,1,""],adjust_saturation:[50,4,1,""],affine:[50,4,1,""],crop:[50,4,1,""],erase:[50,4,1,""],five_crop:[50,4,1,""],hflip:[50,4,1,""],normalize:[50,4,1,""],pad:[50,4,1,""],perspective:[50,4,1,""],resize:[50,4,1,""],resized_crop:[50,4,1,""],rotate:[50,4,1,""],ten_crop:[50,4,1,""],to_grayscale:[50,4,1,""],to_pil_image:[50,4,1,""],to_tensor:[50,4,1,""],vflip:[50,4,1,""]},"torchvision.utils":{make_grid:[51,4,1,""],save_image:[51,4,1,""]},torch:{"var":[44,4,1,""],BoolTensor:[43,1,1,""],FloatStorage:[40,1,1,""],Tensor:[43,1,1,""],__config__:[0,0,0,"-"],abs:[44,4,1,""],acos:[44,4,1,""],add:[44,4,1,""],addbmm:[44,4,1,""],addcdiv:[44,4,1,""],addcmul:[44,4,1,""],addmm:[44,4,1,""],addmv:[44,4,1,""],addr:[44,4,1,""],allclose:[44,4,1,""],arange:[44,4,1,""],argmax:[44,4,1,""],argmin:[44,4,1,""],argsort:[44,4,1,""],as_strided:[44,4,1,""],as_tensor:[44,4,1,""],asin:[44,4,1,""],atan2:[44,4,1,""],atan:[44,4,1,""],autograd:[1,0,0,"-"],baddbmm:[44,4,1,""],bartlett_window:[44,4,1,""],bernoulli:[44,4,1,""],bincount:[44,4,1,""],bitwise_not:[44,4,1,""],blackman_window:[44,4,1,""],bmm:[44,4,1,""],broadcast_tensors:[44,4,1,""],cartesian_prod:[44,4,1,""],cat:[44,4,1,""],ceil:[44,4,1,""],chain_matmul:[44,4,1,""],cholesky:[44,4,1,""],cholesky_inverse:[44,4,1,""],cholesky_solve:[44,4,1,""],chunk:[44,4,1,""],clamp:[44,4,1,""],combinations:[44,4,1,""],compiled_with_cxx11_abi:[44,4,1,""],cos:[44,4,1,""],cosh:[44,4,1,""],cross:[44,4,1,""],cuda:[8,0,0,"-"],cumprod:[44,4,1,""],cumsum:[44,4,1,""],det:[44,4,1,""],diag:[44,4,1,""],diag_embed:[44,4,1,""],diagflat:[44,4,1,""],diagonal:[44,4,1,""],digamma:[44,4,1,""],dist:[44,4,1,""],distributed:[14,0,0,"-"],distributions:[15,0,0,"-"],div:[44,4,1,""],dot:[44,4,1,""],eig:[44,4,1,""],einsum:[44,4,1,""],empty:[44,4,1,""],empty_like:[44,4,1,""],empty_strided:[44,4,1,""],eq:[44,4,1,""],equal:[44,4,1,""],erf:[44,4,1,""],erfc:[44,4,1,""],erfinv:[44,4,1,""],exp:[44,4,1,""],expm1:[44,4,1,""],eye:[44,4,1,""],fft:[44,4,1,""],flatten:[44,4,1,""],flip:[44,4,1,""],floor:[44,4,1,""],fmod:[44,4,1,""],frac:
[44,4,1,""],from_numpy:[44,4,1,""],full:[44,4,1,""],full_like:[44,4,1,""],gather:[44,4,1,""],ge:[44,4,1,""],gels:[44,4,1,""],geqrf:[44,4,1,""],ger:[44,4,1,""],get_default_dtype:[44,4,1,""],get_num_interop_threads:[44,4,1,""],get_num_threads:[44,4,1,""],get_rng_state:[44,4,1,""],gt:[44,4,1,""],hamming_window:[44,4,1,""],hann_window:[44,4,1,""],histc:[44,4,1,""],hub:[17,0,0,"-"],ifft:[44,4,1,""],index_select:[44,4,1,""],initial_seed:[44,4,1,""],inverse:[44,4,1,""],irfft:[44,4,1,""],is_floating_point:[44,4,1,""],is_storage:[44,4,1,""],is_tensor:[44,4,1,""],isfinite:[44,4,1,""],isinf:[44,4,1,""],isnan:[44,4,1,""],jit:[19,0,0,"-"],kthvalue:[44,4,1,""],le:[44,4,1,""],lerp:[44,4,1,""],linspace:[44,4,1,""],load:[44,4,1,""],log10:[44,4,1,""],log1p:[44,4,1,""],log2:[44,4,1,""],log:[44,4,1,""],logdet:[44,4,1,""],logspace:[44,4,1,""],logsumexp:[44,4,1,""],lstsq:[44,4,1,""],lt:[44,4,1,""],lu:[44,4,1,""],lu_solve:[44,4,1,""],lu_unpack:[44,4,1,""],manual_seed:[44,4,1,""],masked_select:[44,4,1,""],matmul:[44,4,1,""],matrix_power:[44,4,1,""],matrix_rank:[44,4,1,""],max:[44,4,1,""],mean:[44,4,1,""],median:[44,4,1,""],meshgrid:[44,4,1,""],min:[44,4,1,""],mm:[44,4,1,""],mode:[44,4,1,""],mul:[44,4,1,""],multinomial:[44,4,1,""],multiprocessing:[21,0,0,"-"],mv:[44,4,1,""],mvlgamma:[44,4,1,""],narrow:[44,4,1,""],ne:[44,4,1,""],neg:[44,4,1,""],nn:[22,0,0,"-"],nonzero:[44,4,1,""],norm:[44,4,1,""],normal:[44,4,1,""],numel:[44,4,1,""],ones:[44,4,1,""],ones_like:[44,4,1,""],onnx:[36,0,0,"-"],optim:[37,0,0,"-"],orgqr:[44,4,1,""],ormqr:[44,4,1,""],pinverse:[44,4,1,""],pow:[44,4,1,""],prod:[44,4,1,""],qr:[44,4,1,""],rand:[44,4,1,""],rand_like:[44,4,1,""],randint:[44,4,1,""],randint_like:[44,4,1,""],randn:[44,4,1,""],randn_like:[44,4,1,""],random:[38,0,0,"-"],randperm:[44,4,1,""],range:[44,4,1,""],reciprocal:[44,4,1,""],remainder:[44,4,1,""],renorm:[44,4,1,""],repeat_interleave:[44,4,1,""],reshape:[44,4,1,""],rfft:[44,4,1,""],roll:[44,4,1,""],rot90:[44,4,1,""],round:[44,4,1,""],rsqrt:[44,4,1,""],save:[44,4,1,""],seed:[44,4,1,""],set_default_dtype:[44,4,1,""],set_default_tensor_type:[44,4,1,""],set_flush_denormal:[44,4,1,""],set_num_interop_threads:[44,4,1,""],set_num_threads:[44,4,1,""],set_printoptions:[44,4,1,""],set_rng_state:[44,4,1,""],sigmoid:[44,4,1,""],sign:[44,4,1,""],sin:[44,4,1,""],sinh:[44,4,1,""],slogdet:[44,4,1,""],solve:[44,4,1,""],sort:[44,4,1,""],sparse_coo_tensor:[44,4,1,""],split:[44,4,1,""],sqrt:[44,4,1,""],squeeze:[44,4,1,""],stack:[44,4,1,""],std:[44,4,1,""],std_mean:[44,4,1,""],stft:[44,4,1,""],sum:[44,4,1,""],svd:[44,4,1,""],symeig:[44,4,1,""],t:[44,4,1,""],take:[44,4,1,""],tan:[44,4,1,""],tanh:[44,4,1,""],tensor:[44,4,1,""],tensordot:[44,4,1,""],topk:[44,4,1,""],trace:[44,4,1,""],transpose:[44,4,1,""],trapz:[44,4,1,""],triangular_solve:[44,4,1,""],tril:[44,4,1,""],tril_indices:[44,4,1,""],triu:[44,4,1,""],triu_indices:[44,4,1,""],trunc:[44,4,1,""],unbind:[44,4,1,""],unique:[44,4,1,""],unique_consecutive:[44,4,1,""],unsqueeze:[44,4,1,""],var_mean:[44,4,1,""],where:[44,4,1,""],zeros:[44,4,1,""],zeros_like:[44,4,1,""]},torchvision:{get_image_backend:[46,4,1,""],set_image_backend:[46,4,1,""]}},objnames:{"0":["py","module","Python module"],"1":["py","class","Python class"],"2":["py","method","Python method"],"3":["py","attribute","Python attribute"],"4":["py","function","Python function"],"5":["std","envvar","environment 
variable"]},objtypes:{"0":"py:module","1":"py:class","2":"py:method","3":"py:attribute","4":"py:function","5":"std:envvar"},terms:{"00000e":44,"0000e":[43,44],"041m":1,"048m":1,"0545e":43,"0705e":44,"0949e":43,"10k":45,"10x7":22,"1239e":44,"13x12":22,"1428e":44,"148m":1,"16x112x112":48,"1921e":44,"1_batch_16":42,"1e6":37,"1hr":4,"1st":[15,26],"1x1":48,"20l":22,"224x224":48,"2gb":17,"2nd":[15,22,23,26,43],"2x3":[22,39],"32x4d":48,"32x8d":48,"3493e":44,"3842e":44,"3rd":[26,37,43],"3x4":22,"3xhxw":42,"4064e":44,"427l":45,"483m":1,"4842e":[43,44],"4th":[26,45],"4us":1,"50k":45,"50x":48,"54_":42,"5751e":44,"5765e":43,"5955e":44,"5c106cde":[17,20],"5mb":48,"5x2":39,"5x7":22,"5x7x9":22,"60k":45,"640l":45,"6503e":44,"6531e":44,"727m":1,"7x7":22,"7x7x7":22,"7x9x8":22,"8000e":44,"816u":1,"8182e":43,"88131e":44,"9073e":[22,44],"9683e":44,"abstract":[13,15],"boolean":[1,8,15,19,22,23,29,37,41,43,44,45,50],"break":[4,15,19,34,37,44],"byte":[8,15,19,37,40,43,44],"case":[1,2,8,13,14,17,21,22,23,25,26,27,28,30,32,34,36,37,38,39,42,43,44,45,50,52],"catch":19,"char":[40,43],"ciss\u00e9":22,"class":[1,8,13,14,15,19,21,22,23,29,30,32,33,34,36,37,39,40,41,42,43,44,45,48,49,50,52],"const":[31,36],"default":[1,3,7,8,14,17,20,21,22,23,24,25,27,28,29,30,35,36,37,38,39,40,42,43,44,45,48,49,50,51,52],"enum":[14,36],"export":[1,8,14,16,19,22,29,36],"final":[14,15,22,44,48,50,51],"float":[1,13,15,19,21,22,23,30,33,36,37,40,41,42,43,44,47,49,50,51,52],"function":[3,4,7,8,13,17,18,20,21,24,25,26,27,28,29,30,31,33,37,38,41,42,43,44,45,46,47,48],"herv\u00e9":22,"import":[1,4,5,7,8,13,14,19,21,22,25,28,29,30,31,32,33,36,37,42,43,44,45,48,50],"int":[8,13,14,15,19,21,22,23,35,36,37,38,39,40,41,42,43,44,45,47,49,50,51,52],"j\u00e9gou":22,"long":[4,5,13,21,22,23,26,29,30,32,36,38,40,41,42,43,44],"new":[1,3,5,8,13,14,15,17,19,21,22,25,28,29,31,32,35,37,40,42,43,44],"return":[0,1,3,7,8,13,14,15,16,17,19,20,21,22,23,24,27,28,29,31,35,36,37,38,39,40,41,43,44,45,47,48,49,50,52],"short":[19,22,23,26,40,41,43,44,50],"static":[1,19,31,36,40],"super":[13,19,22,29,36],"switch":[9,10,13,21,23,25,43,44,48],"throw":[22,43,44],"true":[1,3,7,8,11,13,14,15,17,19,20,21,22,23,25,26,28,29,30,31,33,36,37,38,39,40,41,42,43,44,45,48,50,51],"try":[2,4,11,14,17,19,22,23,30,32,36,37],"var":[1,22,43,44],"void":[31,44],"while":[5,13,14,15,19,22,23,25,30,32,37,42,43,44,50],Abs:36,And:[22,35,44,50],But:[1,4,19],For:[1,2,3,4,5,7,8,13,14,15,17,19,22,23,25,26,27,28,30,32,36,37,39,40,41,42,43,44,45,48,50],Going:48,Has:[22,23,44],IDs:38,Its:[22,37],NFS:14,NMS:49,NOT:[19,36,39,44],Not:[19,29],One:[14,19,22,23,26,27,31,33,36,37,42,44,45,48],Ops:[2,28,43],PRs:[4,5],RHS:44,Such:[7,13,44],That:[44,50],The:[1,3,5,7,8,13,14,15,16,17,19,20,21,22,23,24,26,27,28,30,31,32,34,35,36,37,38,40,41,42,43,44,45,46,47,48,50,51,52],Then:[1,26,34,36,37,50],There:[1,4,5,14,17,19,22,25,28,29,30,31,32,33,34,35,36,43,44],These:[7,13,14,15,19,22,29,36,39,41,44,45,48],Use:[8,13,14,22,23,32,42,43,44,50],Used:[13,44],Useful:[8,22],Uses:8,Using:[13,15,19,22,32],WITH:36,Will:[6,14,19,44,50],With:[13,15,22,23,28,36,37,42],__background__:48,__call__:50,__config__:[18,27],__constants__:19,__dict__:37,__file__:[17,35],__getitem__:[13,45],__init__:[1,13,15,19,22,29,30,36,42],__iter__:13,__len__:[13,45],__main__:[13,26,32,35],__name__:[13,32,35],__new__:19,_back:[22,23],_bottom:[22,23],_call:15,_caller:38,_cat:15,_channel:[22,23],_class:22,_compilation_unit:19,_cpp_modul:19,_dependentproperti:15,_devices_kw:38,_dim:22,_direct:22,_ext:35,_extra_fil:[19,31],_factor:22,_featur:[22,23],_fft:44,_force_
outplac:19,_fork:27,_formatt:44,_forward_cl:1,_frames_up:19,_front:[22,23],_glibcxx_use_cxx11_abi:44,_greaterthan:15,_greaterthaneq:15,_halfopeninterv:15,_if_scalar_type_a:36,_in:24,_index:22,_indic:[39,44],_instanc:15,_integerinterv:15,_interv:15,_invers:15,_key_padding_mask:22,_layer:22,_left:[22,23],_length:[22,44],_lessthan:15,_like:43,_load_from_state_dict:22,_log_api_usage_onc:31,_mask:22,_metadata:22,_module_class:19,_nnz:39,_onnx_master_opset:36,_onnx_stable_opset:36,_out:24,_pad:22,_qualified_nam:19,_random_sampl:22,_rcb:19,_resnet18:17,_retain_param_nam:36,_right:[22,23],_sampl:44,_scalar:36,_shape:22,_size:22,_slope:[22,23,24],_stack:15,_stacklevel:23,_tensor:43,_top:[22,23],_valu:[22,39,44],_wait:27,_weight:22,a3c:32,a_big:44,a_dict:19,a_i:22,a_l:44,a_lu:44,a_tupl:19,a_u:44,aaa:42,abc:22,abi:7,abil:[5,31],abl:[4,19,22,36,44],abnorm:[21,32],about:[1,5,8,13,19,22,27,29,30,31,32,36,43,50],abov:[1,15,17,19,22,26,27,28,29,36,37,44,45,50],abridg:30,abruptli:21,abs:[15,22,23,36,37,43,44,48],abs_:43,absolut:[1,5,7,22,23,43,44,50],abstransform:15,acc:48,acceler:[4,22,37],accept:[1,4,5,14,19,22,29,36,37,41,42,43,44],access:[5,13,14,21,22,25,28,30,31,41,43,52],accident:4,accimag:46,accommod:22,accompani:4,accomplish:4,accord:[22,24,35,36,37,44,45,48,49],accordingli:[43,45,48],accoridng:22,account:[2,22],accumul:[1,19,22,30,43,44],accumulategrad:1,accur:[8,36,44],accuraci:[42,48],achiev:[13,15,22,23,31,36],aco:[36,43,44],acos_:43,acquaint:4,across:[1,8,13,14,19,22,23,28,30,31,32,33,40,42,43,44,45],act:[15,22,50],action:[5,15,28,45,48],activ:[1,3,4,8,21,28,29],actual:[1,13,17,19,22,25,27,28,29,32,35,36,37],actual_input_1:36,acycl:25,adadelta:37,adagrad:[22,37],adam:[5,6,15,37],adamax:37,adamw:37,adapt:[19,22,23,37,44,49],adaptive_avg_pool1d:36,adaptive_avg_pool2d:36,adaptive_avg_pool3d:36,adaptive_max_pool1d:36,adaptive_max_pool2d:36,adaptive_max_pool3d:36,adaptiveavgpool1d:23,adaptiveavgpool2d:23,adaptiveavgpool3d:23,adaptivelogsoftmaxwithloss:19,adaptivemaxpool1d:23,adaptivemaxpool2d:23,adaptivemaxpool3d:23,add:[1,4,8,17,19,22,23,26,29,31,36,37,39,42,43,44,48],add_:[1,26,39,43],add_argu:28,add_audio:42,add_bias_kv:22,add_custom_scalar:42,add_embed:42,add_figur:42,add_graph:42,add_histogram:42,add_imag:42,add_mesh:42,add_modul:22,add_param_group:37,add_pr_curv:42,add_scalar:42,add_text:42,add_video:42,add_zero_attn:22,addbmm:[43,44],addbmm_:43,addcdiv:[43,44],addcdiv_:43,addcmul:[43,44],addcmul_:43,added:[4,7,22,23,31,36,37,39,42,43,44],adding:[13,14,17,19,22,29,36,43,44],addit:[1,4,5,7,15,19,22,23,27,28,29,31,32,33,35,37,39,43,44],addition:[1,13,14,15,22,30,43,44,50],additionali:22,addmm:[36,39,43,44],addmm_:43,addmv:[43,44],addmv_:43,addr:[43,44],addr_:43,address:[1,13,14,21,43,48],adher:5,adjac:[22,44],adjust:[22,50],adjust_bright:50,adjust_contrast:50,adjust_gamma:50,adjust_hu:50,adjust_satur:50,admit:28,adopt:5,advanc:[3,22,25,32,36,42],advantag:[14,22,30],adventur:36,adversari:22,advic:4,advis:[32,44],advisori:4,aeroplan:48,affect:[1,4,8,22,23,40,44],affin:[15,22,23,25,43,50],affinetransform:15,aforement:32,afram:47,after:[4,7,8,13,14,17,19,21,22,24,28,30,31,32,34,37,41,42,43,44,49,50],afterward:[1,22],again:[3,13,14,44,45],against:[1,2,14,19,44,50],aggreg:[22,23,48],aggress:[1,25],ahead:4,aid:[4,25],aidan:22,ail:6,ailzhang:6,aim:4,airplan:48,aka:1,akin:31,alban:6,alband:6,alex:6,alfredo:6,algorithm:[4,11,12,15,22,23,44],alia:[15,43],alias:29,alican:6,alicanb:6,align:[22,23,49],align_corn:[22,23],aliv:[30,32],all:[1,3,4,5,7,8,13,14,15,17,19,21,22,23,25,27,28,29,30,32,33,35,36,37,38,39
,40,41,42,43,44,45,48,49,50,51],all_gath:14,all_gather_multigpu:14,all_reduc:14,all_reduce_multigpu:14,allclos:[1,43,44],alloc:[1,2,8,15,21,25,28,30,41,43,44],allow:[1,4,5,7,13,14,15,17,19,22,25,26,27,28,31,32,36,37,41,42,43,44,45],allow_unreach:1,allow_unus:1,almost:[35,44,45],alon:19,along:[7,8,13,14,15,17,19,22,23,26,30,31,37,43,44],alpha:[15,22,23,36,37,39,43,44],alpha_f:36,alphabet:[23,44,45],alphadropout:23,alreadi:[8,13,14,17,19,20,22,29,32,36,37,40,43,44,45],also:[1,3,4,5,7,8,13,14,15,17,19,21,22,23,24,25,27,28,29,30,31,32,33,35,36,37,39,42,43,44,45,50],altern:[13,17,19,22,23,35,44],although:[4,15,22],alwai:[1,8,13,14,19,21,22,26,27,28,29,31,36,38,39,41,43,44],amazonaw:[20,35],ambigu:[15,22],among:[8,13,14,15,22,36,44],amount:[1,2,4,8,22,25,28,30,50,51],amplitud:37,amsgrad:37,an_error:19,anaconda:35,analog:[37,44],analogu:19,analyt:[1,15],anchor:[22,23],angl:[22,50],ani:[1,2,3,4,5,8,13,14,15,19,21,22,23,25,27,28,29,31,32,33,36,37,42,43,44,50],anm:44,ann_fil:45,anneal:37,annfil:45,annot:[1,19,45],annotation_path:45,anoth:[4,8,13,14,19,22,27,28,29,32,35,36,43,44,49],another_input:22,anothermodel:36,answer:[4,5,22],anticip:3,anymor:[1,14,22,43],anyon:5,anyth:[3,4,14,19],aoa:35,apaszk:[5,6],api:[1,5,8,17,19,21,28,36,39,42,43,45],aplli:50,appear:[2,14,15,22,29,37,44],append:[1,14,19,22,32,35,42,43,44],appl:48,appli:[1,3,14,15,19,21,22,23,25,29,36,37,43,44,50],applic:[8,14,15,22,25,27,28,43,50],apply_:43,apprear:44,appreci:4,approach:[14,19,21,22,44],appropri:[4,14,15,19,22,44,48],approv:5,approxim:[1,22,23,29,37],arang:[13,22,23,36,42,43,44],arbitrari:[1,14,19,22,23,25,31,43,44],arccosin:44,architechtur:22,architectur:[22,44,46,48],archiv:[19,31],arcsin:44,arctang:44,area:[4,5,23,50],arg0:1,arg1:1,arg:[1,2,3,7,14,15,17,21,22,23,28,32,34,36,40,43,44,45,50],arg_constraint:15,argmax:[22,36,43,44],argmin:[36,43,44],argpars:28,argsort:[43,44],argument:[1,2,3,7,8,13,14,15,17,19,21,22,23,26,28,29,30,31,36,37,38,40,41,42,43,44,45,51,52],argumentpars:28,ari:36,aris:15,arithmet:44,armand:22,around:[1,4,5,8,14,19,21,28,43,50],arrai:[13,22,23,36,40,42,43,44,45],arrang:45,array_lik:[43,44],art:44,articul:5,artifact:31,artifici:1,arxiv:[22,48,50],as_strid:[43,44],as_tensor:[42,43,44],as_tupl:44,asap:21,ascend:44,ascent:15,ascii:[8,44],asd932_:45,asgd:37,ashish:22,asin:[36,43,44],asin_:43,ask:[4,14,18],aspect:[4,50],assembl:13,assert:[13,15,36],assert_allclos:19,assign:[4,13,14,19,22,29,30,36,42,45],assign_x:19,associ:[1,8,19,22,23,41,43,44],assum:[13,14,15,19,22,23,29,31,36,37,44,48,50],assumpt:[22,50],ast_1:[22,23],ast_2:[22,23],astyp:36,asuhan:6,async:[14,40,43],async_op:14,asynchron:[2,22,27,40,42,43],atan2:[43,44],atan2_:43,atan:[36,43,44],atan_:43,aten:[19,27,35,44],aten_thread:27,atol:[1,19,29,43,44],atom:33,atomicadd:33,attach:17,attempt:[19,28,35,44],attend:22,attent:[4,22,35],attn:22,attn_mask:22,attn_output:22,attn_output_weight:22,attr1:36,attr1_f:36,attr2:36,attr2_i:36,attr:[15,22,23,36,44],attribut:[1,13,14,18,22,25,28,29,36,43,52],audio:[42,45,47],audio_fp:47,aug_add_x:19,augment:50,auto:[14,22,42],autoencod:15,autograd:[2,3,4,15,18,22,23,30,31,36,43,44],autograd_tensor:1,autom:[19,36],automat:[7,8,14,22,25,26,28,29,32,36,42,43,44],aux_logit:48,aux_loss:48,auxiliari:[17,31,48],avaialbl:17,avail:[7,8,13,14,17,19,21,22,23,28,29,35,36,44,45],averag:[1,14,22,23,37],avg:[1,50],avg_pool1d:36,avg_pool2d:36,avg_pool3d:36,avgpool1d:23,avgpool2d:23,avgpool3d:23,avmgithub:6,avoid:[5,13,15,22,23,30,33,42,43,44,50],awai:23,awar:[4,48],axbc:22,axes:36,axi:[36,43,44,50],b_hf:22,b_hg:22,b_hh:22,b_hi:22,
b_hn:22,b_ho:22,b_hr:22,b_hz:22,b_if:22,b_ig:22,b_ih:22,b_ii:22,b_in:22,b_io:22,b_ir:22,b_iz:22,back:[17,32,36,39,44,50],backbon:48,backcompat:26,backend:[1,9,10,11,19,22,23,27,28,33,36,43,44,46],backend_str:14,background:[9,10,11,22,23,32,43,44,45],backpack:48,backprop:44,backpropag:[1,15,30,37],backward:[1,3,5,10,15,22,23,24,29,30,32,33,37,39,43,44],bad:21,baddbmm:[43,44],baddbmm_:43,bag:[22,23],bai:6,balanc:[44,45],ball:48,balnta:22,banana:48,bar:[4,19,20,48],bare:7,barrier:14,bartlett:44,bartlett_window:44,base:[1,4,5,7,8,13,15,19,22,25,27,36,37,42,43,44,45,50],base_distribut:15,base_lr:37,base_momentum:37,base_se:13,basebal:48,basedistribut:15,basep:15,baseq:15,basi:[15,31,37],basic:[4,22,37,42],bat:48,batch1:[43,44],batch2:[43,44],batch:[15,22,23,28,30,32,33,36,37,42,44,45,48,49,50,51],batch_first:[22,30],batch_ndx:13,batch_sampl:13,batch_shap:15,batch_siz:[13,15,22,42,45],batchmean:[22,23],batchnorm1d:23,batchnorm2d:23,batchnorm3d:23,batchnorm:[22,36],batchnormnd:22,batchsampl:13,batchsiz:[22,23],batchwis:22,bbb:42,bceloss:23,bcewithlogitsloss:23,bckenstler:37,bddppq:6,beam:19,bear:48,becaus:[1,2,4,13,15,19,21,22,26,28,30,31,35,36,42,43,44,48],becom:[1,4,5,13,15,22,23,36,43,44],bed:48,bedroom_train:45,been:[1,8,14,15,21,22,27,32,35,37,42,44,48,49],befor:[1,4,8,13,14,15,19,21,22,23,25,27,28,29,31,35,36,37,39,42,43,44],beforehand:4,begin:[4,8,22,31,36,37,43,44],behav:[7,19,43],behavior:[4,7,14,17,19,22,23,26,28,36,37,43,44,48],behaviour:[1,9,10,23,43,44],behind:45,being:[1,5,13,15,19,22,23,29,32,36,43,44,50],belong:[3,8,14,15,28,37,50],below:[1,7,13,14,15,19,21,22,23,28,29,32,35,36,44,50],ben:22,bench:48,benchmark:33,benefit:[4,14,21,37],bengio:24,bernoulli:[22,23,43,44],bernoulli_:[43,44],besid:42,bessel:44,best:[1,4,13,14,18,19,21,30,37,44],beta:[22,23,36,37,39,43,44],better:[4,5,8,13,19,22,23,27,35,42,44],between:[1,4,8,14,15,19,21,22,23,28,32,33,37,40,42,43,44,45,48,50],bewar:4,beyond:[5,30,37,44],bfg:37,bfloat16:[40,43],bia:[5,22,23,29,42],bias:[22,44],bias_hh:22,bias_hh_l:22,bias_ih:22,bias_ih_l:22,bicub:[22,23,50],bicycl:48,bidirect:[22,36],big:[4,44],bij:44,biject:15,biject_to:15,bik:44,bilinear:[44,50],bin:[42,43,44,49],binari:[15,19,22,23,31,35,36,42,43,44,45,48],bincount:[33,43,44],bind:[7,8,36],bird:48,bit:[4,35,38,41,43,44,52],bitwis:[14,44],bitwise_not:[43,44],bitwise_not_:43,bjk:44,bl_flip:50,bla:27,black:50,blackman:44,blackman_window:44,blank:[22,23],blob:[31,36,42],blobnam:42,block0:[19,36],block1:19,block:[4,8,13,14,19,21,22,23,36,48],blog:4,blow:30,blue:45,bmm:[43,44],board:5,boat:48,bodi:19,boil:4,book:48,bool:[1,3,8,13,14,15,17,19,20,21,22,23,36,37,38,40,41,42,43,44,45,48,50,51],booltensor:[41,43,44],bootcamp:4,bootstrap:35,border:[23,50],both:[1,8,13,14,15,19,22,23,26,29,33,36,39,43,44,45,47,48,50],bottl:48,bottleneck:[18,48],bottom:[1,23,50],bound:[2,22,23,24,34,37,43,44],boundari:[22,23,37,45],bowl:48,box:[48,49],bozkurt:6,bptt:30,br_flip:50,branch:[4,17,19,48],brand:14,bregman:15,breviti:[1,36],brief:21,bright:[45,50],brightness_factor:50,broadcast:[8,14,15,18,22,36,43,44],broadcast_buff:22,broadcast_coalesc:8,broadcast_multigpu:14,broadcast_tensor:44,broadcast_warn:26,broader:[5,31,44],broccoli:48,broken:4,brokenpipeerror:35,brown:45,bucket:22,bucket_cap_mb:22,buf:22,buffer:[1,2,8,13,19,22,25,29,44],buffer_s:8,bug:[5,32],bugfix:4,build:[7,14,15,19,23,25,42,50],build_directori:7,build_ext:7,buildextens:7,built:[4,14,19,27,32,37,44],builtin:44,bulk:13,bump:22,bundl:31,bus:48,byclass:45,bymerg:45,bypass:28,byte_arrai:44,bytecod:13,bytesio:[19,44],bytetensor:[8,2
2,38,41,43,44],bz2:45,c10:31,c10_log_api_usage_onc:31,c99:35,c_0:22,c_1:22,c_j:22,c_n:22,c_t:22,cach:[8,15,20,21,22,30,44,48],cache_s:15,caffe2:[36,42],cake:48,calcul:[1,3,13,22,23,26,35,37,44],calculate_gain:24,call:[1,7,8,13,14,15,17,21,22,23,27,28,29,30,31,32,35,36,37,41,42,43,44,48,51,52],callabl:[13,15,17,19,37,43,44,45],callback:31,caller:28,camera:[31,42],can:[1,2,3,4,5,7,8,11,12,13,14,15,16,17,19,21,22,23,25,26,27,28,29,30,31,32,33,34,35,36,37,39,41,42,43,44,45,47,48,50,52],candid:1,cannot:[1,13,15,17,19,22,23,35,39,40,43,44],cap:45,capabl:[8,14,31,44],capac:28,capacit:15,captur:[8,19,36],car:48,card:35,cardin:15,care:[4,7,15,21,22,28,30,32,39,44],carlo:15,carri:26,carrier:15,carrot:48,cartesian:[15,44],cartesian_prod:44,cast:[1,22,23,36,40,43,44],cat:[15,19,22,36,39,43,44,45,48],categor:[4,23],categori:[15,44,45,48],categorynam:42,cattransform:15,cauchi:[43,44],cauchy_:[43,44],caught:21,caus:[1,3,13,14,19,21,23,26,30,32,35,36,43,44],caveat:[21,28],ccc:42,cdf:15,cdot:[22,23,44],ceil:[13,22,23,36,43,44,49],ceil_:43,ceil_mod:[22,23],cell:[22,48],center:[23,37,42,43,44,50],center_flip:50,centercrop:50,central:[31,50],cerr:31,certain:[13,14,19,22,23,26,31,39,44],certainli:44,chain:[1,13,15,22,25,43,44,50],chain_matmul:44,chaindataset:13,chair:48,challeng:4,chanan:[5,6],chanc:[4,15],chang:[1,4,8,15,19,21,22,23,25,26,28,35,36,37,39,40,42,43,44,48,50],channel:[5,13,22,23,24,36,42,45,47,48,50],charact:[23,44],chart:42,chartensor:[41,43],chartnam:42,cheap:[15,22],cheaper:13,check:[2,7,8,13,14,15,17,22,29,30,31,36,37,42,43,44,45],check_compiler_abi_compat:7,check_input:19,check_model:36,check_reduct:22,check_sparse_nnz:1,check_toler:19,check_trac:19,checker:[19,36],checkout:36,checkpoint:[1,17,18,20,22,44],checkpoint_sequenti:3,child:[13,21,22,35],children:[21,22],chintala:[5,6],choic:[19,22,27,36],choleski:[15,43,44],cholesky_invers:[43,44],cholesky_solv:[43,44],choos:[1,24,42],chosen:[44,50],christian:6,chrome:1,chunk:[3,8,13,19,22,43,44],chunk_siz:8,church_train:45,chw:42,cifar100:45,cifar10:45,cifar:46,circleci:4,circular:23,circumst:[11,19,22,23],cityscap:46,claim:4,clamp:[23,36,43,44],clamp_:43,clamp_max:36,clamp_min:36,class_i:45,class_index:[13,45],class_x:45,classif:[22,23,24,45,46],classifi:[25,36,37,42],classmethod:[8,22],clean:[8,14,17,21],cleaner:25,clear:[17,22,28,37],click:44,clip:[22,45,48],clip_valu:22,clock:48,clockwis:50,clone:[1,13,21,23,39,40,43,44],cloned_coeffici:44,close:[8,29,42],closest:[23,44],cloud:42,clp:45,clr:[37,44],cluster:[22,42],clutter:42,cmake:35,cmake_gener:35,cmake_include_path:35,cmdclass:7,cmyk:50,cnn:[22,25,49],coalesc:[8,39,43],coars:45,coco:[46,48],coco_instance_category_nam:48,coco_person_keypoint_nam:48,coco_util:48,cococapt:45,cocodetect:45,code:[1,2,5,7,13,14,15,17,22,26,29,30,31,32,33,35,36,37,38,39,41,43,44,48],codebas:5,codec:44,codomain:15,coeffici:[37,44],cohes:5,col2im:22,col:[44,45],colesburi:[5,6],collat:13,collate_wrapp:13,collect:[1,4,13,37,42,44,45],color:[22,42,45,50],colorjitt:50,colors_tensor:42,column:[1,22,23,24,43,44,49,50],com:[4,5,20,35,36],combin:[13,19,22,23,28,36,44],combinations_with_replac:44,come:[4,13,22,31,45],comm:8,comma:[14,44],command:[1,2,35],comment:[4,19,29,42],commit:[4,5,17,33],committ:5,common:[13,22,28,30,32,44,45,46,50],commonli:[14,15,37,41],commun:[4,5,18],compani:5,compar:[1,3,13,19,22,29,35,42,44],comparison:29,compat:[7,13,15,19,21,40,43,44,45],compil:[7,19,27,31,35],compilationunit:19,compiled_with_cxx11_abi:44,complementari:[44,50],complet:[4,8,14,21,25,33,44,50],complex:[4,22,32,44,50],complic:[2,26
],compon:[4,14,15,22,31,44],compos:[15,19,22,23,36,42,44,50],composetransform:15,composit:[15,19],compris:3,comput:[3,4,8,13,14,15,19,22,23,25,27,28,29,30,33,36,37,39,43,46,48,49,50,51],compute_uv:[43,44],compute_z:27,concat:[22,36],concatdataset:13,concaten:[7,8,13,22,23,44],concentr:15,concentrarion:15,concentration0:15,concentration1:15,concept:[4,36,41],conceptu:[1,25],concern:[13,21],concret:[15,19,22,23,32],concurr:[27,28],cond:36,conda:[35,36,44],condit:[1,12,19,22,29,36,42,43,44],condition:1,conduct:[5,22],confer:5,confid:[4,42],config:35,config_dict:42,configur:[0,4,13,14,22,28,35,42,44,48],confirm:[4,19,36],conform:22,conjug:[37,44],conjunct:[13,23],connect:[14,21,22,25,48],connectionist:[22,23],conquer:44,consecut:[14,43,44],consensu:4,consid:[17,19,22,23,26,29,30,37,43,44,45],consider:[4,22],consist:[13,19,36,37,44,46],consol:42,constant:[13,22,23,29,36,37,44,50],constant_:24,constantpad2d:23,constantpadnd:36,constrain:[15,22],constraint:22,constraint_registri:15,constraintregistri:15,construct:[1,13,15,22,25,32,39,41,42,43,44,48],construct_transform:15,constructor:[7,13,22,28,39,43,48,52],consum:[13,16,21,32,36,42],consumpt:[1,42],contain:[1,3,7,8,13,14,15,19,23,25,29,30,31,36,37,40,41,42,43,44,45,47,48,49,50],content:[4,19,20,21,37,42,43,44],contenti:5,context:[1,8,21,22,28,29,31,32,36,38,44],contigu:[22,23,40,43,44],continu:[13,15,19,22,36,43,44],continuum:35,contract:44,contrail:45,contrain:44,contrari:[4,27],contrast:[15,37,48,50],contrast_factor:50,contribut:[1,5,18,22,23,44],contributor:[4,5],control:[13,19,22,25,27,28,32,36,44,50],conv1:[19,22,42],conv2:[19,22],conv2d:[19,36,42],conv4:22,conv5:22,conv:[19,22,24,36],conveni:[4,7,17,19,28,29,31,38],convent:[1,20,22,36,43,44],converg:37,convers:[4,25,36,43,46],convert:[1,13,19,22,29,36,42,43,44,50],convert_sync_batchnorm:22,convolut:[24,27],convolv:[22,23],convtranspos:22,convtranspose1d:23,convtranspose2d:23,convtranspose3d:23,coo:[39,41,43,44],cooldown:37,coordin:[4,15,39,42,43,44,49,50],cope:32,copi:[4,8,13,14,19,21,22,26,28,32,40,43,44],copy_:[1,19,22,28,40,43],core:[4,19,27,36],corner:[22,23,50],corpor:[4,5],correct:[2,4,14,15,19,22,40,43,44,50],correctli:[3,14,19,22,23,32,36],correl:[1,15,22],correspond:[1,4,8,13,15,17,19,22,23,29,31,36,37,40,42,43,44,48,49,50],corrupt:[22,32,45],cos:[22,36,37,42,43,44],cos_:43,cosh:[43,44],cosh_:43,cosin:[22,23,37,44],cosineannealinglr:37,cosineembeddingloss:23,cost:[1,2,11,22,23,44],couch:48,could:[2,4,8,13,15,21,35,44],couldn:[35,36],count:[1,8,15,44],count_include_pad:[22,23],counter:[1,8,21,22,25,50],counterpart:44,coupl:[31,33],cours:[2,17,37],courtesi:15,cov_diag:15,cov_factor:15,covari:[15,22,50],covariance_matrix:15,cover:[29,31,45],coverag:4,cow:48,cpp:[4,5,7,44],cpp_extens:[18,29],cpp_sourc:7,cppdoc:4,cppextens:7,cprofil:2,cpu:[1,2,8,14,18,19,21,22,23,28,32,33,35,36,38,40,41,43,44],cpu_model:19,cpu_tim:1,cpu_time_tot:1,cpuhrsch:6,crack:4,crash:[21,42],crcv:45,creat:[1,3,4,7,8,13,14,15,21,22,25,28,32,36,40,42,43,44,45,52],create_extens:35,create_graph:[1,43],creation:[1,8,13,19,21,22,28,43,45],creator:25,crelu:22,criterion:[22,23,30],critic:22,crop:[48,49,50],cross:[4,15,22,23,28,35,43,44],crossentropyloss:23,crossmaplrn2d:19,csrc:[35,36],ctc_loss:[22,33],ctcloss:23,ctx:[1,29],cube:[22,44],cubla:8,cublashandle_t:8,cuda0:[28,43],cuda100:35,cuda101:35,cuda1:41,cuda2:28,cuda80:35,cuda90:35,cuda92:35,cuda:[1,2,3,7,9,10,11,13,14,18,19,22,23,29,33,36,37,38,40,41,43,44,48],cuda_extens:7,cuda_hom:7,cuda_launch_block:28,cuda_prefix:35,cuda_runtim:7,cuda_sourc:7,cuda_tim:1,cuda_time_t
ot:1,cuda_visible_devic:[8,28],cudaev:1,cudaeventsynchron:8,cudaextens:7,cudart:[7,35],cudastreamsynchron:8,cudastreamwaitev:8,cudnn:[11,12,22,23,48],cufft:44,cufft_plan_cach:28,cuh:7,cultur:5,cumprod:[43,44],cumsum:[43,44],cumul:[15,22,23,44],cup:48,cur:37,curl:35,current:[1,3,5,7,8,13,14,19,21,22,23,28,31,33,35,36,37,39,40,41,42,43,44,45,47,49],current_blas_handl:8,current_datetime_hostnam:42,current_devic:[8,41],current_stream:8,curv:42,custom:[7,13,14,21,22,31,35,37,43],custom_decod:22,custom_encod:22,custom_loop:36,custom_op:36,cut:4,cutoff:[22,44],cxx:7,cycl:37,cycle_momentum:37,cyclic:[37,50],cycliclr:37,d_1:[22,23],d_2:[22,23],d_k:[22,23],d_model:22,daemon:21,dag:1,dai:4,dampen:37,dark:50,darker:50,dart:45,data1:45,data2:45,data:[1,4,12,14,15,18,19,21,22,23,25,26,28,29,31,32,34,35,36,37,39,40,41,42,43,44,45,50],data_load:[32,37,45],data_parallel:30,data_ptr:[40,43],data_sourc:13,databas:[13,45],dataformat:42,dataload:[13,22,28,30,35,37,42,43,45],dataparallel:[14,30,32],dataset:[18,22,30,31,35,37,42,46,48,50],dataset_it:13,datasetfold:46,datatyp:[22,36,44],datetim:14,datset:45,david:[6,22],dcgan:36,ddp:22,ddp_sync_bn_network:22,deactiv:50,deadlock:[14,22],deal:[4,21,30,44,50],dealloc:[21,28,30],debug:[1,2,13,14,25,27,35,36],decai:[22,37],decemb:44,decent:13,decid:[2,4,45],decis:19,declar:[1,7,13,19,36],decod:[16,22,44,47],decoder_lay:22,decomposit:[15,44],deconvolut:[22,23],decor:[1,15,19],decoupl:[22,37],decreas:[15,22,37,44,49],decreasingli:22,deep:[4,5,18,22,24,37,48],deeper:48,deeplabv3_resnet101:48,deeplabv3_resnet50:48,def:[1,13,15,17,19,22,27,29,30,32,35,36,37,43,50],default_gener:44,default_load:45,default_stream:8,defin:[1,7,13,15,17,21,22,23,35,36,37,39,43,44,45,50],define_macro:35,definit:[4,13,15,17,19,22,23,36,42,44,48],degre:[15,22,44,50],del:[21,30],delet:[14,17,21,38],deliv:5,delta:[15,22,24,37],delv:24,demand:[8,31],demonstr:22,denomin:[22,37,44],denorm:44,denot:[1,15,19,22,37],dens:[22,39,41,43,44,48],dense_dim:[39,43,44],densenet121:48,densenet161:48,densenet169:48,densenet201:48,densenet:36,densiti:15,depend:[1,2,3,13,14,15,17,19,21,22,23,27,28,33,36,37,39,42,43,44,48],dependent_properti:15,deploi:[4,31],deploy:18,deprec:[14,22,23,26,36,40,43,44,50],depth:[8,22,23,48,50],depthwis:22,dequant:43,deriv:[1,5,19,22,29,43,44],derivedp:15,derivedq:15,descend:[22,43,44],descent:[15,37],describ:[3,4,8,13,19,22,23,24,30,31,36,43,44,48,49],descript:[0,4,7,19,28,29,31,36,52],descriptor:[13,22,36,45],deseri:[20,44],design:[1,4,5,13,15,17,20,48],desir:[8,13,14,15,22,23,28,36,38,39,40,43,44,50],desmaison:6,despit:19,destin:[8,14,22,40,43,44],destroi:22,destructor:21,det:[15,43,44],detach:[1,19,22,23,30,43,44],detach_:[1,43],detail:[0,1,4,8,13,15,19,22,23,29,30,31,37,39,42,43,44,48,50],detect:[3,7,14,21,36,46],detect_anomali:1,detector:22,determin:[1,5,7,8,13,15,22,23,28,33,42,44,50],determinist:[3,11,15,19,22,23,33,37,38,44],dev:5,dev_idx:14,develop:[28,31,36],deviat:[15,22,24,43,44,50],devic:[1,3,8,14,19,22,23,30,33,36,37,38,40,43,44],device_count:[8,14],device_id:[22,23,44],device_of:8,devito:6,df1:15,df2:15,dfrac:[22,23,43],diag:[15,43,44],diag_emb:[43,44],diagflat:[43,44],diagn:15,diagnost:19,diagon:[15,23,43,44],dict:[15,20,22,29,36,37,42,44,47,48],dictat:22,dictionari:[7,13,15,22,23,36,37,42,45,48],did:[1,4,19],didn:[25,29,35,37],dies:21,dieterich:6,diff:[4,19],differ:[1,3,7,8,13,14,15,17,19,21,22,23,26,27,28,29,32,33,35,36,37,39,41,42,43,44,45,48],differenti:[15,22,23,25,29,30,43],difficult:[1,4],difficulti:[4,24],digamma:[43,44],digamma_:43,digit:[20,31,44,45],dilat:[22,
23,36],dim0:[43,44],dim1:[43,44],dim2:[43,44],dim:[8,15,19,22,23,30,36,39,43,44],dim_arang:36,dim_feedforward:22,dimems:43,dimens:[1,8,13,15,19,22,23,24,26,30,39,41,42,43,44],dimension:[1,15,22,23,24,26,40,41,43,44],dims_i:36,dine:48,diningt:48,dir:[17,36,45],dirac:24,dirac_:24,direct:[4,5,22,25,29,44,50],directli:[4,5,7,13,14,15,19,22,23,28,31,32,36,39,42,44],directori:[7,14,20,31,34,42,45,48],dirnam:17,dirti:25,disabl:[22,28,38],disable_cuda:28,disable_jit_exampl:19,disadvantag:19,discard:[17,19,49],discourag:[1,8,25],discov:14,discrep:44,discret:[15,22,23,43,44],discrimin:22,discuss:[5,15],disjoint:19,disk:[1,13,42,44],dispatch:[14,36],displai:[20,23,36,48,51],displaystyl:44,dissimilar:22,dist:[14,15,43,44],distanc:[37,44,45],distinct:44,distort:50,distortion_scal:50,distribut:[13,18,24,39,42,43,44],distributed_test:14,distributeddataparallel:[13,14],distributedsampl:13,div:[22,36,39,43,44],div_:[39,43],div_valu:22,diverg:[19,22,23,36],divid:[3,8,22,23,44],dividend:44,divis:[13,22,23,44],divisor:[22,23,43,44],divisor_overrid:[22,23],dlibenzi:6,dll:35,dlpack:18,dltensor:16,dmytro:[5,6],dnn:27,do_constant_fold:36,doc:[2,21,22,29,36,42],doc_str:36,docstr:[7,17],document:[8,13,14,17,19,21,22,29,30,36,44,51],doe:[1,2,3,4,5,8,14,15,17,19,21,22,23,26,27,28,32,36,39,43,44,46,50],doesn:[1,3,4,8,13,14,19,22,23,26,29,31,32,35,36,37,44],dog:[45,48],doing:[4,13,19,23,35],domain:[5,15],don:[1,2,4,14,17,21,22,23,25,29,30,32,35,36,37,43,44,50],done:[13,15,19,21,22,30,33,36,43,44,50],donut:48,dot:[22,43,44,50],doubl:[1,22,23,29,40,41,43,44],doubler:1,doubletensor:[41,43,44],dow:42,down:[1,4,13,15,23,32,42],download:[20,35,42,45,48],downsampl:22,doxygen:4,dp_m:30,dpotri:44,draw:[13,42,43,44],drawn:[13,24,43,44],drier:48,drive:[5,14],driven:5,drop:[13,22,32,45],drop_last:13,dropout:[3,36],dset:45,dst1:8,dst2:8,dst:14,dst_tensor:14,dst_type:22,dtype:[12,13,19,22,23,28,29,36,39,40,42,43,44,50,52],due:[2,3,4,15,19,22,28,33,44],dummi:13,dummy_input1:36,dummy_input2:36,dummy_input:36,dump:35,dump_patch:22,duplic:[13,22,30,39,43,44],dure:[1,3,7,14,19,22,23,27,28,31,36,39,43,44,48],dynam:[7,13,19,24,36,37,44],dynamic_ax:36,dynamic_threshold:37,dzhulgakov:[5,6],each:[1,3,7,8,13,14,15,16,17,19,22,23,24,25,26,27,28,29,30,31,32,36,37,39,41,42,43,44,45,47,48,49,50,51],eager:44,eagerli:8,earli:[19,22],earlier:[1,30,36],eas:[1,27],easi:[13,19,30,31,32,36],easier:[4,13,19,22,26,29,44],easili:[4,9,10,14,22,23,37,38,42,43,44],ecosystem:31,edg:[1,23,50],edgeitem:44,edouard:22,edu:[22,45],edward:[5,6],effect:[1,4,7,13,17,19,22,23,28,37,40,42,43],effici:[1,13,15,22,25,29,39,41,43,44,48],eig:[43,44],eigenvalu:44,eigenvector:[43,44],eight:20,einstein:44,einsum:44,either:[1,7,13,14,15,17,19,22,23,24,26,28,29,31,32,36,37,43,44,52],elaps:8,elapsed_tim:8,eleg:32,element:[1,8,13,14,15,22,23,24,26,27,39,40,41,42,43,44,45,49,50],element_s:[40,43],elementari:44,elementwis:[8,22,23,44],elementwise_affin:22,eleph:48,elf:30,elif:19,elimin:[14,43,44],ell:22,ell_c:22,ellips:44,ellipsi:44,elman:22,els:[4,7,13,15,19,21,22,23,28,29,40,43,44,45,50],elsewher:[17,44],elu:36,elu_:23,embed:[27,36,42],embed_dim:22,embedding_bag:33,embedding_dim:[22,23],embedding_matrix:23,embedding_sum:22,embeddingbag:23,emerg:35,emit:[1,7,19,38,45],emit_nvtx:[1,2],emnist:46,empir:22,emploi:37,employe:5,empti:[14,19,22,23,24,26,28,36,39,43,44],empty_cach:[8,28],empty_lik:44,empty_strid:44,emptydatastructur:19,enabl:[1,12,13,14,22,26,27,28,31,35,37,38,43,44],enable_grad:[1,44],enable_tim:8,encod:[14,19,22,29,44],encoder_lay:22,encount:[14,22,23,44],encourag:5,end:[4,
5,8,13,14,19,21,22,30,35,43,44,47],end_dim:[43,44],end_ev:8,end_pt:47,endl:31,endocd:22,endpoint:50,enforc:22,enforce_sort:22,engin:[1,43,44],enhanc:50,enough:[19,21,25,29,35,37,44,50],enqueu:[8,28],ensur:[1,2,4,5,13,14,19,20,21,22,25,28,32,33,43,44,48],enter:14,entir:[3,4,7,13,19,22,23,30,31,34,50],entiti:45,entranc:4,entri:[1,14,15,25,37,39,42,44,45],entropi:[15,22,23],entrypoint:21,entrypoint_nam:17,enumer:[13,15,22,28,35,42],enumerate_support:15,env:[14,15],enviro:4,environ:[4,7,15,17,19,20,27,28,35,48],environment:8,epoch:[13,37,42],eps:[1,22,23,29,37,44,52],epsilon:[22,23,44],eq_:43,equal:[8,14,15,22,23,26,42,43,44],equal_nan:[43,44],equat:[44,50],equival:[3,13,15,19,22,23,36,41,43,44],eras:50,erf:[36,43,44],erf_:43,erfc:[43,44],erfc_:43,erfinv:[43,44],erfinv_:43,errno:35,error:[1,4,13,15,19,21,22,23,25,28,29,36,43,44,48],especi:[5,13,14,23,25,36,43,44],essenti:[13,35],estim:[15,22,37,44],eta:37,eta_:37,eta_min:37,eta_t:37,etaminu:37,etapli:37,etc:[13,14,15,19,22,29,30,32,36,37,42,44],eth0:14,eth1:14,eth2:14,eth3:14,ethernet:14,euclidean:23,eval:[19,22,36,48],evalu:[2,15,22,23,25,29,37,44,45,48],even:[1,13,14,19,23,28,29,30,32,33,41,43,44],event:[1,15,19,21,42],event_dim:15,event_file_writ:42,event_nam:31,event_shap:15,eventfilewrit:42,eventlist:1,eventu:[1,4],everi:[1,13,14,15,22,23,25,28,29,36,37,40,42,43,44,45,48],everyon:4,everyth:[4,13,15,19,21],everytim:14,everywher:[23,44],evid:4,evolv:36,exact:[1,22,24,32,34,39,44],exactli:[1,7,14,15,22,23,25,28,36,44,45,49],examin:36,exampl:[1,3,4,7,8,13,14,15,17,19,20,22,23,24,25,26,27,28,29,30,31,32,35,37,39,41,42,43,44,45,48,50,51],example_forward_input:19,example_input:19,example_output:36,example_weight:19,except:[1,3,4,14,15,19,21,22,23,28,35,36,39,43,44,48,50],exchang:[14,44],exclud:[19,22,23,44,45],exclus:[13,14,15,25,44],execut:[1,2,3,4,7,8,13,14,19,22,25,26,27,30,32,33,35,36,44],exist:[1,4,5,7,13,14,15,17,19,21,22,26,31,36,43,44,45],exit:[1,2,21,22,32,36],exp1:42,exp2:42,exp:[1,15,22,23,36,43,44],exp_:43,exp_famili:15,exp_rang:37,expand:[1,5,15,17,26,36,43,44,50],expand_a:[29,36,43,44],expans:50,expect:[1,4,13,14,19,22,23,30,37,42,44,45,48,49,50],expens:[2,13,15,31],experi:[22,42],experiment:[35,36,39,41],expert:5,explain:[17,28],explan:29,explicit:[28,36,44],explicitli:[8,14,19,23,28,31,36,38,39,44],explod:[22,42],explor:17,expm1:[43,44],expm1_:43,expon:[15,22,23,43,44],exponenti:[22,43,44],exponential_:[43,44],exponentiallr:37,export_chrome_trac:1,export_param:36,export_raw_ir:36,expos:[1,28,31],express:[25,43,44],exptransform:15,ext:[20,45],ext_modul:7,extend:[1,15,18,22,31,32,36],extens:[7,15,44,45],extension_kernel:7,extent:27,extern:[19,27,35],extra:[4,14,19,22,29,30,31,44,45],extra_cflag:7,extra_compile_arg:[7,35],extra_cuda_cflag:7,extra_fil:19,extra_include_path:7,extra_ldflag:7,extra_repr:[22,29],extract:[22,23],extrafilesmap:[19,31],extrem:[2,42],extrud:30,eye:[15,44],eye_:24,eyes:4,ezyang:[5,6],f_t:22,face:42,facebook:5,faces_tensor:42,facil:[21,44],facilit:[17,19,44],fact:[1,19,29,43,44],factor:[15,22,23,24,37,44,49,50],factori:[1,15,28],fail:[1,4,15,19,21,32,35,36,44],failur:[1,5,14,15,19,21,22],fake:45,fakedata:46,fall:[4,22,23,36,44],fallback:14,fals:[1,3,7,8,13,14,15,17,19,21,22,23,25,30,33,36,37,38,40,42,43,44,45,48,50,51],famili:15,familiar:[19,25],fan:24,fan_in:24,fan_out:24,fang:6,faq:[13,18,22],far:23,fashion:[4,14,19,43,46],fashionmnist:45,fast:[4,13,22,28,41,44,49],fast_forward:44,faster:[13,22,23,28,46],fasterrcnn_resnet50_fpn:48,fastest:[22,44],fatal:[21,32],favor:[8,22,23,44,50],favour:44,fcn:48,fcn_resnet
101:48,fcn_resnet50:48,fcntl:14,featur:[5,18,19,22,23,29,30,36,42],featuredropout:36,fed:42,fedyunin:6,feed:[19,30],feedback:4,feedforward:[22,24],feel:4,feng:6,fetch:13,few:[1,4,17,25,30,35,43,44],fewer:[15,26,39,43,44,48],ffi:35,fft:[28,43,44],field:[4,14,22,23,32,36,47,48],figur:[4,27,36,42],file:[1,4,5,7,8,13,17,19,20,31,35,36,40,42,44,45,47,51],filenam:[7,19,20,40,42,47,51],filename_suffix:42,fileno:36,filesytem:[17,20],fill:[14,22,23,24,28,43,44,50],fill_:[22,40,43,44],fill_diagonal_:43,fill_row_zero:19,fill_valu:[22,28,43,44],fillcolor:50,filter:[22,23,43,44,50],financi:5,find:[1,4,5,7,14,17,21,22,27,28,29,30,32,36,41,42,44],find_unused_paramet:22,fine:[7,14,17,19,21,22,25,37,45,50],finetun:[22,25],finish:[14,28,35],finit:[1,2,15,29,44],fire:[31,48],first:[1,2,3,4,5,7,8,13,14,15,19,20,21,22,23,28,30,32,34,35,36,37,39,42,43,44,45,49,50],fisher:15,fit:[1,37,43,44],five_crop:50,fivecrop:50,fix:[19,22,23,30,32,33,35,36,44,45],flag:[1,7,19,21,22,25,28,42,43,44,50],flat:[36,44],flatten:[24,36,43,44,50],flickr30k:45,flickr8k:45,flickr:46,flip:[43,44,50],float16:[12,22,41,43,44,52],float32:[13,22,23,36,41,43,44,52],float64:[22,41,43,44,52],floatstorag:40,floattensor:[1,14,22,39,41,43,44,48,50],floor:[22,23,36,43,44],floor_:43,flow:[19,23,25,36],flush:[1,19,42,44],flush_sec:42,fly:[13,45],fmod:[43,44],fmod_:43,focu:37,focus:19,fold:[19,36,45],folder:[4,7,13,17,42,45],folk:4,follow:[1,4,5,12,13,14,15,17,19,20,21,22,23,24,26,27,28,30,32,35,36,39,41,43,44,45,48,50,52],foo:[17,19,36],foo_forward:36,foomodel:36,foomodul:36,foral:43,forc:[1,7,8,17,19,28,42],force_reload:17,forev:21,forg:[35,36],forget:[19,22],forgotten:35,fork:[13,21,22,27,30,32,35,38,48],fork_rng:38,forkingpickl:35,forkserv:[21,22,32],form:[1,4,5,13,15,19,22,23,29,33,36,37,42,44],format:[1,12,17,19,22,29,36,39,41,42,43,44,45,47,48,49],former:22,formul:[22,23],formula:[1,15,22,23,29,37,44],forth:[17,37],fortun:30,forum:[4,5,30,32],forward:[1,3,7,19,21,22,23,24,25,27,28,29,30,33,36,39,44],found:[19,22,32,42,44,48],four:50,fourier:44,fp16:22,fp32:22,fpn:48,fps:[42,47],frac:[15,22,23,24,37,43,44,50],frac_:43,fraction:[13,22,24,44,50],frame:[42,44,45,47,48],frames_per_clip:45,framework:[4,5,15,36,37],frank:15,free:[1,4,14,15,24,25,30,32,35],freed:[1,21,28,43],freedom:15,freez:[22,25],freeze_support:35,frequenc:[22,23,37,44],frequent:[4,18,22],fresh:17,frisbe:48,fritz:6,fritzo:6,fro:[43,44],frobeniu:44,from:[1,4,5,7,8,13,14,15,19,21,22,23,24,28,29,30,31,32,37,39,41,42,43,44,45,47,48,49,50,51],from_buff:40,from_dlpack:16,from_fil:40,from_ipc_handl:8,from_numpi:[43,44],from_pretrain:22,front:[22,43],frozen:[25,35,37],full:[13,14,15,17,19,22,23,36,44,45],full_lik:[36,44],fulli:[13,14,22,25,28,29],func:[1,19,43],functioneventavg:1,functor:15,fundament:4,further:[1,5,7,14,22,32,33,42,44],furthermor:[7,22,33],fuse:50,fusion:19,fut:27,futur:[1,8,19,23,27,36,37,39,43],g_cpu:44,g_cpu_oth:44,g_cuda:44,g_t:22,gain:[5,24,50],galleri:4,gamma:[22,37,44,50],gamma_:44,gan:22,gap:44,garbag:13,gate:[22,23],gather:[8,14,30,31,36,43,44],gather_list:14,gaussian:[15,23],gchanan:[5,6],ge_:43,gel:[43,44],gemm:[27,36],gen_non_contig_grad_output:1,gener:[1,4,7,13,14,15,19,22,23,24,28,30,31,33,35,36,37,39,41,42,43,45,46,48],generate_square_subsequent_mask:22,geometr:[23,43,44],geometri:[28,44],geometric_:[43,44],geq:[22,23,24,44],geqrf:[43,44],ger:[43,44],gesdd:44,gesvd:44,get:[1,7,8,13,14,19,21,22,25,29,30,31,36,37,42,43,44,45,46],get_all_sharing_strategi:21,get_backend:14,get_context:32,get_default_dtyp:[44,52],get_devic:[39,41,43],get_device_cap:8,get_device_n
am:8,get_image_backend:46,get_info:[43,44],get_input:36,get_lr:37,get_num_interop_thread:[27,44],get_num_thread:[27,44],get_rank:14,get_rng_stat:[8,38,44],get_rng_state_al:8,get_sharing_strategi:21,get_stat:44,get_worker_info:13,get_world_s:14,getenv:31,getsourc:31,gil:[13,14,28],gimelshein:6,giraff:48,girshick:22,github:[4,5,17,29,36,37],give:[1,2,4,13,17,19,21,22,25,28,29,36,37,44,45,50],given:[1,4,5,7,8,13,14,15,19,20,21,22,23,24,27,29,31,36,37,39,42,43,44,45,50,51],glass:48,global:[3,13,14,15,19,31,32,42,44,48],global_step:42,globalcontext:35,gloo:[14,22],gloo_socket_ifnam:14,glorot:24,glove:48,glu:36,goe:[22,30],going:[4,14,21,25,27,31,35,42],goldsborough:6,gomez:22,good:[4,7,17,21,22,27,31,44],govern:[4,18],gpu1:22,gpu:[1,2,4,8,12,13,18,19,28,33,35,37,40,43,44,48],gpu_model:19,gpudirect:14,grad:[1,3,15,19,22,32,39,43],grad_bia:29,grad_fn:[1,25,39,43],grad_input:[22,29,35],grad_output:[1,22,29,35],grad_tensor:1,grad_vari:1,grad_weight:29,gradcheck:[1,29,44],gradgradcheck:1,gradient:[3,13,14,15,19,22,23,25,29,30,37,39,43],graham:22,grai:50,grain:[14,25,50],grangier:22,grant:5,graph:[1,3,15,22,25,29,36,42,43,44],graphic:35,graphroot:1,grave:22,grayscal:[42,50],great:4,greater:[2,22,23,25,36,44,49],greater_than:15,greater_than_eq:15,greaterthan:15,greaterthaneq:15,greg:[5,6],gregori:6,grep:30,grid:[23,42,44,49,51],grid_i:44,grid_x:44,gross:[5,6],ground:[4,42,48],group:[1,5,17,20,21,22,23,36,37,42,43,44],group_by_input_shap:1,group_nam:14,grow:[4,39],gru:19,gt_:43,gtcoars:45,gtfine:45,guarante:[3,13,14,15,22,27,33],guard:32,guid:[1,13,18],guidanc:4,guidelin:[5,48],gumbel:23,h_0:22,h_1:22,h_i:22,h_n:22,h_t:22,hack:4,had:[4,19],hadamard:22,hair:48,half:[15,22,23,37,40,41,43,44],half_cauchi:15,half_norm:15,half_open_interv:15,halfopeninterv:15,halftensor:[41,43],ham:[23,44],hamiltonian:15,hamming_window:44,hand:[1,2,19,22,36,44],handbag:48,handi:28,handl:[1,3,8,13,14,17,21,22,23,28,30,32,36,43,44,45],handler:31,hang:22,hann:44,hann_window:44,happen:[1,4,5,14,15,21,22,29,30,32,35,43],happi:4,hard:[1,4,19,22,23,25,36],harder:22,hardshrink:43,hardtanh:36,hardtanh_:23,has:[1,3,4,5,8,12,13,14,15,19,21,22,23,25,26,27,29,31,32,35,36,37,38,39,40,41,42,43,44,45,48,50],has_enumerate_support:15,has_rsampl:15,hash:20,hasn:37,hat:22,have:[1,3,4,5,8,13,14,15,17,19,21,22,23,24,25,26,28,29,30,31,32,33,35,36,37,38,39,41,42,43,44,45,48,49,50],head:22,head_1:22,head_bia:22,head_h:22,head_i:22,header:[1,7,35],health:5,healthi:[4,5],heart:13,heavi:[1,14,25,35],heavili:[2,29,37],hei:4,height:[22,23,36,49,50],held:8,hellemn:6,hello:19,help:[1,2,4,5,13,17,22,25,26,28,36,44],helper:[3,14,17,19,22,28,36],henc:[22,28,44,45],here:[1,4,5,13,14,15,17,19,22,23,29,30,31,35,36,43,44,45,48,51],hessian:24,heurist:[7,13],hflip:50,hidden:[3,22,28,42],hidden_s:22,hierarch:42,high:[2,4,15,21,22,42,43,44],higher:[1,4,8,14,22,29,43,49],highest:[22,44],highli:[5,17,22,36],hing:22,hingeembeddingloss:23,hinton:37,his:37,histc:[43,44],histogram:[42,44],histor:27,histori:[1,29,30,37],history_s:37,hit:3,hmc:15,hmdb51:46,hmdb:45,hold:[1,22,26,29,30,32,37,41,43,44,50],holist:4,hood:[1,21,32],hook:[1,22,31,43],hop:44,hop_length:[43,44],horizont:50,horizontal:50,hors:48,host:[13,14,22,28,40,43],hot:[15,23,48],houseroad:6,how:[3,4,5,13,14,19,20,21,22,23,29,30,32,36,42,44,48,50],howev:[2,3,4,5,8,13,14,15,19,22,28,32,33,34,35,36,39,43,44,45],hspmm:39,hsv:50,html:[2,4,37,42],http:[2,4,17,20,22,35,36,37,42,45,48,50],hub:[18,20],hub_dir:17,hubconf:17,huber:22,hue:50,hue_factor:50,human:[0,24,36,45],hundr:31,hwc:42,hybrid:39,hydrant:48,hyper:19,hype
rbol:44,i_0:44,i_d:44,i_n:44,i_t:22,icdf:15,icml_2006:22,idea:[4,22,31],ident:[1,7,13,14,15,23,24,33,39,43,44],identifi:[4,14,21,22,26,31,42,44],identity_transform:15,idiom:35,ids:[22,23],idx:[13,22],iff:15,ifft:[43,44],ignor:[4,8,19,22,23,29,37,43,44],ignore_index:[22,23],ignored_cod:19,illia:22,im2col:22,imag:[13,22,23,42,45,46,48,51],image_s:45,image_set:45,imagefold:46,imagenet:[14,24,46,48],imagenet_data:45,imagenet_root:45,imaginari:44,imbalanc:22,img:[42,45,50],img_batch:42,img_height:50,img_hwc:42,img_tensor:42,img_width:50,immedi:[4,5,22],impact:33,imper:14,implement:[1,3,8,13,14,15,19,21,22,23,25,27,29,30,31,32,35,36,37,39,43,44,45,48,49],impli:21,implic:19,implicit:[22,23,36],implicit_cast:36,implicitcasttyp:36,implicitli:[4,19,22,44],importerror:35,impos:21,improb:13,improv:[5,12,14,22,29,36,37,48],in1:22,in1_featur:22,in2:22,in2_featur:22,in_channel:22,in_featur:[22,29],inaccuraci:1,inc:19,incas:43,incept:[36,50],inception_v3:48,includ:[1,2,3,4,5,7,13,14,17,19,21,22,23,27,28,30,31,33,43,44,45,48],include_path:7,inclus:[15,43,44],incom:[21,22,23],incompat:[5,7,26],incomplet:13,inconsist:44,incorrect:[2,19,23,28,43,44],increas:[1,4,8,11,15,22,23,25,28,37,50],increment:[1,19,22,25],incur:[3,32,44],inde:19,independ:[5,8,13,14,19,22,23,43],index:[8,13,15,18,19,21,22,23,25,28,36,37,39,41,42,43,45,49],index_add:43,index_add_:[33,43],index_copi:[36,43],index_copy_:43,index_fil:[36,43],index_fill_:43,index_put:43,index_put_:43,index_select:[36,43,44],indic:[1,8,13,14,15,19,22,23,29,36,37,39,42,43,44,49],individu:[4,5,13,19,22,31,33,43,44,47],induc:[9,10,23,43,44],inf:[15,22,44],infer:[1,18,19,23,36,39,43,44,48],inferencesess:36,infin:[22,37],infiniband:[14,22],infinit:[13,22,23],influenc:5,info:[8,14,18,44,47],inform:[1,2,4,13,14,19,22,27,29,31,36,41,42,43,44,45,48,50],infrastructur:5,infti:[22,23],ingredi:22,inherit:[19,29,32],init:[8,14,18,22,31],init_method:[14,22],init_process_group:[14,22],init_weight:22,initi:[2,8,13,19,22,23,24,28,29,31,35,37,38,43,44,48],initial_accumulator_valu:37,initial_se:[8,13,38,44],inject:31,inlin:[7,19,27],inline_extens:7,inner:[36,44],innermost:15,inp:[1,13,22],inp_unf:22,inplac:[22,23,36,50],inplace_assign:36,inplaceindexedassign:36,inplaceindexedassignmentonnx:36,input1:[22,23,35,36],input2:[22,23,35,36,43,44],input3:[43,44],input:[1,3,5,8,12,13,14,15,19,22,23,24,25,27,28,29,30,31,36,37,39,43,44,45,48,49,50],input_1:36,input_2:36,input_3x3:22,input_data:36,input_featur:29,input_length:[22,23,30],input_nam:36,input_s:22,input_tensor_list:14,input_to_model:42,input_tupl:19,input_var:[3,22],insensit:22,insert:[15,19,22,44],insid:[1,4,19,28,31],insight:4,inspect:[1,31],inspir:37,inst:45,instal:[1,7,14,17,36,42,44,45],instanc:[13,15,19,21,22,23,30,44,45,46],instance_norm:36,instancenorm1d:23,instancenorm2d:23,instancenorm3d:23,instancenorm:22,instantan:8,instanti:[17,19,22,23,29],instead:[1,3,5,13,14,15,19,22,23,30,32,35,36,37,39,43,44,50],instruct:[2,19,36],instrument:31,insuffici:8,int16:[41,43,44,52],int32:[22,41,43,44,52],int64:[22,23,28,36,41,43,44,49,52],int64tensor:48,int8:[41,43,44,52],int_:44,int_a:44,int_b:44,int_repr:43,integ:[8,13,14,15,19,22,23,36,37,41,43,44,50,52],integer_interv:15,integergreaterthan:15,integr:[13,22,31,36,37,43,44],intel:[35,46],intel_openmp:35,intens:[37,50],intent:[4,19],inter:[27,44],interact:[1,5,8,36,42],interchang:[15,19],interconnect:14,interest:[4,5,18,45,49],interfac:[29,36,37,42],intermedi:[3,19,22,23,25,30,36],intermediari:15,intern:[5,13,15,22,25,27,28,36,39,44,45,48],internet:[4,45],interop:44,interoper:27,inte
rpol:[22,44,49,50],interpret:[13,14,21,23,27,39,44],interprocess:8,interrupt:21,intersect:49,interv:[15,44,50],intra:27,introduc:[15,17,22,26,43,44],introduct:[26,42],inttensor:[41,43,44],intuit:36,inv:[15,44],invalid:44,invari:[15,22,39,44,50],invers:[15,22,23,37,43,44],inverse_indic:44,invert:[15,22,44,48],invest:5,investig:4,invis:28,invoc:[3,19,29,31,36],invok:[19,22,27,31,37],involv:[4,5,13,28,30,33],iotamudelta:6,iou:[48,49],iou_threshold:49,ipc:8,ipc_collect:8,ipc_handl:8,ipp:46,irecv:14,irfft:[43,44],irrelev:1,irrespect:[28,44],is_avail:[8,28,44],is_coalesc:39,is_complet:14,is_contigu:43,is_cuda:[40,43],is_floating_point:[41,43,44],is_in_onnx_export:36,is_initi:14,is_leaf:[1,43],is_mpi_avail:14,is_nccl_avail:14,is_pin:[13,40,43],is_python_modul:7,is_set_to:43,is_shar:[40,43],is_sign:43,is_spars:[40,43],is_storag:44,is_tensor:44,is_train:[1,44],is_valid_fil:45,isend:14,isfinit:44,isinf:44,isinst:15,isn:[4,13,28],isnan:[36,44],isol:21,issu:[5,21,22,28,32,35,36],itch:4,item:[13,17,22,33,42,43,44,45],iter:[4,8,14,15,21,22,25,26,37,38,42,49],iter_end:13,iter_start:13,iterabledataset:[13,31],itertool:[15,44],its:[1,2,4,5,7,8,13,14,15,19,21,22,25,26,28,29,30,35,36,37,39,41,43,44,45,48,50],itself:[3,4,19,21,22,23],ivalu:31,jacobian:[1,15,44],jakob:22,jang:15,jess:6,jit:[7,18,27,31,36,44],jitter:50,job:[14,22,31,37,42],johann:6,johnson:6,join:[4,14,17,21,32],jointli:[15,22],jone:22,joulin:22,journal:44,jpeg:31,json:[19,31,45],juggl:3,jump:[41,43],junji:6,just:[1,4,7,15,17,19,21,22,28,31,36,43,44],k_0:44,kaiming_normal_:24,kaiming_uniform_:24,kaiser:22,kdim:22,keep:[1,4,13,17,19,21,22,23,25,28,30,32,37,42,44,49,50],keep_var:22,keepdim:[22,23,43,44],kei:[1,13,14,19,22,31,36,37,40,42,43,44],kept:[21,22,23,49],kernel:[2,7,8,22,23,29,36],kernel_s:[22,23,42],kernel_shap:36,key_averag:1,key_padding_mask:22,keyboard:48,keypoint:46,keypointrcnn_resnet50_fpn:48,keyword:[1,17,19,22,36,37,42,44],kill:[21,30],kind:[14,22,29,32,36],kinet:[46,48],kinetics400:45,kite:48,kl_diverg:15,kl_normal_norm:15,kl_version1:15,kl_version2:15,kldivloss:23,kmnist:46,knife:48,know:[3,4,19,25,36],known:[4,14,19,21,22,24,27,28,36,44,50],knuth:4,kth:44,kthvalu:[43,44],kullback:[15,22,23],kuzushiji:45,kw_i:22,kwarg:[1,3,7,17,22,23,34,36,40,43,44,45,48,50,51],kwlist:42,l1loss:23,l_1:22,l_c:22,l_n:22,l_p:23,lab:45,label:[4,13,22,23,32,42,45,48],label_img:42,lambd:[22,23,37,43,50],lambda1:37,lambda2:37,lambda:[1,13,15,22,23,37,43,44,50],lambdalr:37,langl:15,languag:[7,22,23,30,36],laptop:48,larg:[4,13,18,21,22,23,27,28,30,39,43,44,45,48,50],larger:[1,5,22,23,30,31,42,43,44,48,50],largest:[19,23,43,44,52],last:[1,3,13,19,22,23,25,37,44,48,50],last_epoch:37,later:[1,4,19,22,27,28,34,44],latest:[4,14,15,17,36],latin1:44,latter:[22,32],launch:[2,13,22,25,27,28],law:[22,50],layer:[14,23,24,25,29,30,37,48],layer_count:36,layer_norm:36,layernorm:23,layout:[17,19,20,39,42,43,44],lazi:37,lazili:8,lbfg:37,lbrace:44,lceil:44,ldot:[15,22,44],le_:43,lead:[1,4,35,43,44],leadership:5,leaf:[1,43,44],leak:21,leaki:[22,23,24],leaky_relu:[24,36],leaky_relu_:23,leakyrelu:23,learn:[4,15,18,22,24,45,48],learnabl:[22,23],learned_0:36,learned_14:36,learned_15:36,learned_1:36,learned_2:36,learned_3:36,learned_:36,least:[15,17,22,24,26,30,40,43,44,48],leav:[1,19,25,43,44,45],left:[19,22,23,36,43,44,50],left_ankl:48,left_ear:48,left_elbow:48,left_ey:48,left_hip:48,left_kne:48,left_should:48,left_wrist:48,leftimg8bit:45,legaci:[23,41],legitim:22,leibler:[15,22,23],lemma:15,len:[13,14,19,22,23,39,42,44,45],length:[1,8,13,14,15,19,22,23,26,30,36,43,44,48,50]
,leq:[22,23,44],lerp:[43,44],lerp_:43,less:[1,4,8,13,15,17,22,29,32,44,48],less_than:15,lesser:27,let:[1,4,13,15,28,29,32,35,42,43],letter:[44,45],level:[1,4,13,19,21,22,24,27,42,43,44,48],lexicograph:44,lfloor:[22,23,44],lib64:7,lib:[35,44],libari:35,libenzi:6,librari:[2,5,7,13,18,27,29,30,31,32,33,35,36,44,46],libx264:47,lie:[22,23,42],lies:45,lifetim:4,light:[42,48],lighter:50,like:[1,2,3,4,5,7,8,13,14,15,17,19,21,22,23,27,28,29,30,31,32,35,36,43,44,50],likelihood:[15,22,23],likewis:49,limit:[13,21,22,25],line:[1,2,22,26,35,36,44],line_search_fn:37,linear:[8,19,24,25,28,29,30,36,43,44,48],linearfunct:29,linearli:[22,23,30],lineartransform:50,liner:22,linewidth:44,link:[7,15,22,23,31],linker:7,linspac:44,linux:[14,17,20],list:[1,3,4,5,7,13,14,15,17,22,23,29,35,36,37,39,40,41,42,43,44,45,47,48,49,50,51],listconstruct:19,listofproperti:42,literatur:22,littl:29,live:[19,22,30,37],llion:22,load:[1,7,19,20,22,31,34,35,36,37,44,45,46,48],load_inlin:7,load_nvprof:1,load_state_dict:[17,22,34,37,44],load_state_dict_from_url:[17,20],load_url:[20,48],loadabl:17,loadann:45,loaded_weight:43,loader:[13,45],loc:[15,44],local:[14,17,19,21,22,23,30,42,45],local_rank:22,locallr_0:42,localresponsenorm:23,locat:[1,7,8,15,17,19,20,22,23,37,39,42,43,44,45,48,50],lock:[4,13,14,15,28,32],log10:[43,44],log10_:43,log1p:[43,44],log1p_:43,log2:[36,43,44],log2_:43,log:[7,13,15,22,23,36,42,43,44],log_:[43,44],log_abs_det_jacobian:15,log_dir:42,log_input:[22,23],log_norm:15,log_normal_:[43,44],log_pob:22,log_prob:[15,22,23],log_sigmoid:36,log_softmax:[22,36],logabsdet:44,logarithm:[22,23,44],logdet:[43,44],logdir:42,logic:[3,13,29],logist:[15,22],logit:[15,22,23],logsoftmax:23,logspac:44,logsumexp:[36,43,44],longer:1,longest:[22,30],longtensor:[22,23,39,41,43,44],look:[2,4,5,15,19,22,23,31,32,35,36,44],lookup:[15,22,23,27],loop:[8,19,27,30,36,42,50],loop_count:36,loop_in_traced_fn:19,loop_rang:36,loopmodel2:36,loopmodel:36,loos:31,lorentz:15,loss:[15,30,37,42,45,48],loss_fn:[32,37],lost:[22,44],lot:[4,21,32,38,42],low:[4,15,21,22,43,44],lower:[1,8,14,15,19,22,23,24,25,33,37,44,49],lower_bound:15,lower_choleski:15,lower_triangular:15,lowercas:14,lowercholeski:15,lowercholeskytransform:15,lowest:44,lowrank_multivariate_norm:15,lppool1d:23,lppool2d:23,lr_0:42,lr_decai:37,lr_lambda:37,lr_schedul:37,lrelu:22,lrn:22,lru:[28,44],lstm:[3,36,42],lstsq:[43,44],lsun:46,lt_:43,lu_data:[43,44],lu_pivot:[43,44],lu_solv:[43,44],lu_unpack:44,lukasz:22,lvert:[22,23,44],macbook:42,machin:[14,22,31,38],maco:21,maddison:15,made:[1,5,19,22,35,37,42,50],mae:22,magma:[35,44],magma_2:35,magma_hom:35,magnitud:[22,24,44],mai:[1,2,4,7,8,9,10,11,13,14,15,19,22,23,26,28,30,35,36,37,39,40,43,44,50],main:[13,14,15,21,23,25,34,35,42,43,44],main_tag:42,mainli:[15,22,23],mainta:50,maintain:[4,14,15,22],major:[4,8,22,23,36,39],make:[1,2,3,7,8,11,13,14,15,17,19,21,22,23,25,26,28,29,30,32,33,35,36,37,41,42,43,44,45,50,51],make_grid:[42,51],manag:[1,4,22,30,31,36,38,44],mandatorili:13,mani:[1,4,5,13,14,19,22,24,25,26,27,29,31,33,41,42,43,44,46],manipul:30,manner:[3,26,43],mantissa:43,manual:[13,14,19,21,22,23,28,30,33,35,36,42],manual_se:[8,33,38,44],manual_seed_al:8,map:[7,15,19,22,23,35,36,40,44,49],map_:43,map_loc:[19,20,22,44],margin:[22,23,42],marginrankingloss:23,mark:[8,19,22,25,43],marker:8,market:[4,5],marten:24,mask:[22,43,44,49],masked_fil:[36,43],masked_fill_:43,masked_scatt:43,masked_scatter_:43,masked_select:[43,44],maskrcnn_resnet50_fpn:48,mass:15,master:[4,17,36],master_addr:14,master_port:14,mat1:[39,43,44],mat2:[39,43,44],mat:[39,42,43,44
,45],match:[1,8,14,15,19,22,23,26,36,37,41,43,44,45,50],math:[13,19,22,23],mathbb:22,mathbf:[15,22],mathbin:44,mathcal:[22,24,44],mathemat:[22,23,44],mathrm:[15,22,44],matmul:[22,43,44],matplotlib:42,matric:[15,23,39,43,44],matrix:[15,22,23,24,39,42,43,44,50],matrix_pow:[43,44],matrix_rank:44,matter:[1,2,5,19,25,44],max:[13,14,19,22,23,26,30,36,37,43,44,50,51,52],max_:22,max_bin:42,max_ev:37,max_indic:44,max_it:37,max_lr:37,max_memory_alloc:[8,28],max_memory_cach:[8,28],max_momentum:37,max_norm:[22,23],max_queu:42,max_siz:28,max_val:[22,23],max_valu:22,maxim:[22,37,44],maximum:[8,15,22,23,28,37,44,49,50],maxnorm:[43,44],maxpool1d:[23,36],maxpool2d:[23,36],maxpool3d:[23,36],maxpool:[22,36],maxunpool1d:23,maxunpool2d:23,maxunpool3d:23,may04_22:42,mayb:4,mc3:48,mc3_18:48,mean:[1,3,4,8,13,14,15,19,21,22,23,24,30,33,35,36,37,43,44,48,50],mean_vector:50,meant:[1,22],meantim:[22,23],measur:[8,15,22,23,31,37],mechan:[18,21,31],median:[15,43,44],medium:4,meet:28,megabyt:22,member:[4,5,13,14,19,22,30],membership:5,memo:22,memoiz:15,memori:[1,3,15,16,19,21,22,23,25,32,37,40,41,42,43,44,48],memory_alloc:[8,28],memory_cach:[8,28],memory_effici:48,memory_key_padding_mask:22,memory_mask:22,mendoza:6,mention:[17,19,28],mere:4,merg:[4,5,13,22],mesh:42,meshgrid:44,messag:[4,8,17,19,30,36,37],messmer:6,meta:42,metadata:[19,42,44,47],metadata_head:42,meter:48,meth:44,method:[1,4,7,8,13,14,15,17,21,22,23,24,28,29,30,32,36,37,39,41,42,43,44,45,48],metric:[8,37,42],michael:6,microwav:48,middl:36,might:[1,2,5,17,19,22,25,27,28,31,43,44,45],mileston:37,millisecond:8,min:[13,14,22,23,36,37,43,44,50,51,52],min_indic:44,min_lr:37,min_siz:48,min_val:[22,23],min_valu:22,min_x:44,mind:22,minfunc:37,mini:[13,22,23,48,51],minibatch:[13,22,23,44],minim:[1,4,17,32,37,44],minimum:[7,22,37,44,48],minkowski:23,minlength:[43,44],minor:[5,8],minu:44,minut:[4,14,42],mismatch:[19,30,44,50],miss:[22,35,36],missing_kei:22,mistak:30,mix:[7,15,22,27,36],mkl:[27,35,44],mkl_2018:35,mkl_fft:35,mkl_num_thread:27,mkl_thread:27,mkldnn:43,mkldnn_thread:27,mmap:21,mnasnet0_5:48,mnasnet0_75:48,mnasnet1_0:48,mnasnet1_3:48,mnist:[42,46],mnist_train:42,mnt:14,mobil:48,mobilenet_v2:48,mobilenetv2:48,mod:19,mode:[1,2,13,14,15,19,22,23,24,30,33,36,37,43,44,45,48,50],model:[1,2,3,8,14,18,19,20,22,23,25,27,28,32,33,36,37,42,44,46,50],model_dir:20,model_zoo:[18,48],moder:3,modif:[1,43,44],modifi:[1,13,19,22,23,25,28,36,37,43],modul:[1,3,7,17,18,21,23,25,27,28,30,31,32,35,36,42,43,44,48,50],module_kwarg:23,modulelist:19,modulu:44,momemtum:22,moment:[1,21,36,37],momentum:[22,23,25,37],monitor:[8,28,37,44],monoton:15,mont:15,moor:44,more:[1,2,5,7,8,13,14,15,19,20,21,22,23,24,25,27,28,29,30,31,36,37,39,41,42,43,44,45,48,50],moreov:[43,44],most:[1,2,4,8,13,14,15,17,19,21,22,23,25,28,32,37,39,41,43,44],mostli:[4,15],motion:45,motiv:4,motorbik:48,motorcycl:48,mountain:45,mous:48,moustapha:22,move:[3,19,20,21,22,23,28,32,37,40,42,43,44],moviepi:42,mpi:14,mrshenli:6,mseloss:23,msg:8,msys2:35,much:[1,2,4,5,13,22,28,43,50],mul:[1,19,36,39,43,44],mul_:[39,43],mulconst:29,mult:13,multi:[2,8,19,36,41,43,44],multicast:14,multidimension:22,multihead:22,multihead_attn:22,multilabelmarginloss:23,multilabelsoftmarginloss:23,multilay:22,multilin:42,multilinear:44,multimarginloss:23,multinomi:[43,44],multipl:[8,13,14,15,17,19,21,22,23,27,28,29,31,32,35,37,39,44,45,50],multipli:[22,23,39,44,48,50],multiplicand:44,multiprocess:[13,14,18,22,45],multiprocessing_context:13,multisteplr:37,multivari:[15,44],multivariate_norm:15,must:[1,7,8,13,14,15,19,21,22,23,24,26,29,32,33,36,37
,40,43,44,50],mutabl:19,mutat:[19,43,50],mutual:[13,14],mvlgamma:[43,44],mvlgamma_:43,mvn:15,my_api:31,my_constraint:15,my_dict:19,my_experi:42,my_factori:15,my_imag:42,my_image_batch:42,my_image_hwc:42,my_lib:35,my_lib_add_backward_cuda:35,my_lib_add_forward_cuda:35,my_list:19,my_lstm:30,my_mesh:42,my_modul:19,my_module_inst:19,my_paramet:19,my_registri:15,my_script_modul:19,my_segmentation_transform:50,my_submodul:19,my_transform:15,my_variable_nam:19,myconstraint:15,myconstraintclass:15,myfunc:1,myiterabledataset:13,mymodel:32,mymodul:[19,22,30],mypi:19,myscriptmodul:19,mytransform:15,n_0:23,n_1:44,n_2:44,n_class:22,n_d:44,n_fft:[43,44],n_i:[22,44],n_iter:42,n_k:[23,44],n_power_iter:22,n_t:22,naiv:13,name:[1,7,8,14,15,17,19,20,21,22,24,31,36,40,42,44,45,46,52],named_buff:22,named_children:22,named_modul:22,named_paramet:22,namedtupl:[13,22,44],namespac:19,nan:[1,44],narrow:[36,43,44],narrow_copi:[39,43],nasdaq:42,natalia:6,nativ:[19,21],natur:[1,2,4,15,22,33,44],nbatch:22,nccl2:22,nccl:22,nccl_debug:14,nccl_debug_subsi:14,nccl_socket_ifnam:14,nchannel:22,nchw:42,ncrop:50,ndarrai:[36,43,44,50],ndim:43,ndimens:43,ne_:43,nearest:[22,23,50],nearli:[1,32,43],necessari:[1,7,13,19,22,25,26,28,35,41,43,44],necessarili:[14,15,22,28,36,44],need:[1,4,5,8,13,14,15,19,21,22,23,25,28,29,30,31,32,33,35,36,37,39,40,43,44,45],need_weight:22,needs_input:31,needs_input_grad:[1,29],neeraj:6,neerajprad:6,neg:[8,13,15,19,22,23,24,36,43,44,50],neg_:43,negative_binomi:15,negative_slop:[22,23,24],neglig:[1,36],negoti:5,neighbor:[22,44],neighborhood:22,neighbour:[22,23],neither:[13,14],nelement:[22,43],neq:[22,44],nest:[1,8,19,22,43],nesterov:37,net:[19,22,28,42],netlifi:4,network:[4,15,19,22,23,24,25,28,36,37,50],neural:[4,19,22,24,28,37,48],neuron:22,never:[1,3,4,14,22,25,43],new_:[28,43],new_data:36,new_empti:43,new_ful:[28,43],new_group:[14,22],new_lr:37,new_on:43,new_stat:[8,38,44],new_strategi:21,new_tensor:[28,43],new_zero:43,newer:[27,28],newli:[4,25],next:[1,13,14,15,21,22,23,27,32,36,41,42,43,44,45],next_stat:15,nfs:14,ngimel:6,nhead:22,nhwc:42,nice:[1,22],niederreit:44,nielsen:15,nightli:42,niki:22,nine:[41,43],ninja:[7,35],nist:45,nll:22,nllloss:23,nlp:22,nms:49,nnz:[1,39,43,44],no_grad:[1,3,36,44],no_sync:22,noam:22,noarch:35,nock:15,node:[14,22,36],non:[1,3,7,14,15,19,21,24,26,28,29,30,33,38,42,43,44,49,50],non_block:[22,28,40,43],noncontigu:1,nondet_tol:1,nondetermin:1,nondeterminist:[9,10,11,22,23,43,44],none:[1,7,8,13,14,15,19,20,21,22,23,24,28,29,32,36,37,38,39,40,42,43,44,45,47,48,50,51],nonexist:19,nonlinear:24,nonlinearli:4,nonneg:15,nonnegative_integ:15,nonzero:[1,36,43,44],noordhui:6,noplot:4,nor:[13,14,22],norm:[22,23,36,37,43,44],norm_typ:[22,23],normal:[1,17,19,24,28,37,42,43,44,48,50,51],normal_:[24,28,43,44],normalized_shap:[22,23],nose:48,notabl:50,notat:[43,44],note:[1,7,8,9,10,11,13,14,15,16,17,18,19,21,22,23,25,26,27,29,31,32,36,37,39,42,43,44,45,47,48,50],notebook:[4,51],noth:[4,7,8],notic:[19,22,44],notifi:5,notimplementederror:15,notion:[13,22],now:[1,3,22,26,28,29,36,37,43,44,48],nproc:21,nrow:51,nsdf3:45,nthread:45,nuanc:4,nuc:44,nuclear:44,num:[22,44],num_channel:22,num_class:[23,45,48],num_decoder_lay:22,num_direct:22,num_embed:[22,23],num_encoder_lay:22,num_featur:[22,23],num_group:22,num_head:22,num_keypoint:48,num_lay:[22,36],num_lin:45,num_output_channel:50,num_paramet:22,num_process:32,num_replica:13,num_sampl:[13,43,44],num_threshold:42,num_work:[13,35,45],number:[1,2,3,4,13,14,15,19,21,22,23,26,27,28,29,33,36,37,39,40,42,43,44,45,47,48,49,50,51,52],number_of_vertic:42
,numel:[43,44],numer:[13,15,19,22,23,29,36,37,43,44,52],numpi:[13,26,30,35,36,42,43,44,45,50,52],nvcc:7,nvidia:[1,14,28,30,35,44],nvprof:[1,2],nvtx:[1,2],nvvp:1,o_t:22,obermey:6,obj:[8,19,35,44],object:[1,8,13,14,15,16,19,20,21,22,25,27,28,29,30,32,35,36,37,38,40,41,42,43,44,45,46,50,52],observ:[22,23],obtain:[1,13,14,15,21,22,23,27,43,44,48],obviou:[30,39],obvious:4,occas:[1,4,25],occasion:39,occupi:[8,22,23,28,52],occur:[8,13,19,22,23,28,30,36,43],occurr:44,odd:15,off:[1,4,8,9,10,22,23,27,31,43,44],offici:[5,14,22,35,48],offlin:[19,50],offset:[22,23,43,44,45,50],often:[1,2,4,7,13,14,15,19,22,23,30,31,36,37,42,43,44],old:[25,35,37,44],older:28,omagma:35,omega:44,omega_1:44,omega_d:44,omega_i:44,omit:[3,7,14,22,35,36,44,50],omkl:35,omp:27,omp_num_thread:27,onc:[1,4,13,14,16,21,22,25,27,28,29,31,36,37,42,44],one:[1,2,3,4,7,8,13,14,15,19,21,22,23,26,27,28,29,31,32,33,35,36,37,40,41,42,43,44,45,46,47,48,49,50],one_hot_categor:15,ones:[1,14,15,19,22,23,26,28,29,36,37,43,44,48],ones_:24,ones_lik:[28,36,44],onesid:[43,44],onfunctionent:31,onfunctionexit:31,onli:[1,2,3,4,5,8,13,14,15,16,19,21,22,23,24,25,27,29,30,31,32,34,35,36,37,39,42,43,44,48,50],onlin:37,only_input:1,onnx:[18,22],onnx_aten:36,onnx_aten_fallback:36,onnx_model:36,onnxruntim:36,onto:[8,19,21,30,44],opaqu:14,open:[1,5,15,19,21,35,44],openbla:35,openmp:[27,35],oper:[2,3,4,5,8,9,10,11,13,15,22,23,26,27,28,29,30,32,33,37,38,39,41,43,46,47,49,50],operand:44,operator_export_typ:36,operatorexporttyp:36,operatornam:[22,44],opinion:4,opnam:36,oppos:50,ops:[1,14,18,19,27,28,29,36,43,44,46],opset:36,opset_vers:36,opt:44,optim:[1,4,7,14,15,18,19,22,24,25,27,30,32,36],optimiz:[19,36],optimum:37,option:[1,3,7,8,13,14,15,17,20,22,23,24,29,30,33,36,39,41,42,43,44,45,47,48,50,51],optional_unwrap:19,orang:48,ord:44,order:[1,3,4,14,15,17,21,22,26,28,29,33,36,37,43,44,48,49,50],ordereddict:22,ordin:[41,43],ordinari:8,org:[2,4,17,22,35,42,48,50],organ:[4,5,31],orgqr:[43,44],orient:36,origin:[1,13,19,21,22,28,31,32,36,37,40,43,44,50],orign:50,orion:6,orionr:6,ormqr:[43,44],ort:36,ort_sess:36,orthogon:[24,44],orthogonal_:24,orthonorm:44,ossci:35,other:[1,2,4,5,7,8,13,15,19,21,23,25,26,27,28,29,30,32,33,34,36,37,42,43,48,50,51],otherwis:[1,4,5,7,14,22,23,32,40,43,44,45,48],otim:[23,44],our:[4,19,29,32,36,39],out:[1,4,5,17,19,21,22,23,25,26,32,36,39,41,42,43,44,50],out_caffe2:36,out_channel:22,out_featur:[22,29],out_j:22,out_ort:36,out_padh:23,out_padt:23,out_padw:23,out_unf:22,outer:[44,48],outlier:22,output1:[22,36],output2:22,output:[1,2,3,4,8,13,14,15,19,22,23,25,27,29,30,35,36,37,39,42,43,44,45,48,49,50],output_2d:22,output_4d:22,output_devic:[22,23],output_featur:29,output_nam:36,output_pad:[22,23],output_ratio:22,output_s:[22,23,49],output_tensor_list:14,outsid:[5,13,19,23,28,50],oven:48,over:[1,5,13,14,15,19,21,22,23,26,27,32,36,37,39,43,44,45,49,50,51],overal:[5,14,25,32,50],overall_end:13,overall_start:13,overflow:[23,44],overhead:[1,2,14,31,43],overheard:45,overlap:[1,13,22,28,49],overparameter:15,overrid:[7,14,15,22,23,36,37,42,44],overridden:[1,7,22],overrit:13,overshoot:23,overview:[21,25],overwhelm:4,overwrit:[13,22,25],owen:44,own:[4,5,14,15,22,28,36,44],owner:17,ownership:[4,5],p1d:23,p2d:23,p3d:23,p_c:22,p_i:22,p_tensor:43,pace:4,pack:[22,30,35,44],pack_padded_sequ:30,packag:[4,8,15,17,18,22,37,42,44,46,47],packagesnotfounderror:35,packed_input:30,packed_output:30,packedsequ:12,pad:[13,30,33,36,42,44,50,51],pad_if_need:50,pad_mod:[43,44],pad_packed_sequ:30,pad_valu:51,padded_input:30,padding_idx:[22,23],padding_input:30,padding_mod:[22,
23,50],padding_valu:22,padh:23,padt:23,padw:23,page:[4,13,22,28],pai:35,pair:[19,22,23,37,39,42,44],pairwis:[15,22],pairwisedist:23,paper:[4,22,37,48],parallel:[0,13,14,22,23,27,28,33,35,50],parallel_info:[0,27],parallelli:45,param1:15,param2:15,param:[1,15,22,24,25,36,37,48],param_byt:37,param_group:37,param_shap:15,paramet:[1,3,7,8,13,14,15,16,17,20,21,23,24,25,29,31,32,34,36,38,39,40,42,43,44,45,46,47,48,49,50,51],parameter:[15,43],parameteriz:15,parametr:[15,29],parent:[21,35,42],park:48,parmar:22,pars:[1,14],parse_arg:[28,36],parser:28,part:[2,3,4,5,7,14,15,19,20,22,25,30,36,37,39,44,45],parti:[5,17],partial:[15,22,23,36,44],particip:[13,14,22],particular:[4,13,19,22,27,28,30,31,33,36,43,44,45],particularli:[13,19,22],partit:22,partli:5,partner:4,pascal:[45,48],pass:[1,3,4,7,13,14,15,17,19,21,22,24,25,27,28,31,36,37,39,42,43,44,45,48,49,50],past:[14,30,48],paszk:[5,6],patch:[4,22],path:[1,2,7,14,17,19,25,34,45,47],path_importer_cach:17,path_to_hub_dir:17,pathwai:19,patienc:37,pattern:[14,19,22,28,29,30],pdb:19,pdf:[22,50],pdist:22,peak:[8,37],peer:[4,14,28],penalti:37,pend:42,penros:44,peopl:4,per:[7,8,13,14,22,23,27,31,33,42,44,47,48],per_index_weight:22,per_sample_weight:[22,23],per_work:13,perform:[1,3,11,12,13,14,15,19,21,22,23,24,25,28,29,33,37,39,40,41,42,43,44,47,49,50],period:[32,37,44],permit:39,permut:[13,36,43,44],perplex:15,persist:[3,4,12,22,35],person:[4,5,18,46],perspect:50,perturb:[1,44],peter:6,peterjc123:[6,35],phase:35,phenomenon:30,phi:23,phone:48,photo:45,phototour:46,php:45,phy:44,pic:50,pick:50,pickl:[13,21,22,44],pickle_load_arg:44,pickle_modul:44,pickle_protocol:44,pid:30,piec:4,pieter:6,pietern:6,pil:[45,46],pillow:[42,50],pin:[22,40,43,44],pin_memori:[13,28,40,43,44],pinvers:[43,44],pip:[35,42],pipelin:50,pivot:[43,44],pixel:[22,23,45,50,51],pixel_shuffl:[22,36],pixelshuffl:23,pixelwis:48,pizza:48,pjh5:6,pkg:35,place:[4,8,13,14,19,22,23,28,31,36,40,43,50],placehold:22,plai:14,plain:[7,22],plan:[4,14,22,44],plane:[22,23,44,45],plant:48,platform:[7,33,44,48],pleas:[1,2,4,5,9,10,11,14,15,19,22,23,29,35,36,37,42,43,44,45],plenti:30,plot:42,plu:50,plume:45,pmf:15,png:45,point:[1,4,5,8,13,19,22,23,25,33,37,41,42,43,44,45,47,49,52],pointer:8,pointwis:[15,26],poisson:[22,23],poissonnllloss:23,poli:45,polici:[15,37],policy_network:15,polosukhin:22,polygon:45,polymorph:19,pool:[27,29,32,33,49],pooled_w:49,pop:[8,22],popul:[1,15,43],popular:46,popularli:50,port:14,portion:[22,23,37,44],pos_weight:[22,23],posit:[1,13,15,17,22,23,36,43,44,50,52],positive_definit:15,positive_integ:15,positivedefinit:15,possess:5,possibl:[5,7,13,15,19,21,22,23,24,25,29,32,35,41,43,44,45],post:[4,30,35,48,50],postprocess:48,pot:48,potenti:[11,14,21,22,23,25,36],potential_energi:15,pottedpl:48,pow:[1,36,43,44],pow_:43,powbackward0:1,power:[22,23,37,44,50],powertransform:15,pr_curv:42,practic:[13,15,18,19,21,48],pradhan:6,pre:[1,17,22,36,37,43,45,48],preced:27,precis:[1,7,15,22,36,42,44,48],precision_matrix:15,precompil:31,predict:[22,42,48],predict_net:36,preemptiv:22,prefer:[5,13,22],preferr:44,prefix:[17,22,39],prelu:36,prepar:36,prepend:[7,13,22,26,44],preprocess:[43,48],presenc:5,present:[5,14,20,21,22,25,41,44,45,47,48],preserv:[13,19,22,23,24,28,43,50],preserve_rng_st:3,pressur:[1,25],pretrain:[17,22,25,36,48],pretrained_backbon:48,pretti:[19,44],prevent:[4,8,13,14,21,22,23,39,44],previou:[14,22,35,43,44],previous:[1,19,26,28,38,43],prim:19,primari:5,primarili:[15,43],primit:[14,19,27],print:[1,13,17,19,22,23,27,28,29,36,37,42,43,44,45],printable_graph:36,printer:19,prior:[4,5,26,37],pr
ioriti:[4,8],pro:[35,42],prob:15,probabl:[13,21,22,23,29,35,36,42,43,44,50],problem:[4,14,21,22,30,32,33,35,44],problemat:[4,19],proce:28,procedur:[19,45],proceed:8,process:[1,7,8,14,15,17,19,21,22,23,27,28,31,32,33,35,39,40,45,48],process_group:22,process_id:22,processgroup:14,prod:[22,36,43,44],prod_:[22,44],prod_d:22,produc:[4,7,8,13,19,21,22,23,26,28,31,35,36,39,44],producer_info:31,product:[1,14,15,19,22,23,31,43,44,50],prof:1,profil:[2,44],program:[1,2,8,13,14,19,25,28,30,31,32,35,42],programm:19,progress:[8,17,20,37,48],project:[4,17,34],projector:42,promot:22,prompt:35,prone:[21,32],propag:[15,21,36,39,43],proper:[22,28,35],properli:[4,22,32,41,44],properti:[1,13,15,19,22,23,28,37,41,52],proport:[22,50],proportion:[22,23],propos:[5,6,37],protobuf:36,protocol:[13,35,44],prototyp:41,prove:21,proven:[4,22],provid:[1,4,5,7,13,14,15,17,19,21,22,23,28,36,37,39,40,41,42,43,44,47,48,50,52],pseudo:44,pseudoinvers:15,pseudorandom:33,psi:44,pth:[17,19,20],pts:47,publish:4,puhrsch:6,pull:5,purchas:5,pure:19,purg:42,purge_step:42,purpos:[14,22,43,44],push:[4,5,8],pushcallback:31,put:[4,13,17,21,22,28,32,37,43,44,45],put_:43,pybind11:[7,19],pyc:31,pycapsul:16,pyplot:42,python2:[14,44],python3:[14,44],python:[1,2,5,7,8,13,14,17,21,22,23,25,26,27,28,29,30,31,32,36,38,39,43,44,50],pytorch:[0,1,2,7,8,13,15,17,19,20,22,26,27,28,30,31,32,35,37,41,42,43,44,45,48,52],pytorch_jit:19,q_scale:43,q_zero_point:43,qmnist:46,qscheme:43,qtensor:43,quad:22,quadrat:30,qualiti:[4,45],quantiti:37,quantiz:[1,43],quasirandom:44,queri:[8,13,22,28],question:[4,18],queu:[8,28],queue:[4,21,42],queue_2:21,quick:[1,4],quickli:4,quit:[4,30],qw_i:22,r2plus1d_18:48,r3d:48,r3d_18:48,r_t:22,racket:48,rais:[1,4,15,19,21,25,28,43,44,50],raise_except:1,ram:[42,44],rand:[1,19,23,36,42,43,44,48],rand_lik:44,randint:[22,23,39,42,43,44,50],randint_lik:44,randn:[1,19,22,23,25,26,28,29,36,39,41,42,43,44],randn_lik:[36,44],random:[15,17,18,22,23,33,36,42,43,45,48,50],random_:[22,23,43,44],random_devic:44,random_offset:45,random_split:13,randomaffin:50,randomappli:50,randomchoic:50,randomcrop:[45,50],randomeras:50,randomgrayscal:50,randomhorizontalflip:50,randomli:[1,13,22,23,31,45,50],randomord:50,randomperspect:50,randomresizedcrop:50,randomrot:50,randomsampl:13,randomsizedcrop:50,randomverticalflip:50,randperm:44,rang:[1,8,13,14,15,19,22,23,30,32,36,37,42,43,44,45,48,50,51],range_pop:8,range_push:8,rangl:15,rank:[13,14,15,22,32,44],rapidli:30,rare:4,rate:[15,22,31,42,47,48],rather:[1,3,7,19,23,26,36,42,43,44,51],ratio:[15,22,50],raw:[19,22,36],rbrace:44,rceil:44,rcond:44,rdinat:[39,44],reach:[4,5,13,32,37],reachabl:14,read:[13,14,19,26,28,31,36,37,43,44,47],read_video:47,read_video_timestamp:47,readabl:[0,13,36],readi:[4,7,22,44],readlin:[19,44],real:[13,15,17,22,44,50],real_vector:15,realiti:2,realli:[1,4,25,44],realloc:44,realvector:15,rearrang:[22,23],reason:[13,14,19,23,25,36,41],rebas:4,reblitz:6,rebuild:4,rebuilt:4,recal:[22,29,42,48],receiv:[1,4,13,14,15,21,22,32],recent:[1,4],recip:22,reciproc:[36,43,44],reciprocal_:43,recogn:13,recognit:[45,48],recommend:[1,13,14,17,19,21,22,24,25,27,28,29,32,36,43,44],recomput:[3,22,37],reconstruct:[8,22],record:[1,8,19,22,25,36,42,43,44],record_ev:8,record_shap:1,recordfunct:31,recov:[22,44],recreat:25,rectangl:50,rectifi:[22,23,24],recurr:[14,19,28,37],recurs:[15,19,22],recv:14,redistribut:35,reduc:[1,8,14,21,22,23,35,37,39,43,44],reduce_add:8,reduce_multigpu:14,reduce_op:14,reducelronplateau:37,reduceop:14,reduct:[14,22,23],redund:[14,44],reevalu:37,refactor:[4,34,35],refcount:[21,32],refer
:[8,13,14,15,17,18,21,22,23,25,29,30,32,42,43,44,46,48],referenc:[19,25,44],reflect:[19,22,23,30,43,44,50],reflection_pad:36,reflectionpad2d:23,reflector:44,refriger:48,regard:[19,22,23,44],region:[15,19,21,22,23,28,44,49,50],regist:[1,15,21,22,29,31,36,43,44],register_backward_hook:22,register_buff:[19,22,29],register_custom_op_symbol:36,register_forward_hook:22,register_forward_pre_hook:22,register_hook:[1,22,43],register_kl:15,register_packag:44,register_paramet:[22,29],registr:22,regress:[4,22,48],regular:[1,2,22,31,36,37],regularli:4,reimplement:22,reinforc:15,reiniti:17,reinterpret:[15,43],reinterpreted_batch_ndim:15,rel:[1,5,7,15,22,27,28,31,37,44],relat:[4,5,13],relationship:1,relative_path_to_checkpoint:17,relative_to:35,relax:[15,19],relaxed_bernoulli:15,relaxed_categor:15,releas:[4,8,14,17,21,22,23,28,33,35,36,42,44],relev:[5,43],reli:[5,13,22,33],reload:17,relu1:22,relu2:22,relu:[19,24,36],relu_:23,rem:35,remain:[1,15,21,30,44,45],remaind:[43,44],remainder_:43,remap:[19,20,44],rememb:[30,32],remot:[13,14,48],remov:[1,5,14,19,22,23,43,44,49],removablehandl:22,render:[4,42],renorm:[22,23,43,44],renorm_:43,rep:36,repackag:30,reparameter:[15,22],reparametr:[15,23],reparametriz:15,repeat:[15,23,36,43,44,50],repeat_interleav:[43,44],repeatedli:[28,39,44],repetit:44,repl:1,replac:[7,13,19,22,25,31,32,35,36,43,44],replic:[13,22,23],replica:[13,14,22],replication_pad:36,replicationpad2d:23,repo:[4,17,35,37,48],repo_nam:17,repo_own:17,report:[1,2,5,28,48],repositori:[5,17,29,32],repr:44,repres:[1,8,13,15,16,19,22,25,29,31,36,37,39,41,44,52],represent:[19,22,36,39,43,52],reproduc:[4,9,10,11,17,18,22,23,37,43,44],request:[5,14,25,28],requir:[1,3,5,7,13,14,15,17,19,21,22,23,25,27,28,29,30,31,32,36,37,42,43,44,45,48],require_grad:1,require_grad_:43,requires_grad:[1,15,22,23,29,39,43,44],requires_grad_:[1,22,23,39,43,44],rerun:3,res:44,resampl:50,rescal:[22,23,50],research:[4,17],reset:[8,22,36,38,44],reset_max_memory_alloc:8,reset_max_memory_cach:8,reshap:[22,36,39,42,43,44,50],reshape_a:[36,43],reshuffl:13,resid:[14,22,28,43,44],residu:[44,48],resili:37,resiz:[22,23,40,43,44,48,50],resize_:[1,19,40,43,44],resize_as_:[1,43],resizeas_:39,resized_crop:50,resnet101:48,resnet152:48,resnet18:[17,19,20,25,48],resnet34:48,resnet3d:48,resnet50:[17,42,48],resnet:[17,19,36,42],resnext101_32x8d:48,resnext50_32x4d:48,resolut:[22,23],resolv:[4,5,15,19,22,35,36],resourc:[13,21,45],respect:[1,14,15,22,37,40,43,44,45,50],respond:4,respons:[2,4,5,14,15,22,23,28],rest:[4,13,39],restart:[21,37,42],restor:[3,34,44],restrict:[13,19,22],restructur:4,result:[1,2,4,7,8,13,14,15,19,22,23,24,25,26,27,28,29,30,33,36,37,39,41,42,43,44,48,50],result_avg:50,resum:[37,42],retain:[1,21,32,43,44],retain_grad:[1,43],retain_graph:[1,43],rethink:48,retreiv:3,retriev:[1,13,22,23,31],return_count:[43,44],return_indic:[22,23],return_invers:[43,44],return_typ:44,reus:[1,14,25,50],reveal:39,revers:[15,19,22,25,43,44,50],revert:[5,22],review:5,reward:15,rewrit:25,rfft:[43,44],rfloor:[22,23,44],rgb:[22,42,48,50],rgba:50,rho:37,riba:22,richard:[6,15],richardson:6,right:[4,14,17,21,22,23,36,37,44,50],right_ankl:48,right_ear:48,right_elbow:48,right_ey:48,right_hip:48,right_kne:48,right_should:48,right_wrist:48,risk:4,riski:4,rmsprop:37,rng:[3,8,13,30,33,38,44],rnn:[19,30,36,42],robin:14,robust:21,roi:49,roi_align:49,roi_pool:49,roi_width:49,roialign:49,roipool:49,roll:[43,44],roof:1,root:[25,39,44,45],ross:22,rot90:[43,44],rotat:[15,44,50],rough:4,roughli:[13,44],round:[14,36,43,44],round_:43,roundtrip:4,routin:44,row:[13,23,39,42,43,44,
51],row_limit:1,rpn:48,rprop:37,rrelu:36,rrelu_:23,rsampl:15,rsqrt:[43,44],rsqrt_:43,rst:4,rsub:36,rtol:[1,19,43,44],rule:[1,14,15,19,22,25,26,43,44],run:[1,2,3,4,13,14,19,21,22,25,27,28,30,31,32,33,35,36,37,38,42,44],run_14h:42,run_fn:[1,3],runnabl:42,running_mean:[22,23],running_var:[22,23],runtim:[1,3,7,14,21,32],runtimeerror:[1,19,26,35,36,43,44],runtimewarn:15,rv0:19,rv1:19,rvert:44,rvert_p:[22,23],s_min:22,s_n:22,sacrif:48,safe:[8,19,22,31],safest:[7,39],sai:[4,19,30,36,43,45],sam:[5,6],same:[1,4,7,8,13,14,15,17,19,21,22,23,25,26,27,28,30,31,32,33,36,39,40,42,43,44,48,50,51],sampl:[13,15,22,23,24,31,33,42,43,45,49,50],sample_input_cpu:19,sample_input_gpu:19,sample_n:15,sample_r:42,sample_shap:15,sampling_ratio:49,sandwich:48,sane:44,satisfi:[1,12,15,22,37,43,44],satur:[28,50],saturation_factor:50,save:[1,3,4,14,19,20,22,25,32,36,37,42,43,44,45,47,51],save_for_backward:[1,29],save_imag:51,saved_tensor:[1,25,29],saved_weight:43,sax:24,sbd:46,sbdataset:45,sbu:46,sbucaptionedphotodataset:45,scalar:[1,19,22,23,24,36,37,39,42,43,44],scalar_valu:42,scale:[4,13,15,18,22,23,24,30,37,43,44,48,49,50,51],scale_each:51,scale_factor:[22,23],scale_fn:37,scale_grad_by_freq:[22,23],scale_mod:37,scale_tril:15,scatter:[8,14,22,30,36,43],scatter_:[36,43],scatter_add:[36,43],scatter_add_:[33,43],scatter_list:14,scenario:[13,28,36],scene:42,schedul:[31,37],schema:19,scheme:43,schmidtm:37,sci_mod:44,scientif:44,scipi:[23,42,45],scissor:48,scope:[4,19,22,30,36],score:[22,48,49],scrambl:44,scratch:[4,25],script:[2,13,14,17,22,27,31,48],script_method:[19,36],scripted_fn:19,scripted_modul:19,scriptmodul:[19,36],scrutini:4,search:[4,19,48],seat:5,sebastian:6,second:[1,3,7,19,22,23,30,33,34,35,39,42,44,47,50],section:[4,13,15,19,21,22,29,32,42,43],see:[1,2,3,4,5,7,8,9,10,11,13,15,17,19,20,21,22,23,25,28,29,30,31,32,35,36,39,41,43,44,48,49,50,51],seed:[8,13,30,33,38,44,45],seed_al:8,seek:[19,44],seem:[4,36,50],seen:[1,15,22,37,43,44],segfault:21,segment:[3,45,46,50],select:[8,11,12,13,19,21,22,23,27,28,36,43,44,45,48,50],self:[1,13,19,22,25,26,27,29,30,36,37,40,43,44],self_cpu_time_tot:1,selu:36,semant:[5,8,18,19,36,44,45,46,50],semi:[22,24],semidefinit:44,send:[4,8,13,14,21,32,35,44],sender:14,sens:[2,15,44],sensit:[22,36],sent:[8,14,21,32,44],separ:[1,7,13,14,17,19,22,23,27,37,42,44,51],seq:[1,22,43,44],seq_len:22,sequenc:[1,8,13,15,19,22,23,28,30,36,37,43,44,50],sequenti:[3,13,21,36],sequentialsampl:13,sequnc:22,seri:22,serial:[13,18,19,20,28,31,32],serializ:[19,36],seriou:[21,34],serr:45,serv:[4,14],server:[4,13],sess:36,set:[0,1,3,5,7,8,11,13,14,15,17,19,20,21,22,23,24,26,27,28,29,30,31,32,33,35,36,37,38,42,43,44,45,48,50],set_:[1,43],set_default_dtyp:44,set_default_tensor_typ:44,set_detect_anomali:1,set_devic:[8,22,41],set_dir:17,set_flush_denorm:44,set_grad_en:[1,44],set_image_backend:46,set_num_interop_thread:[27,44],set_num_thread:[27,44],set_printopt:44,set_rng_stat:[8,38,44],set_rng_state_al:8,set_sharing_strategi:21,set_start_method:32,set_stat:44,set_trac:19,set_train:36,setapiusagehandl:31,setapiusagelogg:31,setexportmoduleextrafileshook:31,setsamplingprob:31,setup:7,setuptool:7,sever:[14,19,22,23,27,28,31,37,44,50],sgd:[13,22,25,37],sgdr:37,sgn:44,sha256:20,shadow:50,shall:22,shallow:22,shamelessli:44,shape:[1,8,15,19,22,23,25,26,30,36,39,42,43,44,45,48,50,51],shape_as_tensor:36,shard:[4,13],share:[1,4,7,8,13,15,16,22,27,32,35,36,40,43,44],share_memori:32,share_memory_:[21,40,43],shared_memori:21,sharedfil:14,shazeer:22,shear:50,sheep:48,shell:7,shen:6,shi:22,shift:[22,43,44,50,51],ship:27,shippabl:
4,shm_open:21,shorter:36,shortest:22,shorttensor:[41,43],should:[1,2,3,4,5,7,8,13,14,15,17,19,20,21,22,23,28,29,30,32,33,35,36,37,39,42,43,44,45,48,49,50],shouldn:39,shout:36,show:[0,2,4,13,14,17,27,28,37,42],showcas:[5,22,28,32],shown:[8,19,29,30],shrinkag:[22,23],shuffl:[13,42,45],shufflenet_v2_x0_5:48,shufflenet_v2_x1_0:48,shufflenet_v2_x1_5:48,shufflenet_v2_x2_0:48,shufflenetv2:48,shut:13,side:[1,7,17,19,22,23,36,37,44,50],sigma:[15,22,23,43],sigmoid:[15,24,36,43,44],sigmoid_:43,sigmoidtransform:15,sign:[4,15,36,41,43,44,48],sign_:43,signal:[21,22,23,32,44],signal_2d:22,signal_4d:22,signal_ndim:[43,44],signal_s:[43,44],signatur:[1,13,22,43,44],signific:[1,25,28,37],significantli:22,silent:[8,19,22,44],sim:[22,23,44],similar:[4,13,19,21,22,23,27,29,39,43,44,45,52],similarli:[4,19,22,30,36,44],simon:6,simpl:[17,19,22,23,29,30,31,33,36],simplecustombatch:13,simplequeu:32,simpler:29,simplest:22,simplex:15,simpli:[1,7,13,15,19,22,25,39],simplifi:[19,22,37],simultan:25,sin:[7,36,42,43,44],sin_:43,sin_add:7,sinc:[4,8,13,14,15,19,22,23,29,30,31,35,36,37,38,39,44,50],sine:44,singl:[7,14,15,17,19,21,22,23,25,27,28,29,32,36,37,40,41,43,44,49,50],singleton:[15,22,26,43,44],singular:44,sinh:[43,44],sinh_:43,sink:48,site:4,situat:[15,21,32],size:[1,4,8,13,14,15,19,22,23,25,26,28,29,30,31,36,37,39,40,42,43,44,45,48,49,50,51],size_averag:[22,23],sizedim:43,sizeof:40,skateboard:48,skew:[1,2],ski:48,skip:[29,37],sky:45,slack:4,slice:[19,22,23,36,43],slide:[22,23,44],slightli:[5,13,17,48],slogdet:[43,44],slope:[22,24],slow:[32,42],slower:[2,14,23,48],slowli:38,small:[1,4,5,8,13,14,15,19,22,23,28,29,30,44],smaller:[13,37,43,44,50],smallest:[39,44,52],smart:29,smessmer:6,smi:[8,28,30],smnt:45,smoke:45,smooth:[22,36,37],smoother:17,smoothl1loss:23,snd_tensor:42,snedecor:15,snippet:17,snow:45,snowboard:48,snowi:45,sobol:44,soboleng:44,sobolengin:44,socket:21,sofa:48,soft:[22,23,48],softmarginloss:23,softmax:[15,36],softmaxtransform:15,softplu:36,softshrinkag:22,softwar:37,solid:50,solut:[4,24,32,44],solv:[4,35,43,44],solver:44,some:[1,3,4,5,8,11,14,15,17,19,21,22,23,25,28,29,30,31,32,33,34,35,36,37,39,42,43,44,45,48,50],some_dict:19,someon:4,someth:[4,19,21,35,44],sometim:[4,19,21,22,23,30,32,44],somewher:31,sophist:37,sort:[1,22,30,43,44,49],sort_bi:1,sorted_indic:22,soumith:[5,6],sound:42,sourc:[0,1,2,3,7,8,13,14,15,17,19,21,22,23,24,30,31,33,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51],space:[13,15,19,22,23,44,50],spadd:39,span:[8,22,43],spars:[1,18,24,37,41,43,44],sparse_:24,sparse_coo:[39,41,43,44],sparse_coo_tensor:[39,43,44],sparse_dim:[39,43,44],sparse_grad:44,sparse_mask:[39,43],sparseadam:[22,37],sparseaddmmbackward:39,sparsedim:43,sparsefloattensor:39,sparsetensor:[1,39,43,44],sparsiti:24,spatia:23,spatial:[22,23],spatial_scal:49,spatio:22,spawn:[13,22,32,35],spawncontext:21,speak:[39,44],special:[4,22,29,31,42,44],specif:[1,3,4,5,7,8,14,15,17,19,20,22,28,32,33,34,36,37,42,43,44,47,49],specifi:[1,7,8,13,14,15,17,19,20,22,23,28,29,31,35,36,37,38,39,40,42,43,44,45,46,50,51],specifii:36,spectral:22,speed:[4,22,27,28,30,33,44],spend:[2,4],spent:[1,2,14],sphinx:4,split:[4,13,19,22,23,36,43,44,45],split_siz:[43,44],split_size_or_sect:44,spmm:39,sponsorship:5,spoon:48,sport:48,spotri:44,spread:[8,28],sqrt:[22,24,36,39,43,44],sqrt_:43,squar:[22,23,37,39,44,50],squeez:[29,36,39,43,44],squeeze_:43,squeezenet1_0:48,squeezenet1_1:48,src:[8,14,22,43,44],src_key_padding_mask:22,src_mask:22,src_tensor:14,src_vocab:22,srinivasan:6,ssa:19,sse3:44,ssnl:6,sspaddmm:39,sspmm:39,stabil:[22,37,44],stabl:[15,22,35,36,44]
,stack:[8,13,15,22,28,36,44,50],stacktransform:15,stage:4,stagnat:37,stai:[4,22,32],stand:19,standalon:19,standard:[4,15,19,22,24,27,32,36,43,44,45,48,50],star:22,start:[1,2,5,8,13,14,21,22,23,26,28,30,32,35,36,37,43,44,47],start_dim:[43,44],start_pt:47,startpoint:50,startup:[2,27],stash:[1,3,29],stat:22,state:[1,3,8,13,15,19,22,28,32,35,37,38,44],state_dict:[17,20,22,32,34,36,37],statement:[25,29,32,36],staticmethod:[1,29],statist:[8,15,22,30],statu:[21,44],std:[7,24,31,35,43,44,48,50],std_mean:44,stddev:15,stderr:[20,48],stdin:1,stdout:37,step:[2,5,7,13,14,15,19,22,23,28,30,32,33,35,42,43,44,45],step_between_clip:45,step_siz:37,step_size_down:37,step_size_up:37,steplr:37,stft:[43,44],stick:15,stickbreakingtransform:15,still:[1,14,15,19,21,22,28,30,35,37,44],stirl:[22,23],stl10:46,stl10_binari:45,stochast:[13,15,22,37],stop:[8,15,22,37,44,48],storag:[1,8,18,19,20,21,22,25,28,32,41,43,44],storage_offset:[43,44],storage_typ:43,storageshar:35,store:[1,3,7,14,17,19,22,30,31,39,42,43,44,45],store_tru:28,str:[1,7,14,19,21,22,23,37,40,42,43,45,47,50],straight:23,strategi:[4,13,14,19,22],stream:[13,45],strict:[19,22],strictli:[5,13,22,25],stride:[1,22,23,36,41,42,43,44],strike:4,string:[0,1,7,8,14,17,19,20,22,23,31,36,40,41,42,43,44,45,46],stringio:[19,44],strip:[23,36],strip_doc_str:36,strive:4,strong:5,strong_wolf:37,strongli:[5,17,22,27],struct:31,structur:[4,5,13,19,22,28,29,32,34,35,36,42,43,44],student:15,studio:35,style:[19,44],styliz:22,sub:[19,22,36,39,43,44],sub_:[39,43],subclass:[1,7,13,15,19,22,29,43,45],subdir:45,subfold:7,subgradi:37,subgraph:22,subject:44,submatrix:15,submit:8,submodul:[19,22,36],subpackag:48,subprocess:[13,30,32],subsequ:[4,7,19,22],subset:[13,14,19,22,36,48],subsetrandomsampl:13,subspac:[22,43,44],substanti:5,substitut:41,subsystem:4,subtl:[4,22],subtleti:[13,22,30],subtli:37,subtract:[23,43,50],subtyp:19,succe:[14,35],succeed:44,success:[5,15,44],successfulli:[21,22,44],succinct:17,suffici:[7,15,17,36,44],suffix:[42,43],sugar:19,suggest:[5,17,19,22,30],suhan:6,suit:[19,36],suitabl:[13,15,37],suitcas:48,suitibl:42,sum:[1,8,13,14,15,23,28,29,36,37,39,43,44],sum_:[22,44],sum_i:22,sum_j:[22,23,44],sum_pair:19,sum_to_s:43,summar:[2,44],summari:[1,42,44,48],summarywrit:42,summat:44,sunset:[5,6],suo:6,superresolut:36,supervis:22,suppli:[3,4,7],support:[1,4,5,7,8,13,14,15,17,19,21,22,23,25,26,27,32,35,37,39,41,42,43,44,45,46,49],suppos:[13,39,44,50],suppress:[38,49],sure:[1,4,13,14,19,22,25,30,35,36,37,42,44],surfboard:48,surg:44,surpass:24,surpris:17,surrog:15,surround:19,sutskev:37,svd:[43,44,50],svhn:46,svi:15,swap:[22,23,43,44],symbol:[35,36],symbolic_fn:36,symbolic_foo_forward:36,symbolic_help:36,symbolic_nam:36,symbolic_opset10:36,symbolic_opset9:36,symbolic_opset:36,symeig:[43,44],symmetr:[44,50],symmetri:44,sync:22,sync_bn_modul:22,sync_bn_network:22,synchron:[2,8,22,27,28,32],syntact:5,syntax:19,sys:17,system:[4,7,8,19,22,25,28,31,35,44],t4d:23,t_max:37,tabl:[1,14,19,22,23,48],tag:[1,4,14,17,31,42,44],tag_nam:17,tag_scalar_dict:42,taiwan:42,take:[1,2,4,5,7,8,13,15,17,19,21,22,23,27,29,30,33,35,36,41,42,43,44,45],taken:[15,22,23,28,30,31,36,44,45,49],talk:31,tall:43,tan:[36,42,43,44],tan_:43,tangent:44,tanh:[24,36,43,44],tanh_:43,tanx:42,tape:4,tar:45,tarbal:45,target:[22,23,32,37,42,43,45,48,50],target_length:[22,23],target_n:22,target_transform:45,target_typ:45,task:[1,4,22,27,35,48,50],tau:[23,44],tbb:27,tdr:35,team:[4,5],technic:[4,5,30],techniqu:22,teddi:48,tell:[1,4,19,43,44],temperatur:[15,23],tempor:[22,23],temporari:[7,22,30],temporarili:36,ten:[19,42],t
en_crop:50,tencrop:50,tend:4,teng:6,tenni:48,tensor1:[43,44],tensor2:[43,44],tensor:[3,4,7,8,13,14,15,16,18,19,22,23,24,25,26,27,28,29,30,32,33,35,36,37,39,40,42,45,46,47,48,49,51],tensor_a:44,tensor_b:44,tensor_list:14,tensorboard:18,tensordataset:13,tensordot:44,tensorflow:[15,42],term:[5,15,22,23,30,37,43,44],termin:[21,37],terminolog:22,test10k:45,test50k:45,test:[7,19,21,28,29,42,44,45,48,50],text:[4,15,22,23,24,42,43,44,50],text_str:42,texttt:[43,44],tgt:[13,22],tgt_key_padding_mask:22,tgt_mask:22,tgt_vocab:22,thalloc:35,than:[1,2,3,5,7,8,13,14,17,19,22,23,24,25,26,28,29,30,32,33,36,37,39,42,43,44,45,46,48,49,50,51],thank:[15,29],thc:35,thc_state:35,thcstate:35,thcudacheck:35,thcudatensor:35,thcudatensor_cadd:35,thcudatensor_fil:35,thcudatensor_issamesizea:35,thcudatensor_resizea:35,the_model:34,thei:[1,3,4,5,8,13,14,15,19,21,22,23,28,29,32,35,36,37,39,43,44,45,47,48,49,50],them:[1,3,4,13,14,17,19,21,22,23,25,26,29,30,31,35,37,39,42,43,44,45],themodelclass:34,themselv:[1,44],therebi:13,therefor:[1,3,13,14,15,19,22,23,30,31,37,39,43,44,50],theta:[15,23],thi:[1,2,3,4,5,7,8,9,10,11,13,14,15,17,19,21,22,23,24,25,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,48,50,51,52],thin:44,thing:[1,4,23,25,30,32,39],think:4,third:[15,22,44],thoma:6,those:[1,2,8,13,19,22,23,28,37,44,49],though:[14,19,32],thrash:14,thread:[1,8,13,14,18,19,22,31,32,36,44],three:[14,19,22,36,37,42,45],threej:42,threshold:[36,37,42,44,48],threshold_:23,threshold_mod:37,through:[1,3,4,5,13,15,17,19,21,22,23,30,35,43,44,52],throughout:22,thrown:[43,44],thtensor:43,thtensorrandom:44,thu:[1,13,14,15,19,22,23,30,31,36,43,44],thumb:14,tie:[15,48],tile:43,time:[1,2,4,7,8,13,14,15,19,21,22,23,24,25,27,30,31,32,35,36,37,39,42,43,44,47,48,50],timedelta:14,timelin:[1,2],timeout:[13,14,21],timestamp:47,tini:[43,52],tip:4,tl_flip:50,tmp:[1,7],to_dens:39,to_dlpack:16,to_grayscal:50,to_mkldnn:43,to_pil_imag:50,to_spars:[39,43],to_tensor:50,toaster:48,todens:39,togeth:[13,14,15,22,30,31,42,44,50],toilet:48,token:17,tol:44,toler:[1,19,37,44],tolerance_chang:37,tolerance_grad:37,tolist:[40,43],too:[4,22,23,30,32,35,39],tool:[1,2,5,17,19,35],toothbrush:48,top:[1,13,15,21,22,23,29,44,45,48,50],topic:[5,31],topilimag:50,topk:[36,43,44],topolog:1,torch:[5,11,12,18,19,25,26,27,28,30,31,32,33,34,35,45,46,48],torch_14808_1591070686:35,torch_extens:7,torch_extensions_dir:7,torch_hom:[17,20],torch_model_zoo:48,torch_shm_manag:21,torchscript:[18,36,49],torchvis:[17,18,19,25,36,42],toronto:22,total:[1,2,4,13,17,22,23,37,44],total_averag:1,total_count:15,total_length:[22,30],total_loss:30,totensor:[42,45,50],touch:[4,36],toward:[5,36,44],tr_flip:50,trace:[1,4,13,25,28,43,44],trace_modul:19,trace_nam:1,traceabl:19,traceback:[1,21],traced_bar:19,traced_cpu:19,traced_fn:19,traced_foo:19,traced_gpu:19,tracer:36,tracerwarn:19,track:[1,3,8,21,22,28,29,30,31,43],track_running_stat:22,tracker:[4,5],trade:[3,22],traffic:48,trail:[22,24,26,29],train2017:48,train:[8,13,14,17,19,22,23,24,25,30,36,37,42,45,48,50],train_batch:37,train_extra:45,train_load:28,train_nov:45,trainabl:37,trainload:42,trainset:42,trainval:45,tranform:15,transb:36,transfer:[13,14,21,28],transform:[13,18,23,25,42,44,45,46,48],transform_input:48,transform_to:15,transformation_matrix:50,transformed_distribut:15,transformer_decod:22,transformer_encod:22,transformer_model:22,transit:19,translat:50,transpos:[22,23,25,36,39,43,44],transpose_:[1,39,43],transposed_data:13,trapezoid:44,trapz:44,travers:[22,29],treat:[15,19,22,23,37,41,43,44],tree:[4,19,22,45],tri:[4,19,21,22,43],triag:5,t
rial:15,triangl:42,triangular2:37,triangular:[15,23,37,44],triangular_solv:[43,44],trick:[15,22,23,31,48],tricki:25,trigger:[1,4,5,31,43,44],tril:[43,44],tril_:43,tril_indic:44,trilinear:[22,23],trim:44,tripl:23,triplet:22,triplet_loss:22,tripletmarginloss:23,triu:[43,44],triu_:43,triu_indic:44,trivial:44,trou:22,troubleshoot:4,truck:48,truli:36,trunc:[43,44],trunc_:43,truncat:[30,44],truth:[42,48],tseq:15,tune:[14,37],tup:19,tupl:[1,3,8,13,21,22,23,29,36,37,39,42,43,44,45,49,50,51],tuple_or_list:19,turn:[7,13,19,22,36],tutori:[5,29,31,36],tvmonitor:48,twice:[30,48],two:[1,2,7,8,13,14,15,17,19,22,23,25,26,27,28,29,33,34,35,37,39,42,43,44,45,48],twse:42,txhxwxc:45,txt:19,type:[1,7,8,14,15,17,18,22,23,27,28,31,35,36,39,40,41,42,43,44,45,48,49,50],type_a:[36,43],type_p:15,type_q:15,typic:[7,13,15,19,22,27,52],typo:4,ubc:37,ucf101:46,ucf:45,uint8:[41,42,43,44,47,50,52],uint8_t:43,uint8tensor:48,ultim:[5,7],umbrella:48,unabl:[4,37],unbalanc:22,unbatch:44,unbias:[43,44],unbind:[36,43,44],unchang:[22,43,44,50],uncoalesc:[39,44],uncondition:17,unconstrain:15,undefin:[14,19,28,43,44],under:[1,2,13,21,22,23,25,28,32,35,38,42,44],underli:[8,15,19,23,30,43,44],underscor:[17,39,43],understand:[4,5,22,24,25,42],understood:44,undertak:4,underwai:1,undesir:[11,22,23],undetermin:33,unequ:22,unexpect:[14,19,22],unexpected_kei:22,unexpectedli:43,unfold:[19,36,43],unfortun:[1,3,5,22],unicodedecodeerror:44,uniform:[22,24,43,44],uniform_:[24,29,43,44],uniformli:[15,44,50],unind:38,uniniti:[43,44],union:49,uniqu:[14,19,20,43,44],unique_consecut:[43,44],unit:[22,23,44],unit_interv:15,unitari:44,unitriangular:[43,44],univari:15,univers:19,unix:[13,21],unlabel:45,unless:[1,2,4,13,14,22,25,28,43,44],unlik:[4,19,21,22,32,43],unmask:22,unnecessari:28,unnorm:[22,23],unnot:21,unoccupi:8,unord:22,unpack:[22,29,30,44],unpack_data:44,unpack_pivot:44,unpickl:[13,44],unpool:22,unpooled_output:22,unreduc:22,unrel:4,unresolv:35,unrol:[19,36],unseg:22,unsign:[41,43],unsort:22,unsorted_indic:22,unspecifi:[14,43,44],unsqueez:[22,29,36,42,43,44],unsqueeze_:43,unstabl:[15,23,44],unsuccess:14,unsupport:19,until:[4,8,14,21,23,25,28,30,42],untouch:13,untrack:19,untrain:36,unus:[8,22,28],unused_argument1:22,unused_argument2:22,unusu:4,upcal:19,upcom:14,updat:[1,5,17,19,22,32,35,36,37,42,43],upgrad:37,upon:[13,21],upper:[15,22,23,24,37,43,44,50],upper_bound:15,uppercas:14,ups:4,upsample_nearest1d:36,upsample_nearest2d:36,upsample_nearest3d:36,upsample_trilinear:23,upscal:22,upscale_factor:[22,23],upstream:35,url:[1,14,17,20],usa:42,usag:[1,2,4,8,13,15,25,30,36,42,43,44],use:[1,3,4,5,7,8,13,15,17,19,21,22,23,24,25,27,28,29,30,31,32,33,35,36,42,43,44,45,48],use_cuda:1,use_gpu:19,use_input_stat:23,use_mkldnn:27,use_openmp:27,use_tbb:27,used:[1,2,4,7,8,12,13,14,15,17,19,20,22,23,24,27,28,29,31,32,34,35,36,37,38,39,41,42,43,44,45,46,48,49,50,51],useful:[1,4,13,15,17,19,22,23,25,27,29,31,37,44,50],user:[1,3,8,13,14,15,17,21,22,28,31,32,36,42,43,44,50],userwarn:[26,36],uses:[1,2,8,13,14,17,19,22,23,27,28,29,30,31,35,37,44,45,46],using:[1,3,4,5,7,9,10,11,13,14,15,17,18,19,21,22,23,24,25,27,28,29,30,31,32,33,35,36,37,42,43,44,45,48,50],usp:46,usual:[1,4,7,13,19,22,27,30,31,36,42,43,44],uszkoreit:22,util:[4,8,18,26,27,28,29,30,31,36,37,45,46,48],v100:[12,22,48],v_1:22,v_2:22,val2017:48,val:[24,43,45],val_loss:37,valid:[1,14,15,19,22,36,37,44,45],valid_fil:45,validate_arg:15,valu:[1,3,4,5,7,13,14,15,17,20,21,22,23,24,25,27,28,29,30,33,36,37,39,42,43,44,45,48,49,50,51],valueerror:22,var1:37,var2:37,var_mean:44,vari:[22,37],variabl:[3,7,8,15,17,20,22
,27,28,30,33,35,36,37,42,43,44,48],variabletyp:36,varianc:[15,22,24,33,37,44],variant:[31,37,44],variat:15,variou:[3,5,7,13,21,32,34,37],vase:48,vaswani:22,vc2017:35,vdim:22,vec1:[43,44],vec2:[43,44],vec:[22,43,44],vector:[1,15,22,23,42,43,44,45,50],vehicl:5,vein:19,veloc:37,verbos:[7,36,37,42],veri:[1,2,4,19,21,22,25,29,30,32,35,36,37,38,48],verifi:[7,19,20,29,36],verify_ninja_avail:7,versa:[22,40,43,44],version:[3,15,17,19,22,23,25,26,28,29,31,35,36,37,43,44,45,50],versu:[4,22],vert:[22,23],vertex:42,vertic:[42,50],vertical_flip:50,vertices_tensor:42,vflip:50,vframe:47,vgg11:48,vgg11_bn:48,vgg13:48,vgg13_bn:48,vgg16:48,vgg16_bn:48,vgg19:48,vgg19_bn:48,vgg:36,via:[1,4,7,8,13,14,15,19,21,22,24,30,32,36,37,41,44],vice:[22,40,43,44],vid_tensor:42,video:[22,42,45,46],video_arrai:47,video_classif:48,video_codec:47,video_fp:47,videoclip:45,viehmann:6,view:[1,4,5,13,19,21,22,23,26,36,41,43,44,45,50],view_a:43,violat:5,virtual:36,vishwak:6,vishwakftw:6,visibl:[8,14,22,48],vision:[5,17,46,48,49],visual:[1,22,35,42],vitali:6,vitalyfedyunin:6,voc2012:45,voc:[46,48],vocdetect:45,vocsegment:45,volumetr:[22,23],vs2017:35,vs2017_runtim:35,vw_i:22,vychisl:44,w_hf:22,w_hg:22,w_hi:22,w_hn:22,w_ho:22,w_hr:22,w_hz:22,w_if:22,w_ig:22,w_ii:22,w_in:22,w_io:22,w_ir:22,w_iz:22,w_j:22,w_n:22,w_y:27,w_z:27,wai:[1,3,4,5,7,13,14,15,19,21,22,23,29,30,32,33,34,35,36,37,39,43,44,45,48],wait:[1,8,14,21,22,27,37],wait_ev:8,wait_stream:[8,28],walltim:42,wang:6,want:[4,8,13,14,17,19,22,23,25,28,29,36,37,39,43,44],warm:37,warmup:1,warn:[26,36,38,44],wasn:44,wast:4,weaker:15,weight:[13,17,19,23,24,25,29,30,36,37,42,43,44,48],weight_decai:37,weight_g:22,weight_hh:22,weight_hh_l:22,weight_ih:22,weight_ih_l:22,weight_u:22,weight_v:22,weighted_kernel_sum:19,weightedrandomsampl:13,weird:[22,48],welcom:4,well:[1,4,7,13,14,19,22,23,25,32,33,36,39,42,44,47,48],were:[1,14,19,22,29,36,39,43,44],what:[1,3,4,5,15,17,19,22,25,29,36,37,39,45],whatev:[43,44],wheel:35,when:[1,2,4,5,7,8,9,10,11,13,14,15,17,19,21,22,23,25,26,28,29,30,31,32,33,34,35,36,37,38,39,42,43,44,45,48,50],whenev:[5,21,22,23],where:[1,2,4,5,7,13,14,15,19,20,21,22,23,24,25,26,28,33,36,37,39,41,42,43,44,45,47,48,49,51],wherev:5,whether:[1,4,5,7,13,14,15,17,20,22,23,28,29,36,37,39,40,42,43,44,45],which:[1,2,4,5,7,8,13,15,17,19,20,21,22,23,25,26,27,28,30,31,33,35,36,37,38,39,40,41,42,43,44,45,48,49,50,52],whilst:[15,28],white:50,whiten:50,who:4,whole:[13,14,22,32,47],whose:[13,15,22,25,36,42,44,45],why:[4,36],wide:27,wide_resnet101_2:48,wide_resnet50_2:48,width:[15,22,23,36,44,49,50],wikipedia:23,willing:5,win:44,win_length:[43,44],window:[13,18,22,23,43,44],window_length:44,wine:48,wip:4,wise:[14,15,22,23,27,44],wish:19,wit:19,with_cuda:[7,35],with_replac:44,within:[1,3,5,8,13,14,15,19,22,27,28,31,36,42,44],without:[1,3,4,5,8,13,15,17,19,21,22,23,26,28,32,36,38,42,43,44,48,50,52],won:[3,17,22,23,25,29,36,44],woodburi:15,word:[1,14,19,22,23,30],word_language_model:36,work:[1,3,4,5,7,8,14,15,17,19,21,22,23,25,27,28,29,32,35,39,43,44],worker:[4,13,14,22,45],worker_id:13,worker_info:13,worker_init_fn:[13,30],workflow:17,workground:35,workload:[13,14,31],workspac:[7,36],world:[14,22],world_siz:[14,22],worth:17,would:[1,3,5,13,14,15,19,22,26,27,28,36,39,43,44],wrap:[1,13,19,22,35,37,43],wrapper:[8,14,19,21,22,29],write:[4,5,19,23,25,28,30,36,37,39,42,43,44,47],write_video:47,writer:42,written:[1,19,22,36,37,40,42,44],wrong:[32,35,37],wrote:4,www:[22,37,42,45],x86:44,x86_x64:35,x_0:44,x_1:[22,23,44],x_2:[22,23,44],x_3:44,x_clone:21,x_cpu:28,x_cpu_long:28,x_gpu:28,x_i:[22,23,44],x_j
:[22,23],x_n:22,x_t:22,xavier_normal_:24,xavier_uniform_:24,xcosx:42,xdg_cache_hom:[17,20],xing:44,xml:45,xsinx:42,xxx:45,xxy:45,xxz:45,y_cpu:28,y_cpu_long:28,y_gpu:28,y_hard:23,y_i:[22,44],y_n:22,y_soft:23,yang:[5,6],ycbcr:50,year:45,yes:4,yet:[8,44],yf225:6,yield:[13,22,44],yinghai:6,you:[1,2,3,4,5,7,8,11,13,14,15,17,19,21,22,23,25,26,28,29,30,31,32,33,35,36,37,38,39,41,42,43,44,45,48,50],your:[1,2,4,7,8,13,14,15,19,21,22,25,26,28,29,30,31,33,35,36,37,38,39,42,43,44,48,50],yourself:[32,35],z_t:22,zach:6,zdevito:6,zebra:48,zero:[1,8,15,19,21,22,23,24,28,35,36,39,42,43,44,50],zero_:[1,23,39,43],zero_grad:[22,30,32,37],zero_infin:[22,23],zero_point:43,zeros_:24,zeros_lik:[28,36,44],zhang:6,zhong:50,zip:[13,31],zipf:22,zou3519:6,zou:6},titles:["torch.__config__","Automatic differentiation package - torch.autograd","torch.utils.bottleneck","torch.utils.checkpoint","PyTorch Contribution Guide","PyTorch Governance","PyTorch Governance | Persons of Interest","torch.utils.cpp_extension","torch.cuda","<no title>","<no title>","<no title>","<no title>","torch.utils.data","Distributed communication package - torch.distributed","Probability distributions - torch.distributions","torch.utils.dlpack","torch.hub","PyTorch documentation","TorchScript","torch.utils.model_zoo","Multiprocessing package - torch.multiprocessing","torch.nn","torch.nn.functional","torch.nn.init","Autograd mechanics","Broadcasting semantics","CPU threading and TorchScript inference","CUDA semantics","Extending PyTorch","Frequently Asked Questions","Features for large-scale deployments","Multiprocessing best practices","Reproducibility","Serialization semantics","Windows FAQ","torch.onnx","torch.optim","torch.random","torch.sparse","torch.Storage","Tensor Attributes","torch.utils.tensorboard","torch.Tensor","torch","torchvision.datasets","torchvision","torchvision.io","torchvision.models","torchvision.ops","torchvision.transforms","torchvision.utils","Type 
Info"],titleterms:{"case":[4,19],"default":[13,19],"function":[1,5,14,15,19,22,23,36,39,50],"import":[17,35],"new":4,"return":30,Adding:[4,29,36],One:35,Ops:44,The:4,Use:[19,28],__config__:0,about:4,access:19,activ:[22,23],adaptive_avg_pool1d:23,adaptive_avg_pool2d:23,adaptive_avg_pool3d:23,adaptive_max_pool1d:23,adaptive_max_pool2d:23,adaptive_max_pool3d:23,adaptiveavgpool1d:22,adaptiveavgpool2d:22,adaptiveavgpool3d:22,adaptivelogsoftmaxwithloss:22,adaptivemaxpool1d:22,adaptivemaxpool2d:22,adaptivemaxpool3d:22,adjust:37,affine_grid:23,agnost:28,alexnet:[36,48],algorithm:37,alpha_dropout:23,alphadropout:22,amd:6,anomali:1,api:[27,31],approach:34,arithmet:19,ask:[19,30,36],asynchron:[14,28,32],aten:36,attach:31,attribut:[19,41],autograd:[1,6,25,29],automat:[1,13,19],avg_pool1d:23,avg_pool2d:23,avg_pool3d:23,avgpool1d:22,avgpool2d:22,avgpool3d:22,avoid:[4,32],backend:14,backward:[25,26],basic:14,batch:13,batch_norm:23,batchnorm1d:22,batchnorm2d:22,batchnorm3d:22,bceloss:22,bcewithlogitsloss:22,behavior:13,bernoulli:15,best:[28,32,34],beta:15,bilinear:[22,23],binary_cross_entropi:23,binary_cross_entropy_with_logit:23,binomi:15,bla:44,bottleneck:2,broadcast:26,broken:35,buffer:[28,32],bug:4,build:[4,6,27,31,35],builtin:19,c10:6,cach:[17,28],call:19,caption:45,cast:19,categor:15,cauchi:15,celu:[22,23],cffi:35,chang:5,channel:35,characterist:48,check:[1,19,25],checkpoint:3,chi2:15,choos:14,cifar:45,cityscap:45,classif:48,claus:35,clip_grad_norm_:22,clip_grad_value_:22,closur:37,cnn:48,coco:45,code:[4,19,28],codebas:4,collate_fn:13,collect:[8,14],come:14,common:[4,14,31],commun:[8,14],comparison:[19,44],compat:26,compon:35,comput:[1,44],consider:31,constant:19,constantpad1d:22,constantpad2d:22,constantpad3d:22,constraint:15,construct:[19,37],contain:22,contribut:4,controversi:5,conv1d:[22,23],conv2d:[22,23],conv3d:[22,23],conv_transpose1d:23,conv_transpose2d:23,conv_transpose3d:23,convers:50,convolut:[22,23,48],convtranspose1d:22,convtranspose2d:22,convtranspose3d:22,core:5,correct:[1,25],cosine_embedding_loss:23,cosine_similar:23,cosineembeddingloss:22,cosinesimilar:22,cpp:35,cpp_extens:7,cpu:[6,27],creat:19,creation:44,cross_entropi:23,crossentropyloss:22,ctc_loss:23,ctcloss:22,cuda:[6,8,21,28,30,32,35],cudnn:33,cufft:28,custom:[29,36],data:[13,30],data_parallel:23,dataload:6,dataparallel:[22,23,28],dataset:[13,45],datasetfold:45,deadlock:32,debug:19,decis:5,deeplabv3:48,defin:19,densenet:48,deploy:31,deprec:1,deriv:15,descriptor:21,detect:[1,45,48],develop:[4,5],devic:[28,41],dict:19,differenti:1,dirichlet:15,disabl:[1,13,19,44],discuss:4,distanc:[22,23],distribut:[6,14,15,22,23],distributeddataparallel:22,diverg:15,dlpack:16,doc:4,document:[4,18],doesn:30,down:35,download:17,driver:35,dropout2d:[22,23],dropout3d:[22,23],dropout:[22,23],dtype:41,edg:19,elu:[22,23],embed:[22,23],embedding_bag:23,embeddingbag:22,emnist:45,encod:25,end:36,engin:6,entrypoint:17,environ:[14,31],error:[30,35],event:8,exampl:36,exclud:25,execut:28,exponenti:15,exponentialfamili:15,express:19,extend:29,extens:[8,29,31,35],fakedata:45,faq:[5,35],fashion:45,faster:48,featur:[4,31],fight:32,file:[14,21],file_descriptor:21,file_system:21,finfo:52,fishersnedecor:15,fix:4,flatten:22,fleet:31,flickr:45,fold:[22,23],found:35,fractionalmaxpool2d:22,freed:30,frequent:[19,30,36],from:[17,25,35,36],fulli:48,gamma:15,gelu:23,gener:[6,8,26,38,44,50],geometr:15,get:4,glu:23,googlenet:48,govern:[5,6],gpu:[14,22,23,30],gradient:[1,44],graph:19,grid_sampl:23,group:14,groupnorm:22,gru:22,grucel:22,guid:[4,5],gumbel:15,gumbel_softmax:23,h
alfcauchi:15,halfnorm:15,hardshrink:[22,23],hardtanh:[22,23],hinge_embedding_loss:23,hingeembeddingloss:22,hip:6,histori:25,hmdb51:45,hogwild:32,how:[17,25,37],hub:17,ident:[22,30],iinfo:52,imag:50,imagefold:45,imagenet:45,implement:[4,17],improv:4,incept:48,includ:35,independ:15,index:44,indic:18,infer:27,info:52,init:24,initi:14,inspect:19,instal:35,instanc:48,instance_norm:23,instancenorm1d:22,instancenorm2d:22,instancenorm3d:22,instead:28,interest:6,interfac:14,interpol:23,interpret:19,ipc:35,isn:30,issu:4,iter:13,jit:[6,19],join:44,kei:[5,35],keypoint:48,kinet:45,kl_div:23,kldivloss:22,kmnist:45,known:17,l1_loss:23,l1loss:22,languag:19,lapack:44,laplac:15,larg:31,launch:14,layer:22,layer_norm:23,layernorm:22,layout:41,leaky_relu:23,leakyrelu:22,learn:37,level:6,limit:[17,36],linear:[22,23],list:19,liter:19,load:[13,17],loader:30,local:[1,44],local_response_norm:23,localresponsenorm:22,log:31,log_softmax:23,logic:[17,19],logitrelaxedbernoulli:15,lognorm:15,logsigmoid:[22,23],logsoftmax:22,lookup:19,loss:[22,23],lowrankmultivariatenorm:15,lp_pool1d:23,lp_pool2d:23,lppool1d:22,lppool2d:22,lstm:22,lstmcell:22,lsun:45,maintain:[5,6],make:[4,5],manag:[8,21,28],map:13,margin_ranking_loss:23,marginrankingloss:22,mask:48,math:44,max_pool1d:23,max_pool2d:23,max_pool3d:23,max_unpool1d:23,max_unpool2d:23,max_unpool3d:23,maxpool1d:22,maxpool2d:22,maxpool3d:22,maxunpool1d:22,maxunpool2d:22,maxunpool3d:22,mechan:25,memori:[8,13,28,30],metadata:31,method:19,mistak:4,mix:[19,48],mkldnn:6,mnasnet:48,mnist:45,mobilenet:48,model:[17,30,31,34,48],model_zoo:20,moder:5,modul:[6,19,22,29],moduledict:22,modulelist:22,more:4,mse_loss:23,mseloss:22,multi:[13,14,22,23],multi_margin_loss:23,multiheadattent:22,multilabel_margin_loss:23,multilabel_soft_margin_loss:23,multilabelmarginloss:22,multilabelsoftmarginloss:22,multimarginloss:22,multinomi:15,multiprocess:[6,21,28,32,35],multivariatenorm:15,mutat:44,nccl:14,negativebinomi:15,network:[14,30,48],nll_loss:23,nllloss:22,non:[13,22,23,36],nonlinear:22,normal:[15,22,23],notic:17,number:[8,30,38],numer:1,numpi:33,nvidia:8,nvtx:8,object:48,one_hot:23,onehotcategor:15,onlin:4,onnx:[6,36],open:4,oper:[1,14,19,25,31,35,36,44],ops:49,optim:37,option:[19,27,35,37],order:13,other:[14,22,44],out:30,overview:4,pack_padded_sequ:22,pack_sequ:22,packag:[1,14,21,35],packedsequ:22,pad:[22,23],pad_packed_sequ:22,pad_sequ:22,pairwise_dist:23,pairwisedist:22,parallel:[30,44],paramet:[19,22,37],parameterdict:22,parameterlist:22,parameters_to_vector:22,pareto:15,particip:4,pass:32,pathwis:15,pdist:23,peopl:5,per:37,perform:6,person:[6,48],philosophi:5,phototour:45,pil:50,pin:[13,28],pipe:35,pixel_shuffl:23,pixelshuffl:22,place:[1,25,26,44],plan:28,platform:13,point:[14,31],pointwis:44,poisson:15,poisson_nll_loss:23,poissonnllloss:22,pool:[22,23],ppc:6,practic:[28,32,34],prelu:[22,23],probabl:15,process:[4,5,13],profil:[1,31],project:5,promot:4,properli:30,propos:4,protect:35,publish:17,pull:4,python:[4,19,35],pytorch:[4,5,6,14,18,29,33,36],qmnist:45,quasi:44,question:[19,30,36],queue:32,random:[8,13,30,38,44],rate:37,readabl:4,recommend:34,recurr:[22,30],reduct:44,refer:19,refin:19,reflectionpad1d:22,reflectionpad2d:22,registri:15,relaxedbernoulli:15,relaxedonehotcategor:15,relu6:[22,23],relu:[22,23],remove_spectral_norm:22,remove_weight_norm:22,replicationpad1d:22,replicationpad2d:22,replicationpad3d:22,report:[4,30],reproduc:33,request:4,requires_grad:25,resnet:48,resnext:48,resolut:19,reus:32,review:4,rng:6,rnn:22,rnncell:22,robust:4,rocm:6,rrelu:[22,23],run:17,runtim:[27,30,48],sa
mpl:44,sampler:13,save:[17,31,34],sbd:45,sbu:45,scale:31,score:15,script:[19,35,36],segment:48,selu:[22,23],semant:[26,28,34,48],sequenti:22,serial:[34,44],share:[14,21],shufflenet:48,shut:35,sigmoid:[22,23],simd:6,singl:13,slice:44,smooth_l1_loss:23,smoothl1loss:22,soft_margin_loss:23,softmarginloss:22,softmax2d:22,softmax:[22,23],softmin:[22,23],softplu:[22,23],softshrink:[22,23],softsign:[22,23],sourc:[4,35],spars:[22,23,39],spawn:[14,21],specif:13,spectral:44,spectral_norm:22,speed:35,squeezenet:48,start:4,statement:19,step:37,stl10:45,storag:40,strategi:21,stream:[8,28],studentt:15,style:13,subgraph:25,submit:4,subprocess:21,subscript:19,sum:22,support:36,svhn:45,syncbatchnorm:22,synchron:14,system:[14,21],tabl:18,take:37,tanh:[22,23],tanhshrink:[22,23],tcp:14,tenet:5,tensor:[1,21,41,43,44,50],tensorboard:42,ternari:19,test:4,thread:27,threshold:[22,23],through:32,tip:32,tool:8,torch:[0,1,2,3,6,7,8,13,14,15,16,17,20,21,22,23,24,29,36,37,38,39,40,41,42,43,44,50,52],torchscript:[19,27,31],torchvis:[45,46,47,48,49,50,51],trace:[19,36],tracer:19,train:32,transform:[15,22,50],transformeddistribut:15,transformerdecod:22,transformerdecoderlay:22,transformerencod:22,transformerencoderlay:22,triag:4,triplet_margin_loss:23,tripletmarginloss:22,tupl:19,tutori:4,type:[13,19,52],ucf101:45,uncontroversi:5,unfold:[22,23],uniform:15,upsampl:[22,23],upsample_bilinear:23,upsample_nearest:23,upsamplingbilinear2d:22,upsamplingnearest2d:22,usag:[31,35],use:[14,37],user:19,usp:45,util:[2,3,7,13,14,16,20,22,42,44,51],valu:19,variabl:[1,14,19],vector_to_paramet:22,vgg:48,video:[47,48],vision:[22,23],voc:45,warn:19,weibul:15,weight:22,weight_norm:22,where:17,which:14,why:35,wide:[31,48],win:35,window:[6,35],without:35,work:[13,30],worker:30,write:29,xla:6,zeropad2d:22}}) \ No newline at end of file diff --git a/docs/stable/sparse.html b/docs/stable/sparse.html index d2d694af9eb1..82eac0e1c6a0 100644 --- a/docs/stable/sparse.html +++ b/docs/stable/sparse.html @@ -177,6 +177,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -531,8 +532,14 @@

  • mat (Tensor) – a dense matrix to be added

  • mat1 (SparseTensor) – a sparse matrix to be multiplied

  • mat2 (Tensor) – a dense matrix to be multiplied

  • -
  • beta (Number, optional) – multiplier for mat (\(\beta\))

  • -
  • alpha (Number, optional) – multiplier for \(mat1 @ mat2\) (\(\alpha\))

  • +
  • beta (Number, optional) – multiplier for mat (\(\beta\))

  • +
  • alpha (Number, optional) – multiplier for \(mat1 @ mat2\) (\(\alpha\))
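A minimal sketch of the call these parameters describe (torch.sparse.addmm computes beta * mat + alpha * (mat1 @ mat2); the shapes and values below are arbitrary):

>>> import torch
>>> i = torch.tensor([[0, 1], [2, 0]])            # indices of the nonzeros
>>> v = torch.tensor([1.0, 2.0])                  # their values
>>> mat1 = torch.sparse_coo_tensor(i, v, (2, 3))  # sparse matrix to be multiplied
>>> mat = torch.randn(2, 3)                       # dense matrix to be added
>>> mat2 = torch.randn(3, 3)                      # dense matrix to be multiplied
>>> out = torch.sparse.addmm(mat, mat1, mat2, beta=0.5, alpha=2.0)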

  • @@ -543,8 +550,14 @@

torch.sparse.mm(mat1, mat2)[source]

Performs a matrix multiplication of the sparse matrix mat1 and the dense matrix mat2. Similar to torch.mm(), if mat1 is a \((n \times m)\) tensor and mat2 is a \((m \times p)\) tensor, out will be a \((n \times p)\) dense tensor. mat1 needs to have sparse_dim = 2.
This function also supports backward for both matrices. Note that the gradient of mat1 is a coalesced sparse tensor.
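A short sketch of the call and of the autograd behavior noted above (values are random):

>>> import torch
>>> mat1 = torch.randn(2, 3).to_sparse().requires_grad_(True)
>>> mat2 = torch.randn(3, 4, requires_grad=True)
>>> out = torch.sparse.mm(mat1, mat2)   # (2, 4) dense tensor
>>> out.sum().backward()                # gradients flow to both matrices
>>> mat1.grad                           # a coalesced sparse tensor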

    @@ -711,9 +724,6 @@

    Functions - - - diff --git a/docs/stable/storage.html b/docs/stable/storage.html index 6e6d3de52776..9486b1ad5784 100644 --- a/docs/stable/storage.html +++ b/docs/stable/storage.html @@ -177,6 +177,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -564,9 +565,6 @@

    torch.Storage - - - diff --git a/docs/stable/tensor_attributes.html b/docs/stable/tensor_attributes.html index b12f7afd7b7d..a566119c4a67 100644 --- a/docs/stable/tensor_attributes.html +++ b/docs/stable/tensor_attributes.html @@ -177,6 +177,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -497,9 +498,6 @@ - - - diff --git a/docs/stable/tensorboard.html b/docs/stable/tensorboard.html index fdf7a6d63eff..a28e306d17a3 100644 --- a/docs/stable/tensorboard.html +++ b/docs/stable/tensorboard.html @@ -177,6 +177,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -348,8 +349,14 @@

    torch.utils.tensorboard
  • comment (string) – Comment appended to the default log_dir as a suffix. If log_dir is assigned, this argument has no effect.

  • -
  • purge_step (int) – When logging crashes at step \(T+X\) and restarts at step \(T\), -any events whose global_step larger or equal to \(T\) will be +

  • purge_step (int) – When logging crashes at step \(T+X\) and restarts at step \(T\), any events whose global_step is larger than or equal to \(T\) will be purged and hidden from TensorBoard. Note that crashed and resumed experiments should have the same log_dir.

  • max_queue (int) – Size of the queue for pending events and @@ -490,9 +497,17 @@

    torch.utils.tensorboard

  • -
    Shape:

    img_tensor: Default is \((3, H, W)\). You can use torchvision.utils.make_grid() to +

    Shape:

img_tensor: Default is \((3, H, W)\). You can use torchvision.utils.make_grid() to
convert a batch of tensors into 3xHxW format or call add_images and let us do the job.
A tensor with \((1, H, W)\), \((H, W)\), or \((H, W, 3)\) is also suitable as long as
the corresponding dataformats argument is passed, e.g. CHW, HWC, HW.
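A small usage sketch for this shape convention (the tag names and sizes are arbitrary):

>>> import torch
>>> from torch.utils.tensorboard import SummaryWriter
>>> writer = SummaryWriter()
>>> img = torch.rand(3, 64, 64)   # CHW, the default layout
>>> writer.add_image('my_image', img, global_step=0)
>>> writer.add_image('my_image_hwc', img.permute(1, 2, 0), 0, dataformats='HWC')
>>> writer.close()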

    @@ -538,7 +553,9 @@

    torch.utils.tensorboard
    -
    Shape:

    img_tensor: Default is \((N, 3, H, W)\). If dataformats is specified, other shape will be +

    Shape:

img_tensor: Default is \((N, 3, H, W)\). If dataformats is specified, other shapes will be
accepted, e.g. NCHW or NHWC.
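A matching sketch for a batch of images (shape arbitrary):

>>> import torch
>>> from torch.utils.tensorboard import SummaryWriter
>>> writer = SummaryWriter()
>>> batch = torch.rand(16, 3, 64, 64)   # NCHW, the default layout
>>> writer.add_images('my_image_batch', batch, global_step=0)
>>> writer.close()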

    @@ -597,7 +614,9 @@

    torch.utils.tensorboard
    -
    Shape:

    vid_tensor: \((N, T, C, H, W)\). The values should lie in [0, 255] for type uint8 or [0, 1] for type float.

    +
    Shape:

vid_tensor: \((N, T, C, H, W)\). The values should lie in [0, 255] for type uint8 or [0, 1] for type float.

    @@ -619,7 +638,9 @@

    torch.utils.tensorboard
    -
    Shape:

    snd_tensor: \((1, L)\). The values should lie between [-1, 1].

    +
    Shape:

snd_tensor: \((1, L)\). The values should lie between [-1, 1].
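A brief sketch of logging a waveform with this shape (the 440 Hz tone is an arbitrary choice):

>>> import torch
>>> from torch.utils.tensorboard import SummaryWriter
>>> writer = SummaryWriter()
>>> t = torch.linspace(0, 1, 44100)
>>> tone = torch.sin(2 * 3.14159 * 440 * t).unsqueeze(0)   # (1, L), values in [-1, 1]
>>> writer.add_audio('my_audio', tone, global_step=0, sample_rate=44100)
>>> writer.close()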

    @@ -678,8 +699,12 @@

    torch.utils.tensorboard
    -
    Shape:

    mat: \((N, D)\), where N is number of data and D is feature dimension

    -

    label_img: \((N, C, H, W)\)

    +
    Shape:

mat: \((N, D)\), where N is the number of data points and D is the feature dimension

    +

label_img: \((N, C, H, W)\)

    Examples:

    @@ -786,9 +811,15 @@

    torch.utils.tensorboard
    -
    Shape:

    vertices: \((B, N, 3)\). (batch, number_of_vertices, channels)

    -

    colors: \((B, N, 3)\). The values should lie in [0, 255] for type uint8 or [0, 1] for type float.

    -

    faces: \((B, N, 3)\). The values should lie in [0, number_of_vertices] for type uint8.

    +
    Shape:

vertices: \((B, N, 3)\). (batch, number_of_vertices, channels)

    +

colors: \((B, N, 3)\). The values should lie in [0, 255] for type uint8 or [0, 1] for type float.

    +

faces: \((B, N, 3)\). The values should lie in [0, number_of_vertices] for type uint8.

    Examples:

    @@ -900,9 +931,6 @@

    torch.utils.tensorboard - - - diff --git a/docs/stable/tensors.html b/docs/stable/tensors.html index b6943078004b..5e33523bc0bb 100644 --- a/docs/stable/tensors.html +++ b/docs/stable/tensors.html @@ -177,6 +177,7 @@
  • torch.hub
  • torch.jit
  • torch.multiprocessing
  • +
  • torch.random
  • torch.utils.bottleneck
  • torch.utils.checkpoint
  • torch.utils.cpp_extension
  • @@ -299,7 +300,7 @@

    8-bit integer (unsigned)

    torch.uint8

    -

    torch.ByteTensor

    +

    torch.ByteTensor

    torch.cuda.ByteTensor

    8-bit integer (signed)

    @@ -324,7 +325,7 @@

    Boolean

    torch.bool

    -

    torch.BoolTensor

    +

    torch.BoolTensor

    torch.cuda.BoolTensor

    @@ -866,8 +867,12 @@
    bernoulli(*, generator=None) → Tensor
    -

    Returns a result tensor where each \(\texttt{result[i]}\) is independently -sampled from \(\text{Bernoulli}(\texttt{self[i]})\). self must have +

Returns a result tensor where each \(\texttt{result[i]}\) is independently
sampled from \(\text{Bernoulli}(\texttt{self[i]})\). self must have
floating point dtype, and the result will have the same dtype.

    See torch.bernoulli()
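A one-line sketch of the sampling described above (draws are random):

>>> probs = torch.tensor([0.2, 0.8, 0.5])
>>> probs.bernoulli()   # each entry becomes 0. or 1., drawn with its own probability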

    @@ -879,7 +884,9 @@
    bernoulli_(p=0.5, *, generator=None) → Tensor

Fills each location of self with an independent sample from
\(\text{Bernoulli}(\texttt{p})\). self can have integral
dtype.

    @@ -888,8 +895,12 @@ bernoulli_(p_tensor, *, generator=None) → Tensor

    p_tensor should be a tensor containing probabilities to be used for drawing the binary random number.

    -

    The \(\text{i}^{th}\) element of self tensor will be set to a -value sampled from \(\text{Bernoulli}(\texttt{p\_tensor[i]})\).

    +

The \(\text{i}^{th}\) element of self tensor will be set to a
value sampled from \(\text{Bernoulli}(\texttt{p\_tensor[i]})\).

    self can have integral dtype, but p_tensor must have floating point dtype.
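A quick sketch of the tensor-probability form (values arbitrary):

>>> p = torch.tensor([0.1, 0.5, 0.9])       # floating point probabilities
>>> out = torch.empty(3, dtype=torch.long)  # integral self is allowed
>>> out.bernoulli_(p)                       # out[i] ~ Bernoulli(p[i])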

    @@ -944,8 +955,9 @@ cauchy_(median=0, sigma=1, *, generator=None) → Tensor

    Fills the tensor with numbers drawn from the Cauchy distribution:

    -\[f(x) = \dfrac{1}{\pi} \dfrac{\sigma}{(x - \text{median})^2 + \sigma^2}\]
    -
+\[f(x) = \dfrac{1}{\pi} \dfrac{\sigma}{(x - \text{median})^2 + \sigma^2}\]
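For instance (a sketch; draws are random):

>>> torch.empty(5).cauchy_(median=0.0, sigma=1.0)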

    @@ -1437,8 +1449,9 @@ exponential_(lambd=1, *, generator=None) → Tensor

    Fills self tensor with elements drawn from the exponential distribution:

    -\[f(x) = \lambda e^{-\lambda x}\]
    -
+\[f(x) = \lambda e^{-\lambda x}\]
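For instance (a sketch; draws are random):

>>> torch.empty(5).exponential_(lambd=1.5)   # samples from Exp(1.5)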
    @@ -1535,8 +1548,9 @@ geometric_(p, *, generator=None) → Tensor

    Fills self tensor with elements drawn from the geometric distribution:

    -\[f(X=k) = p^{k - 1} (1 - p)\]
    -
+\[f(X=k) = p^{k - 1} (1 - p)\]
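For instance (a sketch; each draw is the index of the first success, so values start at 1):

>>> torch.empty(5).geometric_(p=0.3)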
    @@ -1972,13 +1986,24 @@
    log_normal_(mean=1, std=2, *, generator=None)

Fills self tensor with numbers sampled from the log-normal distribution
parameterized by the given mean \(\mu\) and standard deviation
\(\sigma\). Note that mean and std are the mean and
standard deviation of the underlying normal distribution, and not of the
returned distribution:

    -\[f(x) = \dfrac{1}{x \sigma \sqrt{2\pi}}\ e^{-\frac{(\ln x - \mu)^2}{2\sigma^2}}\]
    -
+\[f(x) = \dfrac{1}{x \sigma \sqrt{2\pi}}\ e^{-\frac{(\ln x - \mu)^2}{2\sigma^2}}\]
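For instance (a sketch; draws are random):

>>> torch.empty(4).log_normal_(mean=0.0, std=0.25)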
    @@ -2038,14 +2063,14 @@
    masked_scatter_(mask, source)

Copies elements from source into self tensor at positions where
-the mask is one.
+the mask is True.
The shape of mask must be broadcastable
with the shape of the underlying tensor. The source should
have at least as many elements as the number of ones in mask.
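A small deterministic sketch (values arbitrary; source values are consumed in row-major order at the True positions):

>>> x = torch.zeros(2, 3)
>>> mask = torch.tensor([[True, False, True], [False, True, False]])
>>> src = torch.arange(1., 7.)
>>> x.masked_scatter_(mask, src)
tensor([[1., 0., 2.],
        [0., 3., 0.]])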

    Parameters
    @@ -2067,13 +2092,13 @@
    masked_fill_(mask, value)

Fills elements of self tensor with value where mask is
-one.
+True.
The shape of mask must be
broadcastable with the shape of the underlying tensor.
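A matching sketch for masked_fill_ (values arbitrary):

>>> x = torch.zeros(2, 3)
>>> mask = torch.tensor([[True, False, True], [False, True, False]])
>>> x.masked_fill_(mask, -1.0)
tensor([[-1.,  0., -1.],
        [ 0., -1.,  0.]])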

    Parameters
    @@ -3424,10 +3449,11 @@

    Fills self tensor with numbers sampled from the continuous uniform distribution:

\[P(x) = \dfrac{1}{\text{to} - \text{from}}\]
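For instance (a sketch; draws are random):

>>> torch.empty(3).uniform_(-1, 1)   # samples from U(-1, 1)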
@@ -3484,11 +3510,16 @@
of elements, but may have a different size. For a tensor to be viewed, the new view size must be compatible with its original size and stride, i.e., each new view dimension must either be a subspace of an original dimension, or only span across original dimensions \(d, d+1, \dots, d+k\) that satisfy the following contiguity-like condition that \(\forall i = 0, \dots, k-1\),

\[\text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]\]

Otherwise, contiguous() needs to be called before the tensor can be viewed. See also: reshape(), which returns a view if the shapes are compatible, and copies (equivalent to calling contiguous()) otherwise.
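A deterministic sketch of the contiguity condition in practice:

>>> base = torch.arange(12)
>>> v = base.view(3, 4)            # shares storage with base
>>> v.t().is_contiguous()          # transposing breaks the stride condition
False
>>> v.t().contiguous().view(-1)    # contiguous() copies, after which view works
tensor([ 0,  4,  8,  1,  5,  9,  2,  6, 10,  3,  7, 11])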

@@ -3552,24 +3583,24 @@
-class torch.ByteTensor
-
-The following methods are unique to torch.ByteTensor.
+class torch.BoolTensor
+
+The following methods are unique to torch.BoolTensor.

all() → bool

-Returns True if all elements in the tensor are non-zero, False otherwise.
+Returns True if all elements in the tensor are True, False otherwise.

Example:

->>> a = torch.randn(1, 3).byte() % 2
+>>> a = torch.rand(1, 2).bool()
 >>> a
-tensor([[1, 0, 0]], dtype=torch.uint8)
+tensor([[False, True]], dtype=torch.bool)
 >>> a.all()
-tensor(0, dtype=torch.uint8)
+tensor(False, dtype=torch.bool)

@@ -3578,7 +3609,7 @@
Returns True if all elements in each row of the tensor in the given
-dimension dim are non-zero, False otherwise.
+dimension dim are True, False otherwise.

If keepdim is True, the output tensor is of the same size as input except in the dimension dim where it is of size 1. Otherwise, dim is squeezed (see torch.squeeze()), resulting in the output tensor having 1 fewer dimension than input.

@@ -3593,33 +3624,35 @@
Example:

->>> a = torch.randn(4, 2).byte() % 2
+>>> a = torch.rand(4, 2).bool()
 >>> a
-tensor([[0, 0],
-        [0, 0],
-        [0, 1],
-        [1, 1]], dtype=torch.uint8)
+tensor([[True, True],
+        [True, False],
+        [True, True],
+        [True, True]], dtype=torch.bool)
 >>> a.all(dim=1)
-tensor([0, 0, 0, 1], dtype=torch.uint8)
+tensor([ True, False,  True,  True], dtype=torch.bool)
+>>> a.all(dim=0)
+tensor([ True, False], dtype=torch.bool)

any() → bool

-Returns True if any elements in the tensor are non-zero, False otherwise.
+Returns True if any elements in the tensor are True, False otherwise.

Example:

->>> a = torch.randn(1, 3).byte() % 2
+>>> a = torch.rand(1, 2).bool()
 >>> a
-tensor([[0, 0, 1]], dtype=torch.uint8)
+tensor([[False, True]], dtype=torch.bool)
 >>> a.any()
-tensor(1, dtype=torch.uint8)
+tensor(True, dtype=torch.bool)

@@ -3628,7 +3661,7 @@
Returns True if any elements in each row of the tensor in the given
-dimension dim are non-zero, False otherwise.
+dimension dim are True, False otherwise.

If keepdim is True, the output tensor is of the same size as input except in the dimension dim where it is of size 1. Otherwise, dim is squeezed (see torch.squeeze()), resulting in the output tensor having 1 fewer dimension than input.

@@ -3643,14 +3676,16 @@
Example:

->>> a = torch.randn(4, 2).byte() % 2
+>>> a = torch.randn(4, 2) < 0
 >>> a
-tensor([[1, 0],
-        [0, 0],
-        [0, 1],
-        [0, 0]], dtype=torch.uint8)
->>> a.any(dim=1)
-tensor([1, 0, 1, 0], dtype=torch.uint8)
+tensor([[ True,  True],
+        [False,  True],
+        [ True,  True],
+        [False, False]])
+>>> a.any(1)
+tensor([ True,  True,  True, False])
+>>> a.any(0)
+tensor([True, True])
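Because comparison results are now torch.bool, they feed directly into these methods; a small deterministic sketch:

>>> x = torch.tensor([0.5, -1.0, 2.0])
>>> m = x > 0                      # a torch.BoolTensor
>>> m.any(), m.all()
(tensor(True), tensor(False))
>>> x[m]                           # bool masks also index directly
tensor([0.5000, 2.0000])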
     
@@ -3722,9 +3757,6 @@

diff --git a/docs/stable/torch.html b/docs/stable/torch.html
index f6524f064611..6d513e728a8c 100644
--- a/docs/stable/torch.html
+++ b/docs/stable/torch.html
@@ -177,6 +177,7 @@
 • torch.multiprocessing
+• torch.random
 • torch.utils.bottleneck

@@ -844,17 +845,20 @@

Tensors

torch.arange(start=0, end, step=1, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor

Returns a 1-D tensor of size \(\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil\) with values from the interval [start, end) taken with common difference step beginning from start.

Note that non-integer step is subject to floating point rounding errors when comparing against end; to avoid inconsistency, we advise adding a small epsilon to end in such cases.

\[\text{out}_{i+1} = \text{out}_{i} + \text{step}\]

    Parameters
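Example (standard deterministic calls illustrating the size formula above):

>>> torch.arange(5)
tensor([0, 1, 2, 3, 4])
>>> torch.arange(1, 2.5, 0.5)    # ceil((2.5 - 1) / 0.5) = 3 elements
tensor([1.0000, 1.5000, 2.0000])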
@@ -1438,7 +1457,11 @@
Indexing, Slicing, Joining, Mutating Ops

Returns a tensor containing the indices of all non-zero elements of input. The result is sorted lexicographically, with the last index changing the fastest (C-style).

If input has n dimensions, then the resulting indices tensor out is of size \((z \times n)\), where \(z\) is the total number of non-zero elements in the input tensor.

When as_tuple is true:

Returns a tuple of 1-D tensors, one for each dimension in input, each containing the indices (in that dimension) of all non-zero elements of input.
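Example (deterministic; the second call sketches the as_tuple form described above):

>>> torch.nonzero(torch.tensor([1, 1, 0, 1]))
tensor([[0],
        [1],
        [3]])
>>> torch.nonzero(torch.tensor([1, 1, 0, 1]), as_tuple=True)
(tensor([0, 1, 3]),)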

@@ -1552,12 +1575,20 @@
Indexing, Slicing, Joining, Mutating Ops

torch.squeeze(input, dim=None, out=None) → Tensor

Returns a tensor with all the dimensions of input of size 1 removed.

For example, if input is of shape \((A \times 1 \times B \times C \times 1 \times D)\) then the out tensor will be of shape \((A \times B \times C \times D)\).

When dim is given, a squeeze operation is done only in the given dimension. If input is of shape \((A \times 1 \times B)\), squeeze(input, 0) leaves the tensor unchanged, but squeeze(input, 1) will squeeze the tensor to the shape \((A \times B)\).

Note

The returned tensor shares the storage with the input tensor, so changing the contents of one will change the contents of the other.
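Example:

>>> x = torch.zeros(2, 1, 2, 1, 2)
>>> x.squeeze().size()
torch.Size([2, 2, 2])
>>> x.squeeze(1).size()
torch.Size([2, 2, 1, 2])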

@@ -1759,22 +1790,23 @@
Indexing, Slicing, Joining, Mutating Ops

Returns a tensor of elements selected from either input or other, depending on condition.

The operation is defined as:

\[\text{out}_i = \begin{cases}
    \text{input}_i & \text{if } \text{condition}_i \\
    \text{other}_i & \text{otherwise} \\
\end{cases}\]

Parameters

-• condition (ByteTensor) – When True (nonzero), yield input, otherwise yield other
-• input (Tensor) – values selected at indices where condition is True
-• other (Tensor) – values selected at indices where condition is False
+• condition (BoolTensor) – When True (nonzero), yield x, otherwise yield y
+• x (Tensor) – values selected at indices where condition is True
+• y (Tensor) – values selected at indices where condition is False

Returns
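Example (a small deterministic sketch of the selection rule):

>>> x = torch.tensor([[-0.5,  1.5], [ 2.5, -3.5]])
>>> torch.where(x > 0, x, torch.zeros_like(x))
tensor([[0.0000, 1.5000],
        [2.5000, 0.0000]])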
@@ -1924,7 +1956,7 @@
Indexing, Slicing, Joining, Mutating Ops

Parameters

new_state (torch.ByteTensor) – The desired state.

Example:

@@ -1978,7 +2010,7 @@
Indexing, Slicing, Joining, Mutating Ops

Parameters

new_state (torch.ByteTensor) – The desired state

@@ -1995,15 +2027,24 @@
Indexing, Slicing, Joining, Mutating Ops

The input tensor should be a tensor containing probabilities to be used for drawing the binary random number. Hence, all values in input have to be in the range: \(0 \leq \text{input}_i \leq 1\).

The \(\text{i}^{th}\) element of the output tensor will draw a value \(1\) according to the \(\text{i}^{th}\) probability value given in input.

\[\text{out}_{i} \sim \mathrm{Bernoulli}(p = \text{input}_{i})\]

The returned out tensor only has values 0 or 1 and is of the same shape as input.

out can have integral dtype, but input must have floating point dtype.
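Example (only the endpoints are deterministic; p = 0 always yields 0 and p = 1 always yields 1):

>>> p = torch.tensor([[0.0, 1.0], [1.0, 0.0]])
>>> torch.bernoulli(p)
tensor([[0., 1.],
        [1., 0.]])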

@@ -2056,7 +2097,9 @@
Indexing, Slicing, Joining, Mutating Ops

If input is a vector, out is a vector of size num_samples.

If input is a matrix with m rows, out is a matrix of shape \((m \times \text{num\_samples})\).

If replacement is True, samples are drawn with replacement.

If not, they are drawn without replacement, which means that when a sample index is drawn for a row, it cannot be drawn again for that row.
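Example (with the zero-weight entries excluded, only indices 1 and 2 can be drawn here; their order is random):

>>> weights = torch.tensor([0.0, 10.0, 3.0, 0.0])
>>> torch.multinomial(weights, 2)
tensor([1, 2])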

@@ -2195,7 +2238,9 @@
Indexing, Slicing, Joining, Mutating Ops

torch.rand(*size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor

Returns a tensor filled with random numbers from a uniform distribution on the interval \([0, 1)\).

The shape of the tensor is defined by the variable argument size.

Parameters

@@ -2230,7 +2275,9 @@
Indexing, Slicing, Joining, Mutating Ops

torch.rand_like(input, dtype=None, layout=None, device=None, requires_grad=False) → Tensor

Returns a tensor with the same size as input that is filled with random numbers from a uniform distribution on the interval \([0, 1)\). torch.rand_like(input) is equivalent to torch.rand(input.size(), dtype=input.dtype, layout=input.layout, device=input.device).

@@ -2325,10 +2372,11 @@
Indexing, Slicing, Joining, Mutating Ops

\[\text{out}_{i} \sim \mathcal{N}(0, 1)\]

The shape of the tensor is defined by the variable argument size.

Parameters

• input (Tensor) – the input tensor

@@ -2723,10 +2774,11 @@
Pointwise Ops

torch.acos(input, out=None) → Tensor

Returns a new tensor with the arccosine of the elements of input.

\[\text{out}_{i} = \cos^{-1}(\text{input}_{i})\]

Parameters

If input is of type FloatTensor or DoubleTensor, other must be a real number, otherwise it should be an integer.

Parameters

@@ -2790,10 +2843,11 @@
Pointwise Ops

The shapes of input and other must be broadcastable.

\[\text{out} = \text{input} + \text{alpha} \times \text{other}\]

If other is of type FloatTensor or DoubleTensor, alpha must be a real number, otherwise it should be an integer.

Parameters
@@ -2832,10 +2886,11 @@
Pointwise Ops

Performs the element-wise division of tensor1 by tensor2, multiplies the result by the scalar value and adds it to input.

\[\text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i}\]

The shapes of input, tensor1, and tensor2 must be broadcastable.

For inputs of type FloatTensor or DoubleTensor, value must be a real number, otherwise an integer.

@@ -2843,7 +2898,9 @@
Pointwise Ops

Parameters

The shapes of input, tensor1, and tensor2 must be broadcastable.

For inputs of type FloatTensor or DoubleTensor, value must be a real number, otherwise an integer.
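Example (a sketch of addcdiv using the positional value argument; values follow directly from the formula above):

>>> t = torch.zeros(3)
>>> t1 = torch.tensor([1., 2., 3.])
>>> t2 = torch.tensor([2., 2., 2.])
>>> torch.addcdiv(t, 0.5, t1, t2)    # 0 + 0.5 * t1 / t2
tensor([0.2500, 0.5000, 0.7500])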

@@ -2880,7 +2938,9 @@
Pointwise Ops

Parameters

• input (Tensor) – the tensor to be added
• value (Number, optional) – multiplier for \(tensor1 .* tensor2\)
• tensor1 (Tensor) – the tensor to be multiplied
• tensor2 (Tensor) – the tensor to be multiplied
• out (Tensor, optional) – the output tensor

@@ -2904,10 +2964,11 @@
Pointwise Ops

torch.asin(input, out=None) → Tensor

Returns a new tensor with the arcsine of the elements of input.

\[\text{out}_{i} = \sin^{-1}(\text{input}_{i})\]

Parameters

• input (Tensor) – the input tensor

@@ -2930,10 +2991,11 @@
Pointwise Ops

torch.atan(input, out=None) → Tensor

Returns a new tensor with the arctangent of the elements of input.

\[\text{out}_{i} = \tan^{-1}(\text{input}_{i})\]

Parameters

• input (Tensor) – the input tensor

@@ -3010,14 +3073,15 @@
Pointwise Ops

Clamps all elements in input into the range [min, max] and returns a resulting tensor:

\[y_i = \begin{cases}
    \text{min} & \text{if } x_i < \text{min} \\
    x_i & \text{if } \text{min} \leq x_i \leq \text{max} \\
    \text{max} & \text{if } x_i > \text{max}
\end{cases}\]

If input is of type FloatTensor or DoubleTensor, args min and max must be real numbers, otherwise they should be integers.

Parameters
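Example (deterministic):

>>> torch.clamp(torch.tensor([-1.7, 0.3, 2.4]), min=-1.0, max=1.0)
tensor([-1.0000,  0.3000,  1.0000])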
@@ -3094,10 +3158,11 @@
Pointwise Ops

torch.cos(input, out=None) → Tensor

Returns a new tensor with the cosine of the elements of input.

\[\text{out}_{i} = \cos(\text{input}_{i})\]

Parameters

If input is of type FloatTensor or DoubleTensor, other should be a real number, otherwise it should be an integer

Parameters

@@ -3185,10 +3252,11 @@
Pointwise Ops

The shapes of input and other must be broadcastable.

\[\text{out}_i = \frac{\text{input}_i}{\text{other}_i}\]

Parameters

• input (Tensor) – the numerator tensor

@@ -3221,10 +3289,11 @@
Pointwise Ops

torch.digamma(input, out=None) → Tensor

Computes the logarithmic derivative of the gamma function on input.

\[\psi(x) = \frac{d}{dx} \ln\left(\Gamma\left(x\right)\right) = \frac{\Gamma'(x)}{\Gamma(x)}\]

Parameters

input (Tensor) – the tensor to compute the digamma function on

@@ -3242,10 +3311,17 @@
Pointwise Ops

torch.erf(input, out=None) → Tensor

Computes the error function of each element. The error function is defined as follows:

\[\mathrm{erf}(x) = \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt\]

Parameters

• tensor (Tensor) – the input tensor

@@ -3288,12 +3371,15 @@
Pointwise Ops

torch.erfinv(input, out=None) → Tensor

Computes the inverse error function of each element of input. The inverse error function is defined in the range \((-1, 1)\) as:

\[\mathrm{erfinv}(\mathrm{erf}(x)) = x\]

Parameters

• input (Tensor) – the input tensor

@@ -3414,10 +3503,11 @@
Pointwise Ops

torch.frac(input, out=None) → Tensor

Computes the fractional portion of each element in input.

-\[\text{out}_{i} = \text{input}_{i} - \left\lfloor \text{input}_{i} \right\rfloor\]
+\[\text{out}_{i} = \text{input}_{i} - \left\lfloor |\text{input}_{i}| \right\rfloor * \operatorname{sgn}(\text{input}_{i})\]

Example:
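A sketch illustrating the corrected definition above, which truncates toward zero for negative inputs:

>>> torch.frac(torch.tensor([1.25, -1.25]))
tensor([ 0.2500, -0.2500])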

    The shapes of start and end must be broadcastable. If weight is a tensor, then the shapes of weight, start, and end must be broadcastable.

@@ -3467,10 +3558,11 @@
Pointwise Ops

Returns the natural logarithm of the elements of input.

\[y_{i} = \log_{e} (x_{i})\]

Parameters

If input is of type FloatTensor or DoubleTensor, other should be a real number, otherwise it should be an integer

Parameters

@@ -3620,10 +3716,11 @@
Pointwise Ops

The shapes of input and other must be broadcastable.

\[\text{out}_i = \text{input}_i \times \text{other}_i\]

Parameters

• input (Tensor) – the first multiplicand tensor

@@ -3654,13 +3751,22 @@
Pointwise Ops

torch.mvlgamma(input, p) → Tensor

Computes the multivariate log-gamma function ([reference]) with dimension \(p\) element-wise, given by

\[\log(\Gamma_{p}(a)) = C + \displaystyle \sum_{i=1}^{p} \log\left(\Gamma\left(a - \frac{i - 1}{2}\right)\right)\]

where \(C = \log(\pi) \times \frac{p (p - 1)}{4}\) and \(\Gamma(\cdot)\) is the Gamma function.

If any of the elements are less than or equal to \(\frac{p - 1}{2}\), then an error is thrown.

Parameters
@@ -3687,10 +3793,11 @@
Pointwise Ops

torch.neg(input, out=None) → Tensor

Returns a new tensor with the negative of the elements of input.

\[\text{out} = -1 \times \text{input}\]

Parameters

When exponent is a tensor, the operation applied is:

\[\text{out}_i = x_i ^ {\text{exponent}_i}\]

When exponent is a tensor, the shapes of input and exponent must be broadcastable.

Parameters
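Example (deterministic, with an elementwise exponent tensor):

>>> torch.pow(torch.tensor([1., 2., 3.]), torch.tensor([3., 2., 1.]))
tensor([1., 4., 3.])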
@@ -3767,10 +3876,11 @@
Pointwise Ops

out is of the same shape as exponent.

The operation applied is:

\[\text{out}_i = \text{self} ^ {\text{exponent}_i}\]

Parameters

• self (float) – the scalar base value for the power operation

@@ -3793,10 +3903,11 @@
Pointwise Ops

torch.reciprocal(input, out=None) → Tensor

Returns a new tensor with the reciprocal of the elements of input.

\[\text{out}_{i} = \frac{1}{\text{input}_{i}}\]

Parameters

• input (Tensor) – the input tensor

@@ -3901,10 +4019,11 @@
Pointwise Ops

torch.sigmoid(input, out=None) → Tensor

Returns a new tensor with the sigmoid of the elements of input.

\[\text{out}_{i} = \frac{1}{1 + e^{-\text{input}_{i}}}\]

Parameters

• input (Tensor) – the input tensor

@@ -3927,10 +4046,11 @@
Pointwise Ops

torch.sign(input, out=None) → Tensor

Returns a new tensor with the signs of the elements of input.

\[\text{out}_{i} = \operatorname{sgn}(\text{input}_{i})\]

Parameters

• input (Tensor) – the input tensor

@@ -3953,10 +4073,11 @@
Pointwise Ops

torch.sin(input, out=None) → Tensor

Returns a new tensor with the sine of the elements of input.

\[\text{out}_{i} = \sin(\text{input}_{i})\]

Parameters

• input (Tensor) – the input tensor

@@ -4006,10 +4128,17 @@
Pointwise Ops

torch.sqrt(input, out=None) → Tensor

Returns a new tensor with the square-root of the elements of input.

\[\text{out}_{i} = \sqrt{\text{input}_{i}}\]

Parameters

• input (Tensor) – the input tensor

@@ -4032,10 +4161,11 @@
Pointwise Ops

torch.tan(input, out=None) → Tensor

Returns a new tensor with the tangent of the elements of input.

\[\text{out}_{i} = \tan(\text{input}_{i})\]

Parameters
-Returns the indices of all elements in the input tensor.
+Returns the indices of the maximum value of all elements in the input tensor.

This is the second value returned by torch.max(). See its documentation for the exact semantics of this method.

@@ -4232,10 +4363,11 @@
Reduction Ops

If input is a vector of size N, the result will also be a vector of size N, with elements

\[y_i = x_1 \times x_2 \times x_3 \times \dots \times x_i\]

Parameters

If keepdim is True, the output tensor is of the same size as input except in the dimension(s) dim where it is of size 1. Otherwise, dim is squeezed (see torch.squeeze()), resulting in the output tensor having fewer dimensions.

@@ -5141,10 +5279,11 @@
Comparison Ops

torch.allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) → bool

This function checks if all input and other satisfy the condition:

\[\lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert\]

elementwise, for all elements of input and other. The behaviour of this function is analogous to numpy.allclose.

Parameters
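Example (both results follow directly from the condition above):

>>> torch.allclose(torch.tensor([10000., 1e-07]), torch.tensor([10000.1, 1e-08]))
False
>>> torch.allclose(torch.tensor([10000., 1e-08]), torch.tensor([10000.1, 1e-09]))
True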
@@ -5215,11 +5354,11 @@
Comparison Ops

• input (Tensor) – the tensor to compare
• other (Tensor or float) – the tensor or value to compare
-• out (Tensor, optional) – the output tensor. Must be a ByteTensor
+• out (Tensor, optional) – the output tensor. Must be a BoolTensor

Returns

-A torch.ByteTensor containing a 1 at each location where comparison is true
+A torch.BoolTensor containing a True at each location where comparison is true

Return type

Tensor

@@ -5227,8 +5366,7 @@
Comparison Ops

>>> torch.eq(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
-tensor([[ 1,  0],
-        [ 0,  1]], dtype=torch.uint8)
+tensor([[True, False], [False, True]])
     

@@ -5247,7 +5385,9 @@
Comparison Ops

torch.ge(input, other, out=None) → Tensor

Computes \(\text{input} \geq \text{other}\) element-wise.

The second argument can be a number or a tensor whose shape is broadcastable with the first argument.

@@ -5255,11 +5395,11 @@
Comparison Ops

• input (Tensor) – the tensor to compare
• other (Tensor or float) – the tensor or value to compare
-• out (Tensor, optional) – the output tensor that must be a ByteTensor
+• out (Tensor, optional) – the output tensor that must be a BoolTensor

Returns

-A torch.ByteTensor containing a 1 at each location where comparison is true
+A torch.BoolTensor containing a True at each location where comparison is true

Return type

Tensor

@@ -5267,8 +5407,7 @@
Comparison Ops

>>> torch.ge(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
-tensor([[ 1,  1],
-        [ 0,  1]], dtype=torch.uint8)
+tensor([[True, True], [False, True]])

@@ -5276,7 +5415,9 @@
Comparison Ops

torch.gt(input, other, out=None) → Tensor

Computes \(\text{input} > \text{other}\) element-wise.

The second argument can be a number or a tensor whose shape is broadcastable with the first argument.

@@ -5284,11 +5425,11 @@
Comparison Ops

• input (Tensor) – the tensor to compare
• other (Tensor or float) – the tensor or value to compare
-• out (Tensor, optional) – the output tensor that must be a ByteTensor
+• out (Tensor, optional) – the output tensor that must be a BoolTensor

Returns

-A torch.ByteTensor containing a 1 at each location where comparison is true
+A torch.BoolTensor containing a True at each location where comparison is true

Return type

Tensor

@@ -5296,8 +5437,7 @@
Comparison Ops

>>> torch.gt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
-tensor([[ 0,  1],
-        [ 0,  0]], dtype=torch.uint8)
+tensor([[False, True], [False, False]])
     

@@ -5355,7 +5495,7 @@
Comparison Ops

input (Tensor) – A tensor to check

Returns

-A torch.ByteTensor containing a 1 at each location of NaN elements.
+A torch.BoolTensor containing a True at each location of NaN elements.

Return type

Tensor

@@ -5363,7 +5503,7 @@
Comparison Ops

>>> torch.isnan(torch.tensor([1, float('nan'), 2]))
-tensor([ 0,  1,  0], dtype=torch.uint8)
+tensor([False, True, False])
     

@@ -5412,7 +5552,9 @@
Comparison Ops

torch.le(input, other, out=None) → Tensor

Computes \(\text{input} \leq \text{other}\) element-wise.

The second argument can be a number or a tensor whose shape is broadcastable with the first argument.

@@ -5420,11 +5562,11 @@
Comparison Ops

• input (Tensor) – the tensor to compare
• other (Tensor or float) – the tensor or value to compare
-• out (Tensor, optional) – the output tensor that must be a ByteTensor
+• out (Tensor, optional) – the output tensor that must be a BoolTensor

Returns

-A torch.ByteTensor containing a 1 at each location where comparison is true
+A torch.BoolTensor containing a True at each location where comparison is true

Return type

Tensor

@@ -5432,8 +5574,7 @@
Comparison Ops

>>> torch.le(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
-tensor([[ 1,  0],
-        [ 1,  1]], dtype=torch.uint8)
+tensor([[True, False], [True, True]])

@@ -5441,7 +5582,9 @@
Comparison Ops

torch.lt(input, other, out=None) → Tensor

Computes \(\text{input} < \text{other}\) element-wise.

The second argument can be a number or a tensor whose shape is broadcastable with the first argument.

@@ -5449,11 +5592,11 @@
Comparison Ops

• input (Tensor) – the tensor to compare
• other (Tensor or float) – the tensor or value to compare
-• out (Tensor, optional) – the output tensor that must be a ByteTensor
+• out (Tensor, optional) – the output tensor that must be a BoolTensor

Returns

-A torch.ByteTensor containing a 1 at each location where comparison is true
+A torch.BoolTensor containing a True at each location where comparison is true

Return type

Tensor

@@ -5461,8 +5604,7 @@
Comparison Ops

>>> torch.lt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
-tensor([[ 0,  0],
-        [ 1,  0]], dtype=torch.uint8)
+tensor([[False, False], [True, False]])
     

@@ -5533,10 +5675,11 @@
Comparison Ops

The shapes of input and other don’t need to match, but they must be broadcastable.

\[\text{out}_i = \max(\text{tensor}_i, \text{other}_i)\]

Note

When the shapes do not match, the shape of the returned output tensor follows the broadcasting rules.

@@ -5630,10 +5773,11 @@
Comparison Ops

The shapes of input and other don’t need to match, but they must be broadcastable.

\[\text{out}_i = \min(\text{tensor}_i, \text{other}_i)\]

Note

When the shapes do not match, the shape of the returned output tensor follows the broadcasting rules.

@@ -5663,7 +5807,9 @@
Comparison Ops

torch.ne(input, other, out=None) → Tensor

Computes \(input \neq other\) element-wise.

The second argument can be a number or a tensor whose shape is broadcastable with the first argument.

@@ -5671,11 +5817,11 @@
Comparison Ops

• input (Tensor) – the tensor to compare
• other (Tensor or float) – the tensor or value to compare
-• out (Tensor, optional) – the output tensor that must be a ByteTensor
+• out (Tensor, optional) – the output tensor that must be a BoolTensor

Returns

-A torch.ByteTensor containing a 1 at each location where comparison is true.
+A torch.BoolTensor containing a True at each location where comparison is true.

Return type

Tensor

@@ -5683,8 +5829,7 @@
Comparison Ops

>>> torch.ne(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
-tensor([[ 0,  1],
-        [ 1,  0]], dtype=torch.uint8)
+tensor([[False, True], [True, False]])
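Since all comparison ops now return torch.bool tensors, they compose directly with logical operators; a small deterministic sketch:

>>> t = torch.tensor([1., 2., 3., 4.])
>>> mask = (t > 1) & (t < 4)
>>> mask.sum()       # number of True entries
tensor(2)
>>> t[mask]
tensor([2., 3.])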
     

@@ -5782,20 +5927,35 @@
Spectral Ops

\[X[\omega_1, \dots, \omega_d] = \sum_{n_1=0}^{N_1-1} \dots \sum_{n_d=0}^{N_d-1} x[n_1, \dots, n_d] e^{-j\ 2 \pi \sum_{i=0}^d \frac{\omega_i n_i}{N_i}},\]

where \(d\) = signal_ndim is the number of dimensions for the signal, and \(N_i\) is the size of signal dimension \(i\).

This method supports 1D, 2D and 3D complex-to-complex transforms, indicated by signal_ndim. input must be a tensor with last dimension of size 2, representing the real and imaginary components of complex numbers, and should have at least signal_ndim + 1 dimensions with optionally arbitrary number of leading batch dimensions. If normalized is set to True, this normalizes the result by dividing it with \(\sqrt{\prod_{i=1}^K N_i}\) so that the operator is unitary.

Returns the real and the imaginary parts together as one tensor of the same shape of input.

The inverse of this function is ifft().

@@ -5882,16 +6042,31 @@
Spectral Ops

\[X[\omega_1, \dots, \omega_d] = \frac{1}{\prod_{i=1}^d N_i} \sum_{n_1=0}^{N_1-1} \dots \sum_{n_d=0}^{N_d-1} x[n_1, \dots, n_d] e^{\ j\ 2 \pi \sum_{i=0}^d \frac{\omega_i n_i}{N_i}},\]

where \(d\) = signal_ndim is the number of dimensions for the signal, and \(N_i\) is the size of signal dimension \(i\).

The argument specifications are almost identical with fft(). However, if normalized is set to True, this instead returns the results multiplied by \(\sqrt{\prod_{i=1}^d N_i}\), to become a unitary operator. Therefore, to invert a fft(), the normalized argument should be set identically for fft().

Returns the real and the imaginary parts together as one tensor of the same shape of input.

@@ -5969,20 +6144,43 @@
Spectral Ops

This method supports real-to-complex transforms indicated by signal_ndim. input must be a tensor with at least signal_ndim dimensions with optionally arbitrary number of leading batch dimensions. If normalized is set to True, this normalizes the result by dividing it with \(\sqrt{\prod_{i=1}^K N_i}\) so that the operator is unitary, where \(N_i\) is the size of signal dimension \(i\).

The real-to-complex Fourier transform results follow conjugate symmetry:

\[X[\omega_1, \dots, \omega_d] = X^*[N_1 - \omega_1, \dots, N_d - \omega_d],\]

where the index arithmetic is computed modulus the size of the corresponding dimension, \(\ ^*\) is the conjugate operator, and \(d\) = signal_ndim. The onesided flag controls whether to avoid redundancy in the output results. If set to True (default), the output will not be the full complex result of shape \((*, 2)\), where \(*\) is the shape of input, but instead the last dimension will be halved to size \(\lfloor \frac{N_d}{2} \rfloor + 1\).

The inverse of this function is irfft().

Note

@@ -6035,8 +6233,20 @@
Spectral Ops

The argument specifications are almost identical with ifft(). Similar to ifft(), if normalized is set to True, this normalizes the result by multiplying it with \(\sqrt{\prod_{i=1}^K N_i}\) so that the operator is unitary, where \(N_i\) is the size of signal dimension \(i\).

where \(m\) is the index of the sliding window, and \(\omega\) is the frequency that \(0 \leq \omega < \text{n\_fft}\). When onesided is the default value True,

• input must be either a 1-D time sequence or a 2-D batch of time sequences.

@@ -6149,27 +6366,55 @@
Spectral Ops

• If win_length is None (default), it is treated as equal to n_fft.

• window can be a 1-D tensor of size win_length, e.g., from torch.hann_window(). If window is None (default), it is treated as if having \(1\) everywhere in the window. If \(\text{win\_length} < \text{n\_fft}\), window will be padded on both sides to length n_fft before being applied.

• If center is True (default), input will be padded on both sides so that the \(t\)-th frame is centered at time \(t \times \text{hop\_length}\). Otherwise, the \(t\)-th frame begins at time \(t \times \text{hop\_length}\).

• pad_mode determines the padding method used on input when center is True. See torch.nn.functional.pad() for all available options. Default is "reflect".

• If onesided is True (default), only values for \(\omega\) in \(\left[0, 1, 2, \dots, \left\lfloor \frac{\text{n\_fft}}{2} \right\rfloor + 1\right]\) are returned because the real-to-complex Fourier transform satisfies the conjugate symmetry, i.e., \(X[m, \omega] = X[m, \text{n\_fft} - \omega]^*\).

• If normalized is True (default is False), the function returns the normalized STFT results, i.e., multiplied by \((\text{frame\_length})^{-0.5}\).

Returns the real and the imaginary parts together as one tensor of size \((* \times N \times T \times 2)\), where \(*\) is the optional batch size of input, \(N\) is the number of frequencies where STFT is applied, \(T\) is the total number of frames used, and each pair in the last dimension represents a complex number as the real part and the imaginary part.

@@ -6187,9 +6432,15 @@
Spectral Ops

• win_length (int, optional) – the size of window frame and STFT filter. Default: None (treated as equal to n_fft)
• window (Tensor, optional) – the optional window function. Default: None (treated as window of all \(1\) s)
• center (bool, optional) – whether to pad input on both sides so that the \(t\)-th frame is centered at time \(t \times \text{hop\_length}\). Default: True
• pad_mode (string, optional) – controls the padding method used when center is True. Default: "reflect"
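Example (a minimal shape check; the values depend on the random input, and the frame count follows from center=True with this length and hop):

>>> x = torch.randn(1000)
>>> s = torch.stft(x, n_fft=256, hop_length=64, window=torch.hann_window(256))
>>> s.shape        # (n_fft // 2 + 1, n_frames, 2) with the default onesided=True
torch.Size([129, 16, 2])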

@@ -6213,24 +6464,33 @@
Spectral Ops

torch.bartlett_window(window_length, periodic=True, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor

Bartlett window function.

\[w[n] = 1 - \left| \frac{2n}{N-1} - 1 \right| = \begin{cases}
    \frac{2n}{N - 1} & \text{if } 0 \leq n \leq \frac{N - 1}{2} \\
    2 - \frac{2n}{N - 1} & \text{if } \frac{N - 1}{2} < n < N \\
\end{cases},\]

where \(N\) is the full window size.

The input window_length is a positive integer controlling the returned window size. The periodic flag determines whether the returned window trims off the last duplicate value from the symmetric window and is ready to be used as a periodic window with functions like torch.stft(). Therefore, if periodic is true, the \(N\) in the above formula is in fact \(\text{window\_length} + 1\). Also, we always have torch.bartlett_window(L, periodic=True) equal to torch.bartlett_window(L + 1, periodic=False)[:-1].

Note

If window_length \(=1\), the returned window contains a single value 1.

Parameters

@@ -6251,7 +6511,9 @@
Spectral Ops

Returns

A 1-D tensor of size \((\text{window\_length},)\) containing the window

Return type

Tensor

@@ -6264,21 +6526,30 @@
Spectral Ops

torch.blackman_window(window_length, periodic=True, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor

Blackman window function.

\[w[n] = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{N - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{N - 1} \right)\]

where \(N\) is the full window size.

The input window_length is a positive integer controlling the returned window size. The periodic flag determines whether the returned window trims off the last duplicate value from the symmetric window and is ready to be used as a periodic window with functions like torch.stft(). Therefore, if periodic is true, the \(N\) in the above formula is in fact \(\text{window\_length} + 1\). Also, we always have torch.blackman_window(L, periodic=True) equal to torch.blackman_window(L + 1, periodic=False)[:-1].

Note

If window_length \(=1\), the returned window contains a single value 1.

Parameters

@@ -6299,7 +6570,9 @@
Spectral Ops

Returns

A 1-D tensor of size \((\text{window\_length},)\) containing the window

Return type

Tensor

@@ -6312,21 +6585,30 @@
Spectral Ops

torch.hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor

Hamming window function.

\[w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right),\]

where \(N\) is the full window size.

The input window_length is a positive integer controlling the returned window size. The periodic flag determines whether the returned window trims off the last duplicate value from the symmetric window and is ready to be used as a periodic window with functions like torch.stft(). Therefore, if periodic is true, the \(N\) in the above formula is in fact \(\text{window\_length} + 1\). Also, we always have torch.hamming_window(L, periodic=True) equal to torch.hamming_window(L + 1, periodic=False)[:-1].

Note

If window_length \(=1\), the returned window contains a single value 1.

Note

@@ -6338,8 +6620,12 @@
Spectral Ops

• window_length (int) – the size of returned window
• periodic (bool, optional) – If True, returns a window to be used as periodic function. If False, return a symmetric window.
• alpha (float, optional) – The coefficient \(\alpha\) in the equation above
• beta (float, optional) – The coefficient \(\beta\) in the equation above
• dtype (torch.dtype, optional) – the desired data type of returned tensor. Default: if None, uses a global default (see torch.set_default_tensor_type()). Only floating point types are supported.
• layout (torch.layout, optional) – the desired layout of returned window tensor. Only torch.strided (dense layout) is supported.

@@ -6353,7 +6639,9 @@
Spectral Ops

Returns

A 1-D tensor of size \((\text{window\_length},)\) containing the window

Return type

Tensor

@@ -6366,22 +6654,31 @@
Spectral Ops

torch.hann_window(window_length, periodic=True, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor

Hann window function.

\[w[n] = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{N - 1} \right)\right] = \sin^2 \left( \frac{\pi n}{N - 1} \right),\]

where \(N\) is the full window size.

The input window_length is a positive integer controlling the returned window size. The periodic flag determines whether the returned window trims off the last duplicate value from the symmetric window and is ready to be used as a periodic window with functions like torch.stft(). Therefore, if periodic is true, the \(N\) in the above formula is in fact \(\text{window\_length} + 1\). Also, we always have torch.hann_window(L, periodic=True) equal to torch.hann_window(L + 1, periodic=False)[:-1].

Note

If window_length \(=1\), the returned window contains a single value 1.

Parameters

@@ -6402,7 +6699,9 @@
Spectral Ops

Returns

A 1-D tensor of size \((\text{window\_length},)\) containing the window

Return type

Tensor
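Example (a quick check of the periodic/symmetric identity stated above):

>>> w_periodic = torch.hann_window(8, periodic=True)
>>> w_symmetric = torch.hann_window(9, periodic=False)[:-1]
>>> torch.allclose(w_periodic, w_symmetric)
True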

@@ -6534,7 +6833,9 @@
Other Operations

torch.combinations(input, r=2, with_replacement=False) → seq

Compute combinations of length \(r\) of the given tensor. The behavior is similar to python’s itertools.combinations when with_replacement is set to False, and itertools.combinations_with_replacement when with_replacement is set to True.

@@ -6701,7 +7002,9 @@
Other Operations

Note that for offset other than \(0\), the order of dim1 and dim2 matters. Exchanging them is equivalent to changing the sign of offset.

Applying torch.diagonal() to the output of this function with the same arguments yields a matrix identical to input.

@@ -7062,17 +7365,35 @@

Other Operations

torch.meshgrid(*tensors, **kwargs)[source]

Take \(N\) tensors, each of which can be either scalar or 1-dimensional vector, and create \(N\) N-dimensional grids, where the \(i\)th grid is defined by expanding the \(i\)th input over dimensions defined by other inputs.

Args:

tensors (list of Tensor): list of scalars or 1 dimensional tensors. Scalars will be treated as tensors of size \((1,)\) automatically

Returns:

seq (sequence of Tensors): If the input has \(k\) tensors of size \((N_1,), (N_2,), \ldots , (N_k,)\), then the output would also have \(k\) tensors, where all tensors are of size \((N_1, N_2, \ldots , N_k)\).

Example:
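(A deterministic sketch:)

>>> x = torch.tensor([1, 2, 3])
>>> y = torch.tensor([4, 5, 6])
>>> grid_x, grid_y = torch.meshgrid(x, y)
>>> grid_x
tensor([[1, 1, 1],
        [2, 2, 2],
        [3, 3, 3]])
>>> grid_y
tensor([[4, 5, 6],
        [4, 5, 6],
        [4, 5, 6]])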

@@ -7253,16 +7574,29 @@
Other Operations

When called with an integer argument dims = \(d\), and the number of dimensions of a and b is \(m\) and \(n\), respectively, it computes

\[r_{i_0,...,i_{m-d}, i_d,...,i_n} = \sum_{k_0,...,k_{d-1}} a_{i_0,...,i_{m-d},k_0,...,k_{d-1}} \times b_{k_0,...,k_{d-1}, i_d,...,i_n}.\]

When called with dims of the list form, the given dimensions will be contracted in place of the last \(d\) of a and the first \(d\) of \(b\). The sizes in these dimensions must match, but tensordot will deal with broadcasted dimensions.

Examples:
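(A shape-level sketch of the list-form contraction:)

>>> a = torch.arange(60.).reshape(3, 4, 5)
>>> b = torch.arange(24.).reshape(4, 3, 2)
>>> torch.tensordot(a, b, dims=([1, 0], [0, 1])).shape
torch.Size([5, 2])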

@@ -7313,8 +7647,14 @@
Other Operations

The main diagonal is the set of indices \(\lbrace (i, i) \rbrace\) for \(i \in [0, \min\{d_{1}, d_{2}\} - 1]\) where \(d_{1}, d_{2}\) are the dimensions of the matrix.

Parameters

For inputs of type FloatTensor or DoubleTensor, arguments beta and alpha must be real numbers, otherwise they should be integers.

Parameters

• beta (Number, optional) – multiplier for input (\(\beta\))
• input (Tensor) – matrix to be added
• alpha (Number, optional) – multiplier for batch1 @ batch2 (\(\alpha\))
• batch1 (Tensor) – the first batch of matrices to be multiplied
• batch2 (Tensor) – the second batch of matrices to be multiplied
• out (Tensor, optional) – the output tensor

@@ -7585,24 +7960,39 @@
BLAS and LAPACK Operations

torch.addmm(beta=1, input, alpha=1, mat1, mat2, out=None) → Tensor

Performs a matrix multiplication of the matrices mat1 and mat2. The matrix input is added to the final result.

If mat1 is a \((n \times m)\) tensor, mat2 is a \((m \times p)\) tensor, then input must be broadcastable with a \((n \times p)\) tensor and out will be a \((n \times p)\) tensor.

alpha and beta are scaling factors on the matrix-vector product between mat1 and mat2 and the added matrix input respectively.

\[\text{out} = \beta\ \text{input} + \alpha\ (\text{mat1}_i \mathbin{@} \text{mat2}_i)\]

For inputs of type FloatTensor or DoubleTensor, arguments beta and alpha must be real numbers, otherwise they should be integers.

Parameters
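Example (a shape-level sketch of the contract above; values are random):

>>> M = torch.randn(2, 3)
>>> mat1 = torch.randn(2, 4)
>>> mat2 = torch.randn(4, 3)
>>> torch.addmm(M, mat1, mat2).shape
torch.Size([2, 3])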

For inputs of type FloatTensor or DoubleTensor, arguments beta and alpha must be real numbers, otherwise they should be integers

Parameters

If vec1 is a vector of size n and vec2 is a vector of size m, then input must be broadcastable with a matrix of size \((n \times m)\) and out will be a matrix of size \((n \times m)\).

For inputs of type FloatTensor or DoubleTensor, arguments beta and alpha must be real numbers, otherwise they should be integers

Parameters

For inputs of type FloatTensor or DoubleTensor, arguments beta and alpha must be real numbers, otherwise they should be integers.

Parameters

If upper is True, and \(A\) is a batch of symmetric positive-definite matrices, then the returned tensor will be composed of upper-triangular Cholesky factors of each of the individual matrices. Similarly, when upper is False, the returned tensor will be composed of lower-triangular Cholesky factors of each of the individual matrices.

@@ -7857,7 +8307,11 @@
BLAS and LAPACK Operations

Parameters

• input (Tensor) – the input tensor \(A\) of size \((*, n, n)\) where * is zero or more batch dimensions consisting of symmetric positive-definite matrices.
• upper (bool, optional) – flag that indicates whether to return an upper or lower triangular matrix. Default: False

@@ -7894,25 +8348,37 @@
BLAS and LAPACK Operations

torch.cholesky_inverse(input, upper=False, out=None) → Tensor

Computes the inverse of a symmetric positive-definite matrix \(A\) using its Cholesky factor \(u\): returns matrix inv. The inverse is computed using the LAPACK routines dpotri and spotri (and the corresponding MAGMA routines).

If upper is False, \(u\) is lower triangular such that the returned tensor is

\[inv = (uu^{T})^{-1}\]

If upper is True or not provided, \(u\) is upper triangular such that the returned tensor is

\[inv = (u^T u)^{-1}\]

Parameters

• input (Tensor) – the input 2-D tensor \(u\), an upper or lower triangular Cholesky factor
• upper (bool, optional) – whether to return a lower (default) or upper triangular matrix
• out (Tensor, optional) – the output tensor for inv

@@ -7943,20 +8409,28 @@
BLAS and LAPACK Operations

torch.cholesky_solve(input, input2, upper=False, out=None) → Tensor

Solves a linear system of equations with a positive semidefinite matrix to be inverted given its Cholesky factor matrix \(u\).

If upper is False, \(u\) is lower triangular and c is returned such that:

\[c = (u u^T)^{-1} b\]

If upper is True or not provided, \(u\) is upper triangular and c is returned such that:

\[c = (u^T u)^{-1} b\]

torch.cholesky_solve(b, u) can take in 2D inputs b, u or inputs that are batches of 2D matrices. If the inputs are batches, then returns batched outputs c.
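Example (a sketch checking the solve against an explicit inverse; the SPD construction is an arbitrary choice for illustration):

>>> a = torch.randn(3, 3)
>>> a = a @ a.t() + torch.eye(3)    # make a symmetric positive definite
>>> b = torch.randn(3, 2)
>>> u = torch.cholesky(a)           # lower-triangular factor (upper=False)
>>> c = torch.cholesky_solve(b, u)
>>> torch.allclose(c, a.inverse() @ b, atol=1e-4)
True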

@@ -7967,10 +8441,22 @@
BLAS and LAPACK Operations

Parameters

@@ -1578,9 +1706,6 @@
Keypoint R-CNN

diff --git a/docs/stable/torchvision/ops.html b/docs/stable/torchvision/ops.html
index cb10167974d1..e9de8434468b 100644
--- a/docs/stable/torchvision/ops.html
+++ b/docs/stable/torchvision/ops.html
@@ -177,6 +177,7 @@
 • torch.multiprocessing
+• torch.random
 • torch.utils.bottleneck

@@ -432,9 +433,6 @@
torchvision.ops

diff --git a/docs/stable/torchvision/transforms.html b/docs/stable/torchvision/transforms.html
index e7f6a24bba1d..8825351e9c4d 100644
--- a/docs/stable/torchvision/transforms.html
+++ b/docs/stable/torchvision/transforms.html
@@ -177,6 +177,7 @@
 • torch.multiprocessing
+• torch.random
 • torch.utils.bottleneck

@@ -943,15 +944,18 @@

Functional Transforms

Also known as Power Law Transform. Intensities in RGB mode are adjusted based on the following equation:

\[I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}\]

See Gamma Correction for more details.

Parameters
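A small sketch of the transform on a hypothetical mid-gray test image (assuming the PIL-based functional API; the pixel value follows from the formula above):

>>> from PIL import Image
>>> from torchvision.transforms import functional as F
>>> img = Image.new("RGB", (4, 4), (128, 128, 128))
>>> out = F.adjust_gamma(img, gamma=2.2, gain=1)   # gamma > 1 darkens mid-tones
>>> out.getpixel((0, 0))                           # ≈ 255 * (128/255) ** 2.2
(56, 56, 56)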
@@ -1486,9 +1492,6 @@
Functional Transforms

diff --git a/docs/stable/torchvision/utils.html b/docs/stable/torchvision/utils.html
index fda029d0a6ff..156bab2b29ab 100644
--- a/docs/stable/torchvision/utils.html
+++ b/docs/stable/torchvision/utils.html
@@ -176,6 +176,7 @@
 • torch.multiprocessing
+• torch.random
 • torch.utils.bottleneck

@@ -370,9 +371,6 @@
torchvision.utils

diff --git a/docs/stable/type_info.html b/docs/stable/type_info.html
index f68e32742160..7ba88a3382aa 100644
--- a/docs/stable/type_info.html
+++ b/docs/stable/type_info.html
@@ -177,6 +177,7 @@
 • torch.multiprocessing
+• torch.random
 • torch.utils.bottleneck

@@ -422,9 +423,6 @@

From e8abd3da951690dfe2a850323fe9feaaae2d0890 Mon Sep 17 00:00:00 2001
From: pytorchbot
Date: Thu, 8 Aug 2019 14:45:09 +0000
Subject: [PATCH 12/12] auto-generating sphinx docs

---
 docs/stable/objects.inv | Bin 11651 -> 11651 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)

diff --git a/docs/stable/objects.inv b/docs/stable/objects.inv
index 41aefa467445de2ffed09815014b5c56b29f01c6..f61e8c1ee68b2f66ad4da2f28a97d5ba4ca422b8 100644
GIT binary patch
delta 18
ZcmZpUZjPSd&2C|8YGPt&wlT<17XUcB1#tiX

delta 18
ZcmZpUZjPSd&2DIzVxDS|vN6a|7XUnL1^xg4